* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2015, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lustre/ofd/ofd_dev.c
*
#define DEBUG_SUBSYSTEM S_FILTER
#include <obd_class.h>
-#include <lustre_param.h>
+#include <obd_cksum.h>
+#include <uapi/linux/lustre/lustre_param.h>
#include <lustre_fid.h>
#include <lustre_lfsck.h>
-#include <lustre/lustre_idl.h>
#include <lustre_dlm.h>
#include <lustre_quota.h>
#include <lustre_nodemap.h>
+#include <lustre_log.h>
+#include <linux/falloc.h>
#include "ofd_internal.h"
/* Slab for OFD object allocation */
static struct kmem_cache *ofd_object_kmem;
-
static struct lu_kmem_descr ofd_caches[] = {
{
.ckd_cache = &ofd_object_kmem,
* \retval negative value on error
*/
static int ofd_stack_init(const struct lu_env *env,
- struct ofd_device *m, struct lustre_cfg *cfg)
+ struct ofd_device *m, struct lustre_cfg *cfg,
+ u32 *lmd_flags)
{
const char *dev = lustre_cfg_string(cfg, 0);
struct lu_device *d;
}
lmd = s2lsi(lmi->lmi_sb)->lsi_lmd;
- if (lmd != NULL && lmd->lmd_flags & LMD_FLG_SKIP_LFSCK)
- m->ofd_skip_lfsck = 1;
+ if (lmd) {
+ if (lmd->lmd_flags & LMD_FLG_SKIP_LFSCK)
+ m->ofd_skip_lfsck = 1;
+ if (lmd->lmd_flags & LMD_FLG_NO_PRECREATE)
+ m->ofd_no_precreate = 1;
+ *lmd_flags = lmd->lmd_flags;
+ }
/* find bottom osd */
OBD_ALLOC(osdname, MTI_NAME_MAXLEN);
if (obd->obd_fail)
strcat(flags, "A");
lustre_cfg_bufs_set_string(&bufs, 1, flags);
- lcfg = lustre_cfg_new(LCFG_CLEANUP, &bufs);
- if (lcfg == NULL)
+ OBD_ALLOC(lcfg, lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen));
+ if (!lcfg)
RETURN_EXIT;
+ lustre_cfg_init(lcfg, LCFG_CLEANUP, &bufs);
LASSERT(top);
top->ld_ops->ldo_process_config(env, top, lcfg);
- lustre_cfg_free(lcfg);
+ OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount, lcfg->lcfg_buflens));
- lu_site_purge(env, top->ld_site, ~0);
- if (!cfs_hash_is_empty(top->ld_site->ls_obj_hash)) {
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, D_ERROR, NULL);
- lu_site_print(env, top->ld_site, &msgdata, lu_cdebug_printer);
+ if (m->ofd_los != NULL) {
+ local_oid_storage_fini(env, m->ofd_los);
+ m->ofd_los = NULL;
}
+ lu_site_purge(env, top->ld_site, ~0);
+ lu_site_print(env, top->ld_site, &top->ld_site->ls_obj_hash.nelems,
+ D_OTHER, lu_cdebug_printer);
LASSERT(m->ofd_osd_exp);
obd_disconnect(m->ofd_osd_exp);
EXIT;
}
+/**
+ * Broadcast LCFG_PRE_CLEANUP down the OFD device stack.
+ *
+ * Builds a one-off lustre_cfg carrying LCFG_PRE_CLEANUP and hands it to
+ * the top device's ldo_process_config() so that lower layers can quiesce
+ * before the real stack teardown runs.  Allocation failure is only
+ * logged: pre-cleanup is best-effort and teardown proceeds regardless.
+ *
+ * \param[in] env	execution environment
+ * \param[in] m		OFD device
+ * \param[in] top	top lu_device of the stack
+ */
+static void ofd_stack_pre_fini(const struct lu_env *env, struct ofd_device *m,
+			       struct lu_device *top)
+{
+	struct lustre_cfg_bufs bufs;
+	struct lustre_cfg *lcfg;
+	ENTRY;
+
+	LASSERT(top);
+
+	/* buffer 0 carries the device name; buffer 1 is deliberately empty */
+	lustre_cfg_bufs_reset(&bufs, ofd_name(m));
+	lustre_cfg_bufs_set_string(&bufs, 1, NULL);
+	OBD_ALLOC(lcfg, lustre_cfg_len(bufs.lcfg_bufcount, bufs.lcfg_buflen));
+	if (!lcfg) {
+		CERROR("%s: failed to trigger LCFG_PRE_CLEANUP\n", ofd_name(m));
+	} else {
+		lustre_cfg_init(lcfg, LCFG_PRE_CLEANUP, &bufs);
+		top->ld_ops->ldo_process_config(env, top, lcfg);
+		OBD_FREE(lcfg, lustre_cfg_len(lcfg->lcfg_bufcount,
+					      lcfg->lcfg_buflens));
+	}
+
+	EXIT;
+}
+
/* For interoperability, see mdt_interop_param[]. */
static struct cfg_interop_param ofd_interop_param[] = {
{ "ost.quota_type", NULL },
sval = strchr(param, '=');
if (sval != NULL) {
paramlen = sval - param;
- if (strncmp(param, "writethrough_cache_enable",
- paramlen) == 0 ||
- strncmp(param, "readcache_max_filesize",
- paramlen) == 0 ||
- strncmp(param, "read_cache_enable",
- paramlen) == 0 ||
- strncmp(param, "brw_stats", paramlen) == 0)
+ if (strncmp(param, "brw_stats", paramlen) == 0)
return true;
}
}
switch (cfg->lcfg_command) {
case LCFG_PARAM: {
- struct obd_device *obd = ofd_obd(m);
/* For interoperability */
- struct cfg_interop_param *ptr = NULL;
- struct lustre_cfg *old_cfg = NULL;
- char *param = NULL;
+ struct cfg_interop_param *ptr = NULL;
+ struct lustre_cfg *old_cfg = NULL;
+ char *param = NULL;
+ ssize_t count;
param = lustre_cfg_string(cfg, 1);
if (param == NULL) {
break;
}
- rc = class_process_proc_param(PARAM_OST, obd->obd_vars, cfg,
- d->ld_obd);
- if (rc > 0 || rc == -ENOSYS) {
- CDEBUG(D_CONFIG, "pass param %s down the stack.\n",
- param);
- /* we don't understand; pass it on */
- rc = next->ld_ops->ldo_process_config(env, next, cfg);
+ count = class_modify_config(cfg, PARAM_OST,
+ &d->ld_obd->obd_kset.kobj);
+ if (count > 0) {
+ rc = 0;
+ break;
}
+ CDEBUG(D_CONFIG, "pass param %s down the stack.\n",
+ param);
+ /* we don't understand; pass it on */
+ rc = next->ld_ops->ldo_process_config(env, next, cfg);
break;
}
case LCFG_SPTLRPC_CONF: {
RETURN(rc);
}
+/*
+ * RCU callback: return an ofd_object to its slab cache once the RCU
+ * grace period has elapsed.  Queued via call_rcu() on
+ * ofo_header.loh_rcu, so lockless readers holding a stale reference
+ * never see freed memory.
+ */
+static void ofd_object_free_rcu(struct rcu_head *head)
+{
+	struct ofd_object *of = container_of(head, struct ofd_object,
+					     ofo_header.loh_rcu);
+
+	kmem_cache_free(ofd_object_kmem, of);
+}
+
/**
* Implementation of lu_object_operations::loo_object_free.
*
lu_object_fini(o);
lu_object_header_fini(h);
- OBD_SLAB_FREE_PTR(of, ofd_object_kmem);
+ OBD_FREE_PRE(of, sizeof(*of), "slab-freed");
+ call_rcu(&of->ofo_header.loh_rcu, ofd_object_free_rcu);
EXIT;
}
return (*p)(env, cookie, LUSTRE_OST_NAME"-object@%p", o);
}
-static struct lu_object_operations ofd_obj_ops = {
+static const struct lu_object_operations ofd_obj_ops = {
.loo_object_init = ofd_object_init,
.loo_object_free = ofd_object_free,
.loo_object_print = ofd_object_print
lu_object_init(o, h, d);
lu_object_add_top(h, o);
o->lo_ops = &ofd_obj_ops;
+ range_lock_tree_init(&of->ofo_write_tree);
RETURN(o);
} else {
RETURN(NULL);
LASSERTF(rc == 0, "register namespace failed: rc = %d\n", rc);
target_recovery_init(&ofd->ofd_lut, tgt_request_handle);
+ OBD_FAIL_TIMEOUT_ORSET(OBD_FAIL_OST_PREPARE_DELAY, OBD_FAIL_ONCE,
+ (OBD_TIMEOUT_DEFAULT + 1) / 4);
LASSERT(obd->obd_no_conn);
spin_lock(&obd->obd_dev_lock);
obd->obd_no_conn = 0;
* Grant space for object precreation on the self export.
* The initial reserved space (i.e. 10MB for zfs and 280KB for ldiskfs)
* is enough to create 10k objects. More space is then acquired for
- * precreation in ofd_grant_create().
+ * precreation in tgt_grant_create().
*/
memset(&oti->fti_ocd, 0, sizeof(oti->fti_ocd));
oti->fti_ocd.ocd_grant = OST_MAX_PRECREATE / 2;
- oti->fti_ocd.ocd_grant *= ofd->ofd_dt_conf.ddp_inodespace;
+ oti->fti_ocd.ocd_grant *= ofd->ofd_lut.lut_dt_conf.ddp_inodespace;
oti->fti_ocd.ocd_connect_flags = OBD_CONNECT_GRANT |
OBD_CONNECT_GRANT_PARAM;
- ofd_grant_connect(env, dev->ld_obd->obd_self_export, &oti->fti_ocd,
+ tgt_grant_connect(env, dev->ld_obd->obd_self_export, &oti->fti_ocd,
true);
rc = next->ld_ops->ldo_recovery_complete(env, next);
RETURN(rc);
/**
* lu_device_operations matrix for OFD device.
*/
-static struct lu_device_operations ofd_lu_ops = {
+static const struct lu_device_operations ofd_lu_ops = {
.ldo_object_alloc = ofd_object_alloc,
.ldo_process_config = ofd_process_config,
.ldo_recovery_complete = ofd_recovery_complete,
.ldo_prepare = ofd_prepare,
};
-LPROC_SEQ_FOPS(lprocfs_nid_stats_clear);
-
-/**
- * Initialize all needed procfs entries for OFD device.
- *
- * \param[in] ofd OFD device
- *
- * \retval 0 if successful
- * \retval negative value on error
- */
-static int ofd_procfs_init(struct ofd_device *ofd)
-{
- struct obd_device *obd = ofd_obd(ofd);
- struct proc_dir_entry *entry;
- int rc = 0;
-
- ENTRY;
-
- /* lprocfs must be setup before the ofd so state can be safely added
- * to /proc incrementally as the ofd is setup */
- obd->obd_vars = lprocfs_ofd_obd_vars;
- rc = lprocfs_obd_setup(obd);
- if (rc) {
- CERROR("%s: lprocfs_obd_setup failed: %d.\n",
- obd->obd_name, rc);
- RETURN(rc);
- }
-
- rc = lprocfs_alloc_obd_stats(obd, LPROC_OFD_STATS_LAST);
- if (rc) {
- CERROR("%s: lprocfs_alloc_obd_stats failed: %d.\n",
- obd->obd_name, rc);
- GOTO(obd_cleanup, rc);
- }
-
- obd->obd_uses_nid_stats = 1;
-
- entry = lprocfs_register("exports", obd->obd_proc_entry, NULL, NULL);
- if (IS_ERR(entry)) {
- rc = PTR_ERR(entry);
- CERROR("%s: error %d setting up lprocfs for %s\n",
- obd->obd_name, rc, "exports");
- GOTO(obd_cleanup, rc);
- }
- obd->obd_proc_exports_entry = entry;
-
- entry = lprocfs_add_simple(obd->obd_proc_exports_entry, "clear",
- obd, &lprocfs_nid_stats_clear_fops);
- if (IS_ERR(entry)) {
- rc = PTR_ERR(entry);
- CERROR("%s: add proc entry 'clear' failed: %d.\n",
- obd->obd_name, rc);
- GOTO(obd_cleanup, rc);
- }
-
- ofd_stats_counter_init(obd->obd_stats);
-
- rc = lprocfs_job_stats_init(obd, LPROC_OFD_STATS_LAST,
- ofd_stats_counter_init);
- if (rc)
- GOTO(obd_cleanup, rc);
- RETURN(0);
-obd_cleanup:
- lprocfs_obd_cleanup(obd);
- lprocfs_free_obd_stats(obd);
-
- return rc;
-}
-
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 14, 53, 0)
/**
 * Expose OSD statistics to OFD layer.
 *
 */
static void ofd_procfs_add_brw_stats_symlink(struct ofd_device *ofd)
{
-	struct obd_device *obd = ofd_obd(ofd);
-	struct obd_device *osd_obd = ofd->ofd_osd_exp->exp_obd;
+	struct obd_device *obd = ofd_obd(ofd);
+	struct obd_device *osd_obd = ofd->ofd_osd_exp->exp_obd;
+	struct kobj_type *osd_type;
+	int i;
+
+	/*
+	 * Walk the OSD kobject's default sysfs attributes and cache
+	 * pointers to the cache-tuning attributes on the ofd_device —
+	 * presumably so OFD-level tunables can forward to them (the old
+	 * procfs symlinks for these entries are removed below); TODO
+	 * confirm against the OFD tunable code.
+	 */
+	osd_type = get_ktype(&ofd->ofd_osd->dd_kobj);
+	for (i = 0; osd_type->default_attrs[i]; i++) {
+		if (strcmp(osd_type->default_attrs[i]->name,
+			   "read_cache_enable") == 0) {
+			ofd->ofd_read_cache_enable =
+				osd_type->default_attrs[i];
+		}
+
+		if (strcmp(osd_type->default_attrs[i]->name,
+			   "readcache_max_filesize") == 0) {
+			ofd->ofd_read_cache_max_filesize =
+				osd_type->default_attrs[i];
+		}
+
+		if (strcmp(osd_type->default_attrs[i]->name,
+			   "writethrough_cache_enable") == 0) {
+			ofd->ofd_write_cache_enable =
+				osd_type->default_attrs[i];
+		}
+	}
+	/* no procfs tree for this obd: nothing to link */
	if (obd->obd_proc_entry == NULL)
		return;
	lprocfs_add_symlink("brw_stats", obd->obd_proc_entry,
			    "../../%s/%s/brw_stats",
			    osd_obd->obd_type->typ_name, obd->obd_name);
-
-	lprocfs_add_symlink("read_cache_enable", obd->obd_proc_entry,
-			    "../../%s/%s/read_cache_enable",
-			    osd_obd->obd_type->typ_name, obd->obd_name);
-
-	lprocfs_add_symlink("readcache_max_filesize",
-			    obd->obd_proc_entry,
-			    "../../%s/%s/readcache_max_filesize",
-			    osd_obd->obd_type->typ_name, obd->obd_name);
-
-	lprocfs_add_symlink("writethrough_cache_enable",
-			    obd->obd_proc_entry,
-			    "../../%s/%s/writethrough_cache_enable",
-			    osd_obd->obd_type->typ_name, obd->obd_name);
}
+#endif
/**
* Cleanup all procfs entries in OFD.
{
struct obd_device *obd = ofd_obd(ofd);
+ tgt_tunables_fini(&ofd->ofd_lut);
lprocfs_free_per_client_stats(obd);
lprocfs_obd_cleanup(obd);
lprocfs_free_obd_stats(obd);
*/
int ofd_fid_init(const struct lu_env *env, struct ofd_device *ofd)
{
- struct seq_server_site *ss = &ofd->ofd_seq_site;
- struct lu_device *lu = &ofd->ofd_dt_dev.dd_lu_dev;
- char *obd_name = ofd_name(ofd);
- char *name = NULL;
- int rc = 0;
+ struct seq_server_site *ss = &ofd->ofd_seq_site;
+ struct lu_device *lu = &ofd->ofd_dt_dev.dd_lu_dev;
+ char *obd_name = ofd_name(ofd);
+ char *name = NULL;
+ int len = strlen(obd_name) + 7;
+ int rc = 0;
ss = &ofd->ofd_seq_site;
lu->ld_site->ld_seq_site = ss;
ss->ss_lu = lu->ld_site;
ss->ss_node_id = ofd->ofd_lut.lut_lsd.lsd_osd_index;
- OBD_ALLOC(name, sizeof(obd_name) * 2 + 10);
+ OBD_ALLOC(name, len);
if (name == NULL)
return -ENOMEM;
rc = seq_server_init(env, ss->ss_server_seq, ofd->ofd_osd, obd_name,
LUSTRE_SEQ_SERVER, ss);
if (rc) {
- CERROR("%s : seq server init error %d\n", obd_name, rc);
+ CERROR("%s: seq server init error: rc = %d\n", obd_name, rc);
GOTO(out_server, rc);
}
ss->ss_server_seq->lss_space.lsr_index = ss->ss_node_id;
if (ss->ss_client_seq == NULL)
GOTO(out_server, rc = -ENOMEM);
- /*
- * It always printed as "%p", so that the name is unique in the kernel,
- * even if the filesystem is mounted twice. So sizeof(.) * 2 is enough.
- */
- snprintf(name, sizeof(obd_name) * 2 + 7, "%p-super", obd_name);
- rc = seq_client_init(ss->ss_client_seq, NULL, LUSTRE_SEQ_DATA,
- name, NULL);
- if (rc) {
- CERROR("%s : seq client init error %d\n", obd_name, rc);
- GOTO(out_client, rc);
- }
+ snprintf(name, len, "%s-super", obd_name);
+ seq_client_init(ss->ss_client_seq, NULL, LUSTRE_SEQ_DATA,
+ name, NULL);
rc = seq_server_set_cli(env, ss->ss_server_seq, ss->ss_client_seq);
if (rc) {
-out_client:
seq_client_fini(ss->ss_client_seq);
OBD_FREE_PTR(ss->ss_client_seq);
ss->ss_client_seq = NULL;
ss->ss_server_seq = NULL;
}
out_name:
- OBD_FREE(name, sizeof(obd_name) * 2 + 10);
+ OBD_FREE(name, len);
return rc;
}
void *key, *val = NULL;
int keylen, vallen, rc = 0;
bool is_grant_shrink;
+ ktime_t kstart = ktime_get();
ENTRY;
if (is_grant_shrink) {
body = req_capsule_client_get(tsi->tsi_pill, &RMF_OST_BODY);
+ /*
+ * Because we already sync grant info with client when
+ * reconnect, grant info will be cleared for resent
+ * req, otherwise, outdated grant count in the rpc
+ * would de-sync grant counters
+ */
+ if (lustre_msg_get_flags(req->rq_reqmsg) &
+ (MSG_RESENT | MSG_REPLAY)) {
+ DEBUG_REQ(D_CACHE, req,
+ "clear resent/replay req grant info");
+ body->oa.o_valid &= ~OBD_MD_FLGRANT;
+ }
+
repbody = req_capsule_server_get(tsi->tsi_pill, &RMF_OST_BODY);
*repbody = *body;
/** handle grant shrink, similar to a read request */
- ofd_grant_prepare_read(tsi->tsi_env, tsi->tsi_exp,
+ tgt_grant_prepare_read(tsi->tsi_env, tsi->tsi_exp,
&repbody->oa);
} else if (KEY_IS(KEY_EVICT_BY_NID)) {
if (vallen > 0)
obd_export_evict_by_nid(tsi->tsi_exp->exp_obd, val);
rc = 0;
- } else if (KEY_IS(KEY_SPTLRPC_CONF)) {
- rc = tgt_adapt_sptlrpc_conf(tsi->tsi_tgt, 0);
} else {
CERROR("%s: Unsupported key %s\n",
tgt_name(tsi->tsi_tgt), (char *)key);
rc = -EOPNOTSUPP;
}
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_SET_INFO,
- tsi->tsi_jobid, 1);
+ tsi->tsi_jobid, ktime_us_delta(ktime_get(), kstart));
RETURN(rc);
}
return rc;
}
-struct locked_region {
- struct list_head list;
- struct lustre_handle lh;
-};
-/**
- * Lock single extent and save lock handle in the list.
- *
- * This is supplemental function for lock_zero_regions(). It allocates
- * new locked_region structure and locks it with extent lock, then adds
- * it to the list of all such regions.
- *
- * \param[in] ns LDLM namespace
- * \param[in] res_id resource ID
- * \param[in] begin start of region
- * \param[in] end end of region
- * \param[in] locked list head of regions list
- *
- * \retval 0 if successful locking
- * \retval negative value on error
- */
-static int lock_region(struct ldlm_namespace *ns, struct ldlm_res_id *res_id,
-		       unsigned long long begin, unsigned long long end,
-		       struct list_head *locked)
+/*
+ * Take a PR extent lock on [begin, end] and drop it immediately.
+ *
+ * The enqueue itself is what matters: granting the server-side lock
+ * forces conflicting client locks on the range to be cancelled first
+ * (see the fiemap caller, which needs client data flushed back), so
+ * there is no need to keep the lock on a list for a later bulk unlock
+ * as the old lock_region()/unlock_zero_regions() scheme did.
+ *
+ * Returns 0 on success or the negative tgt_extent_lock() error.
+ */
+static int ofd_lock_unlock_region(const struct lu_env *env,
+				  struct ldlm_namespace *ns,
+				  struct ldlm_res_id *res_id,
+				  unsigned long long begin,
+				  unsigned long long end)
{
-	struct locked_region *region = NULL;
	__u64 flags = 0;
	int rc;
+	struct lustre_handle lh = { 0 };
	LASSERT(begin <= end);
-	OBD_ALLOC_PTR(region);
-	if (region == NULL)
-		return -ENOMEM;
-	rc = tgt_extent_lock(ns, res_id, begin, end, ®ion->lh,
-			     LCK_PR, &flags);
+	rc = tgt_extent_lock(env, ns, res_id, begin, end, &lh, LCK_PR, &flags);
	if (rc != 0)
		return rc;
-	CDEBUG(D_OTHER, "ost lock [%llu,%llu], lh=%p\n", begin, end,
-	       ®ion->lh);
-	list_add(®ion->list, locked);
+	CDEBUG(D_OTHER, "ost lock [%llu,%llu], lh=%p\n", begin, end, &lh);
+	/* lock granted, conflicts are gone — release ours right away */
+	tgt_data_unlock(&lh, LCK_PR);
	return 0;
}
* \retval 0 if successful
* \retval negative value on error
*/
-static int lock_zero_regions(struct ldlm_namespace *ns,
+static int lock_zero_regions(const struct lu_env *env,
+ struct ldlm_namespace *ns,
struct ldlm_res_id *res_id,
- struct fiemap *fiemap,
- struct list_head *locked)
+ struct fiemap *fiemap)
{
__u64 begin = fiemap->fm_start;
unsigned int i;
if (fiemap_start[i].fe_logical > begin) {
CDEBUG(D_OTHER, "ost lock [%llu,%llu]\n",
begin, fiemap_start[i].fe_logical);
- rc = lock_region(ns, res_id, begin,
- fiemap_start[i].fe_logical, locked);
+ rc = ofd_lock_unlock_region(env, ns, res_id, begin,
+ fiemap_start[i].fe_logical);
if (rc)
RETURN(rc);
}
if (begin < (fiemap->fm_start + fiemap->fm_length)) {
CDEBUG(D_OTHER, "ost lock [%llu,%llu]\n",
begin, fiemap->fm_start + fiemap->fm_length);
- rc = lock_region(ns, res_id, begin,
- fiemap->fm_start + fiemap->fm_length, locked);
+ rc = ofd_lock_unlock_region(env, ns, res_id, begin,
+ fiemap->fm_start + fiemap->fm_length);
}
RETURN(rc);
}
-/**
- * Unlock all previously locked sparse areas for given resource.
- *
- * This function goes through list of locked regions, unlocking and freeing
- * them one-by-one.
- *
- * \param[in] ns LDLM namespace
- * \param[in] locked list head of regions list
- */
-static void
-unlock_zero_regions(struct ldlm_namespace *ns, struct list_head *locked)
-{
- struct locked_region *entry, *temp;
-
- list_for_each_entry_safe(entry, temp, locked, list) {
- CDEBUG(D_OTHER, "ost unlock lh=%p\n", &entry->lh);
- tgt_extent_unlock(&entry->lh, LCK_PR);
- list_del(&entry->list);
- OBD_FREE_PTR(entry);
- }
-}
/**
* OFD request handler for OST_GET_INFO RPC.
void *key;
int keylen;
int replylen, rc = 0;
+ ktime_t kstart = ktime_get();
ENTRY;
* flushed back from client, then call fiemap again. */
if (fm_key->lfik_oa.o_valid & OBD_MD_FLFLAGS &&
fm_key->lfik_oa.o_flags & OBD_FL_SRVLOCK) {
- struct list_head locked;
-
- INIT_LIST_HEAD(&locked);
ost_fid_build_resid(fid, &fti->fti_resid);
- rc = lock_zero_regions(ofd->ofd_namespace,
- &fti->fti_resid, fiemap,
- &locked);
- if (rc == 0 && !list_empty(&locked)) {
+ rc = lock_zero_regions(tsi->tsi_env, ofd->ofd_namespace,
+ &fti->fti_resid, fiemap);
+ if (rc == 0)
rc = ofd_fiemap_get(tsi->tsi_env, ofd, fid,
fiemap);
- unlock_zero_regions(ofd->ofd_namespace,
- &locked);
- }
}
} else if (KEY_IS(KEY_LAST_FID)) {
struct ofd_device *ofd = ofd_exp(exp);
rc = -EOPNOTSUPP;
}
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_GET_INFO,
- tsi->tsi_jobid, 1);
+ tsi->tsi_jobid, ktime_us_delta(ktime_get(), kstart));
RETURN(rc);
}
struct ofd_object *fo;
__u64 flags = 0;
enum ldlm_mode lock_mode = LCK_PR;
+ ktime_t kstart = ktime_get();
bool srvlock;
int rc;
ENTRY;
if (unlikely(tsi->tsi_ost_body->oa.o_flags & OBD_FL_FLUSH))
lock_mode = LCK_PW;
- rc = tgt_extent_lock(tsi->tsi_tgt->lut_obd->obd_namespace,
+ rc = tgt_extent_lock(tsi->tsi_env,
+ tsi->tsi_tgt->lut_obd->obd_namespace,
&tsi->tsi_resid, 0, OBD_OBJECT_EOF, &lh,
lock_mode, &flags);
if (rc != 0)
__u64 curr_version;
obdo_from_la(&repbody->oa, &fti->fti_attr,
- OFD_VALID_FLAGS | LA_UID | LA_GID);
- tgt_drop_id(tsi->tsi_exp, &repbody->oa);
+ OFD_VALID_FLAGS | LA_UID | LA_GID | LA_PROJID);
/* Store object version in reply */
curr_version = dt_version_get(tsi->tsi_env,
repbody->oa.o_valid |= OBD_MD_FLDATAVERSION;
repbody->oa.o_data_version = curr_version;
}
+
+ if (fo->ofo_ff.ff_layout_version > 0) {
+ repbody->oa.o_valid |= OBD_MD_LAYOUT_VERSION;
+ repbody->oa.o_layout_version =
+ fo->ofo_ff.ff_layout_version + fo->ofo_ff.ff_range;
+
+ CDEBUG(D_INODE, DFID": get layout version: %u\n",
+ PFID(&tsi->tsi_fid),
+ repbody->oa.o_layout_version);
+ }
}
ofd_object_put(tsi->tsi_env, fo);
out:
if (srvlock)
- tgt_extent_unlock(&lh, lock_mode);
+ tgt_data_unlock(&lh, lock_mode);
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_GETATTR,
- tsi->tsi_jobid, 1);
+ tsi->tsi_jobid, ktime_us_delta(ktime_get(), kstart));
repbody->oa.o_valid |= OBD_MD_FLFLAGS;
repbody->oa.o_flags = OBD_FL_FLUSH;
struct ost_body *repbody;
struct ldlm_resource *res;
struct ofd_object *fo;
- struct filter_fid *ff = NULL;
+ ktime_t kstart = ktime_get();
int rc = 0;
ENTRY;
la_from_obdo(&fti->fti_attr, &body->oa, body->oa.o_valid);
fti->fti_attr.la_valid &= ~LA_TYPE;
- if (body->oa.o_valid & OBD_MD_FLFID) {
- ff = &fti->fti_mds_fid;
- ofd_prepare_fidea(ff, &body->oa);
- }
-
/* setting objects attributes (including owner/group) */
- rc = ofd_attr_set(tsi->tsi_env, fo, &fti->fti_attr, ff);
+ rc = ofd_attr_set(tsi->tsi_env, fo, &fti->fti_attr, &body->oa);
if (rc != 0)
GOTO(out_put, rc);
obdo_from_la(&repbody->oa, &fti->fti_attr,
- OFD_VALID_FLAGS | LA_UID | LA_GID);
- tgt_drop_id(tsi->tsi_exp, &repbody->oa);
+ OFD_VALID_FLAGS | LA_UID | LA_GID | LA_PROJID);
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_SETATTR,
- tsi->tsi_jobid, 1);
+ tsi->tsi_jobid, ktime_us_delta(ktime_get(), kstart));
EXIT;
out_put:
ofd_object_put(tsi->tsi_env, fo);
rc = ofd_seq_last_oid_write(env, ofd, oseq);
} else {
/* don't reuse orphan object, return last used objid */
- ostid_set_id(oi, last);
- rc = 0;
+ rc = ostid_set_id(oi, last);
}
GOTO(out_put, rc);
u64 seq = ostid_seq(&oa->o_oi);
u64 oid = ostid_id(&oa->o_oi);
struct ofd_seq *oseq;
- int rc = 0, diff;
int sync_trans = 0;
long granted = 0;
+ ktime_t kstart = ktime_get();
+ s64 diff;
+ int rc = 0;
ENTRY;
if (OBD_FAIL_CHECK(OBD_FAIL_OST_EROFS))
RETURN(-EROFS);
+ if (ofd->ofd_no_precreate)
+ return -EPERM;
+
repbody = req_capsule_server_get(tsi->tsi_pill, &RMF_OST_BODY);
if (repbody == NULL)
RETURN(-ENOMEM);
rep_oa = &repbody->oa;
rep_oa->o_oi = oa->o_oi;
- LASSERT(seq >= FID_SEQ_OST_MDT0);
LASSERT(oa->o_valid & OBD_MD_FLGROUP);
CDEBUG(D_INFO, "ofd_create("DOSTID")\n", POSTID(&oa->o_oi));
oseq = ofd_seq_load(tsi->tsi_env, ofd, seq);
if (IS_ERR(oseq)) {
- CERROR("%s: Can't find FID Sequence "LPX64": rc = %ld\n",
+ CERROR("%s: Can't find FID Sequence %#llx: rc = %ld\n",
ofd_name(ofd), seq, PTR_ERR(oseq));
GOTO(out_sem, rc = -EINVAL);
}
(oa->o_flags & OBD_FL_RECREATE_OBJS)) {
if (!ofd_obd(ofd)->obd_recovering ||
oid > ofd_seq_last_oid(oseq)) {
- CERROR("%s: recreate objid "DOSTID" > last id "LPU64
+ CERROR("%s: recreate objid "DOSTID" > last id %llu"
"\n", ofd_name(ofd), POSTID(&oa->o_oi),
ofd_seq_last_oid(oseq));
GOTO(out_nolock, rc = -EINVAL);
oseq->os_destroys_in_progress = 1;
mutex_lock(&oseq->os_create_lock);
if (!oseq->os_destroys_in_progress) {
- CERROR("%s:["LPU64"] destroys_in_progress already"
- " cleared\n", ofd_name(ofd), seq);
- ostid_set_id(&rep_oa->o_oi, ofd_seq_last_oid(oseq));
- GOTO(out, rc = 0);
+ CDEBUG(D_HA,
+ "%s:[%llu] destroys_in_progress already cleared\n",
+ ofd_name(ofd), seq);
+ rc = ostid_set_id(&rep_oa->o_oi,
+ ofd_seq_last_oid(oseq));
+ GOTO(out, rc);
}
diff = oid - ofd_seq_last_oid(oseq);
- CDEBUG(D_HA, "ofd_last_id() = "LPU64" -> diff = %d\n",
- ofd_seq_last_oid(oseq), diff);
+ CDEBUG(D_HA, "ofd_last_id() = %llu -> diff = %lld\n",
+ ofd_seq_last_oid(oseq), diff);
if (-diff > OST_MAX_PRECREATE) {
+ LCONSOLE(D_INFO, "%s: too large difference between MDS "
+ "LAST_ID "DFID" (%llu) and OST LAST_ID "DFID" "
+ "(%llu), trust the OST\n",
+ ofd_name(ofd), PFID(&oa->o_oi.oi_fid), oid,
+ PFID(&oseq->os_oi.oi_fid),
+ ofd_seq_last_oid(oseq));
+
/* Let MDS know that we are so far ahead. */
- ostid_set_id(&rep_oa->o_oi, ofd_seq_last_oid(oseq) + 1);
- rc = 0;
+ rc = ostid_set_id(&rep_oa->o_oi,
+ ofd_seq_last_oid(oseq) + 1);
} else if (diff < 0) {
rc = ofd_orphans_destroy(tsi->tsi_env, exp,
ofd, rep_oa);
GOTO(out, rc = -EINVAL);
}
- if (diff < 0) {
+ if (diff <= -OST_MAX_PRECREATE) {
/* LU-5648 */
CERROR("%s: invalid precreate request for "
- DOSTID", last_id " LPU64 ". "
+ DOSTID", last_id %llu. "
"Likely MDS last_id corruption\n",
ofd_name(ofd), POSTID(&oa->o_oi),
ofd_seq_last_oid(oseq));
GOTO(out, rc = -EINVAL);
+ } else if (diff < 0) {
+ LCONSOLE(D_INFO,
+ "%s: MDS LAST_ID "DFID" (%llu) is %lld behind OST LAST_ID "DFID" (%llu), trust the OST\n",
+ ofd_name(ofd), PFID(&oa->o_oi.oi_fid),
+ oid, -diff, PFID(&oseq->os_oi.oi_fid),
+ ofd_seq_last_oid(oseq));
+ /* Let MDS know that we are so far ahead. */
+ rc = ostid_set_id(&rep_oa->o_oi,
+ ofd_seq_last_oid(oseq) + 1);
}
}
}
if (diff > 0) {
- cfs_time_t enough_time = cfs_time_shift(DISK_TIMEOUT);
- u64 next_id;
- int created = 0;
- int count;
+ time64_t enough_time = ktime_get_seconds() + DISK_TIMEOUT;
+ u64 next_id;
+ int created = 0;
+ int count;
+ int rc2;
if (!(oa->o_valid & OBD_MD_FLFLAGS) ||
!(oa->o_flags & OBD_FL_DELORPHAN)) {
/* don't enforce grant during orphan recovery */
- granted = ofd_grant_create(tsi->tsi_env,
- ofd_obd(ofd)->obd_self_export,
- &diff);
+ granted = tgt_grant_create(tsi->tsi_env,
+ ofd_obd(ofd)->obd_self_export,
+ &diff);
if (granted < 0) {
rc = granted;
granted = 0;
CDEBUG(D_HA, "%s: failed to acquire grant "
- "space for precreate (%d): rc = %d\n",
+ "space for precreate (%lld): rc = %d\n",
ofd_name(ofd), diff, rc);
diff = 0;
}
* (possibly filling the OST), only precreate the last batch.
* LFSCK will eventually clean up any orphans. LU-14 */
if (diff > 5 * OST_MAX_PRECREATE) {
+ /* Message below is checked in conf-sanity test_122b */
+ LCONSOLE_WARN("%s: precreate FID "DOSTID" is over %lld higher than LAST_ID "DOSTID", only precreating the last %u objects. OST replaced or reformatted?\n",
+ ofd_name(ofd), POSTID(&oa->o_oi), diff,
+ POSTID(&oseq->os_oi),
+ OST_MAX_PRECREATE / 2);
diff = OST_MAX_PRECREATE / 2;
- LCONSOLE_WARN("%s: Too many FIDs to precreate "
- "OST replaced or reformatted: "
- "LFSCK will clean up",
- ofd_name(ofd));
-
- CDEBUG(D_HA, "%s: precreate FID "DOSTID" is over "
- "%u larger than the LAST_ID "DOSTID", only "
- "precreating the last %u objects.\n",
- ofd_name(ofd), POSTID(&oa->o_oi),
- 5 * OST_MAX_PRECREATE,
- POSTID(&oseq->os_oi), diff);
ofd_seq_last_oid_set(oseq, ostid_id(&oa->o_oi) - diff);
}
while (diff > 0) {
next_id = ofd_seq_last_oid(oseq) + 1;
- count = ofd_precreate_batch(ofd, diff);
+ count = ofd_precreate_batch(ofd, (int)diff);
- CDEBUG(D_HA, "%s: reserve %d objects in group "LPX64
- " at "LPU64"\n", ofd_name(ofd),
+ CDEBUG(D_HA, "%s: reserve %d objects in group %#llx"
+ " at %llu\n", ofd_name(ofd),
count, seq, next_id);
if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
- && cfs_time_after(jiffies, enough_time)) {
- CDEBUG(D_HA, "%s: Slow creates, %d/%d objects"
+ && ktime_get_seconds() > enough_time) {
+ CDEBUG(D_HA, "%s: Slow creates, %d/%lld objects"
" created at a rate of %d/s\n",
ofd_name(ofd), created, diff + created,
created / DISK_TIMEOUT);
lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
LCONSOLE_WARN("%s: can't create the same count of"
" objects when replaying the request"
- " (diff is %d). see LU-4621\n",
+ " (diff is %lld). see LU-4621\n",
ofd_name(ofd), diff);
if (created > 0)
if (!(oa->o_valid & OBD_MD_FLFLAGS) ||
!(oa->o_flags & OBD_FL_DELORPHAN)) {
- ofd_grant_commit(ofd_obd(ofd)->obd_self_export, granted,
- rc);
+ tgt_grant_commit(ofd_obd(ofd)->obd_self_export,
+ granted, rc);
granted = 0;
}
- ostid_set_id(&rep_oa->o_oi, ofd_seq_last_oid(oseq));
+ rc2 = ostid_set_id(&rep_oa->o_oi, ofd_seq_last_oid(oseq));
+ rc = rc ? : rc2;
}
EXIT;
ofd_counter_incr(exp, LPROC_OFD_STATS_CREATE,
- tsi->tsi_jobid, 1);
+ tsi->tsi_jobid, ktime_us_delta(ktime_get(), kstart));
+ if (unlikely(!oseq->os_last_id_synced))
+ oseq->os_last_id_synced = 1;
out:
mutex_unlock(&oseq->os_create_lock);
out_nolock:
- if (rc == 0) {
-#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 8, 53, 0)
- struct ofd_thread_info *info = ofd_info(tsi->tsi_env);
- struct lu_fid *fid = &info->fti_fid;
-
- /* For compatible purpose, it needs to convert back to
- * OST ID before put it on wire. */
- *fid = rep_oa->o_oi.oi_fid;
- fid_to_ostid(fid, &rep_oa->o_oi);
-#endif
+ if (rc == 0)
rep_oa->o_valid |= OBD_MD_FLID | OBD_MD_FLGROUP;
- }
ofd_seq_put(tsi->tsi_env, oseq);
out_sem:
struct ofd_device *ofd = ofd_exp(tsi->tsi_exp);
struct ofd_thread_info *fti = tsi2ofd_info(tsi);
struct lu_fid *fid = &fti->fti_fid;
+ ktime_t kstart = ktime_get();
u64 oid;
u32 count;
int rc = 0;
}
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_DESTROY,
- tsi->tsi_jobid, 1);
+ tsi->tsi_jobid, ktime_us_delta(ktime_get(), kstart));
GOTO(out, rc);
*/
static int ofd_statfs_hdl(struct tgt_session_info *tsi)
{
+ ktime_t kstart = ktime_get();
struct obd_statfs *osfs;
int rc;
ENTRY;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OST_STATFS_DELAY, 10);
+
osfs = req_capsule_server_get(tsi->tsi_pill, &RMF_OBD_STATFS);
rc = ofd_statfs(tsi->tsi_env, tsi->tsi_exp, osfs,
- cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), 0);
+ ktime_get_seconds() - OBD_STATFS_CACHE_SECONDS, 0);
if (rc != 0)
CERROR("%s: statfs failed: rc = %d\n",
tgt_name(tsi->tsi_tgt), rc);
rc = -EINPROGRESS;
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_STATFS,
- tsi->tsi_jobid, 1);
+ tsi->tsi_jobid, ktime_us_delta(ktime_get(), kstart));
RETURN(rc);
}
struct ofd_thread_info *fti = tsi2ofd_info(tsi);
struct ofd_device *ofd = ofd_exp(tsi->tsi_exp);
struct ofd_object *fo = NULL;
+ ktime_t kstart = ktime_get();
int rc = 0;
ENTRY;
GOTO(put, rc);
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_SYNC,
- tsi->tsi_jobid, 1);
+ tsi->tsi_jobid, ktime_us_delta(ktime_get(), kstart));
if (fo == NULL)
RETURN(0);
}
/**
+ * OFD request handler for OST_FALLOCATE RPC.
+ *
+ * This is part of request processing. Validate request fields,
+ * preallocate the given OFD object and pack reply.
+ *
+ * \param[in] tsi target session environment for this request
+ *
+ * \retval 0 if successful
+ * \retval negative value on error
+ */
+static int ofd_fallocate_hdl(struct tgt_session_info *tsi)
+{
+	struct obdo *oa = &tsi->tsi_ost_body->oa;
+	struct ost_body *repbody;
+	struct ofd_thread_info *info = tsi2ofd_info(tsi);
+	struct ldlm_namespace *ns = tsi->tsi_tgt->lut_obd->obd_namespace;
+	struct ldlm_resource *res;
+	struct ofd_object *fo;
+	__u64 flags = 0;
+	__u64 valid;
+	struct lustre_handle lh = { 0, };
+	int rc, mode;
+	__u64 start, end;
+	bool srvlock;
+	ktime_t kstart = ktime_get();
+
+	repbody = req_capsule_server_get(tsi->tsi_pill, &RMF_OST_BODY);
+	if (repbody == NULL)
+		RETURN(err_serious(-ENOMEM));
+
+	/*
+	 * fallocate start and end are passed in o_size, o_blocks
+	 * on the wire.
+	 */
+	if ((oa->o_valid & (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS)) !=
+	    (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS))
+		RETURN(err_serious(-EPROTO));
+
+	start = oa->o_size;
+	end = oa->o_blocks;
+	mode = oa->o_falloc_mode;
+	/*
+	 * mode == 0 (which is standard prealloc) and PUNCH is supported
+	 * Rest of mode options are not supported yet.
+	 */
+	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+		RETURN(-EOPNOTSUPP);
+
+	/* PUNCH_HOLE mode should always be accompanied with KEEP_SIZE flag
+	 * Check that and add the missing flag for such invalid call with
+	 * warning.
+	 */
+	if (mode & FALLOC_FL_PUNCH_HOLE && !(mode & FALLOC_FL_KEEP_SIZE)) {
+		CWARN("%s: PUNCH mode misses KEEP_SIZE flag, setting it\n",
+		      tsi->tsi_tgt->lut_obd->obd_name);
+		mode |= FALLOC_FL_KEEP_SIZE;
+	}
+
+	repbody->oa.o_oi = oa->o_oi;
+	repbody->oa.o_valid = OBD_MD_FLID;
+
+	/* client may ask the server to take the extent lock on its behalf */
+	srvlock = oa->o_valid & OBD_MD_FLFLAGS &&
+		  oa->o_flags & OBD_FL_SRVLOCK;
+
+	if (srvlock) {
+		rc = tgt_extent_lock(tsi->tsi_env, ns, &tsi->tsi_resid,
+				     start, end, &lh, LCK_PW, &flags);
+		if (rc != 0)
+			RETURN(rc);
+	}
+
+	fo = ofd_object_find_exists(tsi->tsi_env, ofd_exp(tsi->tsi_exp),
+				    &tsi->tsi_fid);
+	if (IS_ERR(fo))
+		GOTO(out, rc = PTR_ERR(fo));
+
+	/* attributes taken from the incoming obdo: ownership and times */
+	valid = OBD_MD_FLUID | OBD_MD_FLGID | OBD_MD_FLPROJID |
+		OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME;
+	la_from_obdo(&info->fti_attr, oa, valid);
+
+	rc = ofd_object_fallocate(tsi->tsi_env, fo, start, end, mode,
+				  &info->fti_attr, oa);
+	if (rc)
+		GOTO(out_put, rc);
+
+	/* re-read attrs for the reply; a failure here is not fatal, the
+	 * fallocate itself already succeeded */
+	rc = ofd_attr_get(tsi->tsi_env, fo, &info->fti_attr);
+	if (rc == 0)
+		obdo_from_la(&repbody->oa, &info->fti_attr, OFD_VALID_FLAGS);
+	else
+		rc = 0;
+
+	ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_PREALLOC,
+			 tsi->tsi_jobid, ktime_us_delta(ktime_get(), kstart));
+
+	EXIT;
+out_put:
+	ofd_object_put(tsi->tsi_env, fo);
+out:
+	if (srvlock)
+		tgt_data_unlock(&lh, LCK_PW);
+	/* on success refresh the LVB (after the object ref is dropped) so
+	 * the reply carries current size/blocks for the resource */
+	if (rc == 0) {
+		res = ldlm_resource_get(ns, NULL, &tsi->tsi_resid,
+					LDLM_EXTENT, 0);
+		if (!IS_ERR(res)) {
+			struct ost_lvb *res_lvb;
+
+			ldlm_res_lvbo_update(res, NULL, 0);
+			res_lvb = res->lr_lvb_data;
+			/* Blocks */
+			repbody->oa.o_valid |= OBD_MD_FLBLOCKS;
+			repbody->oa.o_blocks = res_lvb->lvb_blocks;
+			/* Size */
+			repbody->oa.o_valid |= OBD_MD_FLSIZE;
+			repbody->oa.o_size = res_lvb->lvb_size;
+
+			ldlm_resource_putref(res);
+		}
+	}
+
+	RETURN(rc);
+}
+
+/**
* OFD request handler for OST_PUNCH RPC.
*
* This is part of request processing. Validate request fields,
struct ldlm_namespace *ns = tsi->tsi_tgt->lut_obd->obd_namespace;
struct ldlm_resource *res;
struct ofd_object *fo;
- struct filter_fid *ff = NULL;
__u64 flags = 0;
struct lustre_handle lh = { 0, };
- int rc;
__u64 start, end;
bool srvlock;
+ ktime_t kstart = ktime_get();
+ int rc;
ENTRY;
- /* check that we do support OBD_CONNECT_TRUNCLOCK. */
- CLASSERT(OST_CONNECT_SUPPORTED & OBD_CONNECT_TRUNCLOCK);
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OST_PAUSE_PUNCH, cfs_fail_val);
if ((oa->o_valid & (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS)) !=
(OBD_MD_FLSIZE | OBD_MD_FLBLOCKS))
oa->o_flags & OBD_FL_SRVLOCK;
if (srvlock) {
- rc = tgt_extent_lock(ns, &tsi->tsi_resid, start, end, &lh,
- LCK_PW, &flags);
+ rc = tgt_extent_lock(tsi->tsi_env, ns, &tsi->tsi_resid, start,
+ end, &lh, LCK_PW, &flags);
if (rc != 0)
RETURN(rc);
}
- CDEBUG(D_INODE, "calling punch for object "DFID", valid = "LPX64
- ", start = "LPD64", end = "LPD64"\n", PFID(&tsi->tsi_fid),
+ CDEBUG(D_INODE, "calling punch for object "DFID", valid = %#llx"
+ ", start = %lld, end = %lld\n", PFID(&tsi->tsi_fid),
oa->o_valid, start, end);
fo = ofd_object_find_exists(tsi->tsi_env, ofd_exp(tsi->tsi_exp),
info->fti_attr.la_size = start;
info->fti_attr.la_valid |= LA_SIZE;
- if (oa->o_valid & OBD_MD_FLFID) {
- ff = &info->fti_mds_fid;
- ofd_prepare_fidea(ff, oa);
- }
-
rc = ofd_object_punch(tsi->tsi_env, fo, start, end, &info->fti_attr,
- ff, (struct obdo *)oa);
+ (struct obdo *)oa);
if (rc)
GOTO(out_put, rc);
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_PUNCH,
- tsi->tsi_jobid, 1);
+ tsi->tsi_jobid, ktime_us_delta(ktime_get(), kstart));
EXIT;
out_put:
ofd_object_put(tsi->tsi_env, fo);
out:
if (srvlock)
- tgt_extent_unlock(&lh, LCK_PW);
+ tgt_data_unlock(&lh, LCK_PW);
if (rc == 0) {
/* we do not call this before to avoid lu_object_find() in
* ->lvbo_update() holding another reference on the object.
res = ldlm_resource_get(ns, NULL, &tsi->tsi_resid,
LDLM_EXTENT, 0);
if (!IS_ERR(res)) {
+ struct ost_lvb *res_lvb;
+
ldlm_res_lvbo_update(res, NULL, 0);
+ res_lvb = res->lr_lvb_data;
+ repbody->oa.o_valid |= OBD_MD_FLBLOCKS;
+ repbody->oa.o_blocks = res_lvb->lvb_blocks;
ldlm_resource_putref(res);
}
}
return rc;
}
+/**
+ * Prefetch object data for the LU_LADVISE_WILLREAD advice.
+ *
+ * Reads object pages covering the byte range [start, end) into the
+ * server-side cache.  The range is clamped to the current object size,
+ * and pages are fetched in batches of at most PTLRPC_MAX_BRW_PAGES
+ * through the OSD dt_bufs_get()/dt_read_prep() interface.
+ *
+ * \param[in] env	execution environment
+ * \param[in] fo	OFD object to prefetch from
+ * \param[in] lnb	array of local niobufs used for page descriptors
+ * \param[in] start	start of the byte range (inclusive)
+ * \param[in] end	end of the byte range (exclusive)
+ * \param[in] dbt	buffer type flags passed to dt_bufs_get()
+ *
+ * \retval		0 on success
+ * \retval		negative value on error
+ */
+static int ofd_ladvise_prefetch(const struct lu_env *env,
+				struct ofd_object *fo,
+				struct niobuf_local *lnb,
+				__u64 start, __u64 end, enum dt_bufs_type dbt)
+{
+	struct ofd_thread_info *info = ofd_info(env);
+	pgoff_t start_index, end_index, pages;
+	struct niobuf_remote rnb;
+	unsigned long nr_local;
+	int rc = 0;
+
+	if (end <= start)
+		RETURN(-EINVAL);
+
+	/* take the object read lock and verify it still exists */
+	ofd_read_lock(env, fo);
+	if (!ofd_object_exists(fo))
+		GOTO(out_unlock, rc = -ENOENT);
+
+	rc = ofd_attr_get(env, fo, &info->fti_attr);
+	if (rc)
+		GOTO(out_unlock, rc);
+
+	/* do not prefetch past EOF; clamp the range to the object size */
+	if (end > info->fti_attr.la_size)
+		end = info->fti_attr.la_size;
+
+	if (end <= start)
+		GOTO(out_unlock, rc);
+
+	/* We need page aligned offset and length */
+	start_index = start >> PAGE_SHIFT;
+	end_index = (end - 1) >> PAGE_SHIFT;
+	pages = end_index - start_index + 1;
+	while (pages > 0) {
+		/* fetch at most PTLRPC_MAX_BRW_PAGES pages per iteration */
+		nr_local = pages <= PTLRPC_MAX_BRW_PAGES ? pages :
+			PTLRPC_MAX_BRW_PAGES;
+		rnb.rnb_offset = start_index << PAGE_SHIFT;
+		rnb.rnb_len = nr_local << PAGE_SHIFT;
+		rc = dt_bufs_get(env, ofd_object_child(fo), &rnb, lnb,
+				 PTLRPC_MAX_BRW_PAGES, dbt);
+		if (unlikely(rc < 0))
+			break;
+		/* dt_bufs_get() returns the number of buffers mapped */
+		nr_local = rc;
+		rc = dt_read_prep(env, ofd_object_child(fo), lnb, nr_local);
+		dt_bufs_put(env, ofd_object_child(fo), lnb, nr_local);
+		if (unlikely(rc))
+			break;
+		start_index += nr_local;
+		pages -= nr_local;
+	}
+
+out_unlock:
+	ofd_read_unlock(env, fo);
+	RETURN(rc);
+}
+
/**
* OFD request handler for OST_LADVISE RPC.
*
*/
static int ofd_ladvise_hdl(struct tgt_session_info *tsi)
{
- struct ptlrpc_request *req = tgt_ses_req(tsi);
- struct obd_export *exp = tsi->tsi_exp;
- struct ofd_device *ofd = ofd_exp(exp);
- struct ost_body *body, *repbody;
- struct ofd_thread_info *info;
- struct ofd_object *fo;
- const struct lu_env *env = req->rq_svc_thread->t_env;
- int rc = 0;
- struct lu_ladvise *ladvise;
- int num_advise;
- struct ladvise_hdr *ladvise_hdr;
- int i;
+ struct ptlrpc_request *req = tgt_ses_req(tsi);
+ struct obd_export *exp = tsi->tsi_exp;
+ struct ofd_device *ofd = ofd_exp(exp);
+ struct ost_body *body, *repbody;
+ struct ofd_thread_info *info;
+ struct ofd_object *fo;
+ struct ptlrpc_thread *svc_thread = req->rq_svc_thread;
+ const struct lu_env *env = svc_thread->t_env;
+ struct tgt_thread_big_cache *tbc = svc_thread->t_data;
+ enum dt_bufs_type dbt = DT_BUFS_TYPE_READAHEAD;
+ struct lu_ladvise *ladvise;
+ int num_advise;
+ struct ladvise_hdr *ladvise_hdr;
+ struct obd_ioobj ioo;
+ struct lustre_handle lockh = { 0 };
+ __u64 flags = 0;
+ int i;
+ struct dt_object *dob;
+ __u64 start;
+ __u64 end;
+ int rc = 0;
ENTRY;
+ CFS_FAIL_TIMEOUT(OBD_FAIL_OST_LADVISE_PAUSE, cfs_fail_val);
body = tsi->tsi_ost_body;
if ((body->oa.o_valid & OBD_MD_FLID) != OBD_MD_FLID)
num_advise = req_capsule_get_size(&req->rq_pill,
&RMF_OST_LADVISE, RCL_CLIENT) /
- sizeof(*ladvise);
+ sizeof(*ladvise);
if (num_advise < ladvise_hdr->lah_count)
RETURN(err_serious(-EPROTO));
RETURN(rc);
}
LASSERT(fo != NULL);
+ dob = ofd_object_child(fo);
+
+ if (ptlrpc_connection_is_local(exp->exp_connection))
+ dbt |= DT_BUFS_TYPE_LOCAL;
for (i = 0; i < num_advise; i++, ladvise++) {
- if (ladvise->lla_end <= ladvise->lla_start) {
+ start = ladvise->lla_start;
+ end = ladvise->lla_end;
+ if (end <= start) {
rc = err_serious(-EPROTO);
break;
}
default:
rc = -ENOTSUPP;
break;
+ case LU_LADVISE_WILLREAD:
+ if (tbc == NULL)
+ RETURN(-ENOMEM);
+
+ ioo.ioo_oid = body->oa.o_oi;
+ ioo.ioo_bufcnt = 1;
+ rc = tgt_extent_lock(env, exp->exp_obd->obd_namespace,
+ &tsi->tsi_resid, start, end - 1,
+ &lockh, LCK_PR, &flags);
+ if (rc != 0)
+ break;
+
+ req->rq_status = ofd_ladvise_prefetch(env, fo,
+ tbc->local,
+ start, end, dbt);
+ tgt_data_unlock(&lockh, LCK_PR);
+ break;
+ case LU_LADVISE_DONTNEED:
+ rc = dt_ladvise(env, dob, ladvise->lla_start,
+ ladvise->lla_end, LU_LADVISE_DONTNEED);
+ break;
}
if (rc != 0)
break;
*/
static int ofd_quotactl(struct tgt_session_info *tsi)
{
- struct obd_quotactl *oqctl, *repoqc;
- struct lu_nodemap *nodemap =
- tsi->tsi_exp->exp_target_data.ted_nodemap;
- int id;
- int rc;
+ struct obd_quotactl *oqctl, *repoqc;
+ struct lu_nodemap *nodemap;
+ ktime_t kstart = ktime_get();
+ int id;
+ int rc;
ENTRY;
*repoqc = *oqctl;
+ nodemap = nodemap_get_from_exp(tsi->tsi_exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
+
id = repoqc->qc_id;
if (oqctl->qc_type == USRQUOTA)
id = nodemap_map_id(nodemap, NODEMAP_UID,
id = nodemap_map_id(nodemap, NODEMAP_GID,
NODEMAP_CLIENT_TO_FS,
repoqc->qc_id);
+ else if (oqctl->qc_type == PRJQUOTA)
+ id = nodemap_map_id(nodemap, NODEMAP_PROJID,
+ NODEMAP_CLIENT_TO_FS,
+ repoqc->qc_id);
+
+ nodemap_putref(nodemap);
if (repoqc->qc_id != id)
swap(repoqc->qc_id, id);
rc = lquotactl_slv(tsi->tsi_env, tsi->tsi_tgt->lut_bottom, repoqc);
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_QUOTACTL,
- tsi->tsi_jobid, 1);
+ tsi->tsi_jobid, ktime_us_delta(ktime_get(), kstart));
if (repoqc->qc_id != id)
swap(repoqc->qc_id, id);
}
/**
- * Calculate the amount of time for lock prolongation.
- *
- * This is helper for ofd_prolong_extent_locks() function to get
- * the timeout extra time.
- *
- * \param[in] req current request
- *
- * \retval amount of time to extend the timeout with
- */
-static inline int prolong_timeout(struct ptlrpc_request *req,
- struct ldlm_lock *lock)
-{
- struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
-
- if (AT_OFF)
- return obd_timeout / 2;
-
- /* We are in the middle of the process - BL AST is sent, CANCEL
- is ahead. Take half of AT + IO process time. */
- return at_est2timeout(at_get(&svcpt->scp_at_estimate)) +
- (ldlm_bl_timeout(lock) >> 1);
-}
-
-/**
- * Prolong single lock timeout.
- *
- * This is supplemental function to the ofd_prolong_locks(). It prolongs
- * a single lock.
- *
- * \param[in] tsi target session environment for this request
- * \param[in] lock LDLM lock to prolong
- * \param[in] extent related extent
- * \param[in] timeout timeout value to add
- *
- * \retval 0 if lock is not suitable for prolongation
- * \retval 1 if lock was prolonged successfully
- */
-static int ofd_prolong_one_lock(struct tgt_session_info *tsi,
- struct ldlm_lock *lock,
- struct ldlm_extent *extent)
-{
- int timeout = prolong_timeout(tgt_ses_req(tsi), lock);
-
- if (lock->l_flags & LDLM_FL_DESTROYED) /* lock already cancelled */
- return 0;
-
- /* XXX: never try to grab resource lock here because we're inside
- * exp_bl_list_lock; in ldlm_lockd.c to handle waiting list we take
- * res lock and then exp_bl_list_lock. */
-
- if (!(lock->l_flags & LDLM_FL_AST_SENT))
- /* ignore locks not being cancelled */
- return 0;
-
- LDLM_DEBUG(lock, "refreshed for req x"LPU64" ext("LPU64"->"LPU64") "
- "to %ds.\n", tgt_ses_req(tsi)->rq_xid, extent->start,
- extent->end, timeout);
-
- /* OK. this is a possible lock the user holds doing I/O
- * let's refresh eviction timer for it */
- ldlm_refresh_waiting_lock(lock, timeout);
- return 1;
-}
-
-/**
* Prolong lock timeout for the given extent.
*
* This function finds all locks related with incoming request and
* request may cover multiple locks.
*
* \param[in] tsi target session environment for this request
- * \param[in] start start of extent
- * \param[in] end end of extent
+ * \param[in] data struct of data to prolong locks
*
- * \retval number of prolonged locks
*/
-static int ofd_prolong_extent_locks(struct tgt_session_info *tsi,
- __u64 start, __u64 end)
+static void ofd_prolong_extent_locks(struct tgt_session_info *tsi,
+ struct ldlm_prolong_args *data)
{
- struct obd_export *exp = tsi->tsi_exp;
struct obdo *oa = &tsi->tsi_ost_body->oa;
- struct ldlm_extent extent = {
- .start = start,
- .end = end
- };
struct ldlm_lock *lock;
- int lock_count = 0;
ENTRY;
+ data->lpa_timeout = prolong_timeout(tgt_ses_req(tsi));
+ data->lpa_export = tsi->tsi_exp;
+ data->lpa_resid = tsi->tsi_resid;
+
+ CDEBUG(D_RPCTRACE, "Prolong locks for req %p with x%llu"
+ " ext(%llu->%llu)\n", tgt_ses_req(tsi),
+ tgt_ses_req(tsi)->rq_xid, data->lpa_extent.start,
+ data->lpa_extent.end);
+
if (oa->o_valid & OBD_MD_FLHANDLE) {
/* mostly a request should be covered by only one lock, try
* fast path. */
if (lock != NULL) {
/* Fast path to check if the lock covers the whole IO
* region exclusively. */
- if (lock->l_granted_mode == LCK_PW &&
- ldlm_extent_contain(&lock->l_policy_data.l_extent,
- &extent)) {
+ if (ldlm_extent_contain(&lock->l_policy_data.l_extent,
+ &data->lpa_extent)) {
/* bingo */
- LASSERT(lock->l_export == exp);
- lock_count = ofd_prolong_one_lock(tsi, lock,
- &extent);
+ LASSERT(lock->l_export == data->lpa_export);
+ ldlm_lock_prolong_one(lock, data);
+ LDLM_LOCK_PUT(lock);
+ if (data->lpa_locks_cnt > 0)
+ RETURN_EXIT;
+ /* The lock was destroyed probably lets try
+ * resource tree. */
+ } else {
+ lock->l_last_used = ktime_get();
LDLM_LOCK_PUT(lock);
- RETURN(lock_count);
}
- lock->l_last_used = cfs_time_current();
- LDLM_LOCK_PUT(lock);
}
}
- spin_lock_bh(&exp->exp_bl_list_lock);
- list_for_each_entry(lock, &exp->exp_bl_list, l_exp_list) {
- LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
- LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
-
- /* ignore waiting locks, no more granted locks in the list */
- if (lock->l_granted_mode != lock->l_req_mode)
- break;
-
- if (!ldlm_res_eq(&tsi->tsi_resid, &lock->l_resource->lr_name))
- continue;
-
- if (!ldlm_extent_overlap(&lock->l_policy_data.l_extent,
- &extent))
- continue;
-
- lock_count += ofd_prolong_one_lock(tsi, lock, &extent);
- }
- spin_unlock_bh(&exp->exp_bl_list_lock);
-
- RETURN(lock_count);
+ ldlm_resource_prolong(data);
+ EXIT;
}
/**
if (!ostid_res_name_eq(&ioo->ioo_oid, &lock->l_resource->lr_name))
RETURN(0);
- /* a bulk write can only hold a reference on a PW extent lock */
- mode = LCK_PW;
+ /* a bulk write can only hold a reference on a PW extent lock
+ * or GROUP lock.
+ */
+ mode = LCK_PW | LCK_GROUP;
if (opc == OST_READ)
/* whereas a bulk read can be protected by either a PR or PW
* extent lock */
* Implementation of ptlrpc_hpreq_ops::hpreq_lock_check for OFD RW requests.
*
* Check for whether the given PTLRPC request (\a req) is blocking
- * an LDLM lock cancel.
+ * an LDLM lock cancel. Also checks whether the request is covered by an LDLM
+ * lock.
*
* \param[in] req the incoming request
*
* \retval 1 if \a req is blocking an LDLM lock cancel
* \retval 0 if it is not
+ * \retval -ESTALE if lock is not found
*/
static int ofd_rw_hpreq_check(struct ptlrpc_request *req)
{
struct tgt_session_info *tsi;
struct obd_ioobj *ioo;
struct niobuf_remote *rnb;
- __u64 start, end;
- int lock_count;
+ int opc;
+ struct ldlm_prolong_args pa = { 0 };
ENTRY;
* Use LASSERT below because malformed RPCs should have
* been filtered out in tgt_hpreq_handler().
*/
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+ LASSERT(opc == OST_READ || opc == OST_WRITE);
+
ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
LASSERT(ioo != NULL);
LASSERT(rnb != NULL);
LASSERT(!(rnb->rnb_flags & OBD_BRW_SRVLOCK));
- start = rnb->rnb_offset;
+ pa.lpa_mode = LCK_PW | LCK_GROUP;
+ if (opc == OST_READ)
+ pa.lpa_mode |= LCK_PR;
+
+ pa.lpa_extent.start = rnb->rnb_offset;
rnb += ioo->ioo_bufcnt - 1;
- end = rnb->rnb_offset + rnb->rnb_len - 1;
+ pa.lpa_extent.end = rnb->rnb_offset + rnb->rnb_len - 1;
- DEBUG_REQ(D_RPCTRACE, req, "%s %s: refresh rw locks: "DFID
- " ("LPU64"->"LPU64")\n",
- tgt_name(tsi->tsi_tgt), current->comm,
- PFID(&tsi->tsi_fid), start, end);
+ DEBUG_REQ(D_RPCTRACE, req,
+ "%s %s: refresh rw locks for "DFID" (%llu->%llu)",
+ tgt_name(tsi->tsi_tgt), current->comm, PFID(&tsi->tsi_fid),
+ pa.lpa_extent.start, pa.lpa_extent.end);
- lock_count = ofd_prolong_extent_locks(tsi, start, end);
+ ofd_prolong_extent_locks(tsi, &pa);
- CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n",
- tgt_name(tsi->tsi_tgt), lock_count, req);
+ CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p\n",
+ tgt_name(tsi->tsi_tgt), pa.lpa_blocks_cnt, req);
+
+ if (pa.lpa_blocks_cnt > 0)
+ RETURN(1);
- RETURN(lock_count > 0);
+ RETURN(pa.lpa_locks_cnt > 0 ? 0 : -ESTALE);
}
/**
struct ldlm_lock *lock)
{
struct tgt_session_info *tsi;
+ struct obdo *oa;
+ struct ldlm_extent ext;
+
+ ENTRY;
/* Don't use tgt_ses_info() to get session info, because lock_match()
* can be called while request has no processing thread yet. */
LASSERT(tsi->tsi_ost_body != NULL);
if (tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLHANDLE &&
tsi->tsi_ost_body->oa.o_handle.cookie == lock->l_handle.h_cookie)
- return 1;
+ RETURN(1);
- return 0;
+ oa = &tsi->tsi_ost_body->oa;
+ ext.start = oa->o_size;
+ ext.end = oa->o_blocks;
+
+ LASSERT(lock->l_resource != NULL);
+ if (!ostid_res_name_eq(&oa->o_oi, &lock->l_resource->lr_name))
+ RETURN(0);
+
+ if (!(lock->l_granted_mode & (LCK_PW | LCK_GROUP)))
+ RETURN(0);
+
+ RETURN(ldlm_extent_overlap(&lock->l_policy_data.l_extent, &ext));
}
/**
* Implementation of ptlrpc_hpreq_ops::hpreq_lock_check for OST_PUNCH request.
*
* High-priority queue request check for whether the given punch request
- * (\a req) is blocking an LDLM lock cancel.
+ * (\a req) is blocking an LDLM lock cancel. Also checks whether the request is
+ * covered by an LDLM lock.
*
* \param[in] req the incoming request
*
* \retval 1 if \a req is blocking an LDLM lock cancel
* \retval 0 if it is not
+ * \retval -ESTALE if lock is not found
*/
static int ofd_punch_hpreq_check(struct ptlrpc_request *req)
{
struct tgt_session_info *tsi;
struct obdo *oa;
- int lock_count;
+ struct ldlm_prolong_args pa = { 0 };
ENTRY;
LASSERT(!(oa->o_valid & OBD_MD_FLFLAGS &&
oa->o_flags & OBD_FL_SRVLOCK));
+ pa.lpa_mode = LCK_PW | LCK_GROUP;
+ pa.lpa_extent.start = oa->o_size;
+ pa.lpa_extent.end = oa->o_blocks;
+
CDEBUG(D_DLMTRACE,
- "%s: refresh locks: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
+ "%s: refresh locks: %llu/%llu (%llu->%llu)\n",
tgt_name(tsi->tsi_tgt), tsi->tsi_resid.name[0],
- tsi->tsi_resid.name[1], oa->o_size, oa->o_blocks);
+ tsi->tsi_resid.name[1], pa.lpa_extent.start, pa.lpa_extent.end);
- lock_count = ofd_prolong_extent_locks(tsi, oa->o_size, oa->o_blocks);
+ ofd_prolong_extent_locks(tsi, &pa);
CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n",
- tgt_name(tsi->tsi_tgt), lock_count, req);
+ tgt_name(tsi->tsi_tgt), pa.lpa_blocks_cnt, req);
- RETURN(lock_count > 0);
+ if (pa.lpa_blocks_cnt > 0)
+ RETURN(1);
+
+ RETURN(pa.lpa_locks_cnt > 0 ? 0 : -ESTALE);
}
/**
LASSERT(rnb != NULL); /* must exist after request preprocessing */
/* no high priority if server lock is needed */
- if (rnb->rnb_flags & OBD_BRW_SRVLOCK)
+ if (rnb->rnb_flags & OBD_BRW_SRVLOCK ||
+ (lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg)
+ & MSG_REPLAY))
return;
}
tgt_ses_req(tsi)->rq_ops = &ofd_hpreq_rw;
{
LASSERT(tsi->tsi_ost_body != NULL); /* must exists if we are here */
/* no high-priority if server lock is needed */
- if (tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
- tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK)
+ if ((tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
+ tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK) ||
+ tgt_conn_flags(tsi) & OBD_CONNECT_MDS ||
+ lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg) & MSG_REPLAY)
return;
tgt_ses_req(tsi)->rq_ops = &ofd_hpreq_punch;
}
0, OST_SET_INFO, ofd_set_info_hdl,
&RQF_OBD_SET_INFO, LUSTRE_OST_VERSION),
TGT_OST_HDL(0, OST_GET_INFO, ofd_get_info_hdl),
-TGT_OST_HDL(HABEO_CORPUS| HABEO_REFERO, OST_GETATTR, ofd_getattr_hdl),
-TGT_OST_HDL(HABEO_CORPUS| HABEO_REFERO | MUTABOR,
+TGT_OST_HDL(HAS_BODY | HAS_REPLY, OST_GETATTR, ofd_getattr_hdl),
+TGT_OST_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE,
OST_SETATTR, ofd_setattr_hdl),
-TGT_OST_HDL(0 | HABEO_REFERO | MUTABOR,
+TGT_OST_HDL(HAS_REPLY | IS_MUTABLE,
OST_CREATE, ofd_create_hdl),
-TGT_OST_HDL(0 | HABEO_REFERO | MUTABOR,
+TGT_OST_HDL(HAS_REPLY | IS_MUTABLE,
OST_DESTROY, ofd_destroy_hdl),
-TGT_OST_HDL(0 | HABEO_REFERO, OST_STATFS, ofd_statfs_hdl),
-TGT_OST_HDL_HP(HABEO_CORPUS| HABEO_REFERO,
- OST_BRW_READ, tgt_brw_read,
+TGT_OST_HDL(HAS_REPLY, OST_STATFS, ofd_statfs_hdl),
+TGT_OST_HDL_HP(HAS_BODY | HAS_REPLY, OST_BRW_READ, tgt_brw_read,
ofd_hp_brw),
/* don't set CORPUS flag for brw_write because -ENOENT may be valid case */
-TGT_OST_HDL_HP(HABEO_CORPUS| MUTABOR, OST_BRW_WRITE, tgt_brw_write,
+TGT_OST_HDL_HP(HAS_BODY | IS_MUTABLE, OST_BRW_WRITE, tgt_brw_write,
ofd_hp_brw),
-TGT_OST_HDL_HP(HABEO_CORPUS| HABEO_REFERO | MUTABOR,
+TGT_OST_HDL_HP(HAS_BODY | HAS_REPLY | IS_MUTABLE,
OST_PUNCH, ofd_punch_hdl,
ofd_hp_punch),
-TGT_OST_HDL(HABEO_CORPUS| HABEO_REFERO, OST_SYNC, ofd_sync_hdl),
-TGT_OST_HDL(0 | HABEO_REFERO, OST_QUOTACTL, ofd_quotactl),
-TGT_OST_HDL(HABEO_CORPUS | HABEO_REFERO, OST_LADVISE, ofd_ladvise_hdl),
+TGT_OST_HDL(HAS_BODY | HAS_REPLY, OST_SYNC, ofd_sync_hdl),
+TGT_OST_HDL(HAS_REPLY, OST_QUOTACTL, ofd_quotactl),
+TGT_OST_HDL(HAS_BODY | HAS_REPLY, OST_LADVISE, ofd_ladvise_hdl),
+TGT_OST_HDL(HAS_BODY | HAS_REPLY | IS_MUTABLE, OST_FALLOCATE, ofd_fallocate_hdl),
+TGT_OST_HDL(HAS_BODY | HAS_REPLY, OST_SEEK, tgt_lseek),
};
static struct tgt_opc_slice ofd_common_slice[] = {
info->fti_xid = 0;
info->fti_pre_version = 0;
- info->fti_used = 0;
memset(&info->fti_attr, 0, sizeof info->fti_attr);
}
static int ofd_init0(const struct lu_env *env, struct ofd_device *m,
struct lu_device_type *ldt, struct lustre_cfg *cfg)
{
- const char *dev = lustre_cfg_string(cfg, 0);
- struct ofd_thread_info *info = NULL;
- struct obd_device *obd;
- struct obd_statfs *osfs;
- int rc;
+ const char *dev = lustre_cfg_string(cfg, 0);
+ struct ofd_thread_info *info = NULL;
+ struct obd_device *obd;
+ struct tg_grants_data *tgd = &m->ofd_lut.lut_tgd;
+ struct lu_fid fid;
+ struct nm_config_file *nodemap_config;
+ struct obd_device_target *obt;
+ u32 lmd_flags = 0;
+ int rc;
ENTRY;
if (rc != 0)
RETURN(rc);
- obd->u.obt.obt_magic = OBT_MAGIC;
-
- m->ofd_fmd_max_num = OFD_FMD_MAX_NUM_DEFAULT;
- m->ofd_fmd_max_age = OFD_FMD_MAX_AGE_DEFAULT;
+ obt = &obd->u.obt;
+ obt->obt_magic = OBT_MAGIC;
spin_lock_init(&m->ofd_flags_lock);
m->ofd_raid_degraded = 0;
- m->ofd_syncjournal = 0;
+ m->ofd_sync_journal = 0;
ofd_slc_set(m);
- m->ofd_grant_compat_disable = 0;
m->ofd_soft_sync_limit = OFD_SOFT_SYNC_LIMIT_DEFAULT;
- m->ofd_brw_size = ONE_MB_BRW_SIZE;
-
- /* statfs data */
- spin_lock_init(&m->ofd_osfs_lock);
- m->ofd_osfs_age = cfs_time_shift_64(-1000);
- m->ofd_osfs_unstable = 0;
- m->ofd_statfs_inflight = 0;
- m->ofd_osfs_inflight = 0;
-
- /* grant data */
- spin_lock_init(&m->ofd_grant_lock);
- m->ofd_tot_dirty = 0;
- m->ofd_tot_granted = 0;
- m->ofd_tot_pending = 0;
+
m->ofd_seq_count = 0;
- init_waitqueue_head(&m->ofd_inconsistency_thread.t_ctl_waitq);
INIT_LIST_HEAD(&m->ofd_inconsistency_list);
spin_lock_init(&m->ofd_inconsistency_lock);
+ m->ofd_access_log_mask = -1; /* Log all accesses if enabled. */
+
spin_lock_init(&m->ofd_batch_lock);
init_rwsem(&m->ofd_lastid_rwsem);
/* set this lu_device to obd, because error handling need it */
obd->obd_lu_dev = &m->ofd_dt_dev.dd_lu_dev;
- rc = ofd_procfs_init(m);
- if (rc) {
- CERROR("Can't init ofd lprocfs, rc %d\n", rc);
- RETURN(rc);
- }
-
/* No connection accepted until configurations will finish */
spin_lock(&obd->obd_dev_lock);
obd->obd_no_conn = 1;
info = ofd_info_init(env, NULL);
if (info == NULL)
- GOTO(err_fini_proc, rc = -EFAULT);
+ RETURN(-EFAULT);
- rc = ofd_stack_init(env, m, cfg);
+ rc = ofd_stack_init(env, m, cfg, &lmd_flags);
if (rc) {
- CERROR("Can't init device stack, rc %d\n", rc);
- GOTO(err_fini_proc, rc);
+ CERROR("%s: can't init device stack, rc %d\n",
+ obd->obd_name, rc);
+ RETURN(rc);
}
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 14, 53, 0)
ofd_procfs_add_brw_stats_symlink(m);
-
- /* populate cached statfs data */
- osfs = &ofd_info(env)->fti_u.osfs;
- rc = ofd_statfs_internal(env, m, osfs, 0, NULL);
- if (rc != 0) {
- CERROR("%s: can't get statfs data, rc %d\n", obd->obd_name, rc);
- GOTO(err_fini_stack, rc);
- }
- if (!IS_PO2(osfs->os_bsize)) {
- CERROR("%s: blocksize (%d) is not a power of 2\n",
- obd->obd_name, osfs->os_bsize);
- GOTO(err_fini_stack, rc = -EPROTO);
- }
- m->ofd_blockbits = fls(osfs->os_bsize) - 1;
-
- m->ofd_precreate_batch = OFD_PRECREATE_BATCH_DEFAULT;
- if (osfs->os_bsize * osfs->os_blocks < OFD_PRECREATE_SMALL_FS)
- m->ofd_precreate_batch = OFD_PRECREATE_BATCH_SMALL;
+#endif
snprintf(info->fti_u.name, sizeof(info->fti_u.name), "%s-%s",
"filter"/*LUSTRE_OST_NAME*/, obd->obd_uuid.uuid);
LDLM_NAMESPACE_SERVER,
LDLM_NAMESPACE_GREEDY,
LDLM_NS_TYPE_OST);
- if (m->ofd_namespace == NULL)
- GOTO(err_fini_stack, rc = -ENOMEM);
+ if (IS_ERR(m->ofd_namespace)) {
+ rc = PTR_ERR(m->ofd_namespace);
+ CERROR("%s: unable to create server namespace: rc = %d\n",
+ obd->obd_name, rc);
+ m->ofd_namespace = NULL;
+ GOTO(err_fini_stack, rc);
+ }
/* set obd_namespace for compatibility with old code */
obd->obd_namespace = m->ofd_namespace;
ldlm_register_intent(m->ofd_namespace, ofd_intent_policy);
ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
"filter_ldlm_cb_client", &obd->obd_ldlm_client);
- dt_conf_get(env, m->ofd_osd, &m->ofd_dt_conf);
-
rc = tgt_init(env, &m->ofd_lut, obd, m->ofd_osd, ofd_common_slice,
OBD_FAIL_OST_ALL_REQUEST_NET,
OBD_FAIL_OST_ALL_REPLY_NET);
if (rc)
GOTO(err_free_ns, rc);
- rc = ofd_fs_setup(env, m, obd);
+ if (lmd_flags & LMD_FLG_SKIP_LFSCK)
+ m->ofd_skip_lfsck = 1;
+ if (lmd_flags & LMD_FLG_LOCAL_RECOV)
+ m->ofd_lut.lut_local_recovery = 1;
+
+ rc = ofd_tunables_init(m);
if (rc)
GOTO(err_fini_lut, rc);
- rc = ofd_start_inconsistency_verification_thread(m);
+ tgd->tgd_reserved_pcnt = 0;
+
+ m->ofd_brw_size = m->ofd_lut.lut_dt_conf.ddp_brw_size;
+ m->ofd_precreate_batch = OFD_PRECREATE_BATCH_DEFAULT;
+ if (tgd->tgd_osfs.os_bsize * tgd->tgd_osfs.os_blocks <
+ OFD_PRECREATE_SMALL_FS)
+ m->ofd_precreate_batch = OFD_PRECREATE_BATCH_SMALL;
+ m->ofd_atime_diff = OFD_DEF_ATIME_DIFF;
+
+ rc = ofd_fs_setup(env, m, obd);
+ if (rc)
+ GOTO(err_fini_proc, rc);
+
+ fid.f_seq = FID_SEQ_LOCAL_NAME;
+ fid.f_oid = 1;
+ fid.f_ver = 0;
+ rc = local_oid_storage_init(env, m->ofd_osd, &fid,
+ &m->ofd_los);
if (rc != 0)
GOTO(err_fini_fs, rc);
- tgt_adapt_sptlrpc_conf(&m->ofd_lut, 1);
+ nodemap_config = nm_config_file_register_tgt(env, m->ofd_osd,
+ m->ofd_los);
+ if (IS_ERR(nodemap_config)) {
+ rc = PTR_ERR(nodemap_config);
+ if (rc != -EROFS)
+ GOTO(err_fini_los, rc);
+ } else {
+ obt->obt_nodemap_config_file = nodemap_config;
+ }
+
+ rc = ofd_start_inconsistency_verification_thread(m);
+ if (rc != 0)
+ GOTO(err_fini_nm, rc);
+
+ tgt_adapt_sptlrpc_conf(&m->ofd_lut);
RETURN(0);
+err_fini_nm:
+ nm_config_file_deregister_tgt(env, obt->obt_nodemap_config_file);
+ obt->obt_nodemap_config_file = NULL;
+err_fini_los:
+ local_oid_storage_fini(env, m->ofd_los);
+ m->ofd_los = NULL;
err_fini_fs:
ofd_fs_cleanup(env, m);
+err_fini_proc:
+ ofd_procfs_fini(m);
err_fini_lut:
tgt_fini(env, &m->ofd_lut);
err_free_ns:
obd->obd_namespace = m->ofd_namespace = NULL;
err_fini_stack:
ofd_stack_fini(env, m, &m->ofd_osd->dd_lu_dev);
-err_fini_proc:
- ofd_procfs_fini(m);
return rc;
}
stop.ls_status = LS_PAUSED;
stop.ls_flags = 0;
lfsck_stop(env, m->ofd_osd, &stop);
+ ofd_stack_pre_fini(env, m, &m->ofd_dt_dev.dd_lu_dev);
target_recovery_fini(obd);
if (m->ofd_namespace != NULL)
ldlm_namespace_free_prior(m->ofd_namespace, NULL,
obd_exports_barrier(obd);
obd_zombie_barrier();
+ ofd_procfs_fini(m);
tgt_fini(env, &m->ofd_lut);
ofd_stop_inconsistency_verification_thread(m);
lfsck_degister(env, m->ofd_osd);
ofd_fs_cleanup(env, m);
+ nm_config_file_deregister_tgt(env, obd->u.obt.obt_nodemap_config_file);
+ obd->u.obt.obt_nodemap_config_file = NULL;
if (m->ofd_namespace != NULL) {
ldlm_namespace_free_post(m->ofd_namespace);
d->ld_obd->obd_namespace = m->ofd_namespace = NULL;
}
+ ofd_access_log_delete(m->ofd_access_log);
+ m->ofd_access_log = NULL;
+
ofd_stack_fini(env, m, &m->ofd_dt_dev.dd_lu_dev);
- ofd_procfs_fini(m);
+
LASSERT(atomic_read(&d->ld_ref) == 0);
server_put_mount(obd->obd_name, true);
EXIT;
/* type constructor/destructor: ofd_type_init(), ofd_type_fini() */
LU_TYPE_INIT_FINI(ofd, &ofd_thread_key);
-static struct lu_device_type_operations ofd_device_type_ops = {
+static const struct lu_device_type_operations ofd_device_type_ops = {
.ldto_init = ofd_type_init,
.ldto_fini = ofd_type_fini,
*/
static int __init ofd_init(void)
{
-	int rc;
+	int rc;

	rc = lu_kmem_init(ofd_caches);
	if (rc)
		return rc;

-	rc = ofd_fmd_init();
-	if (rc) {
-		lu_kmem_fini(ofd_caches);
-		return(rc);
-	}
+	/* access-log support replaces the old filter memory data (fmd) init */
+	rc = ofd_access_log_module_init();
+	if (rc)
+		goto out_caches;

-	rc = class_register_type(&ofd_obd_ops, NULL, true, NULL,
+	rc = class_register_type(&ofd_obd_ops, NULL, true,
				 LUSTRE_OST_NAME, &ofd_device_type);
+	if (rc)
+		goto out_ofd_access_log;
+
+	return 0;
+
+	/* error unwinding: undo earlier init steps in reverse order */
+out_ofd_access_log:
+	ofd_access_log_module_exit();
+out_caches:
+	lu_kmem_fini(ofd_caches);
+
	return rc;
}
*/
static void __exit ofd_exit(void)
{
-	ofd_fmd_exit();
-	lu_kmem_fini(ofd_caches);
+	/* tear down in reverse order of ofd_init() */
	class_unregister_type(LUSTRE_OST_NAME);
+	ofd_access_log_module_exit();
+	lu_kmem_fini(ofd_caches);
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");