* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2014 Intel Corporation.
+ * Copyright (c) 2012, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* The OBD Filter Device (OFD) module belongs to the Object Storage
* Server stack and connects the RPC oriented Unified Target (TGT)
* layer (see lustre/include/lu_target.h) to the storage oriented OSD
- * layer (see lustre/doc/osd-api.txt).
+ * layer (see Documentation/osd-api.txt).
*
* TGT
* | DT and OBD APIs
struct lu_device *d;
struct ofd_thread_info *info = ofd_info(env);
struct lustre_mount_info *lmi;
+ struct lustre_mount_data *lmd;
int rc;
char *osdname;
RETURN(-ENODEV);
}
+ lmd = s2lsi(lmi->lmi_sb)->lsi_lmd;
+ if (lmd != NULL && lmd->lmd_flags & LMD_FLG_SKIP_LFSCK)
+ m->ofd_skip_lfsck = 1;
+
/* find bottom osd */
OBD_ALLOC(osdname, MTI_NAME_MAXLEN);
if (osdname == NULL)
strcat(flags, "A");
lustre_cfg_bufs_set_string(&bufs, 1, flags);
lcfg = lustre_cfg_new(LCFG_CLEANUP, &bufs);
- if (!lcfg) {
- CERROR("Cannot alloc lcfg!\n");
+ if (lcfg == NULL)
RETURN_EXIT;
- }
LASSERT(top);
top->ld_ops->ldo_process_config(env, top, lcfg);
static int ofd_recovery_complete(const struct lu_env *env,
struct lu_device *dev)
{
+ struct ofd_thread_info *oti = ofd_info(env);
struct ofd_device *ofd = ofd_dev(dev);
struct lu_device *next = &ofd->ofd_osd->dd_lu_dev;
- int rc = 0, max_precreate;
+ int rc = 0;
ENTRY;
/*
* Grant space for object precreation on the self export.
- * This initial reserved space (i.e. 10MB for zfs and 280KB for ldiskfs)
+ * The initial reserved space (i.e. 10MB for zfs and 280KB for ldiskfs)
* is enough to create 10k objects. More space is then acquired for
* precreation in ofd_grant_create().
*/
- max_precreate = OST_MAX_PRECREATE * ofd->ofd_dt_conf.ddp_inodespace / 2;
- ofd_grant_connect(env, dev->ld_obd->obd_self_export, max_precreate,
- false);
+ /* NOTE(review): the grant request is now carried in connect data
+ * (oti->fti_ocd) instead of a raw byte count, so the self export can
+ * also advertise OBD_CONNECT_GRANT | OBD_CONNECT_GRANT_PARAM.  The
+ * requested amount is OST_MAX_PRECREATE/2 objects times the per-inode
+ * space estimate; fti_ocd is presumably per-thread scratch space --
+ * confirm against the ofd_thread_info definition. */
+ memset(&oti->fti_ocd, 0, sizeof(oti->fti_ocd));
+ oti->fti_ocd.ocd_grant = OST_MAX_PRECREATE / 2;
+ oti->fti_ocd.ocd_grant *= ofd->ofd_dt_conf.ddp_inodespace;
+ oti->fti_ocd.ocd_connect_flags = OBD_CONNECT_GRANT |
+ OBD_CONNECT_GRANT_PARAM;
+ ofd_grant_connect(env, dev->ld_obd->obd_self_export, &oti->fti_ocd,
+ true);
rc = next->ld_ops->ldo_recovery_complete(env, next);
RETURN(rc);
}
obd->obd_uses_nid_stats = 1;
- entry = lprocfs_seq_register("exports", obd->obd_proc_entry, NULL,
- NULL);
+ entry = lprocfs_register("exports", obd->obd_proc_entry, NULL, NULL);
if (IS_ERR(entry)) {
rc = PTR_ERR(entry);
CERROR("%s: error %d setting up lprocfs for %s\n",
ss->ss_lu = lu->ld_site;
ss->ss_node_id = ofd->ofd_lut.lut_lsd.lsd_osd_index;
+ OBD_ALLOC(name, sizeof(obd_name) * 2 + 10);
+ if (name == NULL)
+ return -ENOMEM;
+
OBD_ALLOC_PTR(ss->ss_server_seq);
if (ss->ss_server_seq == NULL)
- GOTO(out_free, rc = -ENOMEM);
-
- OBD_ALLOC(name, strlen(obd_name) + 10);
- if (!name) {
- OBD_FREE_PTR(ss->ss_server_seq);
- ss->ss_server_seq = NULL;
- GOTO(out_free, rc = -ENOMEM);
- }
+ GOTO(out_name, rc = -ENOMEM);
rc = seq_server_init(env, ss->ss_server_seq, ofd->ofd_osd, obd_name,
LUSTRE_SEQ_SERVER, ss);
if (rc) {
CERROR("%s : seq server init error %d\n", obd_name, rc);
- GOTO(out_free, rc);
+ GOTO(out_server, rc);
}
ss->ss_server_seq->lss_space.lsr_index = ss->ss_node_id;
OBD_ALLOC_PTR(ss->ss_client_seq);
if (ss->ss_client_seq == NULL)
- GOTO(out_free, rc = -ENOMEM);
+ GOTO(out_server, rc = -ENOMEM);
- snprintf(name, strlen(obd_name) + 6, "%p-super", obd_name);
+ /*
+ * The name is always formatted with "%p", so it is unique within the
+ * kernel even if the same filesystem is mounted twice; sizeof(.) * 2
+ * hex digits are therefore enough.
+ */
+ snprintf(name, sizeof(obd_name) * 2 + 7, "%p-super", obd_name);
rc = seq_client_init(ss->ss_client_seq, NULL, LUSTRE_SEQ_DATA,
name, NULL);
if (rc) {
CERROR("%s : seq client init error %d\n", obd_name, rc);
- GOTO(out_free, rc);
+ GOTO(out_client, rc);
}
- OBD_FREE(name, strlen(obd_name) + 10);
- name = NULL;
rc = seq_server_set_cli(env, ss->ss_server_seq, ss->ss_client_seq);
-out_free:
if (rc) {
- if (ss->ss_server_seq) {
- seq_server_fini(ss->ss_server_seq, env);
- OBD_FREE_PTR(ss->ss_server_seq);
- ss->ss_server_seq = NULL;
- }
-
- if (ss->ss_client_seq) {
- seq_client_fini(ss->ss_client_seq);
- OBD_FREE_PTR(ss->ss_client_seq);
- ss->ss_client_seq = NULL;
- }
-
- if (name) {
- OBD_FREE(name, strlen(obd_name) + 10);
- name = NULL;
- }
+out_client:
+ seq_client_fini(ss->ss_client_seq);
+ OBD_FREE_PTR(ss->ss_client_seq);
+ ss->ss_client_seq = NULL;
+out_server:
+ seq_server_fini(ss->ss_server_seq, env);
+ OBD_FREE_PTR(ss->ss_server_seq);
+ ss->ss_server_seq = NULL;
}
+out_name:
+ OBD_FREE(name, sizeof(obd_name) * 2 + 10);
return rc;
}
void *key, *val = NULL;
int keylen, vallen, rc = 0;
bool is_grant_shrink;
- struct ofd_device *ofd = ofd_exp(tsi->tsi_exp);
ENTRY;
if (vallen > 0)
obd_export_evict_by_nid(tsi->tsi_exp->exp_obd, val);
rc = 0;
- } else if (KEY_IS(KEY_CAPA_KEY)) {
- rc = ofd_update_capa_key(ofd, val);
} else if (KEY_IS(KEY_SPTLRPC_CONF)) {
rc = tgt_adapt_sptlrpc_conf(tsi->tsi_tgt, 0);
} else {
* \retval negative value on error
*/
int ofd_fiemap_get(const struct lu_env *env, struct ofd_device *ofd,
- struct lu_fid *fid, struct ll_user_fiemap *fiemap)
+ struct lu_fid *fid, struct fiemap *fiemap)
{
struct ofd_object *fo;
int rc;
*/
static int lock_zero_regions(struct ldlm_namespace *ns,
struct ldlm_res_id *res_id,
- struct ll_user_fiemap *fiemap,
+ struct fiemap *fiemap,
struct list_head *locked)
{
__u64 begin = fiemap->fm_start;
unsigned int i;
int rc = 0;
- struct ll_fiemap_extent *fiemap_start = fiemap->fm_extents;
+ struct fiemap_extent *fiemap_start = fiemap->fm_extents;
ENTRY;
RCL_CLIENT);
if (KEY_IS(KEY_LAST_ID)) {
- obd_id *last_id;
+ u64 *last_id;
struct ofd_seq *oseq;
req_capsule_extend(tsi->tsi_pill, &RQF_OST_GET_INFO_LAST_ID);
last_id = req_capsule_server_get(tsi->tsi_pill, &RMF_OBD_ID);
oseq = ofd_seq_load(tsi->tsi_env, ofd,
- (obd_seq)exp->exp_filter_data.fed_group);
+ (u64)exp->exp_filter_data.fed_group);
if (IS_ERR(oseq))
rc = -EFAULT;
else
ofd_seq_put(tsi->tsi_env, oseq);
} else if (KEY_IS(KEY_FIEMAP)) {
struct ll_fiemap_info_key *fm_key;
- struct ll_user_fiemap *fiemap;
+ struct fiemap *fiemap;
struct lu_fid *fid;
req_capsule_extend(tsi->tsi_pill, &RQF_OST_GET_INFO_FIEMAP);
fm_key = req_capsule_client_get(tsi->tsi_pill, &RMF_FIEMAP_KEY);
- rc = tgt_validate_obdo(tsi, &fm_key->oa);
+ rc = tgt_validate_obdo(tsi, &fm_key->lfik_oa);
if (rc)
RETURN(err_serious(rc));
- fid = &fm_key->oa.o_oi.oi_fid;
+ fid = &fm_key->lfik_oa.o_oi.oi_fid;
CDEBUG(D_INODE, "get FIEMAP of object "DFID"\n", PFID(fid));
- replylen = fiemap_count_to_size(fm_key->fiemap.fm_extent_count);
+ replylen = fiemap_count_to_size(
+ fm_key->lfik_fiemap.fm_extent_count);
req_capsule_set_size(tsi->tsi_pill, &RMF_FIEMAP_VAL,
RCL_SERVER, replylen);
if (fiemap == NULL)
RETURN(-ENOMEM);
- *fiemap = fm_key->fiemap;
+ *fiemap = fm_key->lfik_fiemap;
rc = ofd_fiemap_get(tsi->tsi_env, ofd, fid, fiemap);
/* LU-3219: Lock the sparse areas to make sure dirty
* flushed back from client, then call fiemap again. */
- if (fm_key->oa.o_valid & OBD_MD_FLFLAGS &&
- fm_key->oa.o_flags & OBD_FL_SRVLOCK) {
+ if (fm_key->lfik_oa.o_valid & OBD_MD_FLFLAGS &&
+ fm_key->lfik_oa.o_flags & OBD_FL_SRVLOCK) {
struct list_head locked;
INIT_LIST_HEAD(&locked);
struct lustre_handle lh = { 0 };
struct ofd_object *fo;
__u64 flags = 0;
- ldlm_mode_t lock_mode = LCK_PR;
+ enum ldlm_mode lock_mode = LCK_PR;
bool srvlock;
int rc;
ENTRY;
obdo_from_la(&repbody->oa, &fti->fti_attr,
OFD_VALID_FLAGS | LA_UID | LA_GID);
- tgt_drop_id(tsi->tsi_exp, &repbody->oa);
/* Store object version in reply */
curr_version = dt_version_get(tsi->tsi_env,
obdo_from_la(&repbody->oa, &fti->fti_attr,
OFD_VALID_FLAGS | LA_UID | LA_GID);
- tgt_drop_id(tsi->tsi_exp, &repbody->oa);
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_SETATTR,
tsi->tsi_jobid, 1);
struct lu_fid *fid = &info->fti_fid;
struct ost_id *oi = &oa->o_oi;
struct ofd_seq *oseq;
- obd_seq seq = ostid_seq(oi);
- obd_id end_id = ostid_id(oi);
- obd_id last;
- obd_id oid;
+ u64 seq = ostid_seq(oi);
+ u64 end_id = ostid_id(oi);
+ u64 last;
+ u64 oid;
int skip_orphan;
int rc = 0;
struct obdo *rep_oa;
struct obd_export *exp = tsi->tsi_exp;
struct ofd_device *ofd = ofd_exp(exp);
- obd_seq seq = ostid_seq(&oa->o_oi);
- obd_id oid = ostid_id(&oa->o_oi);
+ u64 seq = ostid_seq(&oa->o_oi);
+ u64 oid = ostid_id(&oa->o_oi);
struct ofd_seq *oseq;
int rc = 0, diff;
int sync_trans = 0;
+ long granted = 0;
ENTRY;
oseq = ofd_seq_load(tsi->tsi_env, ofd, seq);
if (IS_ERR(oseq)) {
- CERROR("%s: Can't find FID Sequence "LPX64": rc = %ld\n",
+ CERROR("%s: Can't find FID Sequence %#llx: rc = %ld\n",
ofd_name(ofd), seq, PTR_ERR(oseq));
GOTO(out_sem, rc = -EINVAL);
}
(oa->o_flags & OBD_FL_RECREATE_OBJS)) {
if (!ofd_obd(ofd)->obd_recovering ||
oid > ofd_seq_last_oid(oseq)) {
- CERROR("%s: recreate objid "DOSTID" > last id "LPU64
+ CERROR("%s: recreate objid "DOSTID" > last id %llu"
"\n", ofd_name(ofd), POSTID(&oa->o_oi),
ofd_seq_last_oid(oseq));
GOTO(out_nolock, rc = -EINVAL);
oseq->os_destroys_in_progress = 1;
mutex_lock(&oseq->os_create_lock);
if (!oseq->os_destroys_in_progress) {
- CERROR("%s:["LPU64"] destroys_in_progress already"
+ CERROR("%s:[%llu] destroys_in_progress already"
" cleared\n", ofd_name(ofd), seq);
ostid_set_id(&rep_oa->o_oi, ofd_seq_last_oid(oseq));
GOTO(out, rc = 0);
}
diff = oid - ofd_seq_last_oid(oseq);
- CDEBUG(D_HA, "ofd_last_id() = "LPU64" -> diff = %d\n",
+ CDEBUG(D_HA, "ofd_last_id() = %llu -> diff = %d\n",
ofd_seq_last_oid(oseq), diff);
if (-diff > OST_MAX_PRECREATE) {
/* Let MDS know that we are so far ahead. */
ofd_name(ofd), POSTID(&oa->o_oi));
GOTO(out, rc = -EINVAL);
}
+
+ if (diff < 0) {
+ /* LU-5648 */
+ CERROR("%s: invalid precreate request for "
+ DOSTID", last_id %llu. "
+ "Likely MDS last_id corruption\n",
+ ofd_name(ofd), POSTID(&oa->o_oi),
+ ofd_seq_last_oid(oseq));
+ GOTO(out, rc = -EINVAL);
+ }
}
}
if (diff > 0) {
cfs_time_t enough_time = cfs_time_shift(DISK_TIMEOUT);
- obd_id next_id;
+ u64 next_id;
int created = 0;
int count;
if (!(oa->o_valid & OBD_MD_FLFLAGS) ||
!(oa->o_flags & OBD_FL_DELORPHAN)) {
/* don't enforce grant during orphan recovery */
- rc = ofd_grant_create(tsi->tsi_env,
- ofd_obd(ofd)->obd_self_export,
- &diff);
- if (rc) {
+ granted = ofd_grant_create(tsi->tsi_env,
+ ofd_obd(ofd)->obd_self_export,
+ &diff);
+ if (granted < 0) {
+ rc = granted;
+ granted = 0;
CDEBUG(D_HA, "%s: failed to acquire grant "
"space for precreate (%d): rc = %d\n",
ofd_name(ofd), diff, rc);
* LFSCK will eventually clean up any orphans. LU-14 */
if (diff > 5 * OST_MAX_PRECREATE) {
diff = OST_MAX_PRECREATE / 2;
- LCONSOLE_WARN("%s: precreate FID "DOSTID" is over %u "
- "larger than the LAST_ID "DOSTID", only "
- "precreating the last %u objects.\n",
- ofd_name(ofd), POSTID(&oa->o_oi),
- 5 * OST_MAX_PRECREATE,
- POSTID(&oseq->os_oi), diff);
+ LCONSOLE_WARN("%s: Too many FIDs to precreate "
+ "OST replaced or reformatted: "
+ "LFSCK will clean up",
+ ofd_name(ofd));
+
+ CDEBUG(D_HA, "%s: precreate FID "DOSTID" is over "
+ "%u larger than the LAST_ID "DOSTID", only "
+ "precreating the last %u objects.\n",
+ ofd_name(ofd), POSTID(&oa->o_oi),
+ 5 * OST_MAX_PRECREATE,
+ POSTID(&oseq->os_oi), diff);
ofd_seq_last_oid_set(oseq, ostid_id(&oa->o_oi) - diff);
}
next_id = ofd_seq_last_oid(oseq) + 1;
count = ofd_precreate_batch(ofd, diff);
- CDEBUG(D_HA, "%s: reserve %d objects in group "LPX64
- " at "LPU64"\n", ofd_name(ofd),
+ CDEBUG(D_HA, "%s: reserve %d objects in group %#llx"
+ " at %llu\n", ofd_name(ofd),
count, seq, next_id);
if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)
ofd_name(ofd), rc);
if (!(oa->o_valid & OBD_MD_FLFLAGS) ||
- !(oa->o_flags & OBD_FL_DELORPHAN))
- ofd_grant_commit(tsi->tsi_env,
- ofd_obd(ofd)->obd_self_export, rc);
+ !(oa->o_flags & OBD_FL_DELORPHAN)) {
+ ofd_grant_commit(ofd_obd(ofd)->obd_self_export, granted,
+ rc);
+ granted = 0;
+ }
ostid_set_id(&rep_oa->o_oi, ofd_seq_last_oid(oseq));
}
struct ofd_device *ofd = ofd_exp(tsi->tsi_exp);
struct ofd_thread_info *fti = tsi2ofd_info(tsi);
struct lu_fid *fid = &fti->fti_fid;
- obd_id oid;
- obd_count count;
+ u64 oid;
+ u32 count;
int rc = 0;
ENTRY;
ENTRY;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OST_PAUSE_PUNCH, cfs_fail_val);
+
/* check that we do support OBD_CONNECT_TRUNCLOCK. */
CLASSERT(OST_CONNECT_SUPPORTED & OBD_CONNECT_TRUNCLOCK);
RETURN(rc);
}
- CDEBUG(D_INODE, "calling punch for object "DFID", valid = "LPX64
- ", start = "LPD64", end = "LPD64"\n", PFID(&tsi->tsi_fid),
+ CDEBUG(D_INODE, "calling punch for object "DFID", valid = %#llx"
+ ", start = %lld, end = %lld\n", PFID(&tsi->tsi_fid),
oa->o_valid, start, end);
fo = ofd_object_find_exists(tsi->tsi_env, ofd_exp(tsi->tsi_exp),
return rc;
}
+/**
+ * Prefetch object data into the server-side cache.
+ *
+ * Reads the byte range [start, end) of \a fo in batches of at most
+ * PTLRPC_MAX_BRW_PAGES pages: buffers are obtained from the underlying
+ * OSD with dt_bufs_get(), populated via dt_read_prep(), then released.
+ * The range is clamped to the current object size, so a range entirely
+ * past EOF is a no-op returning 0.
+ *
+ * \param[in] env	execution environment for this thread
+ * \param[in] fo	OFD object to prefetch; read-locked for the duration
+ * \param[in] lnb	preallocated local-niobuf array used for the reads
+ * \param[in] start	start offset in bytes (inclusive)
+ * \param[in] end	end offset in bytes (exclusive)
+ *
+ * \retval 0 on success
+ * \retval negative errno on error
+ */
+static int ofd_ladvise_prefetch(const struct lu_env *env,
+ struct ofd_object *fo,
+ struct niobuf_local *lnb,
+ __u64 start, __u64 end)
+{
+ struct ofd_thread_info *info = ofd_info(env);
+ pgoff_t start_index, end_index, pages;
+ struct niobuf_remote rnb;
+ unsigned long nr_local;
+ int rc = 0;
+
+ if (end <= start)
+ RETURN(-EINVAL);
+
+ ofd_read_lock(env, fo);
+ if (!ofd_object_exists(fo))
+ GOTO(out_unlock, rc = -ENOENT);
+
+ rc = ofd_attr_get(env, fo, &info->fti_attr);
+ if (rc)
+ GOTO(out_unlock, rc);
+
+ /* Clamp the prefetch window to the object's current size. */
+ if (end > info->fti_attr.la_size)
+ end = info->fti_attr.la_size;
+
+ /* Empty object (or range wholly beyond EOF): nothing to read. */
+ if (end == 0)
+ GOTO(out_unlock, rc);
+
+ /* We need page aligned offset and length */
+ start_index = start >> PAGE_SHIFT;
+ end_index = (end - 1) >> PAGE_SHIFT;
+ pages = end_index - start_index + 1;
+ while (pages > 0) {
+ nr_local = pages <= PTLRPC_MAX_BRW_PAGES ? pages :
+ PTLRPC_MAX_BRW_PAGES;
+ rnb.rnb_offset = start_index << PAGE_SHIFT;
+ rnb.rnb_len = nr_local << PAGE_SHIFT;
+ rc = dt_bufs_get(env, ofd_object_child(fo), &rnb, lnb, 0);
+ if (unlikely(rc < 0))
+ break;
+ /* dt_bufs_get() returns the number of buffers actually set up,
+ * which may be fewer than requested. */
+ nr_local = rc;
+ rc = dt_read_prep(env, ofd_object_child(fo), lnb, nr_local);
+ dt_bufs_put(env, ofd_object_child(fo), lnb, nr_local);
+ if (unlikely(rc))
+ break;
+ start_index += nr_local;
+ pages -= nr_local;
+ }
+
+out_unlock:
+ ofd_read_unlock(env, fo);
+ RETURN(rc);
+}
+
+/**
+ * OFD request handler for OST_LADVISE RPC.
+ *
+ * Tune cache or prefetch policies according to client advice.
+ *
+ * \param[in] tsi target session environment for this request
+ *
+ * \retval 0 if successful
+ * \retval negative errno on error
+ */
+static int ofd_ladvise_hdl(struct tgt_session_info *tsi)
+{
+ struct ptlrpc_request *req = tgt_ses_req(tsi);
+ struct obd_export *exp = tsi->tsi_exp;
+ struct ofd_device *ofd = ofd_exp(exp);
+ struct ost_body *body, *repbody;
+ struct ofd_thread_info *info;
+ struct ofd_object *fo;
+ struct ptlrpc_thread *svc_thread = req->rq_svc_thread;
+ const struct lu_env *env = svc_thread->t_env;
+ struct tgt_thread_big_cache *tbc = svc_thread->t_data;
+ int rc = 0;
+ struct lu_ladvise *ladvise;
+ int num_advise;
+ struct ladvise_hdr *ladvise_hdr;
+ struct obd_ioobj ioo;
+ struct lustre_handle lockh = { 0 };
+ __u64 flags = 0;
+ int i;
+ struct dt_object *dob;
+ __u64 start;
+ __u64 end;
+ ENTRY;
+
+ CFS_FAIL_TIMEOUT(OBD_FAIL_OST_LADVISE_PAUSE, cfs_fail_val);
+ body = tsi->tsi_ost_body;
+
+ /* The advice must name a specific object. */
+ if ((body->oa.o_valid & OBD_MD_FLID) != OBD_MD_FLID)
+ RETURN(err_serious(-EPROTO));
+
+ ladvise_hdr = req_capsule_client_get(tsi->tsi_pill,
+ &RMF_OST_LADVISE_HDR);
+ if (ladvise_hdr == NULL)
+ RETURN(err_serious(-EPROTO));
+
+ if (ladvise_hdr->lah_magic != LADVISE_MAGIC ||
+ ladvise_hdr->lah_count < 1)
+ RETURN(err_serious(-EPROTO));
+
+ if ((ladvise_hdr->lah_flags & (~LF_MASK)) != 0)
+ RETURN(err_serious(-EPROTO));
+
+ ladvise = req_capsule_client_get(tsi->tsi_pill, &RMF_OST_LADVISE);
+ if (ladvise == NULL)
+ RETURN(err_serious(-EPROTO));
+
+ /* The header's advice count must fit inside the buffer the client
+ * actually sent, otherwise the request is malformed. */
+ num_advise = req_capsule_get_size(&req->rq_pill,
+ &RMF_OST_LADVISE, RCL_CLIENT) /
+ sizeof(*ladvise);
+ if (num_advise < ladvise_hdr->lah_count)
+ RETURN(err_serious(-EPROTO));
+
+ repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ repbody->oa = body->oa;
+
+ info = ofd_info_init(env, exp);
+
+ rc = ostid_to_fid(&info->fti_fid, &body->oa.o_oi,
+ ofd->ofd_lut.lut_lsd.lsd_osd_index);
+ if (rc != 0)
+ RETURN(rc);
+
+ /* Reference held from here on; released via ofd_object_put() below. */
+ fo = ofd_object_find(env, ofd, &info->fti_fid);
+ if (IS_ERR(fo)) {
+ rc = PTR_ERR(fo);
+ RETURN(rc);
+ }
+ LASSERT(fo != NULL);
+ dob = ofd_object_child(fo);
+
+ for (i = 0; i < num_advise; i++, ladvise++) {
+ start = ladvise->lla_start;
+ end = ladvise->lla_end;
+ if (end <= start) {
+ rc = err_serious(-EPROTO);
+ break;
+ }
+
+ /* Handle different advice types */
+ switch (ladvise->lla_advice) {
+ default:
+ rc = -ENOTSUPP;
+ break;
+ case LU_LADVISE_WILLREAD:
+ /* NOTE(review): this RETURN skips the
+ * ofd_object_put(env, fo) at the end of the function,
+ * which looks like an object-reference leak -- confirm
+ * whether "rc = -ENOMEM; break;" was intended. */
+ if (tbc == NULL)
+ RETURN(-ENOMEM);
+
+ ioo.ioo_oid = body->oa.o_oi;
+ ioo.ioo_bufcnt = 1;
+ /* Take a PR extent lock over the range so dirty client
+ * data is flushed before we prefetch it. */
+ rc = tgt_extent_lock(exp->exp_obd->obd_namespace,
+ &tsi->tsi_resid, start, end - 1,
+ &lockh, LCK_PR, &flags);
+ if (rc != 0)
+ break;
+
+ /* Prefetch failures are reported through rq_status
+ * only; rc stays 0 so remaining advice still runs. */
+ req->rq_status = ofd_ladvise_prefetch(env, fo,
+ tbc->local,
+ start, end);
+ tgt_extent_unlock(&lockh, LCK_PR);
+ break;
+ case LU_LADVISE_DONTNEED:
+ rc = dt_ladvise(env, dob, ladvise->lla_start,
+ ladvise->lla_end, LU_LADVISE_DONTNEED);
+ break;
+ }
+ if (rc != 0)
+ break;
+ }
+
+ ofd_object_put(env, fo);
+ req->rq_status = rc;
+ RETURN(rc);
+}
+
/**
* OFD request handler for OST_QUOTACTL RPC.
*
*/
static int ofd_quotactl(struct tgt_session_info *tsi)
{
- struct obd_quotactl *oqctl, *repoqc;
- struct lu_nodemap *nodemap =
- tsi->tsi_exp->exp_target_data.ted_nodemap;
- int id;
- int rc;
+ struct obd_quotactl *oqctl, *repoqc;
+ struct lu_nodemap *nodemap;
+ int id;
+ int rc;
ENTRY;
if (repoqc == NULL)
RETURN(err_serious(-ENOMEM));
- /* report success for quota on/off for interoperability with current MDT
- * stack */
- if (oqctl->qc_cmd == Q_QUOTAON || oqctl->qc_cmd == Q_QUOTAOFF)
- RETURN(0);
-
*repoqc = *oqctl;
+ nodemap = nodemap_get_from_exp(tsi->tsi_exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
+
id = repoqc->qc_id;
if (oqctl->qc_type == USRQUOTA)
id = nodemap_map_id(nodemap, NODEMAP_UID,
NODEMAP_CLIENT_TO_FS,
repoqc->qc_id);
+ nodemap_putref(nodemap);
+
if (repoqc->qc_id != id)
swap(repoqc->qc_id, id);
*
* \retval amount of time to extend the timeout with
*/
-static inline int prolong_timeout(struct ptlrpc_request *req,
- struct ldlm_lock *lock)
+static inline int prolong_timeout(struct ptlrpc_request *req)
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
+ time_t req_timeout;
if (AT_OFF)
return obd_timeout / 2;
- /* We are in the middle of the process - BL AST is sent, CANCEL
- is ahead. Take half of AT + IO process time. */
- return at_est2timeout(at_get(&svcpt->scp_at_estimate)) +
- (ldlm_bl_timeout(lock) >> 1);
-}
-
-/**
- * Prolong single lock timeout.
- *
- * This is supplemental function to the ofd_prolong_locks(). It prolongs
- * a single lock.
- *
- * \param[in] tsi target session environment for this request
- * \param[in] lock LDLM lock to prolong
- * \param[in] extent related extent
- * \param[in] timeout timeout value to add
- *
- * \retval 0 if lock is not suitable for prolongation
- * \retval 1 if lock was prolonged successfully
- */
-static int ofd_prolong_one_lock(struct tgt_session_info *tsi,
- struct ldlm_lock *lock,
- struct ldlm_extent *extent)
-{
- int timeout = prolong_timeout(tgt_ses_req(tsi), lock);
-
- if (lock->l_flags & LDLM_FL_DESTROYED) /* lock already cancelled */
- return 0;
-
- /* XXX: never try to grab resource lock here because we're inside
- * exp_bl_list_lock; in ldlm_lockd.c to handle waiting list we take
- * res lock and then exp_bl_list_lock. */
-
- if (!(lock->l_flags & LDLM_FL_AST_SENT))
- /* ignore locks not being cancelled */
- return 0;
-
- LDLM_DEBUG(lock, "refreshed for req x"LPU64" ext("LPU64"->"LPU64") "
- "to %ds.\n", tgt_ses_req(tsi)->rq_xid, extent->start,
- extent->end, timeout);
-
- /* OK. this is a possible lock the user holds doing I/O
- * let's refresh eviction timer for it */
- ldlm_refresh_waiting_lock(lock, timeout);
- return 1;
+ /* Prolong by the larger of the adaptive-timeout service estimate and
+ * the service time this request was originally granted (deadline minus
+ * arrival time), so a long-running request is not cut short. */
+ req_timeout = req->rq_deadline - req->rq_arrival_time.tv_sec;
+ return max_t(time_t, at_est2timeout(at_get(&svcpt->scp_at_estimate)),
+ req_timeout);
}
/**
* request may cover multiple locks.
*
* \param[in] tsi target session environment for this request
- * \param[in] start start of extent
- * \param[in] end end of extent
+ * \param[in] data struct of data to prolong locks
*
- * \retval number of prolonged locks
*/
-static int ofd_prolong_extent_locks(struct tgt_session_info *tsi,
- __u64 start, __u64 end)
+static void ofd_prolong_extent_locks(struct tgt_session_info *tsi,
+ struct ldlm_prolong_args *data)
{
- struct obd_export *exp = tsi->tsi_exp;
struct obdo *oa = &tsi->tsi_ost_body->oa;
- struct ldlm_extent extent = {
- .start = start,
- .end = end
- };
struct ldlm_lock *lock;
- int lock_count = 0;
ENTRY;
+ data->lpa_timeout = prolong_timeout(tgt_ses_req(tsi));
+ data->lpa_export = tsi->tsi_exp;
+ data->lpa_resid = tsi->tsi_resid;
+
+ CDEBUG(D_RPCTRACE, "Prolong locks for req %p with x%llu"
+ " ext(%llu->%llu)\n", tgt_ses_req(tsi),
+ tgt_ses_req(tsi)->rq_xid, data->lpa_extent.start,
+ data->lpa_extent.end);
+
if (oa->o_valid & OBD_MD_FLHANDLE) {
/* mostly a request should be covered by only one lock, try
* fast path. */
if (lock != NULL) {
/* Fast path to check if the lock covers the whole IO
* region exclusively. */
- if (lock->l_granted_mode == LCK_PW &&
- ldlm_extent_contain(&lock->l_policy_data.l_extent,
- &extent)) {
+ if (ldlm_extent_contain(&lock->l_policy_data.l_extent,
+ &data->lpa_extent)) {
/* bingo */
- LASSERT(lock->l_export == exp);
- lock_count = ofd_prolong_one_lock(tsi, lock,
- &extent);
+ LASSERT(lock->l_export == data->lpa_export);
+ ldlm_lock_prolong_one(lock, data);
LDLM_LOCK_PUT(lock);
- RETURN(lock_count);
+ RETURN_EXIT;
}
+ lock->l_last_used = cfs_time_current();
LDLM_LOCK_PUT(lock);
}
}
- spin_lock_bh(&exp->exp_bl_list_lock);
- list_for_each_entry(lock, &exp->exp_bl_list, l_exp_list) {
- LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
- LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
-
- if (!ldlm_res_eq(&tsi->tsi_resid, &lock->l_resource->lr_name))
- continue;
-
- if (!ldlm_extent_overlap(&lock->l_policy_data.l_extent,
- &extent))
- continue;
-
- lock_count += ofd_prolong_one_lock(tsi, lock, &extent);
- }
- spin_unlock_bh(&exp->exp_bl_list_lock);
-
- RETURN(lock_count);
+ ldlm_resource_prolong(data);
+ EXIT;
}
/**
static int ofd_rw_hpreq_lock_match(struct ptlrpc_request *req,
struct ldlm_lock *lock)
{
- struct niobuf_remote *rnb;
- struct obd_ioobj *ioo;
- ldlm_mode_t mode;
- struct ldlm_extent ext;
- __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
+ struct niobuf_remote *rnb;
+ struct obd_ioobj *ioo;
+ enum ldlm_mode mode;
+ struct ldlm_extent ext;
+ __u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
ENTRY;
if (!ostid_res_name_eq(&ioo->ioo_oid, &lock->l_resource->lr_name))
RETURN(0);
- /* a bulk write can only hold a reference on a PW extent lock */
- mode = LCK_PW;
+ /* a bulk write can only hold a reference on a PW extent lock
+ * or GROUP lock.
+ */
+ mode = LCK_PW | LCK_GROUP;
if (opc == OST_READ)
/* whereas a bulk read can be protected by either a PR or PW
* extent lock */
* Implementation of ptlrpc_hpreq_ops::hpreq_lock_check for OFD RW requests.
*
* Check for whether the given PTLRPC request (\a req) is blocking
- * an LDLM lock cancel.
+ * an LDLM lock cancel. Also checks whether the request is covered by an LDLM
+ * lock.
*
* \param[in] req the incoming request
*
* \retval 1 if \a req is blocking an LDLM lock cancel
* \retval 0 if it is not
+ * \retval -ESTALE if lock is not found
*/
static int ofd_rw_hpreq_check(struct ptlrpc_request *req)
{
struct tgt_session_info *tsi;
struct obd_ioobj *ioo;
struct niobuf_remote *rnb;
- __u64 start, end;
- int lock_count;
+ int opc;
+ struct ldlm_prolong_args pa = { 0 };
ENTRY;
* Use LASSERT below because malformed RPCs should have
* been filtered out in tgt_hpreq_handler().
*/
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+ LASSERT(opc == OST_READ || opc == OST_WRITE);
+
ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
LASSERT(ioo != NULL);
LASSERT(rnb != NULL);
LASSERT(!(rnb->rnb_flags & OBD_BRW_SRVLOCK));
- start = rnb->rnb_offset;
+ pa.lpa_mode = LCK_PW | LCK_GROUP;
+ if (opc == OST_READ)
+ pa.lpa_mode |= LCK_PR;
+
+ pa.lpa_extent.start = rnb->rnb_offset;
rnb += ioo->ioo_bufcnt - 1;
- end = rnb->rnb_offset + rnb->rnb_len - 1;
+ pa.lpa_extent.end = rnb->rnb_offset + rnb->rnb_len - 1;
DEBUG_REQ(D_RPCTRACE, req, "%s %s: refresh rw locks: "DFID
- " ("LPU64"->"LPU64")\n",
- tgt_name(tsi->tsi_tgt), current->comm,
- PFID(&tsi->tsi_fid), start, end);
+ " (%llu->%llu)\n", tgt_name(tsi->tsi_tgt),
+ current->comm, PFID(&tsi->tsi_fid), pa.lpa_extent.start,
+ pa.lpa_extent.end);
- lock_count = ofd_prolong_extent_locks(tsi, start, end);
+ ofd_prolong_extent_locks(tsi, &pa);
CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n",
- tgt_name(tsi->tsi_tgt), lock_count, req);
+ tgt_name(tsi->tsi_tgt), pa.lpa_blocks_cnt, req);
+
+ if (pa.lpa_blocks_cnt > 0)
+ RETURN(1);
- RETURN(lock_count > 0);
+ RETURN(pa.lpa_locks_cnt > 0 ? 0 : -ESTALE);
}
/**
struct ldlm_lock *lock)
{
struct tgt_session_info *tsi;
+ struct obdo *oa;
+ struct ldlm_extent ext;
+
+ ENTRY;
/* Don't use tgt_ses_info() to get session info, because lock_match()
* can be called while request has no processing thread yet. */
LASSERT(tsi->tsi_ost_body != NULL);
if (tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLHANDLE &&
tsi->tsi_ost_body->oa.o_handle.cookie == lock->l_handle.h_cookie)
- return 1;
+ RETURN(1);
- return 0;
+ oa = &tsi->tsi_ost_body->oa;
+ ext.start = oa->o_size;
+ ext.end = oa->o_blocks;
+
+ LASSERT(lock->l_resource != NULL);
+ if (!ostid_res_name_eq(&oa->o_oi, &lock->l_resource->lr_name))
+ RETURN(0);
+
+ if (!(lock->l_granted_mode & (LCK_PW | LCK_GROUP)))
+ RETURN(0);
+
+ RETURN(ldlm_extent_overlap(&lock->l_policy_data.l_extent, &ext));
}
/**
* Implementation of ptlrpc_hpreq_ops::hpreq_lock_check for OST_PUNCH request.
*
* High-priority queue request check for whether the given punch request
- * (\a req) is blocking an LDLM lock cancel.
+ * (\a req) is blocking an LDLM lock cancel. Also checks whether the request is
+ * covered by an LDLM lock.
+ *
*
* \param[in] req the incoming request
*
* \retval 1 if \a req is blocking an LDLM lock cancel
* \retval 0 if it is not
+ * \retval -ESTALE if lock is not found
*/
static int ofd_punch_hpreq_check(struct ptlrpc_request *req)
{
struct tgt_session_info *tsi;
struct obdo *oa;
- int lock_count;
+ struct ldlm_prolong_args pa = { 0 };
ENTRY;
LASSERT(!(oa->o_valid & OBD_MD_FLFLAGS &&
oa->o_flags & OBD_FL_SRVLOCK));
+ pa.lpa_mode = LCK_PW | LCK_GROUP;
+ pa.lpa_extent.start = oa->o_size;
+ pa.lpa_extent.end = oa->o_blocks;
+
CDEBUG(D_DLMTRACE,
- "%s: refresh locks: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
+ "%s: refresh locks: %llu/%llu (%llu->%llu)\n",
tgt_name(tsi->tsi_tgt), tsi->tsi_resid.name[0],
- tsi->tsi_resid.name[1], oa->o_size, oa->o_blocks);
+ tsi->tsi_resid.name[1], pa.lpa_extent.start, pa.lpa_extent.end);
- lock_count = ofd_prolong_extent_locks(tsi, oa->o_size, oa->o_blocks);
+ ofd_prolong_extent_locks(tsi, &pa);
CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n",
- tgt_name(tsi->tsi_tgt), lock_count, req);
+ tgt_name(tsi->tsi_tgt), pa.lpa_blocks_cnt, req);
- RETURN(lock_count > 0);
+ if (pa.lpa_blocks_cnt > 0)
+ RETURN(1);
+
+ RETURN(pa.lpa_locks_cnt > 0 ? 0 : -ESTALE);
}
/**
LASSERT(rnb != NULL); /* must exist after request preprocessing */
/* no high priority if server lock is needed */
- if (rnb->rnb_flags & OBD_BRW_SRVLOCK)
+ if (rnb->rnb_flags & OBD_BRW_SRVLOCK ||
+ (lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg)
+ & MSG_REPLAY))
return;
}
tgt_ses_req(tsi)->rq_ops = &ofd_hpreq_rw;
{
LASSERT(tsi->tsi_ost_body != NULL); /* must exists if we are here */
/* no high-priority if server lock is needed */
- if (tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
- tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK)
+ if ((tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
+ tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK) ||
+ tgt_conn_flags(tsi) & OBD_CONNECT_MDS ||
+ lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg) & MSG_REPLAY)
return;
tgt_ses_req(tsi)->rq_ops = &ofd_hpreq_punch;
}
ofd_hp_punch),
TGT_OST_HDL(HABEO_CORPUS| HABEO_REFERO, OST_SYNC, ofd_sync_hdl),
TGT_OST_HDL(0 | HABEO_REFERO, OST_QUOTACTL, ofd_quotactl),
+TGT_OST_HDL(HABEO_CORPUS | HABEO_REFERO, OST_LADVISE, ofd_ladvise_hdl),
};
static struct tgt_opc_slice ofd_common_slice[] = {
.tos_hs = tgt_lfsck_handlers
},
{
+ .tos_opc_start = SEC_FIRST_OPC,
+ .tos_opc_end = SEC_LAST_OPC,
+ .tos_hs = tgt_sec_ctx_handlers
+ },
+ {
.tos_hs = NULL
}
};
static int ofd_init0(const struct lu_env *env, struct ofd_device *m,
struct lu_device_type *ldt, struct lustre_cfg *cfg)
{
- const char *dev = lustre_cfg_string(cfg, 0);
- struct ofd_thread_info *info = NULL;
- struct obd_device *obd;
- struct obd_statfs *osfs;
- int rc;
+ const char *dev = lustre_cfg_string(cfg, 0);
+ struct ofd_thread_info *info = NULL;
+ struct obd_device *obd;
+ struct obd_statfs *osfs;
+ struct lu_fid fid;
+ struct nm_config_file *nodemap_config;
+ int rc;
ENTRY;
spin_lock_init(&m->ofd_batch_lock);
init_rwsem(&m->ofd_lastid_rwsem);
- obd->u.filter.fo_fl_oss_capa = 0;
- INIT_LIST_HEAD(&obd->u.filter.fo_capa_keys);
- obd->u.filter.fo_capa_hash = init_capa_hash();
- if (obd->u.filter.fo_capa_hash == NULL)
- RETURN(-ENOMEM);
-
m->ofd_dt_dev.dd_lu_dev.ld_ops = &ofd_lu_ops;
m->ofd_dt_dev.dd_lu_dev.ld_obd = obd;
/* set this lu_device to obd, because error handling need it */
info = ofd_info_init(env, NULL);
if (info == NULL)
- RETURN(-EFAULT);
+ GOTO(err_fini_proc, rc = -EFAULT);
rc = ofd_stack_init(env, m, cfg);
if (rc) {
CERROR("%s: can't get statfs data, rc %d\n", obd->obd_name, rc);
GOTO(err_fini_stack, rc);
}
- if (!IS_PO2(osfs->os_bsize)) {
+ if (!is_power_of_2(osfs->os_bsize)) {
CERROR("%s: blocksize (%d) is not a power of 2\n",
obd->obd_name, osfs->os_bsize);
GOTO(err_fini_stack, rc = -EPROTO);
}
m->ofd_blockbits = fls(osfs->os_bsize) - 1;
+ if (ONE_MB_BRW_SIZE < (1U << m->ofd_blockbits))
+ m->ofd_brw_size = 1U << m->ofd_blockbits;
+ else
+ m->ofd_brw_size = ONE_MB_BRW_SIZE;
+
m->ofd_precreate_batch = OFD_PRECREATE_BATCH_DEFAULT;
if (osfs->os_bsize * osfs->os_blocks < OFD_PRECREATE_SMALL_FS)
m->ofd_precreate_batch = OFD_PRECREATE_BATCH_SMALL;
dt_conf_get(env, m->ofd_osd, &m->ofd_dt_conf);
- /* Allow at most ddp_grant_reserved% of the available filesystem space
- * to be granted to clients, so that any errors in the grant overhead
- * calculations do not allow granting more space to clients than can be
- * written. Assumes that in aggregate the grant overhead calculations do
- * not have more than ddp_grant_reserved% estimation error in them. */
- m->ofd_grant_ratio =
- ofd_grant_ratio_conv(m->ofd_dt_conf.ddp_grant_reserved);
-
rc = tgt_init(env, &m->ofd_lut, obd, m->ofd_osd, ofd_common_slice,
OBD_FAIL_OST_ALL_REQUEST_NET,
OBD_FAIL_OST_ALL_REPLY_NET);
if (rc)
GOTO(err_fini_lut, rc);
- rc = ofd_start_inconsistency_verification_thread(m);
+ fid.f_seq = FID_SEQ_LOCAL_NAME;
+ fid.f_oid = 1;
+ fid.f_ver = 0;
+ rc = local_oid_storage_init(env, m->ofd_osd, &fid,
+ &m->ofd_los);
if (rc != 0)
GOTO(err_fini_fs, rc);
+ nodemap_config = nm_config_file_register_tgt(env, m->ofd_osd,
+ m->ofd_los);
+ if (IS_ERR(nodemap_config))
+ GOTO(err_fini_los, rc = PTR_ERR(nodemap_config));
+
+ obd->u.obt.obt_nodemap_config_file = nodemap_config;
+
+ rc = ofd_start_inconsistency_verification_thread(m);
+ if (rc != 0)
+ GOTO(err_fini_nm, rc);
+
+ tgt_adapt_sptlrpc_conf(&m->ofd_lut, 1);
+
RETURN(0);
+err_fini_nm:
+ nm_config_file_deregister_tgt(env, obd->u.obt.obt_nodemap_config_file);
+ obd->u.obt.obt_nodemap_config_file = NULL;
+err_fini_los:
+ local_oid_storage_fini(env, m->ofd_los);
+ m->ofd_los = NULL;
err_fini_fs:
ofd_fs_cleanup(env, m);
err_fini_lut:
tgt_fini(env, &m->ofd_lut);
err_free_ns:
- ldlm_namespace_free(m->ofd_namespace, 0, obd->obd_force);
+ ldlm_namespace_free(m->ofd_namespace, NULL, obd->obd_force);
obd->obd_namespace = m->ofd_namespace = NULL;
err_fini_stack:
ofd_stack_fini(env, m, &m->ofd_osd->dd_lu_dev);
stop.ls_flags = 0;
lfsck_stop(env, m->ofd_osd, &stop);
target_recovery_fini(obd);
+ if (m->ofd_namespace != NULL)
+ ldlm_namespace_free_prior(m->ofd_namespace, NULL,
+ d->ld_obd->obd_force);
+
obd_exports_barrier(obd);
obd_zombie_barrier();
ofd_stop_inconsistency_verification_thread(m);
lfsck_degister(env, m->ofd_osd);
ofd_fs_cleanup(env, m);
+ nm_config_file_deregister_tgt(env, obd->u.obt.obt_nodemap_config_file);
+ obd->u.obt.obt_nodemap_config_file = NULL;
- ofd_free_capa_keys(m);
- cleanup_capa_hash(obd->u.filter.fo_capa_hash);
+ if (m->ofd_los != NULL) {
+ local_oid_storage_fini(env, m->ofd_los);
+ m->ofd_los = NULL;
+ }
if (m->ofd_namespace != NULL) {
- ldlm_namespace_free(m->ofd_namespace, NULL,
- d->ld_obd->obd_force);
+ ldlm_namespace_free_post(m->ofd_namespace);
d->ld_obd->obd_namespace = m->ofd_namespace = NULL;
}
class_unregister_type(LUSTRE_OST_NAME);
}
-MODULE_AUTHOR("Whamcloud, Inc. <http://www.whamcloud.com/>");
+MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Object Filtering Device");
+MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");
module_init(ofd_init);