static int ofd_recovery_complete(const struct lu_env *env,
struct lu_device *dev)
{
+ struct ofd_thread_info *oti = ofd_info(env);
struct ofd_device *ofd = ofd_dev(dev);
struct lu_device *next = &ofd->ofd_osd->dd_lu_dev;
- int rc = 0, max_precreate;
+ int rc = 0;
ENTRY;
/*
* Grant space for object precreation on the self export.
- * This initial reserved space (i.e. 10MB for zfs and 280KB for ldiskfs)
+ * The initial reserved space (i.e. 10MB for zfs and 280KB for ldiskfs)
* is enough to create 10k objects. More space is then acquired for
* precreation in ofd_grant_create().
*/
- max_precreate = OST_MAX_PRECREATE * ofd->ofd_dt_conf.ddp_inodespace / 2;
- ofd_grant_connect(env, dev->ld_obd->obd_self_export, max_precreate,
- false);
+ /*
+ * NOTE(review): OST_MAX_PRECREATE is halved *before* multiplying by
+ * ddp_inodespace, presumably to avoid overflowing the intermediate
+ * product in ocd_grant — confirm before reordering these two lines.
+ */
+ memset(&oti->fti_ocd, 0, sizeof(oti->fti_ocd));
+ oti->fti_ocd.ocd_grant = OST_MAX_PRECREATE / 2;
+ oti->fti_ocd.ocd_grant *= ofd->ofd_dt_conf.ddp_inodespace;
+ oti->fti_ocd.ocd_connect_flags = OBD_CONNECT_GRANT |
+ OBD_CONNECT_GRANT_PARAM;
+ ofd_grant_connect(env, dev->ld_obd->obd_self_export, &oti->fti_ocd,
+ true);
rc = next->ld_ops->ldo_recovery_complete(env, next);
RETURN(rc);
}
ss->ss_lu = lu->ld_site;
ss->ss_node_id = ofd->ofd_lut.lut_lsd.lsd_osd_index;
+ OBD_ALLOC(name, sizeof(obd_name) * 2 + 10);
+ if (name == NULL)
+ return -ENOMEM;
+
OBD_ALLOC_PTR(ss->ss_server_seq);
if (ss->ss_server_seq == NULL)
- GOTO(out_free, rc = -ENOMEM);
-
- OBD_ALLOC(name, strlen(obd_name) + 10);
- if (!name) {
- OBD_FREE_PTR(ss->ss_server_seq);
- ss->ss_server_seq = NULL;
- GOTO(out_free, rc = -ENOMEM);
- }
+ GOTO(out_name, rc = -ENOMEM);
rc = seq_server_init(env, ss->ss_server_seq, ofd->ofd_osd, obd_name,
LUSTRE_SEQ_SERVER, ss);
if (rc) {
CERROR("%s : seq server init error %d\n", obd_name, rc);
- GOTO(out_free, rc);
+ GOTO(out_server, rc);
}
ss->ss_server_seq->lss_space.lsr_index = ss->ss_node_id;
OBD_ALLOC_PTR(ss->ss_client_seq);
if (ss->ss_client_seq == NULL)
- GOTO(out_free, rc = -ENOMEM);
+ GOTO(out_server, rc = -ENOMEM);
- snprintf(name, strlen(obd_name) + 6, "%p-super", obd_name);
+ /*
+ * The obd_name pointer is always formatted with "%p", which keeps the
+ * name unique in the kernel even if the filesystem is mounted twice.
+ * Therefore sizeof(obd_name) * 2 hex characters are always enough.
+ */
+ snprintf(name, sizeof(obd_name) * 2 + 7, "%p-super", obd_name);
rc = seq_client_init(ss->ss_client_seq, NULL, LUSTRE_SEQ_DATA,
name, NULL);
if (rc) {
CERROR("%s : seq client init error %d\n", obd_name, rc);
- GOTO(out_free, rc);
+ GOTO(out_client, rc);
}
- OBD_FREE(name, strlen(obd_name) + 10);
- name = NULL;
rc = seq_server_set_cli(env, ss->ss_server_seq, ss->ss_client_seq);
-out_free:
if (rc) {
- if (ss->ss_server_seq) {
- seq_server_fini(ss->ss_server_seq, env);
- OBD_FREE_PTR(ss->ss_server_seq);
- ss->ss_server_seq = NULL;
- }
-
- if (ss->ss_client_seq) {
- seq_client_fini(ss->ss_client_seq);
- OBD_FREE_PTR(ss->ss_client_seq);
- ss->ss_client_seq = NULL;
- }
-
- if (name) {
- OBD_FREE(name, strlen(obd_name) + 10);
- name = NULL;
- }
+out_client:
+ seq_client_fini(ss->ss_client_seq);
+ OBD_FREE_PTR(ss->ss_client_seq);
+ ss->ss_client_seq = NULL;
+out_server:
+ seq_server_fini(ss->ss_server_seq, env);
+ OBD_FREE_PTR(ss->ss_server_seq);
+ ss->ss_server_seq = NULL;
}
+out_name:
+ OBD_FREE(name, sizeof(obd_name) * 2 + 10);
return rc;
}
obdo_from_la(&repbody->oa, &fti->fti_attr,
OFD_VALID_FLAGS | LA_UID | LA_GID);
- tgt_drop_id(tsi->tsi_exp, &repbody->oa);
/* Store object version in reply */
curr_version = dt_version_get(tsi->tsi_env,
obdo_from_la(&repbody->oa, &fti->fti_attr,
OFD_VALID_FLAGS | LA_UID | LA_GID);
- tgt_drop_id(tsi->tsi_exp, &repbody->oa);
ofd_counter_incr(tsi->tsi_exp, LPROC_OFD_STATS_SETATTR,
tsi->tsi_jobid, 1);
ENTRY;
+ OBD_FAIL_TIMEOUT(OBD_FAIL_OST_PAUSE_PUNCH, cfs_fail_val);
+
/* check that we do support OBD_CONNECT_TRUNCLOCK. */
CLASSERT(OST_CONNECT_SUPPORTED & OBD_CONNECT_TRUNCLOCK);
return rc;
}
+/**
+ * Prefetch the byte range [\a start, \a end) of object \a fo.
+ *
+ * The range is clamped to the current object size, then read in chunks
+ * of at most PTLRPC_MAX_BRW_PAGES pages via dt_bufs_get()/dt_read_prep(),
+ * releasing the buffers immediately with dt_bufs_put() so only the cache
+ * effect of the read remains.
+ *
+ * \param[in] env	execution environment for this thread
+ * \param[in] fo	OFD object to prefetch
+ * \param[in] start	start offset in bytes (inclusive)
+ * \param[in] end	end offset in bytes (exclusive)
+ *
+ * \retval 0 if successful
+ * \retval negative errno on error
+ */
+static int ofd_ladvise_prefetch(const struct lu_env *env,
+ struct ofd_object *fo,
+ __u64 start, __u64 end)
+{
+ struct ofd_thread_info *info = ofd_info(env);
+ pgoff_t start_index, end_index, pages;
+ struct niobuf_remote rnb;
+ unsigned long nr_local;
+ struct niobuf_local *lnb;
+ int rc = 0;
+
+ if (end <= start)
+ RETURN(-EINVAL);
+
+ /* one local-niobuf array, reused for every chunk below */
+ OBD_ALLOC_LARGE(lnb, sizeof(*lnb) * PTLRPC_MAX_BRW_PAGES);
+ if (lnb == NULL)
+ RETURN(-ENOMEM);
+
+ ofd_read_lock(env, fo);
+ if (!ofd_object_exists(fo))
+ GOTO(out_unlock, rc = -ENOENT);
+
+ rc = ofd_attr_get(env, fo, &info->fti_attr);
+ if (rc)
+ GOTO(out_unlock, rc);
+
+ /* never prefetch past EOF */
+ if (end > info->fti_attr.la_size)
+ end = info->fti_attr.la_size;
+
+ if (end == 0)
+ GOTO(out_unlock, rc);
+
+ /* We need page aligned offset and length */
+ start_index = start >> PAGE_SHIFT;
+ end_index = (end - 1) >> PAGE_SHIFT;
+ pages = end_index - start_index + 1;
+ while (pages > 0) {
+ nr_local = pages <= PTLRPC_MAX_BRW_PAGES ? pages :
+ PTLRPC_MAX_BRW_PAGES;
+ rnb.rnb_offset = start_index << PAGE_SHIFT;
+ rnb.rnb_len = nr_local << PAGE_SHIFT;
+ rc = dt_bufs_get(env, ofd_object_child(fo), &rnb, lnb, 0);
+ if (unlikely(rc < 0))
+ break;
+ /* dt_bufs_get() returns the number of local buffers mapped */
+ nr_local = rc;
+ rc = dt_read_prep(env, ofd_object_child(fo), lnb, nr_local);
+ dt_bufs_put(env, ofd_object_child(fo), lnb, nr_local);
+ if (unlikely(rc))
+ break;
+ start_index += nr_local;
+ pages -= nr_local;
+ }
+
+out_unlock:
+ ofd_read_unlock(env, fo);
+ OBD_FREE_LARGE(lnb, sizeof(*lnb) * PTLRPC_MAX_BRW_PAGES);
+ RETURN(rc);
+}
+
+/**
+ * OFD request handler for OST_LADVISE RPC.
+ *
+ * Tune cache or prefetch policies according to the advice.
+ *
+ * \param[in] tsi target session environment for this request
+ *
+ * \retval 0 if successful
+ * \retval negative errno on error
+ */
+static int ofd_ladvise_hdl(struct tgt_session_info *tsi)
+{
+ struct ptlrpc_request *req = tgt_ses_req(tsi);
+ struct obd_export *exp = tsi->tsi_exp;
+ struct ofd_device *ofd = ofd_exp(exp);
+ struct ost_body *body, *repbody;
+ struct ofd_thread_info *info;
+ struct ofd_object *fo;
+ const struct lu_env *env = req->rq_svc_thread->t_env;
+ int rc = 0;
+ struct lu_ladvise *ladvise;
+ int num_advise;
+ struct ladvise_hdr *ladvise_hdr;
+ struct obd_ioobj ioo;
+ struct lustre_handle lockh = { 0 };
+ __u64 flags = 0;
+ int i;
+ struct dt_object *dob;
+ ENTRY;
+
+ CFS_FAIL_TIMEOUT(OBD_FAIL_OST_LADVISE_PAUSE, cfs_fail_val);
+ body = tsi->tsi_ost_body;
+
+ /* the request must carry a valid object id */
+ if ((body->oa.o_valid & OBD_MD_FLID) != OBD_MD_FLID)
+ RETURN(err_serious(-EPROTO));
+
+ ladvise_hdr = req_capsule_client_get(tsi->tsi_pill,
+ &RMF_OST_LADVISE_HDR);
+ if (ladvise_hdr == NULL)
+ RETURN(err_serious(-EPROTO));
+
+ if (ladvise_hdr->lah_magic != LADVISE_MAGIC ||
+ ladvise_hdr->lah_count < 1)
+ RETURN(err_serious(-EPROTO));
+
+ /* reject flags this server does not understand */
+ if ((ladvise_hdr->lah_flags & (~LF_MASK)) != 0)
+ RETURN(err_serious(-EPROTO));
+
+ ladvise = req_capsule_client_get(tsi->tsi_pill, &RMF_OST_LADVISE);
+ if (ladvise == NULL)
+ RETURN(err_serious(-EPROTO));
+
+ /* the advice buffer must be large enough for lah_count entries */
+ num_advise = req_capsule_get_size(&req->rq_pill,
+ &RMF_OST_LADVISE, RCL_CLIENT) /
+ sizeof(*ladvise);
+ if (num_advise < ladvise_hdr->lah_count)
+ RETURN(err_serious(-EPROTO));
+
+ repbody = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY);
+ repbody->oa = body->oa;
+
+ info = ofd_info_init(env, exp);
+
+ rc = ostid_to_fid(&info->fti_fid, &body->oa.o_oi,
+ ofd->ofd_lut.lut_lsd.lsd_osd_index);
+ if (rc != 0)
+ RETURN(rc);
+
+ fo = ofd_object_find(env, ofd, &info->fti_fid);
+ if (IS_ERR(fo)) {
+ rc = PTR_ERR(fo);
+ RETURN(rc);
+ }
+ LASSERT(fo != NULL);
+ dob = ofd_object_child(fo);
+
+ /* apply each advice in order; stop at the first failure */
+ for (i = 0; i < num_advise; i++, ladvise++) {
+ if (ladvise->lla_end <= ladvise->lla_start) {
+ rc = err_serious(-EPROTO);
+ break;
+ }
+
+ /* Handle different advice types */
+ switch (ladvise->lla_advice) {
+ default:
+ rc = -ENOTSUPP;
+ break;
+ case LU_LADVISE_WILLREAD:
+ /* take a PR extent lock over the range, then read
+ * it in to warm the cache */
+ ioo.ioo_oid = body->oa.o_oi;
+ ioo.ioo_bufcnt = 1;
+ rc = tgt_extent_lock(exp->exp_obd->obd_namespace,
+ &tsi->tsi_resid,
+ ladvise->lla_start,
+ ladvise->lla_end - 1,
+ &lockh, LCK_PR, &flags);
+ if (rc != 0)
+ break;
+
+ req->rq_status = ofd_ladvise_prefetch(env,
+ fo,
+ ladvise->lla_start,
+ ladvise->lla_end);
+ tgt_extent_unlock(&lockh, LCK_PR);
+ break;
+ case LU_LADVISE_DONTNEED:
+ rc = dt_ladvise(env, dob, ladvise->lla_start,
+ ladvise->lla_end, LU_LADVISE_DONTNEED);
+ break;
+ }
+ if (rc != 0)
+ break;
+ }
+
+ ofd_object_put(env, fo);
+ /*
+ * NOTE(review): this overwrites the rq_status set from
+ * ofd_ladvise_prefetch() above with rc (0 on success there) —
+ * confirm prefetch errors are intentionally not propagated.
+ */
+ req->rq_status = rc;
+ RETURN(rc);
+}
+
/**
* OFD request handler for OST_QUOTACTL RPC.
*
*/
static int ofd_quotactl(struct tgt_session_info *tsi)
{
- struct obd_quotactl *oqctl, *repoqc;
- struct lu_nodemap *nodemap =
- tsi->tsi_exp->exp_target_data.ted_nodemap;
- int id;
- int rc;
+ struct obd_quotactl *oqctl, *repoqc;
+ struct lu_nodemap *nodemap;
+ int id;
+ int rc;
ENTRY;
*repoqc = *oqctl;
+ nodemap = nodemap_get_from_exp(tsi->tsi_exp);
+ if (IS_ERR(nodemap))
+ RETURN(PTR_ERR(nodemap));
+
id = repoqc->qc_id;
if (oqctl->qc_type == USRQUOTA)
id = nodemap_map_id(nodemap, NODEMAP_UID,
NODEMAP_CLIENT_TO_FS,
repoqc->qc_id);
+ nodemap_putref(nodemap);
+
if (repoqc->qc_id != id)
swap(repoqc->qc_id, id);
*
* \retval amount of time to extend the timeout with
*/
-static inline int prolong_timeout(struct ptlrpc_request *req,
- struct ldlm_lock *lock)
+static inline int prolong_timeout(struct ptlrpc_request *req)
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
if (AT_OFF)
return obd_timeout / 2;
- /* We are in the middle of the process - BL AST is sent, CANCEL
- is ahead. Take half of AT + IO process time. */
- return at_est2timeout(at_get(&svcpt->scp_at_estimate)) +
- (ldlm_bl_timeout(lock) >> 1);
-}
-
-/**
- * Prolong single lock timeout.
- *
- * This is supplemental function to the ofd_prolong_locks(). It prolongs
- * a single lock.
- *
- * \param[in] tsi target session environment for this request
- * \param[in] lock LDLM lock to prolong
- * \param[in] extent related extent
- * \param[in] timeout timeout value to add
- *
- * \retval 0 if lock is not suitable for prolongation
- * \retval 1 if lock was prolonged successfully
- */
-static int ofd_prolong_one_lock(struct tgt_session_info *tsi,
- struct ldlm_lock *lock,
- struct ldlm_extent *extent)
-{
- int timeout = prolong_timeout(tgt_ses_req(tsi), lock);
-
- if (lock->l_flags & LDLM_FL_DESTROYED) /* lock already cancelled */
- return 0;
-
- /* XXX: never try to grab resource lock here because we're inside
- * exp_bl_list_lock; in ldlm_lockd.c to handle waiting list we take
- * res lock and then exp_bl_list_lock. */
-
- if (!(lock->l_flags & LDLM_FL_AST_SENT))
- /* ignore locks not being cancelled */
- return 0;
-
- LDLM_DEBUG(lock, "refreshed for req x"LPU64" ext("LPU64"->"LPU64") "
- "to %ds.\n", tgt_ses_req(tsi)->rq_xid, extent->start,
- extent->end, timeout);
-
- /* OK. this is a possible lock the user holds doing I/O
- * let's refresh eviction timer for it */
- ldlm_refresh_waiting_lock(lock, timeout);
- return 1;
+ return at_est2timeout(at_get(&svcpt->scp_at_estimate));
}
/**
* request may cover multiple locks.
*
* \param[in] tsi target session environment for this request
- * \param[in] start start of extent
- * \param[in] end end of extent
+ * \param[in] data struct of data to prolong locks
*
- * \retval number of prolonged locks
*/
-static int ofd_prolong_extent_locks(struct tgt_session_info *tsi,
- __u64 start, __u64 end)
+static void ofd_prolong_extent_locks(struct tgt_session_info *tsi,
+ struct ldlm_prolong_args *data)
{
- struct obd_export *exp = tsi->tsi_exp;
struct obdo *oa = &tsi->tsi_ost_body->oa;
- struct ldlm_extent extent = {
- .start = start,
- .end = end
- };
struct ldlm_lock *lock;
- int lock_count = 0;
ENTRY;
+ data->lpa_timeout = prolong_timeout(tgt_ses_req(tsi));
+ data->lpa_export = tsi->tsi_exp;
+ data->lpa_resid = tsi->tsi_resid;
+
+ CDEBUG(D_RPCTRACE, "Prolong locks for req %p with x"LPU64
+ " ext("LPU64"->"LPU64")\n", tgt_ses_req(tsi),
+ tgt_ses_req(tsi)->rq_xid, data->lpa_extent.start,
+ data->lpa_extent.end);
+
if (oa->o_valid & OBD_MD_FLHANDLE) {
/* mostly a request should be covered by only one lock, try
* fast path. */
if (lock != NULL) {
/* Fast path to check if the lock covers the whole IO
* region exclusively. */
- if (lock->l_granted_mode == LCK_PW &&
- ldlm_extent_contain(&lock->l_policy_data.l_extent,
- &extent)) {
+ if (ldlm_extent_contain(&lock->l_policy_data.l_extent,
+ &data->lpa_extent)) {
/* bingo */
- LASSERT(lock->l_export == exp);
- lock_count = ofd_prolong_one_lock(tsi, lock,
- &extent);
+ LASSERT(lock->l_export == data->lpa_export);
+ ldlm_lock_prolong_one(lock, data);
LDLM_LOCK_PUT(lock);
- RETURN(lock_count);
+ RETURN_EXIT;
}
lock->l_last_used = cfs_time_current();
LDLM_LOCK_PUT(lock);
}
}
- spin_lock_bh(&exp->exp_bl_list_lock);
- list_for_each_entry(lock, &exp->exp_bl_list, l_exp_list) {
- LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
- LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
-
- /* ignore waiting locks, no more granted locks in the list */
- if (lock->l_granted_mode != lock->l_req_mode)
- break;
-
- if (!ldlm_res_eq(&tsi->tsi_resid, &lock->l_resource->lr_name))
- continue;
-
- if (!ldlm_extent_overlap(&lock->l_policy_data.l_extent,
- &extent))
- continue;
-
- lock_count += ofd_prolong_one_lock(tsi, lock, &extent);
- }
- spin_unlock_bh(&exp->exp_bl_list_lock);
-
- RETURN(lock_count);
+ ldlm_resource_prolong(data);
+ EXIT;
}
/**
if (!ostid_res_name_eq(&ioo->ioo_oid, &lock->l_resource->lr_name))
RETURN(0);
- /* a bulk write can only hold a reference on a PW extent lock */
- mode = LCK_PW;
+ /* a bulk write can only hold a reference on a PW extent lock
+ * or GROUP lock.
+ */
+ mode = LCK_PW | LCK_GROUP;
if (opc == OST_READ)
/* whereas a bulk read can be protected by either a PR or PW
* extent lock */
* Implementation of ptlrpc_hpreq_ops::hpreq_lock_check for OFD RW requests.
*
* Check for whether the given PTLRPC request (\a req) is blocking
- * an LDLM lock cancel.
+ * an LDLM lock cancel. Also checks whether the request is covered by an LDLM
+ * lock.
*
* \param[in] req the incoming request
*
* \retval 1 if \a req is blocking an LDLM lock cancel
* \retval 0 if it is not
+ * \retval -ESTALE if lock is not found
*/
static int ofd_rw_hpreq_check(struct ptlrpc_request *req)
{
struct tgt_session_info *tsi;
struct obd_ioobj *ioo;
struct niobuf_remote *rnb;
- __u64 start, end;
- int lock_count;
+ int opc;
+ struct ldlm_prolong_args pa = { 0 };
ENTRY;
* Use LASSERT below because malformed RPCs should have
* been filtered out in tgt_hpreq_handler().
*/
+ opc = lustre_msg_get_opc(req->rq_reqmsg);
+ LASSERT(opc == OST_READ || opc == OST_WRITE);
+
ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
LASSERT(ioo != NULL);
LASSERT(rnb != NULL);
LASSERT(!(rnb->rnb_flags & OBD_BRW_SRVLOCK));
- start = rnb->rnb_offset;
+ pa.lpa_mode = LCK_PW | LCK_GROUP;
+ if (opc == OST_READ)
+ pa.lpa_mode |= LCK_PR;
+
+ pa.lpa_extent.start = rnb->rnb_offset;
rnb += ioo->ioo_bufcnt - 1;
- end = rnb->rnb_offset + rnb->rnb_len - 1;
+ pa.lpa_extent.end = rnb->rnb_offset + rnb->rnb_len - 1;
DEBUG_REQ(D_RPCTRACE, req, "%s %s: refresh rw locks: "DFID
- " ("LPU64"->"LPU64")\n",
- tgt_name(tsi->tsi_tgt), current->comm,
- PFID(&tsi->tsi_fid), start, end);
+ " ("LPU64"->"LPU64")\n", tgt_name(tsi->tsi_tgt),
+ current->comm, PFID(&tsi->tsi_fid), pa.lpa_extent.start,
+ pa.lpa_extent.end);
- lock_count = ofd_prolong_extent_locks(tsi, start, end);
+ ofd_prolong_extent_locks(tsi, &pa);
CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n",
- tgt_name(tsi->tsi_tgt), lock_count, req);
+ tgt_name(tsi->tsi_tgt), pa.lpa_blocks_cnt, req);
+
+ if (pa.lpa_blocks_cnt > 0)
+ RETURN(1);
- RETURN(lock_count > 0);
+ RETURN(pa.lpa_locks_cnt > 0 ? 0 : -ESTALE);
}
/**
struct ldlm_lock *lock)
{
struct tgt_session_info *tsi;
+ struct obdo *oa;
+ struct ldlm_extent ext;
+
+ ENTRY;
/* Don't use tgt_ses_info() to get session info, because lock_match()
* can be called while request has no processing thread yet. */
LASSERT(tsi->tsi_ost_body != NULL);
if (tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLHANDLE &&
tsi->tsi_ost_body->oa.o_handle.cookie == lock->l_handle.h_cookie)
- return 1;
+ RETURN(1);
- return 0;
+ oa = &tsi->tsi_ost_body->oa;
+ ext.start = oa->o_size;
+ ext.end = oa->o_blocks;
+
+ LASSERT(lock->l_resource != NULL);
+ if (!ostid_res_name_eq(&oa->o_oi, &lock->l_resource->lr_name))
+ RETURN(0);
+
+ if (!(lock->l_granted_mode & (LCK_PW | LCK_GROUP)))
+ RETURN(0);
+
+ RETURN(ldlm_extent_overlap(&lock->l_policy_data.l_extent, &ext));
}
/**
* Implementation of ptlrpc_hpreq_ops::hpreq_lock_check for OST_PUNCH request.
*
* High-priority queue request check for whether the given punch request
- * (\a req) is blocking an LDLM lock cancel.
+ * (\a req) is blocking an LDLM lock cancel. Also checks whether the request is
+ * covered by an LDLM lock.
+ *
*
* \param[in] req the incoming request
*
* \retval 1 if \a req is blocking an LDLM lock cancel
* \retval 0 if it is not
+ * \retval -ESTALE if lock is not found
*/
static int ofd_punch_hpreq_check(struct ptlrpc_request *req)
{
struct tgt_session_info *tsi;
struct obdo *oa;
- int lock_count;
+ struct ldlm_prolong_args pa = { 0 };
ENTRY;
LASSERT(!(oa->o_valid & OBD_MD_FLFLAGS &&
oa->o_flags & OBD_FL_SRVLOCK));
+ pa.lpa_mode = LCK_PW | LCK_GROUP;
+ pa.lpa_extent.start = oa->o_size;
+ pa.lpa_extent.end = oa->o_blocks;
+
CDEBUG(D_DLMTRACE,
"%s: refresh locks: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
tgt_name(tsi->tsi_tgt), tsi->tsi_resid.name[0],
- tsi->tsi_resid.name[1], oa->o_size, oa->o_blocks);
+ tsi->tsi_resid.name[1], pa.lpa_extent.start, pa.lpa_extent.end);
- lock_count = ofd_prolong_extent_locks(tsi, oa->o_size, oa->o_blocks);
+ ofd_prolong_extent_locks(tsi, &pa);
CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n",
- tgt_name(tsi->tsi_tgt), lock_count, req);
+ tgt_name(tsi->tsi_tgt), pa.lpa_blocks_cnt, req);
- RETURN(lock_count > 0);
+ if (pa.lpa_blocks_cnt > 0)
+ RETURN(1);
+
+ RETURN(pa.lpa_locks_cnt > 0 ? 0 : -ESTALE);
}
/**
LASSERT(rnb != NULL); /* must exist after request preprocessing */
/* no high priority if server lock is needed */
- if (rnb->rnb_flags & OBD_BRW_SRVLOCK)
+ if (rnb->rnb_flags & OBD_BRW_SRVLOCK ||
+ (lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg)
+ & MSG_REPLAY))
return;
}
tgt_ses_req(tsi)->rq_ops = &ofd_hpreq_rw;
{
LASSERT(tsi->tsi_ost_body != NULL); /* must exists if we are here */
/* no high-priority if server lock is needed */
- if (tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
- tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK)
+ if ((tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
+ tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK) ||
+ tgt_conn_flags(tsi) & OBD_CONNECT_MDS ||
+ lustre_msg_get_flags(tgt_ses_req(tsi)->rq_reqmsg) & MSG_REPLAY)
return;
tgt_ses_req(tsi)->rq_ops = &ofd_hpreq_punch;
}
ofd_hp_punch),
TGT_OST_HDL(HABEO_CORPUS| HABEO_REFERO, OST_SYNC, ofd_sync_hdl),
TGT_OST_HDL(0 | HABEO_REFERO, OST_QUOTACTL, ofd_quotactl),
+TGT_OST_HDL(HABEO_CORPUS | HABEO_REFERO, OST_LADVISE, ofd_ladvise_hdl),
};
static struct tgt_opc_slice ofd_common_slice[] = {
static int ofd_init0(const struct lu_env *env, struct ofd_device *m,
struct lu_device_type *ldt, struct lustre_cfg *cfg)
{
- const char *dev = lustre_cfg_string(cfg, 0);
- struct ofd_thread_info *info = NULL;
- struct obd_device *obd;
- struct obd_statfs *osfs;
- int rc;
+ const char *dev = lustre_cfg_string(cfg, 0);
+ struct ofd_thread_info *info = NULL;
+ struct obd_device *obd;
+ struct obd_statfs *osfs;
+ struct lu_fid fid;
+ struct nm_config_file *nodemap_config;
+ int rc;
ENTRY;
}
m->ofd_blockbits = fls(osfs->os_bsize) - 1;
+ if (ONE_MB_BRW_SIZE < (1U << m->ofd_blockbits))
+ m->ofd_brw_size = 1U << m->ofd_blockbits;
+ else
+ m->ofd_brw_size = ONE_MB_BRW_SIZE;
+
m->ofd_precreate_batch = OFD_PRECREATE_BATCH_DEFAULT;
if (osfs->os_bsize * osfs->os_blocks < OFD_PRECREATE_SMALL_FS)
m->ofd_precreate_batch = OFD_PRECREATE_BATCH_SMALL;
dt_conf_get(env, m->ofd_osd, &m->ofd_dt_conf);
- /* Allow at most ddp_grant_reserved% of the available filesystem space
- * to be granted to clients, so that any errors in the grant overhead
- * calculations do not allow granting more space to clients than can be
- * written. Assumes that in aggregate the grant overhead calculations do
- * not have more than ddp_grant_reserved% estimation error in them. */
- m->ofd_grant_ratio =
- ofd_grant_ratio_conv(m->ofd_dt_conf.ddp_grant_reserved);
-
rc = tgt_init(env, &m->ofd_lut, obd, m->ofd_osd, ofd_common_slice,
OBD_FAIL_OST_ALL_REQUEST_NET,
OBD_FAIL_OST_ALL_REPLY_NET);
if (rc)
GOTO(err_fini_lut, rc);
- rc = ofd_start_inconsistency_verification_thread(m);
+ fid.f_seq = FID_SEQ_LOCAL_NAME;
+ fid.f_oid = 1;
+ fid.f_ver = 0;
+ rc = local_oid_storage_init(env, m->ofd_osd, &fid,
+ &m->ofd_los);
if (rc != 0)
GOTO(err_fini_fs, rc);
+ nodemap_config = nm_config_file_register_tgt(env, m->ofd_osd,
+ m->ofd_los);
+ if (IS_ERR(nodemap_config))
+ GOTO(err_fini_los, rc = PTR_ERR(nodemap_config));
+
+ obd->u.obt.obt_nodemap_config_file = nodemap_config;
+
+ rc = ofd_start_inconsistency_verification_thread(m);
+ if (rc != 0)
+ GOTO(err_fini_nm, rc);
+
tgt_adapt_sptlrpc_conf(&m->ofd_lut, 1);
RETURN(0);
+err_fini_nm:
+ nm_config_file_deregister_tgt(env, obd->u.obt.obt_nodemap_config_file);
+ obd->u.obt.obt_nodemap_config_file = NULL;
+err_fini_los:
+ local_oid_storage_fini(env, m->ofd_los);
+ m->ofd_los = NULL;
err_fini_fs:
ofd_fs_cleanup(env, m);
err_fini_lut:
ofd_stop_inconsistency_verification_thread(m);
lfsck_degister(env, m->ofd_osd);
ofd_fs_cleanup(env, m);
+ nm_config_file_deregister_tgt(env, obd->u.obt.obt_nodemap_config_file);
+ obd->u.obt.obt_nodemap_config_file = NULL;
+
+ if (m->ofd_los != NULL) {
+ local_oid_storage_fini(env, m->ofd_los);
+ m->ofd_los = NULL;
+ }
if (m->ofd_namespace != NULL) {
ldlm_namespace_free_post(m->ofd_namespace);