+/**
+ * Calculate the amount of time for lock prolongation.
+ *
+ * This is a helper for the ofd_prolong_extent_locks() function to get
+ * the timeout extra time.
+ *
+ * \param[in] req	current request
+ * \param[in] lock	LDLM lock being prolonged; used to scale the
+ *			blocking-callback timeout
+ *
+ * \retval amount of time to extend the timeout with
+ */
+static inline int prolong_timeout(struct ptlrpc_request *req,
+ struct ldlm_lock *lock)
+{
+ struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
+
+ /* adaptive timeouts disabled: fall back to a fixed share of the
+ * static obd_timeout */
+ if (AT_OFF)
+ return obd_timeout / 2;
+
+ /* We are in the middle of the process - BL AST is sent, CANCEL
+ * is ahead. Take half of AT + IO process time. */
+ return at_est2timeout(at_get(&svcpt->scp_at_estimate)) +
+ (ldlm_bl_timeout(lock) >> 1);
+}
+
+/**
+ * Prolong single lock timeout.
+ *
+ * This is supplemental function to ofd_prolong_extent_locks(). It prolongs
+ * a single lock.
+ *
+ * \param[in] tsi	target session environment for this request
+ * \param[in] lock	LDLM lock to prolong
+ * \param[in] extent	related extent
+ *
+ * \retval		0 if lock is not suitable for prolongation
+ * \retval		1 if lock was prolonged successfully
+ */
+static int ofd_prolong_one_lock(struct tgt_session_info *tsi,
+				struct ldlm_lock *lock,
+				struct ldlm_extent *extent)
+{
+	int timeout;
+
+	if (lock->l_flags & LDLM_FL_DESTROYED) /* lock already cancelled */
+		return 0;
+
+	/* XXX: never try to grab resource lock here because we're inside
+	 * exp_bl_list_lock; in ldlm_lockd.c to handle waiting list we take
+	 * res lock and then exp_bl_list_lock. */
+
+	if (!(lock->l_flags & LDLM_FL_AST_SENT))
+		/* ignore locks not being cancelled */
+		return 0;
+
+	/* only compute the (AT-based) prolongation time once the lock is
+	 * known to qualify for a refresh */
+	timeout = prolong_timeout(tgt_ses_req(tsi), lock);
+
+	LDLM_DEBUG(lock, "refreshed for req x"LPU64" ext("LPU64"->"LPU64") "
+		   "to %ds.\n", tgt_ses_req(tsi)->rq_xid, extent->start,
+		   extent->end, timeout);
+
+	/* OK. this is a possible lock the user holds doing I/O
+	 * let's refresh eviction timer for it */
+	ldlm_refresh_waiting_lock(lock, timeout);
+	return 1;
+}
+
+/**
+ * Prolong lock timeout for the given extent.
+ *
+ * This function finds all locks related with incoming request and
+ * prolongs their timeout.
+ *
+ * If a client is holding a lock for a long time while it sends
+ * read or write RPCs to the OST for the object under this lock,
+ * then we don't want the OST to evict the client. Otherwise,
+ * if the network or disk is very busy then the client may not
+ * be able to make any progress to clear out dirty pages under
+ * the lock and the application will fail.
+ *
+ * Every time a Bulk Read/Write (BRW) request arrives for the object
+ * covered by the lock, extend the timeout on that lock. The RPC should
+ * contain a lock handle for the lock it is using, but this
+ * isn't handled correctly by all client versions, and the
+ * request may cover multiple locks.
+ *
+ * \param[in] tsi	target session environment for this request
+ * \param[in] start	start of extent
+ * \param[in] end	end of extent
+ *
+ * \retval		number of prolonged locks
+ */
+static int ofd_prolong_extent_locks(struct tgt_session_info *tsi,
+ __u64 start, __u64 end)
+{
+ struct obd_export *exp = tsi->tsi_exp;
+ struct obdo *oa = &tsi->tsi_ost_body->oa;
+ struct ldlm_extent extent = {
+ .start = start,
+ .end = end
+ };
+ struct ldlm_lock *lock;
+ int lock_count = 0;
+
+ ENTRY;
+
+ if (oa->o_valid & OBD_MD_FLHANDLE) {
+ /* mostly a request should be covered by only one lock, try
+ * fast path. */
+ lock = ldlm_handle2lock(&oa->o_handle);
+ if (lock != NULL) {
+ /* Fast path to check if the lock covers the whole IO
+ * region exclusively. */
+ if (lock->l_granted_mode == LCK_PW &&
+ ldlm_extent_contain(&lock->l_policy_data.l_extent,
+ &extent)) {
+ /* bingo */
+ LASSERT(lock->l_export == exp);
+ lock_count = ofd_prolong_one_lock(tsi, lock,
+ &extent);
+ LDLM_LOCK_PUT(lock);
+ RETURN(lock_count);
+ }
+ /* the handle's lock does not exclusively cover the
+ * whole region; fall through to the list scan below */
+ LDLM_LOCK_PUT(lock);
+ }
+ }
+
+ /* Slow path: walk every lock of this export that has a blocking
+ * AST sent. Only exp_bl_list_lock is held here; the resource lock
+ * must NOT be taken under it (see the lock-ordering note in
+ * ofd_prolong_one_lock()). */
+ spin_lock_bh(&exp->exp_bl_list_lock);
+ list_for_each_entry(lock, &exp->exp_bl_list, l_exp_list) {
+ LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
+ LASSERT(lock->l_resource->lr_type == LDLM_EXTENT);
+
+ /* skip locks on other resources */
+ if (!ldlm_res_eq(&tsi->tsi_resid, &lock->l_resource->lr_name))
+ continue;
+
+ /* skip locks that do not intersect the request's extent */
+ if (!ldlm_extent_overlap(&lock->l_policy_data.l_extent,
+ &extent))
+ continue;
+
+ lock_count += ofd_prolong_one_lock(tsi, lock, &extent);
+ }
+ spin_unlock_bh(&exp->exp_bl_list_lock);
+
+ RETURN(lock_count);
+}
+
+/**
+ * Implementation of ptlrpc_hpreq_ops::hpreq_lock_match for OFD RW requests.
+ *
+ * Determine if \a lock and the lock from request \a req are equivalent
+ * by comparing their resource names, modes, and extents.
+ *
+ * It is used to give priority to read and write RPCs being done
+ * under this lock so that the client can drop the contended
+ * lock more quickly and let other clients use it. This improves
+ * overall performance in the case where the first client gets a
+ * very large lock extent that prevents other clients from
+ * submitting their writes.
+ *
+ * \param[in] req	ptlrpc_request being processed
+ * \param[in] lock	contended lock to match
+ *
+ * \retval		1 if lock is matched
+ * \retval		0 otherwise
+ */
+static int ofd_rw_hpreq_lock_match(struct ptlrpc_request *req,
+				   struct ldlm_lock *lock)
+{
+	__u32 opc = lustre_msg_get_opc(req->rq_reqmsg);
+	struct niobuf_remote *first;
+	struct niobuf_remote *last;
+	struct obd_ioobj *ioo;
+	struct ldlm_extent extent;
+	ldlm_mode_t mode;
+
+	ENTRY;
+
+	ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
+	LASSERT(ioo != NULL);
+
+	first = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
+	LASSERT(first != NULL);
+	last = first + ioo->ioo_bufcnt - 1;
+
+	/* the request's extent spans from the first niobuf's offset to the
+	 * last byte of the last niobuf */
+	extent.start = first->rnb_offset;
+	extent.end = last->rnb_offset + last->rnb_len - 1;
+
+	LASSERT(lock->l_resource != NULL);
+	if (!ostid_res_name_eq(&ioo->ioo_oid, &lock->l_resource->lr_name))
+		RETURN(0);
+
+	/* a bulk write can only hold a reference on a PW extent lock,
+	 * whereas a bulk read can be protected by either a PR or PW
+	 * extent lock */
+	mode = (opc == OST_READ) ? (LCK_PW | LCK_PR) : LCK_PW;
+	if ((lock->l_granted_mode & mode) == 0)
+		RETURN(0);
+
+	RETURN(ldlm_extent_overlap(&lock->l_policy_data.l_extent, &extent));
+}
+
+/**
+ * Implementation of ptlrpc_hpreq_ops::hpreq_check for OFD RW requests.
+ *
+ * Check for whether the given PTLRPC request (\a req) is blocking
+ * an LDLM lock cancel.
+ *
+ * \param[in] req	the incoming request
+ *
+ * \retval		1 if \a req is blocking an LDLM lock cancel
+ * \retval		0 if it is not
+ */
+static int ofd_rw_hpreq_check(struct ptlrpc_request *req)
+{
+	struct tgt_session_info *tsi;
+	struct obd_ioobj *ioo;
+	struct niobuf_remote *rnb;
+	__u64 start, end;
+	int lock_count;
+
+	ENTRY;
+
+	/* Don't use tgt_ses_info() to get session info, because lock_match()
+	 * can be called while request has no processing thread yet. */
+	tsi = lu_context_key_get(&req->rq_session, &tgt_session_key);
+	LASSERT(tsi != NULL);
+
+	/*
+	 * Use LASSERT below because malformed RPCs should have
+	 * been filtered out in tgt_hpreq_handler().
+	 */
+	ioo = req_capsule_client_get(&req->rq_pill, &RMF_OBD_IOOBJ);
+	LASSERT(ioo != NULL);
+
+	rnb = req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE);
+	LASSERT(rnb != NULL);
+	LASSERT(!(rnb->rnb_flags & OBD_BRW_SRVLOCK));
+
+	/* request extent: first niobuf's offset to last byte of the last */
+	start = rnb->rnb_offset;
+	rnb += ioo->ioo_bufcnt - 1;
+	end = rnb->rnb_offset + rnb->rnb_len - 1;
+
+	DEBUG_REQ(D_RPCTRACE, req, "%s %s: refresh rw locks: "DFID
+		  " ("LPU64"->"LPU64")\n",
+		  tgt_name(tsi->tsi_tgt), current->comm,
+		  PFID(&tsi->tsi_fid), start, end);
+
+	lock_count = ofd_prolong_extent_locks(tsi, start, end);
+
+	CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n",
+	       tgt_name(tsi->tsi_tgt), lock_count, req);
+
+	RETURN(lock_count > 0);
+}
+
+/**
+ * Implementation of ptlrpc_hpreq_ops::hpreq_fini for OFD RW requests.
+ *
+ * Called after the request has been handled. It refreshes lock timeout again
+ * so that client has more time to send lock cancel RPC.
+ *
+ * \param[in] req	request which is being processed.
+ */
+static void ofd_rw_hpreq_fini(struct ptlrpc_request *req)
+{
+ /* return value (whether any lock was refreshed) is intentionally
+ * ignored here */
+ ofd_rw_hpreq_check(req);
+}
+
+/**
+ * Implementation of ptlrpc_hpreq_ops::hpreq_lock_match for OST_PUNCH request.
+ *
+ * This function checks if the given lock is the same by its resname, mode
+ * and extent as one taken from the request.
+ * It is used to give priority to punch/truncate RPCs that might lead to
+ * the fastest release of that lock when a lock is contended.
+ *
+ * \param[in] req	ptlrpc_request being processed
+ * \param[in] lock	contended lock to match
+ *
+ * \retval		1 if lock is matched
+ * \retval		0 otherwise
+ */
+static int ofd_punch_hpreq_lock_match(struct ptlrpc_request *req,
+				      struct ldlm_lock *lock)
+{
+	struct tgt_session_info *tsi;
+	struct obdo *oa;
+
+	/* Don't use tgt_ses_info() to get session info, because lock_match()
+	 * can be called while request has no processing thread yet. */
+	tsi = lu_context_key_get(&req->rq_session, &tgt_session_key);
+
+	/*
+	 * Use LASSERT below because malformed RPCs should have
+	 * been filtered out in tgt_hpreq_handler().
+	 */
+	LASSERT(tsi->tsi_ost_body != NULL);
+	oa = &tsi->tsi_ost_body->oa;
+
+	/* no lock handle in the request means no possible match */
+	if (!(oa->o_valid & OBD_MD_FLHANDLE))
+		return 0;
+
+	return (oa->o_handle.cookie == lock->l_handle.h_cookie) ? 1 : 0;
+}
+
+/**
+ * Implementation of ptlrpc_hpreq_ops::hpreq_check for OST_PUNCH request.
+ *
+ * High-priority queue request check for whether the given punch request
+ * (\a req) is blocking an LDLM lock cancel.
+ *
+ * \param[in] req	the incoming request
+ *
+ * \retval		1 if \a req is blocking an LDLM lock cancel
+ * \retval		0 if it is not
+ */
+static int ofd_punch_hpreq_check(struct ptlrpc_request *req)
+{
+ struct tgt_session_info *tsi;
+ struct obdo *oa;
+ int lock_count;
+
+ ENTRY;
+
+ /* Don't use tgt_ses_info() to get session info, because lock_match()
+ * can be called while request has no processing thread yet. */
+ tsi = lu_context_key_get(&req->rq_session, &tgt_session_key);
+ LASSERT(tsi != NULL);
+ oa = &tsi->tsi_ost_body->oa;
+
+ /* server-lock punches must not reach the high-priority path */
+ LASSERT(!(oa->o_valid & OBD_MD_FLFLAGS &&
+ oa->o_flags & OBD_FL_SRVLOCK));
+
+ CDEBUG(D_DLMTRACE,
+ "%s: refresh locks: "LPU64"/"LPU64" ("LPU64"->"LPU64")\n",
+ tgt_name(tsi->tsi_tgt), tsi->tsi_resid.name[0],
+ tsi->tsi_resid.name[1], oa->o_size, oa->o_blocks);
+
+ /* NOTE(review): o_size/o_blocks appear to carry the punch extent's
+ * start/end here rather than size/blocks — confirm against caller */
+ lock_count = ofd_prolong_extent_locks(tsi, oa->o_size, oa->o_blocks);
+
+ CDEBUG(D_DLMTRACE, "%s: refreshed %u locks timeout for req %p.\n",
+ tgt_name(tsi->tsi_tgt), lock_count, req);
+
+ RETURN(lock_count > 0);
+}
+
+/**
+ * Implementation of ptlrpc_hpreq_ops::hpreq_fini for OST_PUNCH request.
+ *
+ * Called after the request has been handled. It refreshes lock timeout again
+ * so that client has more time to send lock cancel RPC.
+ *
+ * \param[in] req	request which is being processed.
+ */
+static void ofd_punch_hpreq_fini(struct ptlrpc_request *req)
+{
+ /* return value (whether any lock was refreshed) is intentionally
+ * ignored here */
+ ofd_punch_hpreq_check(req);
+}
+
+/* High-priority request operations for OST bulk read/write requests */
+static struct ptlrpc_hpreq_ops ofd_hpreq_rw = {
+ .hpreq_lock_match = ofd_rw_hpreq_lock_match,
+ .hpreq_check = ofd_rw_hpreq_check,
+ .hpreq_fini = ofd_rw_hpreq_fini
+};
+
+/* High-priority request operations for OST_PUNCH requests */
+static struct ptlrpc_hpreq_ops ofd_hpreq_punch = {
+ .hpreq_lock_match = ofd_punch_hpreq_lock_match,
+ .hpreq_check = ofd_punch_hpreq_check,
+ .hpreq_fini = ofd_punch_hpreq_fini
+};
+
+/**
+ * Assign high priority operations to an IO request.
+ *
+ * Check if the incoming request is a candidate for
+ * high-priority processing. If it is, assign it a high
+ * priority operations table.
+ *
+ * \param[in] tsi	target session environment for this request
+ */
+static void ofd_hp_brw(struct tgt_session_info *tsi)
+{
+	struct obd_ioobj *ioo;
+	struct niobuf_remote *rnb;
+
+	ENTRY;
+
+	/* must exist after request preprocessing */
+	ioo = req_capsule_client_get(tsi->tsi_pill, &RMF_OBD_IOOBJ);
+	LASSERT(ioo != NULL);
+
+	if (ioo->ioo_bufcnt > 0) {
+		/* must exist after request preprocessing */
+		rnb = req_capsule_client_get(tsi->tsi_pill,
+					     &RMF_NIOBUF_REMOTE);
+		LASSERT(rnb != NULL);
+
+		/* no high priority if server lock is needed */
+		if (rnb->rnb_flags & OBD_BRW_SRVLOCK)
+			return;
+	}
+
+	tgt_ses_req(tsi)->rq_ops = &ofd_hpreq_rw;
+}
+
+/**
+ * Assign high priority operations to a punch request.
+ *
+ * Check if the incoming request is a candidate for
+ * high-priority processing. If it is, assign it a high
+ * priority operations table.
+ *
+ * \param[in] tsi	target session environment for this request
+ */
+static void ofd_hp_punch(struct tgt_session_info *tsi)
+{
+ LASSERT(tsi->tsi_ost_body != NULL); /* must exist if we are here */
+ /* no high-priority if server lock is needed */
+ if (tsi->tsi_ost_body->oa.o_valid & OBD_MD_FLFLAGS &&
+ tsi->tsi_ost_body->oa.o_flags & OBD_FL_SRVLOCK)
+ return;
+ tgt_ses_req(tsi)->rq_ops = &ofd_hpreq_punch;
+}
+