+/* lock object for open */
+/*
+ * Take the DLM locks required to open \a obj and decide which inodebits
+ * (if any) are returned to the client.
+ *
+ * Lease opens take the open semaphore in write mode and an LCK_EX open
+ * lock; normal opens take it in read mode and a CR/PR/CW lock depending
+ * on the open flags. A layout lock may additionally be piggy-backed on
+ * the open reply, or taken LCK_EX locally when a layout will be created.
+ *
+ * \param[in]  info   thread info; open flags come from mti_spec.sp_cr_flags
+ * \param[in]  obj    object being opened
+ * \param[out] lhc    lock handle that may be returned to the client
+ * \param[out] ibits  inodebit lock bits actually requested (0 if none)
+ *
+ * \retval 0       on success (also for replayed requests, which skip locking)
+ * \retval -EBUSY  a lease already exists, or other open handles remain when
+ *                 acquiring a new lease
+ * \retval -EPROTO lease requested without MDS_OPEN_LOCK
+ */
+static int mdt_object_open_lock(struct mdt_thread_info *info,
+ struct mdt_object *obj,
+ struct mdt_lock_handle *lhc,
+ __u64 *ibits)
+{
+ struct md_attr *ma = &info->mti_attr;
+ __u64 open_flags = info->mti_spec.sp_cr_flags;
+ enum ldlm_mode lm = LCK_CR;
+ bool acq_lease = !!(open_flags & MDS_OPEN_LEASE);
+ bool try_layout = false;
+ bool create_layout = false;
+ int rc = 0;
+ ENTRY;
+
+ *ibits = 0;
+ mdt_lock_handle_init(lhc);
+
+ /* replayed opens re-establish state without new lock enqueues */
+ if (req_is_replay(mdt_info_req(info)))
+ RETURN(0);
+
+ /* For a regular file: either a layout must be created now (write open
+ * with no striping yet), or - if the client understands layout locks -
+ * we can try to hand the current layout back with the open reply. */
+ if (S_ISREG(lu_object_attr(&obj->mot_obj))) {
+ if (ma->ma_need & MA_LOV && !(ma->ma_valid & MA_LOV) &&
+ md_should_create(open_flags))
+ create_layout = true;
+ if (exp_connect_layout(info->mti_exp) && !create_layout &&
+ ma->ma_need & MA_LOV)
+ try_layout = true;
+ }
+
+ if (acq_lease) {
+ /* lease open, acquire write mode of open sem */
+ down_write(&obj->mot_open_sem);
+
+ /* Lease exists and ask for new lease */
+ if (atomic_read(&obj->mot_lease_count) > 0) {
+ /* only exclusive open is supported, so lease
+ * are conflicted to each other */
+ GOTO(out, rc = -EBUSY);
+ }
+
+ /* Lease must be with open lock */
+ if (!(open_flags & MDS_OPEN_LOCK)) {
+ CERROR("%s: Request lease for file:"DFID ", but open lock "
+ "is missed, open_flags = %#llo : rc = %d\n",
+ mdt_obd_name(info->mti_mdt),
+ PFID(mdt_object_fid(obj)), open_flags, -EPROTO);
+ GOTO(out, rc = -EPROTO);
+ }
+
+ /* XXX: only exclusive open is supported. */
+ lm = LCK_EX;
+ *ibits = MDS_INODELOCK_OPEN;
+
+ /* never grant LCK_EX layout lock to client */
+ try_layout = false;
+ } else { /* normal open */
+ /* normal open holds read mode of open sem */
+ down_read(&obj->mot_open_sem);
+
+ if (open_flags & MDS_OPEN_LOCK) {
+ if (open_flags & FMODE_WRITE)
+ lm = LCK_CW;
+ else if (open_flags & MDS_FMODE_EXEC)
+ lm = LCK_PR;
+ else
+ lm = LCK_CR;
+
+ *ibits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_OPEN;
+ } else if (atomic_read(&obj->mot_lease_count) > 0) {
+ if (open_flags & FMODE_WRITE)
+ lm = LCK_CW;
+ else
+ lm = LCK_CR;
+
+ /* revoke lease */
+ *ibits = MDS_INODELOCK_OPEN;
+ try_layout = false;
+
+ lhc = &info->mti_lh[MDT_LH_LOCAL];
+ }
+ /* print mot_lease_count to match the "lease count" label
+ * (was mot_open_count, which disagreed with the message) */
+ CDEBUG(D_INODE, "normal open:"DFID" lease count: %d, lm: %d\n",
+ PFID(mdt_object_fid(obj)),
+ atomic_read(&obj->mot_lease_count), lm);
+ }
+
+ mdt_lock_reg_init(lhc, lm);
+
+ /* one problem to return layout lock on open is that it may result
+ * in too many layout locks cached on the client side. */
+ if (!OBD_FAIL_CHECK(OBD_FAIL_MDS_NO_LL_OPEN) && try_layout) {
+ /* return lookup lock to validate inode at the client side,
+ * this is pretty important otherwise mdt will return layout
+ * lock for each open.
+ * However this is a double-edged sword because changing
+ * permission will revoke huge # of LOOKUP locks. */
+ *ibits |= MDS_INODELOCK_LAYOUT | MDS_INODELOCK_LOOKUP;
+ if (!mdt_object_lock_try(info, obj, lhc, *ibits)) {
+ *ibits &= ~(MDS_INODELOCK_LAYOUT|MDS_INODELOCK_LOOKUP);
+ if (*ibits != 0)
+ rc = mdt_object_lock(info, obj, lhc, *ibits);
+ }
+ } else if (*ibits != 0) {
+ rc = mdt_object_lock(info, obj, lhc, *ibits);
+ }
+
+ CDEBUG(D_INODE, "%s: Requested bits lock:"DFID ", ibits = %#llx"
+ ", open_flags = %#llo, try_layout = %d : rc = %d\n",
+ mdt_obd_name(info->mti_mdt), PFID(mdt_object_fid(obj)),
+ *ibits, open_flags, try_layout, rc);
+
+ /* will change layout, revoke layout locks by enqueuing EX lock. */
+ if (rc == 0 && create_layout) {
+ struct mdt_lock_handle *ll = &info->mti_lh[MDT_LH_LAYOUT];
+
+ CDEBUG(D_INODE, "Will create layout, get EX layout lock:"DFID
+ ", open_flags = %#llo\n",
+ PFID(mdt_object_fid(obj)), open_flags);
+
+ /* We cannot enqueue another lock for the same resource we
+ * already have a lock for, due to mechanics of waiting list
+ * iterating in ldlm, see LU-3601.
+ * As such we'll drop the open lock we just got above here,
+ * it's ok not to have this open lock as it's main purpose is to
+ * flush unused cached client open handles. */
+ if (lustre_handle_is_used(&lhc->mlh_reg_lh))
+ mdt_object_unlock(info, obj, lhc, 1);
+
+ LASSERT(!try_layout);
+ mdt_lock_handle_init(ll);
+ mdt_lock_reg_init(ll, LCK_EX);
+ rc = mdt_object_lock(info, obj, ll, MDS_INODELOCK_LAYOUT);
+
+ OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_LL_BLOCK, 2);
+ }
+
+ /* Check if there is any other open handles after acquiring
+ * open lock. At this point, caching open handles have been revoked
+ * by open lock.
+ * XXX: Now only exclusive open is supported. Need to check the
+ * type of open for generic lease support. */
+ if (rc == 0 && acq_lease) {
+ struct ptlrpc_request *req = mdt_info_req(info);
+ struct mdt_export_data *med = &req->rq_export->exp_mdt_data;
+ struct mdt_file_data *mfd;
+ bool is_replay_or_resent;
+ int open_count = 0;
+
+ /* For lease: application can open a file and then apply lease,
+ * @handle contains original open handle in that case.
+ * In recovery, open REQ will be replayed and the lease REQ may
+ * be resent that means the open handle is already stale, so we
+ * need to fix it up here by finding new handle. */
+ is_replay_or_resent = req_is_replay(req) ||
+ lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT;
+
+ /* if the request is _not_ a replay request, rr_handle
+ * may be used to hold an openhandle which is issuing the
+ * lease request, so that this openhandle doesn't count. */
+ mfd = mdt_handle2mfd(med, info->mti_rr.rr_handle,
+ is_replay_or_resent);
+ if (mfd != NULL)
+ ++open_count;
+
+ CDEBUG(D_INODE, "acq_lease "DFID": openers: %d, want: %d\n",
+ PFID(mdt_object_fid(obj)),
+ atomic_read(&obj->mot_open_count), open_count);
+
+ if (atomic_read(&obj->mot_open_count) > open_count)
+ GOTO(out, rc = -EBUSY);
+ }
+ GOTO(out, rc);
+
+out:
+ RETURN(rc);
+}
+
+/*
+ * Undo the locking performed by mdt_object_open_lock().
+ *
+ * Releases any local (MDT_LH_LOCAL) and local layout (MDT_LH_LAYOUT)
+ * locks, drops the open semaphore in the mode matching the open flags,
+ * and decides whether the open/lookup lock in \a lhc is returned to the
+ * client or released on the server.
+ *
+ * \param[in] info   thread info; open flags come from mti_spec.sp_cr_flags
+ * \param[in] obj    object that was opened
+ * \param[in] lhc    lock handle taken by mdt_object_open_lock()
+ * \param[in] ibits  inodebits that were requested at lock time
+ * \param[in] rc     result of the open so far; nonzero means the lock in
+ *                   \a lhc will not be returned to the client
+ */
+static void mdt_object_open_unlock(struct mdt_thread_info *info,
+ struct mdt_object *obj,
+ struct mdt_lock_handle *lhc,
+ __u64 ibits, int rc)
+{
+ __u64 open_flags = info->mti_spec.sp_cr_flags;
+ struct mdt_lock_handle *ll = &info->mti_lh[MDT_LH_LOCAL];
+ ENTRY;
+
+ /* replayed opens took no locks in mdt_object_open_lock(), so there
+ * is nothing to release */
+ if (req_is_replay(mdt_info_req(info)))
+ RETURN_EXIT;
+
+ /* Release local lock - the lock put in MDT_LH_LOCAL will never
+ * return to client side. */
+ if (lustre_handle_is_used(&ll->mlh_reg_lh))
+ mdt_object_unlock(info, obj, ll, 1);
+
+ ll = &info->mti_lh[MDT_LH_LAYOUT];
+ /* Release local layout lock, layout was created */
+ if (lustre_handle_is_used(&ll->mlh_reg_lh)) {
+ LASSERT(!(ibits & MDS_INODELOCK_LAYOUT));
+ mdt_object_unlock(info, obj, ll, 1);
+ }
+
+ /* lease opens held the open semaphore in write mode, normal opens in
+ * read mode - release in the matching mode */
+ if (open_flags & MDS_OPEN_LEASE)
+ up_write(&obj->mot_open_sem);
+ else
+ up_read(&obj->mot_open_sem);
+
+ /* Cross-ref case, the lock should be returned to the client */
+ if (ibits == 0 || rc == -MDT_EREMOTE_OPEN)
+ RETURN_EXIT;
+
+ if (!(open_flags & MDS_OPEN_LOCK) && !(ibits & MDS_INODELOCK_LAYOUT)) {
+ /* for the open request, the lock will only return to client
+ * if open or layout lock is granted. */
+ rc = 1;
+ }
+
+ /* Either the open failed, or the lock is not worth returning: clear
+ * DISP_OPEN_LOCK in the reply and drop the lock server-side. */
+ if (rc != 0 || !lustre_handle_is_used(&lhc->mlh_reg_lh)) {
+ struct ldlm_reply *ldlm_rep;
+
+ ldlm_rep = req_capsule_server_get(info->mti_pill, &RMF_DLM_REP);
+ mdt_clear_disposition(info, ldlm_rep, DISP_OPEN_LOCK);
+ if (lustre_handle_is_used(&lhc->mlh_reg_lh))
+ mdt_object_unlock(info, obj, lhc, 1);
+ }
+ RETURN_EXIT;
+}
+
+/**
+ * Check release is permitted for the current HSM flags.
+ *
+ * A file may only be released when its HSM state is known (MA_HSM
+ * valid), it holds an archived copy, and none of the flags that forbid
+ * release (dirty / no-release / lost) are set.
+ */
+static bool mdt_hsm_release_allow(const struct md_attr *ma)
+{
+ __u32 hsm_flags;
+
+ if (!(ma->ma_valid & MA_HSM))
+ return false;
+
+ hsm_flags = ma->ma_hsm.mh_flags;
+
+ /* must be archived, with no flag blocking the release */
+ return (hsm_flags & HS_ARCHIVED) &&
+ !(hsm_flags & (HS_DIRTY | HS_NORELEASE | HS_LOST));
+}
+
+static int mdt_open_by_fid_lock(struct mdt_thread_info *info,
+ struct ldlm_reply *rep,
+ struct mdt_lock_handle *lhc)