+	if (fpo->fpo_is_fmr) {
+		struct ib_pool_fmr *pfmr;
+
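+		/* FMR path: drop fps_lock and let the FMR pool map the
+		 * caller's physical page list for us.
+		 */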
+		spin_unlock(&fps->fps_lock);
+		pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
+					    pages, npages, iov);
+		if (likely(!IS_ERR(pfmr))) {
+			fmr->fmr_key = is_rx ? pfmr->fmr->rkey :
+					       pfmr->fmr->lkey;
+			fmr->fmr_frd = NULL;
+			fmr->fmr_pfmr = pfmr;
+			fmr->fmr_pool = fpo;
+			return 0;
+		}
+		rc = PTR_ERR(pfmr);
+	} else {
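+		/* FastReg path: try to take an idle descriptor off this
+		 * pool's free list.
+		 */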
+		if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
+			struct ib_send_wr *wr;
+			struct kib_fast_reg_descriptor *frd;
+			struct ib_fast_reg_page_list *frpl;
+			struct ib_mr *mr;
+
+			frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
+					       struct kib_fast_reg_descriptor,
+					       frd_list);
+			list_del(&frd->frd_list);
+			spin_unlock(&fps->fps_lock);
+
+			frpl = frd->frd_frpl;
+			mr = frd->frd_mr;
+
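+			/* A descriptor with frd_valid clear still carries a
+			 * stale mapping: build a LOCAL_INV WR to invalidate
+			 * the old key, then bump the MR key for the new
+			 * registration.
+			 */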
+			if (!frd->frd_valid) {
+				struct ib_send_wr *inv_wr;
+				__u32 key = is_rx ? mr->rkey : mr->lkey;
+
+				inv_wr = &frd->frd_inv_wr;
+				memset(inv_wr, 0, sizeof(*inv_wr));
+				inv_wr->opcode = IB_WR_LOCAL_INV;
+				inv_wr->wr_id = IBLND_WID_MR;
+				inv_wr->ex.invalidate_rkey = key;
+
+				/* Bump the key */
+				key = ib_inc_rkey(key);
+				ib_update_fast_reg_key(mr, key);
+			}
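+			/* The descriptor's page list must be large enough
+			 * for this mapping; copy the caller's pages into it.
+			 */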
+
+			LASSERT(npages <= frpl->max_page_list_len);
+			memcpy(frpl->page_list, pages,
+			       sizeof(*pages) * npages);
+
+			/* Prepare FastReg WR */
+			wr = &frd->frd_fastreg_wr;
+			memset(wr, 0, sizeof(*wr));
+			wr->opcode = IB_WR_FAST_REG_MR;
+			wr->wr_id = IBLND_WID_MR;
+			wr->wr.fast_reg.iova_start = iov;
+			wr->wr.fast_reg.page_list = frpl;
+			wr->wr.fast_reg.page_list_len = npages;
+			wr->wr.fast_reg.page_shift = PAGE_SHIFT;
+			wr->wr.fast_reg.length = nob;
+			wr->wr.fast_reg.rkey = is_rx ? mr->rkey :
+						       mr->lkey;
+			wr->wr.fast_reg.access_flags =
+				(IB_ACCESS_LOCAL_WRITE |
+				 IB_ACCESS_REMOTE_WRITE);
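+			/* Note: neither the LOCAL_INV nor the FAST_REG WR is
+			 * posted here; the caller is expected to chain and
+			 * post them along with the transfer.
+			 */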
+
+			fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
+			fmr->fmr_frd = frd;
+			fmr->fmr_pfmr = NULL;
+			fmr->fmr_pool = fpo;
+			return 0;
+		}
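+		/* No idle fast-reg descriptor left in this pool. */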
+		spin_unlock(&fps->fps_lock);
+		rc = -EBUSY;
+	}