{
LASSERT(fpo->fpo_map_count == 0);
- if (!IS_ERR_OR_NULL(fpo->fmr.fpo_fmr_pool)) {
+ if (fpo->fpo_is_fmr && fpo->fmr.fpo_fmr_pool) {
ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
- fpo->fmr.fpo_fmr_pool = NULL;
} else {
struct kib_fast_reg_descriptor *frd, *tmp;
int i = 0;
else
CERROR("FMRs are not supported\n");
}
+ fpo->fpo_is_fmr = true;
return rc;
}
struct kib_fast_reg_descriptor *frd, *tmp;
int i, rc;
+ fpo->fpo_is_fmr = false;
+
INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
fpo->fast_reg.fpo_pool_size = 0;
for (i = 0; i < fps->fps_pool_size; i++) {
return;
fps = fpo->fpo_owner;
- if (!IS_ERR_OR_NULL(fpo->fmr.fpo_fmr_pool)) {
+ if (fpo->fpo_is_fmr) {
if (fmr->fmr_pfmr) {
rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
LASSERT(!rc);
fpo->fpo_deadline = ktime_get_seconds() + IBLND_POOL_DEADLINE;
fpo->fpo_map_count++;
- if (!IS_ERR_OR_NULL(fpo->fmr.fpo_fmr_pool)) {
+ if (fpo->fpo_is_fmr) {
struct ib_pool_fmr *pfmr;
spin_unlock(&fps->fps_lock);
time64_t fpo_deadline; /* deadline of this pool */
int fpo_failed; /* fmr pool is failed */
int fpo_map_count; /* # of mapped FMR */
+ bool fpo_is_fmr; /* true if this pool uses FMR; false for the fast_reg path (set at pool creation, tested instead of IS_ERR_OR_NULL on fpo_fmr_pool) */
};
struct kib_fmr {