+void
+kiblnd_event_handler(struct ib_event_handler *handler, struct ib_event *event)
+{
+ rwlock_t *g_lock = &kiblnd_data.kib_global_lock;
+ struct kib_hca_dev *hdev;
+ unsigned long flags;
+
+ hdev = container_of(handler, struct kib_hca_dev, ibh_event_handler);
+
+ write_lock_irqsave(g_lock, flags);
+
+ switch (event->event) {
+ case IB_EVENT_DEVICE_FATAL:
+ CDEBUG(D_NET, "IB device fatal\n");
+ hdev->ibh_state = IBLND_DEV_FATAL;
+ kiblnd_set_ni_fatal_on(hdev, 1);
+ break;
+ case IB_EVENT_PORT_ACTIVE:
+ CDEBUG(D_NET, "IB port active\n");
+ if (event->element.port_num == hdev->ibh_port) {
+ hdev->ibh_state = IBLND_DEV_PORT_ACTIVE;
+ kiblnd_set_ni_fatal_on(hdev, 0);
+ }
+ break;
+ case IB_EVENT_PORT_ERR:
+ CDEBUG(D_NET, "IB port err\n");
+ if (event->element.port_num == hdev->ibh_port) {
+ hdev->ibh_state = IBLND_DEV_PORT_DOWN;
+ kiblnd_set_ni_fatal_on(hdev, 1);
+ }
+ break;
+ default:
+ break;
+ }
+ write_unlock_irqrestore(g_lock, flags);
+}
+
+static int
+kiblnd_hdev_get_attr(struct kib_hca_dev *hdev)
+{
+ struct ib_device_attr *dev_attr;
+ int rc = 0;
+ int rc2 = 0;
+
+ /* It's safe to assume a HCA can handle a page size
+ * matching that of the native system */
+ hdev->ibh_page_shift = PAGE_SHIFT;
+ hdev->ibh_page_size = 1 << PAGE_SHIFT;
+ hdev->ibh_page_mask = ~((__u64)hdev->ibh_page_size - 1);
+
+#ifndef HAVE_IB_DEVICE_ATTRS
+ LIBCFS_ALLOC(dev_attr, sizeof(*dev_attr));
+ if (dev_attr == NULL) {
+ CERROR("Out of memory\n");
+ return -ENOMEM;
+ }
+
+ rc = ib_query_device(hdev->ibh_ibdev, dev_attr);
+ if (rc != 0) {
+ CERROR("Failed to query IB device: %d\n", rc);
+ goto out_clean_attr;
+ }
+#else
+ dev_attr = &hdev->ibh_ibdev->attrs;
+#endif
+
+ hdev->ibh_mr_size = dev_attr->max_mr_size;
+ hdev->ibh_max_qp_wr = dev_attr->max_qp_wr;
+
+ /* Setup device Memory Registration capabilities */
+#ifdef HAVE_IB_DEVICE_OPS
+ if (hdev->ibh_ibdev->ops.alloc_fmr &&
+ hdev->ibh_ibdev->ops.dealloc_fmr &&
+ hdev->ibh_ibdev->ops.map_phys_fmr &&
+ hdev->ibh_ibdev->ops.unmap_fmr) {
+#else
+ if (hdev->ibh_ibdev->alloc_fmr &&
+ hdev->ibh_ibdev->dealloc_fmr &&
+ hdev->ibh_ibdev->map_phys_fmr &&
+ hdev->ibh_ibdev->unmap_fmr) {
+#endif
+ LCONSOLE_INFO("Using FMR for registration\n");
+ hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FMR_ENABLED;
+ } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ LCONSOLE_INFO("Using FastReg for registration\n");
+ hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_ENABLED;
+#ifndef HAVE_IB_ALLOC_FAST_REG_MR
+#ifdef IB_DEVICE_SG_GAPS_REG
+ if (dev_attr->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+ hdev->ibh_dev->ibd_dev_caps |= IBLND_DEV_CAPS_FASTREG_GAPS_SUPPORT;
+#endif
+#endif
+ } else {
+ rc = -ENOSYS;
+ }
+
+ rc2 = kiblnd_port_get_attr(hdev);
+ if (rc2 != 0)
+ return rc2;
+
+ if (rc != 0)
+ rc = -EINVAL;
+
+#ifndef HAVE_IB_DEVICE_ATTRS
+out_clean_attr:
+ LIBCFS_FREE(dev_attr, sizeof(*dev_attr));
+#endif
+
+ if (rc == -ENOSYS)
+ CERROR("IB device does not support FMRs nor FastRegs, can't "
+ "register memory: %d\n", rc);
+ else if (rc == -EINVAL)
+ CERROR("Invalid mr size: %#llx\n", hdev->ibh_mr_size);
+ return rc;
+}
+
+#ifdef HAVE_IB_GET_DMA_MR