+ LASSERT (npages > 0);
+
+ /* Allocate 'npages' kernel pages into nb->nb_kiov[] and register each
+  * page's physical address with GM at consecutive network addresses
+  * starting at gmni->gmni_netaddr_base.
+  * NOTE(review): patch hunk — the enclosing function's signature and the
+  * declarations of i, gmrc, nb, gmni and npages are outside this view;
+  * presumably this is gmnal_alloc_netbuf_pages() — confirm in full file. */
+ for (i = 0; i < npages; i++) {
+ nb->nb_kiov[i].kiov_page = alloc_page(GFP_KERNEL);
+ nb->nb_kiov[i].kiov_offset = 0;
+ nb->nb_kiov[i].kiov_len = PAGE_SIZE;
+
+ if (nb->nb_kiov[i].kiov_page == NULL) {
+ CERROR("Can't allocate page\n");
+ /* page i was never allocated: release only the i
+  * pages (indices 0..i-1) set up on earlier passes */
+ gmnal_free_netbuf_pages(nb, i);
+ return -ENOMEM;
+ }
+
+ CDEBUG(D_NET,"[%3d] page %p, phys "LPX64", @ "LPX64"\n",
+ i, nb->nb_kiov[i].kiov_page,
+ lnet_page2phys(nb->nb_kiov[i].kiov_page),
+ gmni->gmni_netaddr_base);
+
+ /* map this page's physical address to the next free GM
+  * network address */
+ gmrc = gm_register_memory_ex_phys(
+ gmni->gmni_port,
+ lnet_page2phys(nb->nb_kiov[i].kiov_page),
+ PAGE_SIZE,
+ gmni->gmni_netaddr_base);
+ CDEBUG(D_NET,"[%3d] page %p: %d\n",
+ i, nb->nb_kiov[i].kiov_page, gmrc);
+
+ if (gmrc != GM_SUCCESS) {
+ CERROR("Can't map page: %d(%s)\n", gmrc,
+ gmnal_gmstatus2str(gmrc));
+ /* unlike the alloc failure above, page i WAS
+  * allocated (just not registered), so free i+1
+  * pages: indices 0..i inclusive */
+ gmnal_free_netbuf_pages(nb, i+1);
+ return -ENOMEM;
+ }
+
+ /* the netbuf's network address is that of its first page;
+  * latch it before the base is advanced below */
+ if (i == 0)
+ nb->nb_netaddr = gmni->gmni_netaddr_base;
+
+ gmni->gmni_netaddr_base += PAGE_SIZE;
+ }
+
+ return 0;