#define __LIBCFS_LINUX_MISC_H__
#include <linux/fs.h>
+/* Since commit 2f8b544477e6 ("block,fs: untangle fs.h and blk_types.h")
+ * fs.h doesn't include blk_types.h, but we need it.
+ */
+#include <linux/blk_types.h>
#include <linux/mutex.h>
#include <linux/user_namespace.h>
#include <linux/uio.h>
int src_niov, struct kvec *src,
unsigned int offset, unsigned int len);
-unsigned int lnet_kiov_nob (unsigned int niov, lnet_kiov_t *iov);
-int lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
- int src_niov, lnet_kiov_t *src,
- unsigned int offset, unsigned int len);
+unsigned int lnet_kiov_nob(unsigned int niov, struct bio_vec *iov);
+int lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
+ int src_niov, struct bio_vec *src,
+ unsigned int offset, unsigned int len);
void lnet_copy_iov2iov(unsigned int ndiov, struct kvec *diov,
unsigned int doffset,
unsigned int soffset, unsigned int nob);
void lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov,
unsigned int iovoffset,
- unsigned int nkiov, lnet_kiov_t *kiov,
+ unsigned int nkiov, struct bio_vec *kiov,
unsigned int kiovoffset, unsigned int nob);
-void lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
+void lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
unsigned int kiovoffset,
unsigned int niov, struct kvec *iov,
unsigned int iovoffset, unsigned int nob);
-void lnet_copy_kiov2kiov(unsigned int ndkiov, lnet_kiov_t *dkiov,
+void lnet_copy_kiov2kiov(unsigned int ndkiov, struct bio_vec *dkiov,
unsigned int doffset,
- unsigned int nskiov, lnet_kiov_t *skiov,
+ unsigned int nskiov, struct bio_vec *skiov,
unsigned int soffset, unsigned int nob);
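/* For illustration (not part of this patch): copying the first 'nob' bytes
 * of a page-based payload into a flat buffer now takes struct bio_vec, e.g.
 *
 *	struct kvec dst = { .iov_base = buf, .iov_len = buflen };
 *
 *	lnet_copy_kiov2iov(1, &dst, 0, nkiov, kiov, 0, nob);
 *
 * where 'buf', 'kiov' and the counts are assumed set up by the caller.
 */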
static inline void
lnet_copy_kiov2flat(int dlen, void *dest, unsigned int doffset,
- unsigned int nsiov, lnet_kiov_t *skiov,
+ unsigned int nsiov, struct bio_vec *skiov,
unsigned int soffset, unsigned int nob)
{
struct kvec diov = { .iov_base = dest, .iov_len = dlen };
}
static inline void
-lnet_copy_flat2kiov(unsigned int ndiov, lnet_kiov_t *dkiov,
+lnet_copy_flat2kiov(unsigned int ndiov, struct bio_vec *dkiov,
unsigned int doffset, int slen, void *src,
unsigned int soffset, unsigned int nob)
{
unsigned int msg_offset;
unsigned int msg_niov;
struct kvec *msg_iov;
- lnet_kiov_t *msg_kiov;
+ struct bio_vec *msg_kiov;
struct lnet_event msg_ev;
struct lnet_hdr msg_hdr;
struct lnet_handle_md md_bulk_handle;
union {
struct kvec iov[LNET_MAX_IOV];
- lnet_kiov_t kiov[LNET_MAX_IOV];
+ struct bio_vec kiov[LNET_MAX_IOV];
} md_iov;
};
* credit if the LND does flow control. */
int (*lnd_recv)(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen);
/* lnet_parse() has had to delay processing of this message
struct lnet_rtrbuf {
struct list_head rb_list; /* chain on rbp_bufs */
struct lnet_rtrbufpool *rb_pool; /* owning pool */
- lnet_kiov_t rb_kiov[0]; /* the buffer space */
+ struct bio_vec rb_kiov[0]; /* the buffer space */
};
#define LNET_PEER_HASHSIZE 503 /* prime! */
* Specify the memory region associated with the memory descriptor.
* If the options field has:
* - LNET_MD_KIOV bit set: The start field points to the starting
- * address of an array of lnet_kiov_t and the length field specifies
+ * address of an array of struct bio_vec and the length field specifies
* the number of entries in the array. The length can't be bigger
- * than LNET_MAX_IOV. The lnet_kiov_t is used to describe page-based
+ * than LNET_MAX_IOV. The struct bio_vec is used to describe page-based
* fragments that are not necessarily mapped in virtual memory.
* - LNET_MD_IOVEC bit set: The start field points to the starting
* address of an array of struct kvec and the length field specifies
* acknowledgment. Acknowledgments are never sent for GET operations.
* The data sent in the REPLY serves as an implicit acknowledgment.
* - LNET_MD_KIOV: The start and length fields specify an array of
- * lnet_kiov_t.
+ * struct bio_vec.
* - LNET_MD_IOVEC: The start and length fields specify an array of
* struct iovec.
* - LNET_MD_MAX_SIZE: The max_size field is valid.
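*
* For illustration (not added by this patch), a minimal LNET_MD_KIOV
* setup sketch -- 'pages[]' here is a hypothetical page array:
*
*	struct bio_vec kiov[2];
*	struct lnet_md md;
*
*	kiov[0].bv_page   = pages[0];
*	kiov[0].bv_offset = 0;
*	kiov[0].bv_len    = PAGE_SIZE;
*	kiov[1].bv_page   = pages[1];
*	kiov[1].bv_offset = 0;
*	kiov[1].bv_len    = 512;
*
*	md.start   = kiov;
*	md.length  = 2;
*	md.options = LNET_MD_KIOV;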
/** Infinite threshold on MD operations. See struct lnet_md::threshold */
#define LNET_MD_THRESH_INF (-1)
-/**
- * A page-based fragment of a MD.
- */
-typedef struct {
- /** Pointer to the page where the fragment resides */
- struct page *kiov_page;
- /** Length in bytes of the fragment */
- unsigned int kiov_len;
- /**
- * Starting offset of the fragment within the page. Note that the
- * end of the fragment must not pass the end of the page; i.e.,
- * kiov_len + kiov_offset <= PAGE_SIZE.
- */
- unsigned int kiov_offset;
-} lnet_kiov_t;
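/* For reviewers: struct bio_vec comes from <linux/bvec.h> and is the
 * kernel's own page/len/offset triple, so the conversion is a mechanical
 * field rename (as of the kernels this patch targets):
 *
 *	kiov_page   -> bv_page		(struct page *)
 *	kiov_len    -> bv_len		(unsigned int)
 *	kiov_offset -> bv_offset	(unsigned int)
 *
 * The invariant documented above carries over unchanged:
 * bv_offset + bv_len <= PAGE_SIZE for these single-page fragments.
 */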
/** @} lnet_md */
/** \addtogroup lnet_eq
struct lnet_msg *lntmsg, void **new_private);
int kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen);
-__u16 kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int offset, unsigned int nob, int dump_blob);
+__u16 kgnilnd_cksum_kiov(unsigned int nkiov, struct bio_vec *kiov,
+ unsigned int offset, unsigned int nob, int dump_blob);
/* purgatory functions */
void kgnilnd_add_purgatory_locked(kgn_conn_t *conn, kgn_peer_t *peer);
}
__u16
-kgnilnd_cksum_kiov(unsigned int nkiov, lnet_kiov_t *kiov,
- unsigned int offset, unsigned int nob, int dump_blob)
+kgnilnd_cksum_kiov(unsigned int nkiov, struct bio_vec *kiov,
+ unsigned int offset, unsigned int nob, int dump_blob)
{
__wsum cksum = 0;
__wsum tmpck;
/* if this loop changes, please change kgnilnd_setup_phys_buffer */
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
nkiov--;
kiov++;
LASSERT(nkiov > 0);
}
- /* ignore nob here, if nob < (kiov_len - offset), kiov == 1 */
- odd = (unsigned long) (kiov[0].kiov_len - offset) & 1;
+ /* ignore nob here, if nob < (bv_len - offset), kiov == 1 */
+ odd = (unsigned long) (kiov[0].bv_len - offset) & 1;
if ((odd || *kgnilnd_tunables.kgn_vmap_cksum) && nkiov > 1) {
struct page **pages = kgnilnd_data.kgn_cksum_map_pages[get_cpu()];
get_cpu(), kgnilnd_data.kgn_cksum_map_pages);
CDEBUG(D_BUFFS, "odd %d len %u offset %u nob %u\n",
- odd, kiov[0].kiov_len, offset, nob);
+ odd, kiov[0].bv_len, offset, nob);
for (i = 0; i < nkiov; i++) {
- pages[i] = kiov[i].kiov_page;
+ pages[i] = kiov[i].bv_page;
}
addr = vmap(pages, nkiov, VM_MAP, PAGE_KERNEL);
}
atomic_inc(&kgnilnd_data.kgn_nvmap_cksum);
- tmpck = _kgnilnd_cksum(0, (void *) addr + kiov[0].kiov_offset + offset, nob);
+ tmpck = _kgnilnd_cksum(0, ((void *) addr + kiov[0].bv_offset +
+ offset), nob);
cksum = tmpck;
if (dump_blob) {
kgnilnd_dump_blob(D_BUFFS, "flat kiov RDMA payload",
- (void *)addr + kiov[0].kiov_offset + offset, nob);
+ (void *)addr + kiov[0].bv_offset +
+ offset, nob);
}
CDEBUG(D_BUFFS, "cksum 0x%x (+0x%x) for addr 0x%p+%u len %u offset %u\n",
- cksum, tmpck, addr, kiov[0].kiov_offset, nob, offset);
+ cksum, tmpck, addr, kiov[0].bv_offset, nob, offset);
vunmap(addr);
} else {
do {
- fraglen = min(kiov->kiov_len - offset, nob);
+ fraglen = min(kiov->bv_len - offset, nob);
/* make dang sure we don't send a bogus checksum if somehow we get
* an odd length fragment on anything but the last entry in a kiov -
* we know from kgnilnd_setup_rdma_buffer that we can't have non
* PAGE_SIZE pages in the middle, so if nob < PAGE_SIZE, it is the last one */
LASSERTF(!(fraglen&1) || (nob < PAGE_SIZE),
- "odd fraglen %u on nkiov %d, nob %u kiov_len %u offset %u kiov 0x%p\n",
- fraglen, nkiov, nob, kiov->kiov_len, offset, kiov);
+ "odd fraglen %u on nkiov %d, nob %u bv_len %u offset %u kiov 0x%p\n",
+ fraglen, nkiov, nob, kiov->bv_len,
+ offset, kiov);
- addr = (void *)kmap(kiov->kiov_page) + kiov->kiov_offset + offset;
+ addr = (void *)kmap(kiov->bv_page) + kiov->bv_offset +
+ offset;
tmpck = _kgnilnd_cksum(cksum, addr, fraglen);
CDEBUG(D_BUFFS,
"cksum 0x%x (+0x%x) for page 0x%p+%u (0x%p) len %u offset %u\n",
- cksum, tmpck, kiov->kiov_page, kiov->kiov_offset, addr,
- fraglen, offset);
+ cksum, tmpck, kiov->bv_page, kiov->bv_offset,
+ addr, fraglen, offset);
cksum = tmpck;
if (dump_blob)
kgnilnd_dump_blob(D_BUFFS, "kiov cksum", addr, fraglen);
- kunmap(kiov->kiov_page);
+ kunmap(kiov->bv_page);
kiov++;
nkiov--;
int
kgnilnd_setup_immediate_buffer(kgn_tx_t *tx, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, struct bio_vec *kiov,
unsigned int offset, unsigned int nob)
{
kgn_msg_t *msg = &tx->tx_msg;
} else if (kiov != NULL) {
if ((niov > 0) && unlikely(niov > (nob/PAGE_SIZE))) {
- niov = ((nob + offset + kiov->kiov_offset + PAGE_SIZE - 1) /
- PAGE_SIZE);
+ niov = DIV_ROUND_UP(nob + offset + kiov->bv_offset,
+ PAGE_SIZE);
}
LASSERTF(niov > 0 && niov < GNILND_MAX_IMMEDIATE/PAGE_SIZE,
"bad niov %d msg %p kiov %p iov %p offset %d nob%d\n",
niov, msg, kiov, iov, offset, nob);
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
niov--;
kiov++;
LASSERT(niov > 0);
}
for (i = 0; i < niov; i++) {
- /* We can't have a kiov_offset on anything but the first entry,
- * otherwise we'll have a hole at the end of the mapping as we only map
- * whole pages.
- * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
- * than kiov_len, we will also have a whole at the end of that page
- * which isn't allowed */
- if ((kiov[i].kiov_offset != 0 && i > 0) ||
- (kiov[i].kiov_offset + kiov[i].kiov_len != PAGE_SIZE && i < niov - 1)) {
- CNETERR("Can't make payload contiguous in I/O VM: page %d, offset %u, nob %u, kiov_offset %u, kiov_len %u\n",
- i, offset, nob, kiov->kiov_offset, kiov->kiov_len);
+ /* We can't have a bv_offset on anything but the first
+ * entry, otherwise we'll have a hole at the end of the
+ * mapping as we only map whole pages.
+ * Also, if we have a bv_len < PAGE_SIZE but we need to
+ * map more than bv_len, we will also have a hole at
+ * the end of that page which isn't allowed
+ */
+ if ((kiov[i].bv_offset != 0 && i > 0) ||
+ (kiov[i].bv_offset + kiov[i].bv_len != PAGE_SIZE &&
+ i < niov - 1)) {
+ CNETERR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n",
+ i, offset, nob, kiov->bv_offset,
+ kiov->bv_len);
RETURN(-EINVAL);
}
- tx->tx_imm_pages[i] = kiov[i].kiov_page;
+ tx->tx_imm_pages[i] = kiov[i].bv_page;
}
/* hijack tx_phys for the later unmap */
if (niov == 1) {
/* tx->tx_phys being equal to NULL is the signal for unmap to discern between kmap and vmap */
tx->tx_phys = NULL;
- tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) + kiov[0].kiov_offset + offset;
+ tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) +
+ kiov[0].bv_offset + offset;
atomic_inc(&kgnilnd_data.kgn_nkmap_short);
GNIDBG_TX(D_NET, tx, "kmapped page for %d bytes for kiov 0x%p, buffer 0x%p",
nob, kiov, tx->tx_buffer);
}
atomic_inc(&kgnilnd_data.kgn_nvmap_short);
- /* make sure we take into account the kiov offset as the start of the buffer */
- tx->tx_buffer = (void *)tx->tx_phys + kiov[0].kiov_offset + offset;
- GNIDBG_TX(D_NET, tx, "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
- niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
+ /* make sure we take into account the kiov offset as the
+ * start of the buffer
+ */
+ tx->tx_buffer = (void *)tx->tx_phys + kiov[0].bv_offset
+ + offset;
+ GNIDBG_TX(D_NET, tx,
+ "mapped %d pages for %d bytes from kiov 0x%p to 0x%p, buffer 0x%p",
+ niov, nob, kiov, tx->tx_phys, tx->tx_buffer);
}
tx->tx_buftype = GNILND_BUF_IMMEDIATE_KIOV;
tx->tx_nob = nob;
}
int
-kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, lnet_kiov_t *kiov,
+kgnilnd_setup_phys_buffer(kgn_tx_t *tx, int nkiov, struct bio_vec *kiov,
unsigned int offset, unsigned int nob)
{
gni_mem_segment_t *phys;
/* if this loop changes, please change kgnilnd_cksum_kiov
* and kgnilnd_setup_immediate_buffer */
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
nkiov--;
kiov++;
LASSERT(nkiov > 0);
tx->tx_buftype = GNILND_BUF_PHYS_UNMAPPED;
tx->tx_nob = nob;
- /* kiov_offset is start of 'valid' buffer, so index offset past that */
- tx->tx_buffer = (void *)((unsigned long)(kiov->kiov_offset + offset));
+ /* bv_offset is start of 'valid' buffer, so index offset past that */
+ tx->tx_buffer = (void *)((unsigned long)(kiov->bv_offset + offset));
phys = tx->tx_phys;
CDEBUG(D_NET, "tx 0x%p buffer 0x%p map start kiov 0x%p+%u niov %d offset %u\n",
- tx, tx->tx_buffer, kiov, kiov->kiov_offset, nkiov, offset);
+ tx, tx->tx_buffer, kiov, kiov->bv_offset, nkiov, offset);
do {
- fraglen = min(kiov->kiov_len - offset, nob);
-
- /* We can't have a kiov_offset on anything but the first entry,
- * otherwise we'll have a hole at the end of the mapping as we only map
- * whole pages. Only the first page is allowed to have an offset -
- * we'll add that into tx->tx_buffer and that will get used when we
- * map in the segments (see kgnilnd_map_buffer).
- * Also, if we have a kiov_len < PAGE_SIZE but we need to map more
- * than kiov_len, we will also have a whole at the end of that page
- * which isn't allowed */
+ fraglen = min(kiov->bv_len - offset, nob);
+
+ /* We can't have a bv_offset on anything but the first entry,
+ * otherwise we'll have a hole at the end of the mapping as we
+ * only map whole pages. Only the first page is allowed to
+ * have an offset - we'll add that into tx->tx_buffer and that
+ * will get used when we map in the segments (see
+ * kgnilnd_map_buffer). Also, if we have a bv_len < PAGE_SIZE
+ * but we need to map more than bv_len, we will also have a
+ * hole at the end of that page which isn't allowed
+ */
if ((phys != tx->tx_phys) &&
- ((kiov->kiov_offset != 0) ||
- ((kiov->kiov_len < PAGE_SIZE) && (nob > kiov->kiov_len)))) {
- CERROR("Can't make payload contiguous in I/O VM: page %d, offset %u, nob %u, kiov_offset %u, kiov_len %u\n",
+ ((kiov->bv_offset != 0) ||
+ ((kiov->bv_len < PAGE_SIZE) && (nob > kiov->bv_len)))) {
+ CERROR("Can't make payload contiguous in I/O VM:page %d, offset %u, nob %u, bv_offset %u bv_len %u\n",
(int)(phys - tx->tx_phys),
- offset, nob, kiov->kiov_offset, kiov->kiov_len);
+ offset, nob, kiov->bv_offset, kiov->bv_len);
rc = -EINVAL;
GOTO(error, rc);
}
GOTO(error, rc);
}
- CDEBUG(D_BUFFS, "page 0x%p kiov_offset %u kiov_len %u nob %u "
- "nkiov %u offset %u\n",
- kiov->kiov_page, kiov->kiov_offset, kiov->kiov_len, nob, nkiov, offset);
+ CDEBUG(D_BUFFS,
+ "page 0x%p bv_offset %u bv_len %u nob %u nkiov %u offset %u\n",
+ kiov->bv_page, kiov->bv_offset, kiov->bv_len, nob, nkiov,
+ offset);
- phys->address = page_to_phys(kiov->kiov_page);
+ phys->address = page_to_phys(kiov->bv_page);
phys++;
kiov++;
nkiov--;
static inline int
kgnilnd_setup_rdma_buffer(kgn_tx_t *tx, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, struct bio_vec *kiov,
unsigned int offset, unsigned int nob)
{
int rc;
static void
kgnilnd_parse_lnet_rdma(struct lnet_msg *lntmsg, unsigned int *niov,
unsigned int *offset, unsigned int *nob,
- lnet_kiov_t **kiov, int put_len)
+ struct bio_vec **kiov, int put_len)
{
/* GETs are weird, see kgnilnd_send */
if (lntmsg->msg_type == LNET_MSG_GET) {
static inline void
kgnilnd_compute_rdma_cksum(kgn_tx_t *tx, int put_len)
{
- unsigned int niov, offset, nob;
- lnet_kiov_t *kiov;
- struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
- int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
+ unsigned int niov, offset, nob;
+ struct bio_vec *kiov;
+ struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
+ int dump_cksum = (*kgnilnd_tunables.kgn_checksum_dump > 1);
GNITX_ASSERTF(tx, ((tx->tx_msg.gnm_type == GNILND_MSG_PUT_DONE) ||
(tx->tx_msg.gnm_type == GNILND_MSG_GET_DONE) ||
int rc = 0;
__u16 cksum;
unsigned int niov, offset, nob;
- lnet_kiov_t *kiov;
- struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
+ struct bio_vec *kiov;
+ struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
int dump_on_err = *kgnilnd_tunables.kgn_checksum_dump;
/* we can only match certain requests */
int routing = lntmsg->msg_routing;
unsigned int niov = lntmsg->msg_niov;
struct kvec *iov = lntmsg->msg_iov;
- lnet_kiov_t *kiov = lntmsg->msg_kiov;
+ struct bio_vec *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
unsigned int msg_vmflush = lntmsg->msg_vmflush;
kgn_msg_t *rxmsg = rx->grx_msg;
unsigned int niov = lntmsg->msg_niov;
struct kvec *iov = lntmsg->msg_iov;
- lnet_kiov_t *kiov = lntmsg->msg_kiov;
+ struct bio_vec *kiov = lntmsg->msg_kiov;
unsigned int offset = lntmsg->msg_offset;
unsigned int nob = lntmsg->msg_len;
int done_type;
int
kgnilnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
kgn_rx_t *rx = private;
int
kgnilnd_recv_bte_get(kgn_tx_t *tx) {
unsigned niov, offset, nob;
- lnet_kiov_t *kiov;
+ struct bio_vec *kiov;
struct lnet_msg *lntmsg = tx->tx_lntmsg[0];
kgnilnd_parse_lnet_rdma(lntmsg, &niov, &offset, &nob, &kiov, tx->tx_nob_rdma);
static int
_kgnilnd_proc_run_cksum_test(int caseno, int nloops, int nob)
{
- lnet_kiov_t *src, *dest;
+ struct bio_vec *src, *dest;
struct timespec begin, end, diff;
int niov;
int rc = 0;
}
for (i = 0; i < LNET_MAX_IOV; i++) {
- src[i].kiov_offset = 0;
- src[i].kiov_len = PAGE_SIZE;
- src[i].kiov_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ src[i].bv_offset = 0;
+ src[i].bv_len = PAGE_SIZE;
+ src[i].bv_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (src[i].kiov_page == NULL) {
+ if (src[i].bv_page == NULL) {
CERROR("couldn't allocate page %d\n", i);
GOTO(unwind, rc = -ENOMEM);
}
- dest[i].kiov_offset = 0;
- dest[i].kiov_len = PAGE_SIZE;
- dest[i].kiov_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ dest[i].bv_offset = 0;
+ dest[i].bv_len = PAGE_SIZE;
+ dest[i].bv_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (dest[i].kiov_page == NULL) {
+ if (dest[i].bv_page == NULL) {
CERROR("couldn't allocate page %d\n", i);
GOTO(unwind, rc = -ENOMEM);
}
}
/* setup real data */
- src[0].kiov_offset = 317;
- dest[0].kiov_offset = 592;
+ src[0].bv_offset = 317;
+ dest[0].bv_offset = 592;
switch (caseno) {
default:
/* odd -> even */
break;
case 1:
/* odd -> odd */
- dest[0].kiov_offset -= 1;
+ dest[0].bv_offset -= 1;
break;
case 2:
/* even -> even */
- src[0].kiov_offset += 1;
+ src[0].bv_offset += 1;
break;
case 3:
/* even -> odd */
- src[0].kiov_offset += 1;
- dest[0].kiov_offset -= 1;
+ src[0].bv_offset += 1;
+ dest[0].bv_offset -= 1;
}
- src[0].kiov_len = PAGE_SIZE - src[0].kiov_offset;
- dest[0].kiov_len = PAGE_SIZE - dest[0].kiov_offset;
+ src[0].bv_len = PAGE_SIZE - src[0].bv_offset;
+ dest[0].bv_len = PAGE_SIZE - dest[0].bv_offset;
for (i = 0; i < niov; i++) {
- memset(page_address(src[i].kiov_page) + src[i].kiov_offset,
- 0xf0 + i, src[i].kiov_len);
+ memset(page_address(src[i].bv_page) + src[i].bv_offset,
+ 0xf0 + i, src[i].bv_len);
}
lnet_copy_kiov2kiov(niov, dest, 0, niov, src, 0, nob);
getnstimeofday(&begin);
for (n = 0; n < nloops; n++) {
- CDEBUG(D_BUFFS, "case %d loop %d src %d dest %d nob %d niov %d\n",
- caseno, n, src[0].kiov_offset, dest[0].kiov_offset, nob, niov);
+ CDEBUG(D_BUFFS,
+ "case %d loop %d src %d dest %d nob %d niov %d\n",
+ caseno, n, src[0].bv_offset, dest[0].bv_offset, nob,
+ niov);
cksum = kgnilnd_cksum_kiov(niov, src, 0, nob - (n % nob), 1);
cksum2 = kgnilnd_cksum_kiov(niov, dest, 0, nob - (n % nob), 1);
unwind:
CDEBUG(D_NET, "freeing %d pages\n", i);
for (i -= 1; i >= 0; i--) {
- if (src[i].kiov_page != NULL) {
- __free_page(src[i].kiov_page);
- }
- if (dest[i].kiov_page != NULL) {
- __free_page(dest[i].kiov_page);
- }
+ if (src[i].bv_page)
+ __free_page(src[i].bv_page);
+
+ if (dest[i].bv_page)
+ __free_page(dest[i].bv_page);
}
if (src != NULL)
int kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
int kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov, struct kvec *iov,
- lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
+ struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
unsigned int rlen);
static int kiblnd_setup_rd_kiov(struct lnet_ni *ni, struct kib_tx *tx,
struct kib_rdma_desc *rd, int nkiov,
- lnet_kiov_t *kiov, int offset, int nob)
+ struct bio_vec *kiov, int offset, int nob)
{
struct kib_net *net = ni->ni_data;
struct scatterlist *sg;
LASSERT(nkiov > 0);
LASSERT(net != NULL);
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
nkiov--;
kiov++;
LASSERT(nkiov > 0);
do {
LASSERT(nkiov > 0);
- fragnob = min((int)(kiov->kiov_len - offset), nob);
+ fragnob = min((int)(kiov->bv_len - offset), nob);
/*
* We're allowed to start at a non-aligned page offset in
* the first fragment and end at a non-aligned page offset
* in the last fragment.
*/
- if ((fragnob < (int)(kiov->kiov_len - offset)) &&
+ if ((fragnob < (int)(kiov->bv_len - offset)) &&
nkiov < max_nkiov && nob > fragnob) {
CDEBUG(D_NET, "fragnob %d < available page %d: with"
" remaining %d kiovs with %d nob left\n",
- fragnob, (int)(kiov->kiov_len - offset),
+ fragnob, (int)(kiov->bv_len - offset),
nkiov, nob);
tx->tx_gaps = true;
}
- sg_set_page(sg, kiov->kiov_page, fragnob,
- kiov->kiov_offset + offset);
+ sg_set_page(sg, kiov->bv_page, fragnob,
+ kiov->bv_offset + offset);
sg = sg_next(sg);
if (!sg) {
CERROR("lacking enough sg entries to map tx\n");
int routing = lntmsg->msg_routing;
unsigned int payload_niov = lntmsg->msg_niov;
struct kvec *payload_iov = lntmsg->msg_iov;
- lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
+ struct bio_vec *payload_kiov = lntmsg->msg_kiov;
unsigned int payload_offset = lntmsg->msg_offset;
unsigned int payload_nob = lntmsg->msg_len;
struct kib_msg *ibmsg;
kiblnd_reply(struct lnet_ni *ni, struct kib_rx *rx, struct lnet_msg *lntmsg)
{
struct lnet_process_id target = lntmsg->msg_target;
- unsigned int niov = lntmsg->msg_niov;
+ unsigned int niov = lntmsg->msg_niov;
struct kvec *iov = lntmsg->msg_iov;
- lnet_kiov_t *kiov = lntmsg->msg_kiov;
- unsigned int offset = lntmsg->msg_offset;
- unsigned int nob = lntmsg->msg_len;
+ struct bio_vec *kiov = lntmsg->msg_kiov;
+ unsigned int offset = lntmsg->msg_offset;
+ unsigned int nob = lntmsg->msg_len;
struct kib_tx *tx;
- int rc;
+ int rc;
tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid);
if (tx == NULL) {
int
kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
- int delayed, unsigned int niov, struct kvec *iov, lnet_kiov_t *kiov,
+ int delayed, unsigned int niov, struct kvec *iov, struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
struct kib_rx *rx = private;
/* A packet just assembled for transmission is represented by 1 or more
* struct kvec fragments (the first frag contains the portals header),
- * followed by 0 or more lnet_kiov_t fragments.
+ * followed by 0 or more struct bio_vec fragments.
*
* On the receive side, initially 1 struct kvec fragment is posted for
* receive (the header). Once the header has been received, the payload is
- * received into either struct kvec or lnet_kiov_t fragments, depending on
+ * received into either struct kvec or struct bio_vec fragments, depending on
* what the header matched or whether the message needs forwarding. */
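/* Schematically, for a transmit (illustration only, not added by this
 * patch):
 *
 *	tx_iov[0..m-1]		kvec frags; tx_iov[0] carries the header
 *	tx_kiov[0..n-1]		bio_vec (page) frags with the payload
 *
 * On receive, a single kvec is posted for the header, after which the
 * payload lands in kvec or bio_vec frags as the match dictates.
 */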
struct ksock_conn; /* forward ref */
unsigned short tx_zc_capable:1; /* payload is large enough for ZC */
unsigned short tx_zc_checked:1; /* Have I checked if I should ZC? */
unsigned short tx_nonblk:1; /* it's a non-blocking ACK */
- lnet_kiov_t *tx_kiov; /* packet page frags */
+ struct bio_vec *tx_kiov; /* packet page frags */
struct ksock_conn *tx_conn; /* owning conn */
struct lnet_msg *tx_lnetmsg; /* lnet message for lnet_finalize() */
time64_t tx_deadline; /* when (in secs) tx times out */
union {
struct {
struct kvec iov; /* virt hdr */
- lnet_kiov_t kiov[0]; /* paged payload */
+ struct bio_vec kiov[0]; /* paged payload */
} paged;
struct {
struct kvec iov[1]; /* virt hdr + payload */
* header, or up to LNET_MAX_IOV frags of payload of either type. */
union ksock_rxiovspace {
struct kvec iov[LNET_MAX_IOV];
- lnet_kiov_t kiov[LNET_MAX_IOV];
+ struct bio_vec kiov[LNET_MAX_IOV];
};
#define SOCKNAL_RX_KSM_HEADER 1 /* reading ksock message header */
int ksnc_rx_nob_wanted; /* bytes actually wanted */
int ksnc_rx_niov; /* # kvec frags */
struct kvec *ksnc_rx_iov; /* the kvec frags */
- int ksnc_rx_nkiov; /* # page frags */
- lnet_kiov_t *ksnc_rx_kiov; /* the page frags */
+ int ksnc_rx_nkiov; /* # page frags */
+ struct bio_vec *ksnc_rx_kiov; /* the page frags */
union ksock_rxiovspace ksnc_rx_iov_space;/* space for frag descriptors */
- __u32 ksnc_rx_csum; /* partial checksum for incoming data */
+ __u32 ksnc_rx_csum; /* partial checksum for incoming
+ * data */
struct lnet_msg *ksnc_lnet_msg; /* rx lnet_finalize arg*/
struct ksock_msg ksnc_msg; /* incoming message buffer:
* V2.x message takes the
int ksocknal_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg);
int ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen);
int ksocknal_accept(struct lnet_ni *ni, struct socket *sock);
ksocknal_send_kiov(struct ksock_conn *conn, struct ksock_tx *tx,
struct kvec *scratch_iov)
{
- lnet_kiov_t *kiov = tx->tx_kiov;
+ struct bio_vec *kiov = tx->tx_kiov;
int nob;
int rc;
do {
LASSERT(tx->tx_nkiov > 0);
- if (nob < (int)kiov->kiov_len) {
- kiov->kiov_offset += nob;
- kiov->kiov_len -= nob;
+ if (nob < (int)kiov->bv_len) {
+ kiov->bv_offset += nob;
+ kiov->bv_len -= nob;
return rc;
}
- nob -= (int)kiov->kiov_len;
+ nob -= (int)kiov->bv_len;
tx->tx_kiov = ++kiov;
tx->tx_nkiov--;
} while (nob != 0);
ksocknal_recv_kiov(struct ksock_conn *conn, struct page **rx_scratch_pgs,
struct kvec *scratch_iov)
{
- lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+ struct bio_vec *kiov = conn->ksnc_rx_kiov;
int nob;
int rc;
LASSERT(conn->ksnc_rx_nkiov > 0);
do {
LASSERT(conn->ksnc_rx_nkiov > 0);
- if (nob < (int) kiov->kiov_len) {
- kiov->kiov_offset += nob;
- kiov->kiov_len -= nob;
+ if (nob < (int) kiov->bv_len) {
+ kiov->bv_offset += nob;
+ kiov->bv_len -= nob;
return -EAGAIN;
}
- nob -= kiov->kiov_len;
+ nob -= kiov->bv_len;
conn->ksnc_rx_kiov = ++kiov;
conn->ksnc_rx_nkiov--;
} while (nob != 0);
int mpflag = 1;
int type = lntmsg->msg_type;
struct lnet_process_id target = lntmsg->msg_target;
- unsigned int payload_niov = lntmsg->msg_niov;
+ unsigned int payload_niov = lntmsg->msg_niov;
struct kvec *payload_iov = lntmsg->msg_iov;
- lnet_kiov_t *payload_kiov = lntmsg->msg_kiov;
- unsigned int payload_offset = lntmsg->msg_offset;
- unsigned int payload_nob = lntmsg->msg_len;
- struct ksock_tx *tx;
- int desc_size;
- int rc;
+ struct bio_vec *payload_kiov = lntmsg->msg_kiov;
+ unsigned int payload_offset = lntmsg->msg_offset;
+ unsigned int payload_nob = lntmsg->msg_len;
+ struct ksock_tx *tx;
+ int desc_size;
+ int rc;
/* NB 'private' is different depending on what we're sending.
* Just ignore it... */
int
ksocknal_recv(struct lnet_ni *ni, void *private, struct lnet_msg *msg,
int delayed, unsigned int niov, struct kvec *iov,
- lnet_kiov_t *kiov, unsigned int offset, unsigned int mlen,
+ struct bio_vec *kiov, unsigned int offset, unsigned int mlen,
unsigned int rlen)
{
struct ksock_conn *conn = private;
struct kvec *scratchiov)
{
struct socket *sock = conn->ksnc_sock;
- lnet_kiov_t *kiov = tx->tx_kiov;
+ struct bio_vec *kiov = tx->tx_kiov;
int rc;
int nob;
if (tx->tx_msg.ksm_zc_cookies[0] != 0) {
/* Zero copy is enabled */
struct sock *sk = sock->sk;
- struct page *page = kiov->kiov_page;
- int offset = kiov->kiov_offset;
- int fragsize = kiov->kiov_len;
+ struct page *page = kiov->bv_page;
+ int offset = kiov->bv_offset;
+ int fragsize = kiov->bv_len;
int msgflg = MSG_DONTWAIT;
CDEBUG(D_NET, "page %p + offset %x for %d\n",
- page, offset, kiov->kiov_len);
+ page, offset, kiov->bv_len);
if (!list_empty(&conn->ksnc_tx_queue) ||
fragsize < tx->tx_resid)
int i;
for (nob = i = 0; i < niov; i++) {
- scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
- kiov[i].kiov_offset;
- nob += scratchiov[i].iov_len = kiov[i].kiov_len;
+ scratchiov[i].iov_base = kmap(kiov[i].bv_page) +
+ kiov[i].bv_offset;
+ nob += scratchiov[i].iov_len = kiov[i].bv_len;
}
if (!list_empty(&conn->ksnc_tx_queue) ||
rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob);
for (i = 0; i < niov; i++)
- kunmap(kiov[i].kiov_page);
+ kunmap(kiov[i].bv_page);
}
return rc;
}
}
static void *
-ksocknal_lib_kiov_vmap(lnet_kiov_t *kiov, int niov,
+ksocknal_lib_kiov_vmap(struct bio_vec *kiov, int niov,
struct kvec *iov, struct page **pages)
{
void *addr;
niov < *ksocknal_tunables.ksnd_zc_recv_min_nfrags)
return NULL;
- for (nob = i = 0; i < niov; i++) {
- if ((kiov[i].kiov_offset != 0 && i > 0) ||
- (kiov[i].kiov_offset + kiov[i].kiov_len !=
+ for (nob = i = 0; i < niov; i++) {
+ if ((kiov[i].bv_offset != 0 && i > 0) ||
+ (kiov[i].bv_offset + kiov[i].bv_len !=
PAGE_SIZE && i < niov - 1))
- return NULL;
+ return NULL;
- pages[i] = kiov[i].kiov_page;
- nob += kiov[i].kiov_len;
- }
+ pages[i] = kiov[i].bv_page;
+ nob += kiov[i].bv_len;
+ }
- addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
- if (addr == NULL)
- return NULL;
+ addr = vmap(pages, niov, VM_MAP, PAGE_KERNEL);
+ if (addr == NULL)
+ return NULL;
- iov->iov_base = addr + kiov[0].kiov_offset;
- iov->iov_len = nob;
+ iov->iov_base = addr + kiov[0].bv_offset;
+ iov->iov_len = nob;
- return addr;
+ return addr;
}
int
#endif
unsigned int niov = conn->ksnc_rx_nkiov;
#endif
- lnet_kiov_t *kiov = conn->ksnc_rx_kiov;
+ struct bio_vec *kiov = conn->ksnc_rx_kiov;
struct msghdr msg = {
.msg_flags = 0
};
} else {
for (nob = i = 0; i < niov; i++) {
- nob += scratchiov[i].iov_len = kiov[i].kiov_len;
- scratchiov[i].iov_base = kmap(kiov[i].kiov_page) +
- kiov[i].kiov_offset;
+ nob += scratchiov[i].iov_len = kiov[i].bv_len;
+ scratchiov[i].iov_base = kmap(kiov[i].bv_page) +
+ kiov[i].bv_offset;
}
n = niov;
}
rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, n, nob,
MSG_DONTWAIT);
- if (conn->ksnc_msg.ksm_csum != 0) {
- for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
- LASSERT (i < niov);
+ if (conn->ksnc_msg.ksm_csum != 0) {
+ for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
+ LASSERT(i < niov);
- /* Dang! have to kmap again because I have nowhere to stash the
- * mapped address. But by doing it while the page is still
- * mapped, the kernel just bumps the map count and returns me
- * the address it stashed. */
- base = kmap(kiov[i].kiov_page) + kiov[i].kiov_offset;
- fragnob = kiov[i].kiov_len;
- if (fragnob > sum)
- fragnob = sum;
+ /* Dang! have to kmap again because I have nowhere to
+ * stash the mapped address. But by doing it while the
+ * page is still mapped, the kernel just bumps the map
+ * count and returns me the address it stashed.
+ */
+ base = kmap(kiov[i].bv_page) + kiov[i].bv_offset;
+ fragnob = kiov[i].bv_len;
+ if (fragnob > sum)
+ fragnob = sum;
- conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
- base, fragnob);
+ conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
+ base, fragnob);
- kunmap(kiov[i].kiov_page);
- }
- }
+ kunmap(kiov[i].bv_page);
+ }
+ }
- if (addr != NULL) {
- ksocknal_lib_kiov_vunmap(addr);
- } else {
- for (i = 0; i < niov; i++)
- kunmap(kiov[i].kiov_page);
- }
+ if (addr != NULL) {
+ ksocknal_lib_kiov_vunmap(addr);
+ } else {
+ for (i = 0; i < niov; i++)
+ kunmap(kiov[i].bv_page);
+ }
- return (rc);
+ return rc;
}
void
if (tx->tx_kiov != NULL) {
for (i = 0; i < tx->tx_nkiov; i++) {
- base = kmap(tx->tx_kiov[i].kiov_page) +
- tx->tx_kiov[i].kiov_offset;
+ base = kmap(tx->tx_kiov[i].bv_page) +
+ tx->tx_kiov[i].bv_offset;
- csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len);
+ csum = ksocknal_csum(csum, base, tx->tx_kiov[i].bv_len);
- kunmap(tx->tx_kiov[i].kiov_page);
+ kunmap(tx->tx_kiov[i].bv_page);
}
} else {
for (i = 1; i < tx->tx_niov; i++)
/*
* There are three cases to handle:
- * 1. The MD is using lnet_kiov_t
+ * 1. The MD is using struct bio_vec
* 2. The MD is using struct kvec
* 3. Contiguous buffer allocated via vmalloc
*
* DMAed.
*/
if ((md->md_options & LNET_MD_KIOV) != 0) {
- lnet_kiov_t *kiov = md->md_iov.kiov;
+ struct bio_vec *kiov = md->md_iov.kiov;
- while (offset >= kiov->kiov_len) {
- offset -= kiov->kiov_len;
+ while (offset >= kiov->bv_len) {
+ offset -= kiov->bv_len;
niov--;
kiov++;
if (niov == 0) {
}
cpt = cfs_cpt_of_node(lnet_cpt_table(),
- page_to_nid(kiov->kiov_page));
+ page_to_nid(kiov->bv_page));
} else {
struct kvec *iov = md->md_iov.iov;
unsigned long vaddr;
for (i = 0; i < (int)niov; i++) {
/* We take the page pointer on trust */
- if (lmd->md_iov.kiov[i].kiov_offset +
- lmd->md_iov.kiov[i].kiov_len > PAGE_SIZE)
+ if (lmd->md_iov.kiov[i].bv_offset +
+ lmd->md_iov.kiov[i].bv_len > PAGE_SIZE)
return -EINVAL; /* invalid length */
- total_length += lmd->md_iov.kiov[i].kiov_len;
+ total_length += lmd->md_iov.kiov[i].bv_len;
}
lmd->md_length = total_length;
unsigned int
-lnet_kiov_nob(unsigned int niov, lnet_kiov_t *kiov)
+lnet_kiov_nob(unsigned int niov, struct bio_vec *kiov)
{
unsigned int nob = 0;
LASSERT(niov == 0 || kiov != NULL);
while (niov-- > 0)
- nob += (kiov++)->kiov_len;
+ nob += (kiov++)->bv_len;
return (nob);
}
EXPORT_SYMBOL(lnet_kiov_nob);
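/* Usage is unchanged apart from the type, e.g. (sketch, with 'msg'
 * a caller-provided struct lnet_msg):
 *
 *	struct bio_vec *kiov = msg->msg_kiov;
 *	unsigned int nob = lnet_kiov_nob(msg->msg_niov, kiov);
 *
 * which simply sums kiov[i].bv_len over the fragment array.
 */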
void
-lnet_copy_kiov2kiov(unsigned int ndiov, lnet_kiov_t *diov, unsigned int doffset,
- unsigned int nsiov, lnet_kiov_t *siov, unsigned int soffset,
+lnet_copy_kiov2kiov(unsigned int ndiov, struct bio_vec *diov,
+ unsigned int doffset,
+ unsigned int nsiov, struct bio_vec *siov,
+ unsigned int soffset,
unsigned int nob)
{
/* NB diov, siov are READ-ONLY */
LASSERT (!in_interrupt ());
LASSERT (ndiov > 0);
- while (doffset >= diov->kiov_len) {
- doffset -= diov->kiov_len;
+ while (doffset >= diov->bv_len) {
+ doffset -= diov->bv_len;
diov++;
ndiov--;
LASSERT(ndiov > 0);
}
LASSERT(nsiov > 0);
- while (soffset >= siov->kiov_len) {
- soffset -= siov->kiov_len;
+ while (soffset >= siov->bv_len) {
+ soffset -= siov->bv_len;
siov++;
nsiov--;
LASSERT(nsiov > 0);
do {
LASSERT(ndiov > 0);
LASSERT(nsiov > 0);
- this_nob = min3(diov->kiov_len - doffset,
- siov->kiov_len - soffset,
+ this_nob = min3(diov->bv_len - doffset,
+ siov->bv_len - soffset,
nob);
if (daddr == NULL)
- daddr = ((char *)kmap(diov->kiov_page)) +
- diov->kiov_offset + doffset;
+ daddr = ((char *)kmap(diov->bv_page)) +
+ diov->bv_offset + doffset;
if (saddr == NULL)
- saddr = ((char *)kmap(siov->kiov_page)) +
- siov->kiov_offset + soffset;
+ saddr = ((char *)kmap(siov->bv_page)) +
+ siov->bv_offset + soffset;
/* Vanishing risk of kmap deadlock when mapping 2 pages.
* However in practice at least one of the kiovs will be mapped
memcpy (daddr, saddr, this_nob);
nob -= this_nob;
- if (diov->kiov_len > doffset + this_nob) {
+ if (diov->bv_len > doffset + this_nob) {
daddr += this_nob;
doffset += this_nob;
} else {
- kunmap(diov->kiov_page);
+ kunmap(diov->bv_page);
daddr = NULL;
diov++;
ndiov--;
doffset = 0;
}
- if (siov->kiov_len > soffset + this_nob) {
+ if (siov->bv_len > soffset + this_nob) {
saddr += this_nob;
soffset += this_nob;
} else {
- kunmap(siov->kiov_page);
+ kunmap(siov->bv_page);
saddr = NULL;
siov++;
nsiov--;
} while (nob > 0);
if (daddr != NULL)
- kunmap(diov->kiov_page);
+ kunmap(diov->bv_page);
if (saddr != NULL)
- kunmap(siov->kiov_page);
+ kunmap(siov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);
void
lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
- unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
+ unsigned int nkiov, struct bio_vec *kiov,
+ unsigned int kiovoffset,
unsigned int nob)
{
/* NB iov, kiov are READ-ONLY */
}
LASSERT(nkiov > 0);
- while (kiovoffset >= kiov->kiov_len) {
- kiovoffset -= kiov->kiov_len;
+ while (kiovoffset >= kiov->bv_len) {
+ kiovoffset -= kiov->bv_len;
kiov++;
nkiov--;
LASSERT(nkiov > 0);
LASSERT(niov > 0);
LASSERT(nkiov > 0);
this_nob = min3((unsigned int)iov->iov_len - iovoffset,
- (unsigned int)kiov->kiov_len - kiovoffset,
+ (unsigned int)kiov->bv_len - kiovoffset,
nob);
if (addr == NULL)
- addr = ((char *)kmap(kiov->kiov_page)) +
- kiov->kiov_offset + kiovoffset;
+ addr = ((char *)kmap(kiov->bv_page)) +
+ kiov->bv_offset + kiovoffset;
memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
nob -= this_nob;
iovoffset = 0;
}
- if (kiov->kiov_len > kiovoffset + this_nob) {
+ if (kiov->bv_len > kiovoffset + this_nob) {
addr += this_nob;
kiovoffset += this_nob;
} else {
- kunmap(kiov->kiov_page);
+ kunmap(kiov->bv_page);
addr = NULL;
kiov++;
nkiov--;
} while (nob > 0);
if (addr != NULL)
- kunmap(kiov->kiov_page);
+ kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);
void
-lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov, unsigned int kiovoffset,
+lnet_copy_iov2kiov(unsigned int nkiov, struct bio_vec *kiov,
+ unsigned int kiovoffset,
unsigned int niov, struct kvec *iov, unsigned int iovoffset,
unsigned int nob)
{
LASSERT (!in_interrupt ());
LASSERT (nkiov > 0);
- while (kiovoffset >= kiov->kiov_len) {
- kiovoffset -= kiov->kiov_len;
+ while (kiovoffset >= kiov->bv_len) {
+ kiovoffset -= kiov->bv_len;
kiov++;
nkiov--;
LASSERT(nkiov > 0);
do {
LASSERT(nkiov > 0);
LASSERT(niov > 0);
- this_nob = min3((unsigned int)kiov->kiov_len - kiovoffset,
+ this_nob = min3((unsigned int)kiov->bv_len - kiovoffset,
(unsigned int)iov->iov_len - iovoffset,
nob);
if (addr == NULL)
- addr = ((char *)kmap(kiov->kiov_page)) +
- kiov->kiov_offset + kiovoffset;
+ addr = ((char *)kmap(kiov->bv_page)) +
+ kiov->bv_offset + kiovoffset;
memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
nob -= this_nob;
- if (kiov->kiov_len > kiovoffset + this_nob) {
+ if (kiov->bv_len > kiovoffset + this_nob) {
addr += this_nob;
kiovoffset += this_nob;
} else {
- kunmap(kiov->kiov_page);
+ kunmap(kiov->bv_page);
addr = NULL;
kiov++;
nkiov--;
} while (nob > 0);
if (addr != NULL)
- kunmap(kiov->kiov_page);
+ kunmap(kiov->bv_page);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);
int
-lnet_extract_kiov(int dst_niov, lnet_kiov_t *dst,
- int src_niov, lnet_kiov_t *src,
+lnet_extract_kiov(int dst_niov, struct bio_vec *dst,
+ int src_niov, struct bio_vec *src,
unsigned int offset, unsigned int len)
{
/* Initialise 'dst' to the subset of 'src' starting at 'offset',
return (0); /* no frags */
LASSERT(src_niov > 0);
- while (offset >= src->kiov_len) { /* skip initial frags */
- offset -= src->kiov_len;
+ while (offset >= src->bv_len) { /* skip initial frags */
+ offset -= src->bv_len;
src_niov--;
src++;
LASSERT(src_niov > 0);
LASSERT(src_niov > 0);
LASSERT((int)niov <= dst_niov);
- frag_len = src->kiov_len - offset;
- dst->kiov_page = src->kiov_page;
- dst->kiov_offset = src->kiov_offset + offset;
+ frag_len = src->bv_len - offset;
+ dst->bv_page = src->bv_page;
+ dst->bv_offset = src->bv_offset + offset;
if (len <= frag_len) {
- dst->kiov_len = len;
- LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
+ dst->bv_len = len;
+ LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
return niov;
}
- dst->kiov_len = frag_len;
- LASSERT(dst->kiov_offset + dst->kiov_len <= PAGE_SIZE);
+ dst->bv_len = frag_len;
+ LASSERT(dst->bv_offset + dst->bv_len <= PAGE_SIZE);
len -= frag_len;
dst++;
int delayed, unsigned int offset, unsigned int mlen,
unsigned int rlen)
{
- unsigned int niov = 0;
+ unsigned int niov = 0;
struct kvec *iov = NULL;
- lnet_kiov_t *kiov = NULL;
- int rc;
+ struct bio_vec *kiov = NULL;
+ int rc;
LASSERT (!in_interrupt ());
LASSERT (mlen == 0 || msg != NULL);
static int
lolnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg,
int delayed, unsigned int niov,
- struct kvec *iov, lnet_kiov_t *kiov,
+ struct kvec *iov, struct bio_vec *kiov,
unsigned int offset, unsigned int mlen, unsigned int rlen)
{
struct lnet_msg *sendmsg = private;
int sz = offsetof(struct lnet_rtrbuf, rb_kiov[npages]);
while (--npages >= 0)
- __free_page(rb->rb_kiov[npages].kiov_page);
+ __free_page(rb->rb_kiov[npages].bv_page);
LIBCFS_FREE(rb, sz);
}
GFP_KERNEL | __GFP_ZERO);
if (page == NULL) {
while (--i >= 0)
- __free_page(rb->rb_kiov[i].kiov_page);
+ __free_page(rb->rb_kiov[i].bv_page);
LIBCFS_FREE(rb, sz);
return NULL;
}
- rb->rb_kiov[i].kiov_len = PAGE_SIZE;
- rb->rb_kiov[i].kiov_offset = 0;
- rb->rb_kiov[i].kiov_page = page;
+ rb->rb_kiov[i].bv_len = PAGE_SIZE;
+ rb->rb_kiov[i].bv_offset = 0;
+ rb->rb_kiov[i].bv_page = page;
}
return rb;
int off;
int len;
- pg = bk->bk_iovs[i].kiov_page;
- off = bk->bk_iovs[i].kiov_offset;
- len = bk->bk_iovs[i].kiov_len;
+ pg = bk->bk_iovs[i].bv_page;
+ off = bk->bk_iovs[i].bv_offset;
+ len = bk->bk_iovs[i].bv_len;
brw_fill_page(pg, off, len, pattern, magic);
}
}
int off;
int len;
- pg = bk->bk_iovs[i].kiov_page;
- off = bk->bk_iovs[i].kiov_offset;
- len = bk->bk_iovs[i].kiov_len;
+ pg = bk->bk_iovs[i].bv_page;
+ off = bk->bk_iovs[i].bv_offset;
+ len = bk->bk_iovs[i].bv_len;
if (brw_check_page(pg, off, len, pattern, magic) != 0) {
CERROR("Bulk page %p (%d/%d) is corrupted!\n",
pg, i, bk->bk_niov);
LASSERT(list_empty(&crpc->crp_link));
for (i = 0; i < bulk->bk_niov; i++) {
- if (bulk->bk_iovs[i].kiov_page == NULL)
+ if (bulk->bk_iovs[i].bv_page == NULL)
continue;
- __free_page(bulk->bk_iovs[i].kiov_page);
+ __free_page(bulk->bk_iovs[i].bv_page);
}
srpc_client_rpc_decref(crpc->crp_rpc);
}
static struct lnet_process_id_packed *
-lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
+lstcon_next_id(int idx, int nkiov, struct bio_vec *kiov)
{
struct lnet_process_id_packed *pid;
int i;
LASSERT (i < nkiov);
- pid = (struct lnet_process_id_packed *)page_address(kiov[i].kiov_page);
+ pid = (struct lnet_process_id_packed *)page_address(kiov[i].bv_page);
return &pid[idx % SFW_ID_PER_PAGE];
}
static int
lstcon_dstnodes_prep(struct lstcon_group *grp, int idx,
- int dist, int span, int nkiov, lnet_kiov_t *kiov)
+ int dist, int span, int nkiov, struct bio_vec *kiov)
{
struct lnet_process_id_packed *pid;
struct lstcon_ndlink *ndl;
PAGE_SIZE : min_t(int, nob, PAGE_SIZE);
nob -= len;
- bulk->bk_iovs[i].kiov_offset = 0;
- bulk->bk_iovs[i].kiov_len = len;
- bulk->bk_iovs[i].kiov_page =
+ bulk->bk_iovs[i].bv_offset = 0;
+ bulk->bk_iovs[i].bv_len = len;
+ bulk->bk_iovs[i].bv_page =
alloc_page(GFP_KERNEL);
- if (bulk->bk_iovs[i].kiov_page == NULL) {
+ if (bulk->bk_iovs[i].bv_page == NULL) {
lstcon_rpc_put(*crpc);
return -ENOMEM;
}
struct lnet_process_id_packed id;
int j;
- dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
+ dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].bv_page);
LASSERT (dests != NULL); /* my pages are within KVM always */
id = dests[i % SFW_ID_PER_PAGE];
if (msg->msg_magic != SRPC_MSG_MAGIC)
LASSERT(off < PAGE_SIZE);
LASSERT(nob > 0 && nob <= PAGE_SIZE);
- bk->bk_iovs[i].kiov_offset = off;
- bk->bk_iovs[i].kiov_page = pg;
- bk->bk_iovs[i].kiov_len = nob;
+ bk->bk_iovs[i].bv_offset = off;
+ bk->bk_iovs[i].bv_page = pg;
+ bk->bk_iovs[i].bv_len = nob;
return nob;
}
LASSERT(bk != NULL);
for (i = 0; i < bk->bk_niov; i++) {
- pg = bk->bk_iovs[i].kiov_page;
+ pg = bk->bk_iovs[i].bv_page;
if (pg == NULL)
break;
/* bulk descriptor */
struct srpc_bulk {
- int bk_len; /* len of bulk data */
- struct lnet_handle_md bk_mdh;
- int bk_sink; /* sink/source */
- int bk_niov; /* # iov in bk_iovs */
- lnet_kiov_t bk_iovs[0];
+ int bk_len; /* len of bulk data */
+ struct lnet_handle_md bk_mdh;
+ int bk_sink; /* sink/source */
+ int bk_niov; /* # iov in bk_iovs */
+ struct bio_vec bk_iovs[0];
};
/* message buffer descriptor */
struct lnet_handle_md bd_mds[PTLRPC_BULK_OPS_COUNT];
/* encrypted iov, size is either 0 or bd_iov_count. */
- lnet_kiov_t *bd_enc_vec;
- lnet_kiov_t *bd_vec;
+ struct bio_vec *bd_enc_vec;
+ struct bio_vec *bd_vec;
};
enum {
for (i = 0; i < page_count; i++) {
void *pz;
if (desc)
- pz = page_zone(desc->bd_vec[i].kiov_page);
+ pz = page_zone(desc->bd_vec[i].bv_page);
else
pz = page_zone(aa->aa_ppga[i]->pg);
int i;
for (i = 0; i < desc->bd_iov_count ; i++)
- put_page(desc->bd_vec[i].kiov_page);
+ put_page(desc->bd_vec[i].bv_page);
}
static int ptlrpc_prep_bulk_frag_pages(struct ptlrpc_bulk_desc *desc,
struct page *page, int pageoffset, int len,
int pin)
{
- lnet_kiov_t *kiov;
+ struct bio_vec *kiov;
LASSERT(desc->bd_iov_count < desc->bd_max_iov);
LASSERT(page != NULL);
if (pin)
get_page(page);
- kiov->kiov_page = page;
- kiov->kiov_offset = pageoffset;
- kiov->kiov_len = len;
+ kiov->bv_page = page;
+ kiov->bv_offset = pageoffset;
+ kiov->bv_len = len;
desc->bd_iov_count++;
}
typedef int (*digest_hash)(
struct ahash_request *req, rawobj_t *hdr,
int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs);
+ int iovcnt, struct bio_vec *iovs);
/* The mechanism-independent gss-api context: */
struct gss_ctx {
int msgcnt,
rawobj_t *msgs,
int iovcnt,
- lnet_kiov_t *iovs,
+ struct bio_vec *iovs,
rawobj_t *mic_token);
__u32 lgss_verify_mic(
struct gss_ctx *ctx,
int msgcnt,
rawobj_t *msgs,
int iovcnt,
- lnet_kiov_t *iovs,
+ struct bio_vec *iovs,
rawobj_t *mic_token);
__u32 lgss_wrap(
struct gss_ctx *ctx,
int msgcnt,
rawobj_t *msgs,
int iovcnt,
- lnet_kiov_t *iovs,
+ struct bio_vec *iovs,
rawobj_t *mic_token);
__u32 (*gss_verify_mic)(
struct gss_ctx *ctx,
int msgcnt,
rawobj_t *msgs,
int iovcnt,
- lnet_kiov_t *iovs,
+ struct bio_vec *iovs,
rawobj_t *mic_token);
__u32 (*gss_wrap)(
struct gss_ctx *ctx,
/* fix the actual data size */
for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_vec[i].kiov_len + nob >
+ if (desc->bd_vec[i].bv_len + nob >
desc->bd_nob_transferred) {
- desc->bd_vec[i].kiov_len =
+ desc->bd_vec[i].bv_len =
desc->bd_nob_transferred - nob;
}
- nob += desc->bd_vec[i].kiov_len;
+ nob += desc->bd_vec[i].bv_len;
}
token.data = bsdv->bsd_data;
int gss_digest_hash(struct ahash_request *req,
rawobj_t *hdr, int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs)
+ int iovcnt, struct bio_vec *iovs)
{
struct scatterlist sg[1];
struct sg_table sgt;
}
for (i = 0; i < iovcnt; i++) {
- if (iovs[i].kiov_len == 0)
+ if (iovs[i].bv_len == 0)
continue;
sg_init_table(sg, 1);
- sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
- iovs[i].kiov_offset);
+ sg_set_page(&sg[0], iovs[i].bv_page, iovs[i].bv_len,
+ iovs[i].bv_offset);
- ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len);
+ ahash_request_set_crypt(req, sg, NULL, iovs[i].bv_len);
rc = crypto_ahash_update(req);
if (rc)
return rc;
int gss_digest_hash_compat(struct ahash_request *req,
rawobj_t *hdr, int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs)
+ int iovcnt, struct bio_vec *iovs)
{
struct scatterlist sg[1];
struct sg_table sgt;
}
for (i = 0; i < iovcnt; i++) {
- if (iovs[i].kiov_len == 0)
+ if (iovs[i].bv_len == 0)
continue;
sg_init_table(sg, 1);
- sg_set_page(&sg[0], iovs[i].kiov_page, iovs[i].kiov_len,
- iovs[i].kiov_offset);
+ sg_set_page(&sg[0], iovs[i].bv_page, iovs[i].bv_len,
+ iovs[i].bv_offset);
- ahash_request_set_crypt(req, sg, NULL, iovs[i].kiov_len);
+ ahash_request_set_crypt(req, sg, NULL, iovs[i].bv_len);
rc = crypto_ahash_update(req);
if (rc)
return rc;
int gss_crypt_generic(struct crypto_blkcipher *tfm, int decrypt, const void *iv,
const void *in, void *out, size_t length);
int gss_digest_hash(struct ahash_request *req, rawobj_t *hdr,
- int msgcnt, rawobj_t *msgs, int iovcnt, lnet_kiov_t *iovs);
+ int msgcnt, rawobj_t *msgs, int iovcnt,
+ struct bio_vec *iovs);
int gss_digest_hash_compat(struct ahash_request *req,
rawobj_t *hdr, int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs);
+ int iovcnt, struct bio_vec *iovs);
int gss_add_padding(rawobj_t *msg, int msg_buflen, int blocksize);
int gss_crypt_rawobjs(struct crypto_blkcipher *tfm, __u8 *iv,
int inobj_cnt, rawobj_t *inobjs, rawobj_t *outobj,
struct gss_keyblock *kb,
struct krb5_header *khdr,
int msgcnt, rawobj_t *msgs,
- int iovcnt, lnet_kiov_t *iovs,
+ int iovcnt, struct bio_vec *iovs,
rawobj_t *cksum,
digest_hash hash_func)
{
int msgcnt,
rawobj_t *msgs,
int iovcnt,
- lnet_kiov_t *iovs,
+ struct bio_vec *iovs,
rawobj_t *token)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
int msgcnt,
rawobj_t *msgs,
int iovcnt,
- lnet_kiov_t *iovs,
+ struct bio_vec *iovs,
rawobj_t *token)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
/* encrypt clear pages */
for (i = 0; i < desc->bd_iov_count; i++) {
sg_init_table(&src, 1);
- sg_set_page(&src, desc->bd_vec[i].kiov_page,
- (desc->bd_vec[i].kiov_len +
+ sg_set_page(&src, desc->bd_vec[i].bv_page,
+ (desc->bd_vec[i].bv_len +
blocksize - 1) &
(~(blocksize - 1)),
- desc->bd_vec[i].kiov_offset);
+ desc->bd_vec[i].bv_offset);
if (adj_nob)
nob += src.length;
sg_init_table(&dst, 1);
- sg_set_page(&dst, desc->bd_enc_vec[i].kiov_page,
+ sg_set_page(&dst, desc->bd_enc_vec[i].bv_page,
src.length, src.offset);
- desc->bd_enc_vec[i].kiov_offset = dst.offset;
- desc->bd_enc_vec[i].kiov_len = dst.length;
+ desc->bd_enc_vec[i].bv_offset = dst.offset;
+ desc->bd_enc_vec[i].bv_len = dst.length;
rc = crypto_blkcipher_encrypt_iv(&ciph_desc, &dst, &src,
src.length);
* desc->bd_nob_transferred is the size of cipher text received.
* desc->bd_nob is the target size of plain text supposed to be.
*
- * if adj_nob != 0, we adjust each page's kiov_len to the actual
+ * if adj_nob != 0, we adjust each page's bv_len to the actual
* plain text size.
* - for client read: we don't know data size for each page, so
- * bd_iov[]->kiov_len is set to PAGE_SIZE, but actual data received might
+ * bd_iov[]->bv_len is set to PAGE_SIZE, but actual data received might
* be smaller, so we need to adjust it according to
- * bd_u.bd_kiov.bd_enc_vec[]->kiov_len.
+ * bd_u.bd_kiov.bd_enc_vec[]->bv_len.
* this means we DO NOT support the situation where the server sends odd-size
* data in a page which is not the last one.
* - for server write: we know the exact data size for each page being expected,
- * thus kiov_len is accurate already, so we should not adjust it at all.
- * and bd_u.bd_kiov.bd_enc_vec[]->kiov_len should be
- * round_up(bd_iov[]->kiov_len) which
+ * thus bv_len is accurate already, so we should not adjust it at all.
+ * and bd_u.bd_kiov.bd_enc_vec[]->bv_len should be
+ * round_up(bd_iov[]->bv_len) which
* should have been done by prep_bulk().
*/
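/* Worked example, illustrative numbers only (assume blocksize 16): a
 * client read of 1000 plain-text bytes into one page arrives as
 * round_up(1000, 16) = 1008 cipher-text bytes, so with adj_nob set
 * bd_enc_vec[0].bv_len = 1008 while bd_vec[0].bv_len, preset to
 * PAGE_SIZE, is trimmed back to 1000 (= bd_nob). For a server write,
 * bd_vec[0].bv_len is already exactly 1000 and is not touched.
 */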
static
for (i = 0; i < desc->bd_iov_count && ct_nob < desc->bd_nob_transferred;
i++) {
- if (desc->bd_enc_vec[i].kiov_offset % blocksize
+ if (desc->bd_enc_vec[i].bv_offset % blocksize
!= 0 ||
- desc->bd_enc_vec[i].kiov_len % blocksize
+ desc->bd_enc_vec[i].bv_len % blocksize
!= 0) {
CERROR("page %d: odd offset %u len %u, blocksize %d\n",
- i, desc->bd_enc_vec[i].kiov_offset,
- desc->bd_enc_vec[i].kiov_len,
+ i, desc->bd_enc_vec[i].bv_offset,
+ desc->bd_enc_vec[i].bv_len,
blocksize);
return -EFAULT;
}
if (adj_nob) {
- if (ct_nob + desc->bd_enc_vec[i].kiov_len >
+ if (ct_nob + desc->bd_enc_vec[i].bv_len >
desc->bd_nob_transferred)
- desc->bd_enc_vec[i].kiov_len =
+ desc->bd_enc_vec[i].bv_len =
desc->bd_nob_transferred - ct_nob;
- desc->bd_vec[i].kiov_len =
- desc->bd_enc_vec[i].kiov_len;
- if (pt_nob + desc->bd_enc_vec[i].kiov_len >
+ desc->bd_vec[i].bv_len =
+ desc->bd_enc_vec[i].bv_len;
+ if (pt_nob + desc->bd_enc_vec[i].bv_len >
desc->bd_nob)
- desc->bd_vec[i].kiov_len =
+ desc->bd_vec[i].bv_len =
desc->bd_nob - pt_nob;
} else {
/* this should be guaranteed by LNET */
LASSERT(ct_nob + desc->bd_enc_vec[i].
- kiov_len <=
+ bv_len <=
desc->bd_nob_transferred);
- LASSERT(desc->bd_vec[i].kiov_len <=
- desc->bd_enc_vec[i].kiov_len);
+ LASSERT(desc->bd_vec[i].bv_len <=
+ desc->bd_enc_vec[i].bv_len);
}
- if (desc->bd_enc_vec[i].kiov_len == 0)
+ if (desc->bd_enc_vec[i].bv_len == 0)
continue;
sg_init_table(&src, 1);
- sg_set_page(&src, desc->bd_enc_vec[i].kiov_page,
- desc->bd_enc_vec[i].kiov_len,
- desc->bd_enc_vec[i].kiov_offset);
+ sg_set_page(&src, desc->bd_enc_vec[i].bv_page,
+ desc->bd_enc_vec[i].bv_len,
+ desc->bd_enc_vec[i].bv_offset);
dst = src;
- if (desc->bd_vec[i].kiov_len % blocksize == 0)
+ if (desc->bd_vec[i].bv_len % blocksize == 0)
sg_assign_page(&dst,
- desc->bd_vec[i].kiov_page);
+ desc->bd_vec[i].bv_page);
rc = crypto_blkcipher_decrypt_iv(&ciph_desc, &dst, &src,
src.length);
return rc;
}
- if (desc->bd_vec[i].kiov_len % blocksize != 0) {
- memcpy(page_address(desc->bd_vec[i].kiov_page) +
- desc->bd_vec[i].kiov_offset,
+ if (desc->bd_vec[i].bv_len % blocksize != 0) {
+ memcpy(page_address(desc->bd_vec[i].bv_page) +
+ desc->bd_vec[i].bv_offset,
page_address(desc->bd_enc_vec[i].
- kiov_page) +
- desc->bd_vec[i].kiov_offset,
- desc->bd_vec[i].kiov_len);
+ bv_page) +
+ desc->bd_vec[i].bv_offset,
+ desc->bd_vec[i].bv_len);
}
- ct_nob += desc->bd_enc_vec[i].kiov_len;
- pt_nob += desc->bd_vec[i].kiov_len;
+ ct_nob += desc->bd_enc_vec[i].bv_len;
+ pt_nob += desc->bd_vec[i].bv_len;
}
if (unlikely(ct_nob != desc->bd_nob_transferred)) {
/* if needed, clear up the rest unused iovs */
if (adj_nob)
while (i < desc->bd_iov_count)
- desc->bd_vec[i++].kiov_len = 0;
+ desc->bd_vec[i++].bv_len = 0;
/* decrypt tail (krb5 header) */
rc = gss_setup_sgtable(&sg_src, &src, cipher->data + blocksize,
blocksize = crypto_blkcipher_blocksize(kctx->kc_keye.kb_tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(desc->bd_enc_vec[i].kiov_page);
+ LASSERT(desc->bd_enc_vec[i].bv_page);
/*
* offset should always start at page boundary of either
* client or server side.
*/
- if (desc->bd_vec[i].kiov_offset & blocksize) {
+ if (desc->bd_vec[i].bv_offset & blocksize) {
CERROR("odd offset %d in page %d\n",
- desc->bd_vec[i].kiov_offset, i);
+ desc->bd_vec[i].bv_offset, i);
return GSS_S_FAILURE;
}
- desc->bd_enc_vec[i].kiov_offset =
- desc->bd_vec[i].kiov_offset;
- desc->bd_enc_vec[i].kiov_len =
- (desc->bd_vec[i].kiov_len +
+ desc->bd_enc_vec[i].bv_offset =
+ desc->bd_vec[i].bv_offset;
+ desc->bd_enc_vec[i].bv_len =
+ (desc->bd_vec[i].bv_len +
blocksize - 1) & (~(blocksize - 1));
}
/* gss_get_mic: compute a mic over message and return mic_token. */
__u32 lgss_get_mic(struct gss_ctx *context_handle,
- int msgcnt,
- rawobj_t *msg,
- int iovcnt,
- lnet_kiov_t *iovs,
- rawobj_t *mic_token)
+ int msgcnt,
+ rawobj_t *msg,
+ int iovcnt,
+ struct bio_vec *iovs,
+ rawobj_t *mic_token)
{
LASSERT(context_handle);
LASSERT(context_handle->mech_type);
/* gss_verify_mic: check whether the provided mic_token verifies message. */
__u32 lgss_verify_mic(struct gss_ctx *context_handle,
- int msgcnt,
- rawobj_t *msg,
- int iovcnt,
- lnet_kiov_t *iovs,
- rawobj_t *mic_token)
+ int msgcnt,
+ rawobj_t *msg,
+ int iovcnt,
+ struct bio_vec *iovs,
+ rawobj_t *mic_token)
{
LASSERT(context_handle);
LASSERT(context_handle->mech_type);
static
__u32 gss_get_mic_null(struct gss_ctx *gss_context, int message_count,
- rawobj_t *messages, int iov_count, lnet_kiov_t *iovs,
+ rawobj_t *messages, int iov_count, struct bio_vec *iovs,
rawobj_t *token)
{
return GSS_S_COMPLETE;
static
__u32 gss_verify_mic_null(struct gss_ctx *gss_context, int message_count,
- rawobj_t *messages, int iov_count, lnet_kiov_t *iovs,
+ rawobj_t *messages, int iov_count,
+ struct bio_vec *iovs,
rawobj_t *token)
{
return GSS_S_COMPLETE;
static
u32 sk_make_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key, int msg_count,
- rawobj_t *msgs, int iov_count, lnet_kiov_t *iovs,
+ rawobj_t *msgs, int iov_count, struct bio_vec *iovs,
rawobj_t *token, digest_hash hash_func)
{
struct ahash_request *req;
int message_count,
rawobj_t *messages,
int iov_count,
- lnet_kiov_t *iovs,
+ struct bio_vec *iovs,
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
static
u32 sk_verify_hmac(enum cfs_crypto_hash_alg algo, rawobj_t *key,
int message_count, rawobj_t *messages,
- int iov_count, lnet_kiov_t *iovs,
+ int iov_count, struct bio_vec *iovs,
rawobj_t *token, digest_hash hash_func)
{
rawobj_t checksum = RAWOBJ_EMPTY;
static
u32 sk_verify_bulk_hmac(enum cfs_crypto_hash_alg sc_hmac, rawobj_t *key,
int msgcnt, rawobj_t *msgs, int iovcnt,
- lnet_kiov_t *iovs, int iov_bytes, rawobj_t *token)
+ struct bio_vec *iovs, int iov_bytes, rawobj_t *token)
{
rawobj_t checksum = RAWOBJ_EMPTY;
struct ahash_request *req;
}
for (i = 0; i < iovcnt && iov_bytes > 0; i++) {
- if (iovs[i].kiov_len == 0)
+ if (iovs[i].bv_len == 0)
continue;
- bytes = min_t(int, iov_bytes, iovs[i].kiov_len);
+ bytes = min_t(int, iov_bytes, iovs[i].bv_len);
iov_bytes -= bytes;
sg_init_table(sg, 1);
- sg_set_page(&sg[0], iovs[i].kiov_page, bytes,
- iovs[i].kiov_offset);
+ sg_set_page(&sg[0], iovs[i].bv_page, bytes,
+ iovs[i].bv_offset);
ahash_request_set_crypt(req, sg, NULL, bytes);
rc = crypto_ahash_update(req);
if (rc)
int message_count,
rawobj_t *messages,
int iov_count,
- lnet_kiov_t *iovs,
+ struct bio_vec *iovs,
rawobj_t *token)
{
struct sk_ctx *skc = gss_context->internal_ctx_id;
blocksize = crypto_blkcipher_blocksize(skc->sc_session_kb.kb_tfm);
for (i = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_vec[i].kiov_offset & blocksize) {
+ if (desc->bd_vec[i].bv_offset & blocksize) {
CERROR("offset %d not blocksize aligned\n",
- desc->bd_vec[i].kiov_offset);
+ desc->bd_vec[i].bv_offset);
return GSS_S_FAILURE;
}
- desc->bd_enc_vec[i].kiov_offset =
- desc->bd_vec[i].kiov_offset;
- desc->bd_enc_vec[i].kiov_len =
- sk_block_mask(desc->bd_vec[i].kiov_len, blocksize);
+ desc->bd_enc_vec[i].bv_offset =
+ desc->bd_vec[i].bv_offset;
+ desc->bd_enc_vec[i].bv_len =
+ sk_block_mask(desc->bd_vec[i].bv_len, blocksize);
}
return GSS_S_COMPLETE;
sg_init_table(&ctxt, 1);
for (i = 0; i < desc->bd_iov_count; i++) {
- sg_set_page(&ptxt, desc->bd_vec[i].kiov_page,
- sk_block_mask(desc->bd_vec[i].kiov_len,
+ sg_set_page(&ptxt, desc->bd_vec[i].bv_page,
+ sk_block_mask(desc->bd_vec[i].bv_len,
blocksize),
- desc->bd_vec[i].kiov_offset);
+ desc->bd_vec[i].bv_offset);
nob += ptxt.length;
- sg_set_page(&ctxt, desc->bd_enc_vec[i].kiov_page,
+ sg_set_page(&ctxt, desc->bd_enc_vec[i].bv_page,
ptxt.length, ptxt.offset);
- desc->bd_enc_vec[i].kiov_offset = ctxt.offset;
- desc->bd_enc_vec[i].kiov_len = ctxt.length;
+ desc->bd_enc_vec[i].bv_offset = ctxt.offset;
+ desc->bd_enc_vec[i].bv_len = ctxt.length;
rc = crypto_blkcipher_encrypt_iv(&cdesc, &ctxt, &ptxt,
ptxt.length);
for (i = 0; i < desc->bd_iov_count && cnob < desc->bd_nob_transferred;
i++) {
- lnet_kiov_t *piov = &desc->bd_vec[i];
- lnet_kiov_t *ciov = &desc->bd_enc_vec[i];
+ struct bio_vec *piov = &desc->bd_vec[i];
+ struct bio_vec *ciov = &desc->bd_enc_vec[i];
- if (ciov->kiov_offset % blocksize != 0 ||
- ciov->kiov_len % blocksize != 0) {
+ if (ciov->bv_offset % blocksize != 0 ||
+ ciov->bv_len % blocksize != 0) {
CERROR("Invalid bulk descriptor vector\n");
return GSS_S_DEFECTIVE_TOKEN;
}
* integrity only mode */
if (adj_nob) {
/* cipher text must not exceed transferred size */
- if (ciov->kiov_len + cnob > desc->bd_nob_transferred)
- ciov->kiov_len =
+ if (ciov->bv_len + cnob > desc->bd_nob_transferred)
+ ciov->bv_len =
desc->bd_nob_transferred - cnob;
- piov->kiov_len = ciov->kiov_len;
+ piov->bv_len = ciov->bv_len;
/* plain text must not exceed bulk's size */
- if (ciov->kiov_len + pnob > desc->bd_nob)
- piov->kiov_len = desc->bd_nob - pnob;
+ if (ciov->bv_len + pnob > desc->bd_nob)
+ piov->bv_len = desc->bd_nob - pnob;
} else {
/* Taken from krb5_decrypt since it was not verified
* whether or not LNET guarantees these */
- if (ciov->kiov_len + cnob > desc->bd_nob_transferred ||
- piov->kiov_len > ciov->kiov_len) {
+ if (ciov->bv_len + cnob > desc->bd_nob_transferred ||
+ piov->bv_len > ciov->bv_len) {
CERROR("Invalid decrypted length\n");
return GSS_S_FAILURE;
}
}
- if (ciov->kiov_len == 0)
+ if (ciov->bv_len == 0)
continue;
sg_init_table(&ctxt, 1);
- sg_set_page(&ctxt, ciov->kiov_page, ciov->kiov_len,
- ciov->kiov_offset);
+ sg_set_page(&ctxt, ciov->bv_page, ciov->bv_len,
+ ciov->bv_offset);
ptxt = ctxt;
/* In the event the plain text size is not a multiple
* of blocksize we decrypt in place and copy the result
* after the decryption */
- if (piov->kiov_len % blocksize == 0)
- sg_assign_page(&ptxt, piov->kiov_page);
+ if (piov->bv_len % blocksize == 0)
+ sg_assign_page(&ptxt, piov->bv_page);
rc = crypto_blkcipher_decrypt_iv(&cdesc, &ptxt, &ctxt,
ctxt.length);
return GSS_S_FAILURE;
}
- if (piov->kiov_len % blocksize != 0) {
- memcpy(page_address(piov->kiov_page) +
- piov->kiov_offset,
- page_address(ciov->kiov_page) +
- ciov->kiov_offset,
- piov->kiov_len);
+ if (piov->bv_len % blocksize != 0) {
+ memcpy(page_address(piov->bv_page) +
+ piov->bv_offset,
+ page_address(ciov->bv_page) +
+ ciov->bv_offset,
+ piov->bv_len);
}
- cnob += ciov->kiov_len;
- pnob += piov->kiov_len;
+ cnob += ciov->bv_len;
+ pnob += piov->bv_len;
}
/* if needed, clear up the rest unused iovs */
if (adj_nob)
while (i < desc->bd_iov_count)
- desc->bd_vec[i++].kiov_len = 0;
+ desc->bd_vec[i++].bv_len = 0;
if (unlikely(cnob != desc->bd_nob_transferred)) {
CERROR("%d cipher text transferred but only %d decrypted\n",
for (i = 0; i < desc->bd_iov_count; i++) {
LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
- desc->bd_enc_vec[i].kiov_page =
+ desc->bd_enc_vec[i].bv_page =
page_pools.epp_pools[p_idx][g_idx];
page_pools.epp_pools[p_idx][g_idx] = NULL;
LASSERT(page_pools.epp_pools[p_idx]);
for (i = 0; i < desc->bd_iov_count; i++) {
- LASSERT(desc->bd_enc_vec[i].kiov_page != NULL);
+ LASSERT(desc->bd_enc_vec[i].bv_page);
LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);
page_pools.epp_pools[p_idx][g_idx] =
- desc->bd_enc_vec[i].kiov_page;
+ desc->bd_enc_vec[i].bv_page;
if (++g_idx == PAGES_PER_POOL) {
p_idx++;
for (i = 0; i < desc->bd_iov_count; i++) {
cfs_crypto_hash_update_page(req,
- desc->bd_vec[i].kiov_page,
- desc->bd_vec[i].kiov_offset &
+ desc->bd_vec[i].bv_page,
+ desc->bd_vec[i].bv_offset &
~PAGE_MASK,
- desc->bd_vec[i].kiov_len);
+ desc->bd_vec[i].bv_len);
}
if (hashsize > buflen) {
unsigned int off, i;
for (i = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_vec[i].kiov_len == 0)
+ if (desc->bd_vec[i].bv_len == 0)
continue;
- ptr = kmap(desc->bd_vec[i].kiov_page);
- off = desc->bd_vec[i].kiov_offset & ~PAGE_MASK;
+ ptr = kmap(desc->bd_vec[i].bv_page);
+ off = desc->bd_vec[i].bv_offset & ~PAGE_MASK;
ptr[off] ^= 0x1;
- kunmap(desc->bd_vec[i].kiov_page);
+ kunmap(desc->bd_vec[i].bv_page);
return;
}
}
/* fix the actual data size */
for (i = 0, nob = 0; i < desc->bd_iov_count; i++) {
- if (desc->bd_vec[i].kiov_len +
+ if (desc->bd_vec[i].bv_len +
nob > desc->bd_nob_transferred) {
- desc->bd_vec[i].kiov_len =
+ desc->bd_vec[i].bv_len =
desc->bd_nob_transferred - nob;
}
- nob += desc->bd_vec[i].kiov_len;
+ nob += desc->bd_vec[i].bv_len;
}
rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg,