+
+ for (i = 0; i < npages; i++) {
+ if (!pages[i])
+ break;
+ put_page(pages[i]);
+ }
+
+#if defined(HAVE_DIO_ITER)
+ kvfree(pages);
+#else
+ OBD_FREE_LARGE(pages, npages * sizeof(*pages));
+#endif
+}
+
+/**
+ * Pin the user pages backing @iter for a direct-IO transfer.
+ *
+ * \param[in]  rw      READ or WRITE (direction of the transfer)
+ * \param[in]  iter    user iovec iterator describing the buffer
+ * \param[out] pages   allocated array of pinned pages; caller frees with
+ *                     ll_free_user_pages() on success
+ * \param[out] npages  number of entries in \a pages (set only on success)
+ * \param[in]  maxsize maximum number of bytes to map
+ *
+ * \retval positive    number of bytes covered by the pinned pages
+ * \retval 0           nothing to map (empty iterator / maxsize == 0)
+ * \retval negative    negated errno on failure
+ */
+static ssize_t ll_get_user_pages(int rw, struct iov_iter *iter,
+				 struct page ***pages, ssize_t *npages,
+				 size_t maxsize)
+{
+#if defined(HAVE_DIO_ITER)
+	size_t start;
+	/*
+	 * Must be signed: iov_iter_get_pages_alloc() returns a negative
+	 * errno on failure.  With an unsigned type the error would wrap,
+	 * "result > 0" would be true, and *npages would be set to garbage.
+	 */
+	ssize_t result;
+
+	/*
+	 * iov_iter_get_pages_alloc() is introduced in 3.16 similar
+	 * to HAVE_DIO_ITER.
+	 */
+	result = iov_iter_get_pages_alloc(iter, pages, maxsize, &start);
+	if (result > 0)
+		*npages = DIV_ROUND_UP(result + start, PAGE_SIZE);
+
+	return result;
+#else
+	unsigned long addr;
+	size_t page_count;
+	size_t size;
+	long result;
+
+	if (!maxsize)
+		return 0;
+
+	if (!iter->nr_segs)
+		return 0;
+
+	addr = (unsigned long)iter->iov->iov_base + iter->iov_offset;
+	/* only page-aligned user buffers are supported on this path */
+	if (addr & ~PAGE_MASK)
+		return -EINVAL;
+
+	/* map only the first segment, capped at maxsize */
+	size = min_t(size_t, maxsize, iter->iov->iov_len);
+	page_count = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	OBD_ALLOC_LARGE(*pages, page_count * sizeof(**pages));
+	if (*pages == NULL)
+		return -ENOMEM;
+
+	down_read(&current->mm->mmap_sem);
+	result = get_user_pages(current, current->mm, addr, page_count,
+				rw == READ, 0, *pages, NULL);
+	up_read(&current->mm->mmap_sem);
+
+	if (unlikely(result != page_count)) {
+		/*
+		 * Partial pin: the array was zeroed on allocation, so
+		 * ll_free_user_pages() stops at the first NULL entry and
+		 * only releases the pages actually pinned.
+		 */
+		ll_free_user_pages(*pages, page_count);
+		*pages = NULL;
+
+		if (result >= 0)
+			return -EFAULT;
+
+		return result;
+	}
+	*npages = page_count;
+
+	return size;
+#endif
+}
+
+/* iov_iter_alignment() is introduced in 3.16 similar to HAVE_DIO_ITER */
+#if defined(HAVE_DIO_ITER)
+/* Return the alignment of the iterator's buffers (see iov_iter_alignment()). */
+static unsigned long ll_iov_iter_alignment(const struct iov_iter *i)
+{
+	return iov_iter_alignment(i);
+}
+#else /* copied from alignment_iovec() */
+/*
+ * Compute the combined (mis)alignment of every segment in the iterator
+ * by OR-ing together all base addresses and lengths: a set low bit in
+ * the result means at least one segment is misaligned at that power of
+ * two.  Callers typically test the result against a block/page mask.
+ */
+static unsigned long ll_iov_iter_alignment(const struct iov_iter *i)
+{
+	const struct iovec *iov = i->iov;
+	unsigned long res;
+	size_t size = i->count;
+	size_t n;
+
+	/* nothing left to transfer: trivially aligned */
+	if (!size)
+		return 0;
+
+	/* first segment: honour the bytes already consumed (iov_offset) */
+	res = (unsigned long)iov->iov_base + i->iov_offset;
+	n = iov->iov_len - i->iov_offset;
+	if (n >= size)
+		/* transfer ends inside the first segment; fold in its length */
+		return res | size;
+
+	size -= n;
+	res |= n;
+	/* middle segments contribute both their base address and full length */
+	while (size > (++iov)->iov_len) {
+		res |= (unsigned long)iov->iov_base | iov->iov_len;
+		size -= iov->iov_len;
+	}
+	/* last segment: only 'size' bytes of it are used */
+	res |= (unsigned long)iov->iov_base | size;
+
+	return res;
+}
+#endif
+
+#ifndef HAVE_AIO_COMPLETE
+/*
+ * Compat shim for kernels that removed aio_complete(): report AIO
+ * completion by invoking the iocb's completion callback directly,
+ * when one is registered.
+ */
+static inline void aio_complete(struct kiocb *iocb, ssize_t res, ssize_t res2)
+{
+	if (!iocb->ki_complete)
+		return;
+
+	iocb->ki_complete(iocb, res, res2);
+}
+#endif
+
+/** Direct IO pages: describes one batch of pages for a direct-IO transfer. */
+struct ll_dio_pages {
+	/** AIO state tracking completion of this transfer. */
+	struct cl_dio_aio *ldp_aio;
+	/**
+	 * Page array to be written.  Partial pages are not supported
+	 * except for the last one.
+	 */
+	struct page **ldp_pages;
+	/** Number of pages in the array. */
+	size_t ldp_count;
+	/** File offset of the first page. */
+	loff_t ldp_file_offset;
+};
+
+static void ll_aio_end(const struct lu_env *env, struct cl_sync_io *anchor)
+{
+ struct cl_dio_aio *aio = container_of(anchor, typeof(*aio), cda_sync);
+ ssize_t ret = anchor->csi_sync_rc;
+