]) # LIBCFS_SEC_RELEASE_SECCTX
#
+# LIBCFS_HAVE_KMAP_LOCAL
+#
+# Linux commit v5.10-rc2-80-gf3ba3c710ac5
+# mm/highmem: Provide kmap_local*
+#
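+# kmap_local_page() creates a short-lived, thread-local mapping that is
+# stack ordered: mappings must be released with kunmap_local() in the
+# reverse order they were taken.  Unlike kmap_atomic() it disables
+# neither preemption nor pagefaults, and it avoids the global lock and
+# possible sleep of kmap().
+#
+# Conversion policy: kmap()/kunmap() callers move to kmap_local_page()/
+# kunmap_local(), with fallbacks defined below for older kernels, while
+# kmap_atomic()/kunmap_atomic() callers move to the ll_kmap_local_page()/
+# ll_kunmap_local() aliases, which fall back to the atomic variants when
+# kmap_local*() is not available.
+#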
+AC_DEFUN([LIBCFS_SRC_HAVE_KMAP_LOCAL], [
+ LB2_LINUX_TEST_SRC([kmap_local_page], [
+ #include <linux/highmem.h>
+ ],[
+ struct page *pg = NULL;
+ void *kaddr = kmap_local_page(pg);
+
+ kunmap_local(kaddr);
+ ],[-Werror])
+])
+AC_DEFUN([LIBCFS_HAVE_KMAP_LOCAL], [
+ LB2_MSG_LINUX_TEST_RESULT([if 'kmap_local*' are available],
+ [kmap_local_page], [
+ AC_DEFINE(HAVE_KMAP_LOCAL, 1,
+ [kmap_local_* functions are available])
+ ## Map ll_k[un]map_local* to kmap_local*
+ AC_DEFINE([ll_kmap_local_page(p)], [kmap_local_page(p)],
+ [alias for kmap_local_page()])
+ AC_DEFINE([ll_kunmap_local(kaddr)], [kunmap_local((kaddr))],
+ [alias for kunmap_local()])
+ ],[
+ ## Map k[un]map_local* to k[un]map
+ AC_DEFINE([kmap_local_page(p)], [kmap(p)],
+ [need kmap_local_page()])
+ AC_DEFINE([kunmap_local(kaddr)], [kunmap(kmap_to_page(kaddr))],
+ [need kunmap_local()])
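+	## kunmap() takes the struct page while kunmap_local() takes the
+	## mapped address, hence kmap_to_page() in the fallback above.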
+ ## Map ll_k[un]map_local* to k[un]map_atomic for older kernels
+ ## that do not have k[un]map_local* available.
+		AC_DEFINE([ll_kmap_local_page(p)], [kmap_atomic(p)],
+			[fall back to kmap_atomic() for ll_kmap_local_page()])
+		AC_DEFINE([ll_kunmap_local(kaddr)], [kunmap_atomic((kaddr))],
+			[fall back to kunmap_atomic() for ll_kunmap_local()])
+ ])
+]) # LIBCFS_HAVE_KMAP_LOCAL
+
+#
# LIBCFS_HAVE_KFREE_SENSITIVE
#
# kernel v5.10-rc1~3
LIBCFS_SRC_KEY_NEED_UNLINK
LIBCFS_SRC_SEC_RELEASE_SECCTX
# 5.10
+ LIBCFS_SRC_HAVE_KMAP_LOCAL
LIBCFS_SRC_HAVE_KFREE_SENSITIVE
LIBCFS_SRC_HAVE_CRYPTO_SHA2_HEADER
LIBCFS_SRC_HAVE_LIST_CMP_FUNC_T
LIBCFS_KEY_NEED_UNLINK
LIBCFS_SEC_RELEASE_SECCTX
# 5.10
+ LIBCFS_HAVE_KMAP_LOCAL
LIBCFS_HAVE_KFREE_SENSITIVE
LIBCFS_HAVE_CRYPTO_SHA2_HEADER
LIBCFS_HAVE_LIST_CMP_FUNC_T
__LASSERT_TAGE_INVARIANT(tage);
- buf = kmap(tage->page);
+ buf = kmap_local_page(tage->page);
rc = cfs_kernel_write(filp, buf, tage->used, &filp->f_pos);
- kunmap(tage->page);
+ kunmap_local(buf);
if (rc != (int)tage->used) {
pr_warn("Lustre: wanted to write %u but wrote %d\n",
tage->used, rc);
else if (f_pos > i_size_read(de->d_inode))
f_pos = i_size_read(de->d_inode);
- buf = kmap(tage->page);
+ buf = kmap_local_page(tage->page);
rc = cfs_kernel_write(filp, buf, tage->used,
&f_pos);
- kunmap(tage->page);
+ kunmap_local(buf);
if (rc != (int)tage->used) {
pr_warn("Lustre: wanted to write %u but wrote %d\n",
tage->used, rc);
vunmap(addr);
} else {
do {
+ void *kaddr;
+
fraglen = min(kiov->bv_len - offset, nob);
/* make dang sure we don't send a bogus checksum if somehow we get
"odd fraglen %u on nkiov %d, nob %u bv_len %u offset %u kiov 0x%p\n",
fraglen, nkiov, nob, kiov->bv_len,
offset, kiov);
-
- addr = (void *)kmap(kiov->bv_page) + kiov->bv_offset +
- offset;
+		kaddr = kmap_local_page(kiov->bv_page);
+ addr = kaddr + kiov->bv_offset + offset;
tmpck = _kgnilnd_cksum(cksum, addr, fraglen);
CDEBUG(D_BUFFS,
if (dump_blob)
kgnilnd_dump_blob(D_BUFFS, "kiov cksum", addr, fraglen);
- kunmap(kiov->bv_page);
+ kunmap_local(kaddr);
kiov++;
nkiov--;
/* hijack tx_phys for the later unmap */
if (niov == 1) {
+ void *kaddr = kmap_local_page(tx->tx_imm_pages[0]);
+
/* tx->phyx being equal to NULL is the signal for unmap to discern between kmap and vmap */
tx->tx_phys = NULL;
- tx->tx_buffer = (void *)kmap(tx->tx_imm_pages[0]) +
- kiov[0].bv_offset + offset;
+ tx->tx_buffer = kaddr + kiov[0].bv_offset + offset;
atomic_inc(&kgnilnd_data.kgn_nkmap_short);
GNIDBG_TX(D_NET, tx, "kmapped page for %d bytes for kiov 0x%p, buffer 0x%p",
nob, kiov, tx->tx_buffer);
if (tx->tx_phys != NULL) {
vunmap(tx->tx_phys);
} else if (tx->tx_phys == NULL && tx->tx_buffer != NULL) {
- kunmap(tx->tx_imm_pages[0]);
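+		/* kunmap_local() accepts any address inside the mapped page,
+		 * so the offset tx_buffer pointer is fine here
+		 */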
+ kunmap_local(tx->tx_buffer);
}
/* clear to prevent kgnilnd_free_tx from thinking
* this is a RDMA descriptor */
int i;
for (nob = i = 0; i < niov; i++) {
- scratchiov[i].iov_base = kmap(kiov[i].bv_page) +
- kiov[i].bv_offset;
+ void *kaddr = kmap_local_page(kiov[i].bv_page);
+
+ scratchiov[i].iov_base = kaddr + kiov[i].bv_offset;
nob += scratchiov[i].iov_len = kiov[i].bv_len;
}
rc = kernel_sendmsg(sock, &msg, scratchiov, niov, nob);
- for (i = 0; i < niov; i++)
- kunmap(kiov[i].bv_page);
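+	/* kmap_local mappings are stack ordered; unmap in reverse */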
+ for (i = niov; i > 0; )
+ kunmap_local(scratchiov[--i].iov_base);
}
return rc;
}
* or leave them alone.
*/
addr = ksocknal_lib_kiov_vmap(kiov, niov, scratchiov, pages);
- if (addr != NULL) {
+ if (addr) {
nob = scratchiov[0].iov_len;
n = 1;
-
} else {
for (nob = i = 0; i < niov; i++) {
+ void *kaddr = kmap_local_page(kiov[i].bv_page);
+
+ scratchiov[i].iov_base = kaddr + kiov[i].bv_offset;
nob += scratchiov[i].iov_len = kiov[i].bv_len;
- scratchiov[i].iov_base = kmap(kiov[i].bv_page) +
- kiov[i].bv_offset;
}
n = niov;
}
rc = kernel_recvmsg(conn->ksnc_sock, &msg, scratchiov, n, nob,
MSG_DONTWAIT);
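+
+	/* drop the per-page mappings (newest first) before the checksum
+	 * pass below remaps each page
+	 */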
+ if (!addr) {
+ for (i = niov; i > 0; )
+ kunmap_local(scratchiov[--i].iov_base);
+ }
+
if (conn->ksnc_msg.ksm_csum != 0) {
for (i = 0, sum = rc; sum > 0; i++, sum -= fragnob) {
+		void *kaddr;
+
LASSERT(i < niov);
/* Dang! have to kmap again because I have nowhere to
* page is still mapped, the kernel just bumps the map
* count and returns me the address it stashed.
*/
-		base = kmap(kiov[i].bv_page) + kiov[i].bv_offset;
+		kaddr = kmap_local_page(kiov[i].bv_page);
+		base = kaddr + kiov[i].bv_offset;
fragnob = kiov[i].bv_len;
if (fragnob > sum)
fragnob = sum;
conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
base, fragnob);
- kunmap(kiov[i].bv_page);
+ kunmap_local(kaddr);
}
}
- if (addr != NULL) {
+ if (addr)
ksocknal_lib_kiov_vunmap(addr);
- } else {
- for (i = 0; i < niov; i++)
- kunmap(kiov[i].bv_page);
- }
return rc;
}
tx->tx_hdr.iov_len);
for (i = 0; i < tx->tx_nkiov; i++) {
- base = kmap(tx->tx_kiov[i].bv_page) +
- tx->tx_kiov[i].bv_offset;
+		void *kaddr = kmap_local_page(tx->tx_kiov[i].bv_page);
+
+		base = kaddr + tx->tx_kiov[i].bv_offset;
csum = ksocknal_csum(csum, base, tx->tx_kiov[i].bv_len);
-
- kunmap(tx->tx_kiov[i].bv_page);
+ kunmap_local(kaddr);
}
if (*ksocknal_tunables.ksnd_inject_csum_error) {
unsigned int nob)
{
/* NB diov, siov are READ-ONLY */
- unsigned int this_nob;
- char *daddr = NULL;
- char *saddr = NULL;
+ unsigned int this_nob;
+ char *daddr = NULL;
+ char *saddr = NULL;
+	void *dkaddr = NULL;
+	void *skaddr = NULL;
if (nob == 0)
return;
siov->bv_len - soffset,
nob);
- if (daddr == NULL)
- daddr = ((char *)kmap(diov->bv_page)) +
- diov->bv_offset + doffset;
- if (saddr == NULL)
- saddr = ((char *)kmap(siov->bv_page)) +
- siov->bv_offset + soffset;
+ if (!daddr) {
+ dkaddr = kmap_local_page(diov->bv_page);
+ daddr = dkaddr + diov->bv_offset + doffset;
+ }
+ if (!saddr) {
+ skaddr = kmap_local_page(siov->bv_page);
+ saddr = skaddr + siov->bv_offset + soffset;
+ }
/* Vanishing risk of kmap deadlock when mapping 2 pages.
* However in practice at least one of the kiovs will be mapped
daddr += this_nob;
doffset += this_nob;
} else {
- kunmap(diov->bv_page);
+ kunmap_local(dkaddr);
+ dkaddr = NULL;
daddr = NULL;
diov++;
ndiov--;
saddr += this_nob;
soffset += this_nob;
} else {
- kunmap(siov->bv_page);
+ kunmap_local(skaddr);
+ skaddr = NULL;
saddr = NULL;
siov++;
nsiov--;
}
} while (nob > 0);
- if (daddr != NULL)
- kunmap(diov->bv_page);
- if (saddr != NULL)
- kunmap(siov->bv_page);
+ if (daddr)
+ kunmap_local(dkaddr);
+ if (saddr)
+ kunmap_local(skaddr);
}
EXPORT_SYMBOL(lnet_copy_kiov2kiov);
void
-lnet_copy_kiov2iov (unsigned int niov, struct kvec *iov, unsigned int iovoffset,
- unsigned int nkiov, struct bio_vec *kiov,
- unsigned int kiovoffset,
- unsigned int nob)
+lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
+ unsigned int nkiov, struct bio_vec *kiov,
+ unsigned int kiovoffset,
+ unsigned int nob)
{
/* NB iov, kiov are READ-ONLY */
- unsigned int this_nob;
- char *addr = NULL;
+ unsigned int this_nob;
+ void *kaddr = NULL;
+ char *addr = NULL;
if (nob == 0)
return;
(unsigned int)kiov->bv_len - kiovoffset,
nob);
- if (addr == NULL)
- addr = ((char *)kmap(kiov->bv_page)) +
- kiov->bv_offset + kiovoffset;
+	if (!addr) {
+ kaddr = kmap_local_page(kiov->bv_page);
+ addr = kaddr + kiov->bv_offset + kiovoffset;
+ }
memcpy((char *)iov->iov_base + iovoffset, addr, this_nob);
nob -= this_nob;
addr += this_nob;
kiovoffset += this_nob;
} else {
- kunmap(kiov->bv_page);
+ kunmap_local(kaddr);
+ kaddr = NULL;
addr = NULL;
kiov++;
nkiov--;
} while (nob > 0);
-	if (addr != NULL)
-		kunmap(kiov->bv_page);
+	if (addr)
+		kunmap_local(kaddr);
}
EXPORT_SYMBOL(lnet_copy_kiov2iov);
unsigned int nob)
{
/* NB kiov, iov are READ-ONLY */
- unsigned int this_nob;
- char *addr = NULL;
+ unsigned int this_nob;
+ void *kaddr = NULL;
+ char *addr = NULL;
if (nob == 0)
return;
(unsigned int)iov->iov_len - iovoffset,
nob);
- if (addr == NULL)
- addr = ((char *)kmap(kiov->bv_page)) +
- kiov->bv_offset + kiovoffset;
+ if (!addr) {
+ kaddr = kmap_local_page(kiov->bv_page);
+ addr = kaddr + kiov->bv_offset + kiovoffset;
+ }
- memcpy (addr, (char *)iov->iov_base + iovoffset, this_nob);
+ memcpy(addr, (char *)iov->iov_base + iovoffset, this_nob);
nob -= this_nob;
if (kiov->bv_len > kiovoffset + this_nob) {
addr += this_nob;
kiovoffset += this_nob;
} else {
- kunmap(kiov->bv_page);
+ kunmap_local(kaddr);
+ kaddr = NULL;
addr = NULL;
kiov++;
nkiov--;
}
} while (nob > 0);
- if (addr != NULL)
- kunmap(kiov->bv_page);
+ if (addr)
+ kunmap_local(kaddr);
}
EXPORT_SYMBOL(lnet_copy_iov2kiov);
goto out_err;
}
- buf = kmap(page);
+ buf = kmap_local_page(page);
memset(buf, 0xAD, PAGE_SIZE);
- kunmap(page);
+ kunmap_local(buf);
for (start = jiffies, end = start + cfs_time_seconds(1) / 4,
bcount = 0; time_before(jiffies, end) && err == 0; bcount++) {
/* for dt_index_walk / mdd_readpage */
void *rdpg_page_get(const struct lu_rdpg *rdpg, unsigned int index);
-void rdpg_page_put(const struct lu_rdpg *rdpg, unsigned int index);
+void rdpg_page_put(const struct lu_rdpg *rdpg, unsigned int index, void *kaddr);
enum lu_xattr_flags {
LU_XATTR_REPLACE = BIT(0),
#endif
#ifdef POISON_BULK
-#define POISON_PAGE(page, val) do { memset(kmap(page), val, PAGE_SIZE); \
- kunmap(page); } while (0)
+#define POISON_PAGE(page, val) do { \
+	void *kaddr = kmap_local_page(page); \
+ memset(kaddr, val, PAGE_SIZE); \
+ kunmap_local(kaddr); \
+} while (0)
#else
#define POISON_PAGE(page, val) do { } while (0)
#endif
void ll_release_page(struct inode *inode, struct page *page,
bool remove)
{
- kunmap(page);
-
/* Always remove the page for striped dir, because the page is
* built from temporarily in LMV layer
*/
inode = page2inode(page);
- kaddr = kmap_atomic(page);
+ kaddr = ll_kmap_local_page(page);
memcpy(kaddr, lnb->lnb_data, lnb->lnb_len);
if (lnb->lnb_len < PAGE_SIZE)
memset(kaddr + lnb->lnb_len, 0,
PAGE_SIZE - lnb->lnb_len);
- kunmap_atomic(kaddr);
+ ll_kunmap_local(kaddr);
if (inode && IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) {
if (!ll_has_encryption_key(inode)) {
lock_page(page);
SetPageUptodate(page);
- dp = kmap_atomic(page);
+ dp = ll_kmap_local_page(page);
memcpy(dp, data, PAGE_SIZE);
hash = le64_to_cpu(dp->ldp_hash_start);
- kunmap_atomic(dp);
+ ll_kunmap_local(dp);
offset = hash_x_index(hash, is_hash64);
* purposes here we can treat it like i_size.
*/
if (attr->cat_kms <= offset) {
- char *kaddr = kmap_atomic(pg->cp_vmpage);
+ char *kaddr = ll_kmap_local_page(pg->cp_vmpage);
memset(kaddr, 0, PAGE_SIZE);
- kunmap_atomic(kaddr);
+ ll_kunmap_local(kaddr);
GOTO(out, result = 0);
}
static inline void stripe_dirent_unload(struct stripe_dirent *stripe)
{
if (stripe->sd_page) {
- kunmap(stripe->sd_page);
+ if (stripe->sd_dp) {
+ kunmap_local(stripe->sd_dp);
+ stripe->sd_dp = NULL;
+ }
put_page(stripe->sd_page);
stripe->sd_page = NULL;
stripe->sd_ent = NULL;
LASSERT(!ent);
do {
- if (stripe->sd_page) {
+ if (stripe->sd_page && stripe->sd_dp) {
__u64 end = le64_to_cpu(stripe->sd_dp->ldp_hash_end);
/* @hash should be the last dirent hash */
op_data->op_fid2 = oinfo->lmo_fid;
op_data->op_data = oinfo->lmo_root;
+ stripe->sd_dp = NULL;
rc = md_read_page(tgt->ltd_exp, op_data, ctxt->ldc_mrinfo, hash,
&stripe->sd_page);
if (rc)
break;
- stripe->sd_dp = page_address(stripe->sd_page);
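+	/* the mapping is cached in sd_dp and released in
+	 * stripe_dirent_unload()
+	 */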
+ stripe->sd_dp = kmap_local_page(stripe->sd_page);
ent = stripe_dirent_get(ctxt, lu_dirent_start(stripe->sd_dp),
stripe_index);
/* in case a page filled with ., .. and dummy, read next */
RETURN(-ENOMEM);
/* Initialize the entry page */
- dp = kmap(page);
+ dp = kmap_local_page(page);
memset(dp, 0, sizeof(*dp));
dp->ldp_hash_start = cpu_to_le64(offset);
dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
dp->ldp_hash_end = cpu_to_le64(ctxt->ldc_hash);
+ kunmap_local(dp);
put_lmv_dir_ctxt(ctxt);
OBD_FREE(ctxt, offsetof(typeof(*ctxt), ldc_stripes[stripe_count]));
RETURN(0);
free_page:
- kunmap(page);
+ kunmap_local(dp);
__free_page(page);
return rc;
BUILD_BUG_ON(!__same_type(cl_page->cp_lov_index, CP_LOV_INDEX_EMPTY));
cl_page->cp_lov_index = CP_LOV_INDEX_EMPTY;
- addr = kmap(cl_page->cp_vmpage);
+ addr = kmap_local_page(cl_page->cp_vmpage);
memset(addr, 0, PAGE_SIZE);
- kunmap(cl_page->cp_vmpage);
+ kunmap_local(addr);
SetPageUptodate(cl_page->cp_vmpage);
RETURN(0);
}
*/
wait_on_page_locked(page);
if (PageUptodate(page)) {
- dp = kmap(page);
+ u32 dflags;
+
+ dp = kmap_local_page(page);
if (BITS_PER_LONG == 32 && hash64) {
*start = le64_to_cpu(dp->ldp_hash_start) >> 32;
*end = le64_to_cpu(dp->ldp_hash_end) >> 32;
*start = le64_to_cpu(dp->ldp_hash_start);
*end = le64_to_cpu(dp->ldp_hash_end);
}
+ dflags = le32_to_cpu(dp->ldp_flags);
+ kunmap_local(dp);
if (unlikely(*start == 1 && *hash == 0))
*hash = *start;
else
"offset %lx [%#llx %#llx], hash %#llx\n", offset,
*start, *end, *hash);
if (*hash > *end) {
- kunmap(page);
mdc_release_page(page, 0);
page = NULL;
} else if (*end != *start && *hash == *end) {
* mdc_read_page_remote() will issue RPC to
* fetch the page we want.
*/
- kunmap(page);
- mdc_release_page(page,
- le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
+ mdc_release_page(page, dflags & LDF_COLLIDE);
page = NULL;
}
} else {
int i;
for (i = 0; i < cfs_pgs; i++) {
- struct lu_dirpage *dp = kmap(pages[i]);
+ void *addr = kmap_local_page(pages[i]);
+ struct lu_dirpage *dp = addr;
struct lu_dirpage *first = dp;
struct lu_dirent *end_dirent = NULL;
struct lu_dirent *ent;
first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
- kunmap(pages[i]);
+ kunmap_local(addr);
}
LASSERTF(lu_pgs == 0, "left = %d\n", lu_pgs);
}
SetPageUptodate(page);
- dp = kmap(page);
+ dp = kmap_local_page(page);
hash = le64_to_cpu(dp->ldp_hash_start);
- kunmap(page);
+ kunmap_local(dp);
offset = hash_x_index(hash, rp->rp_hash64);
struct lustre_handle lockh;
struct ptlrpc_request *enq_req = NULL;
struct readpage_param rp_param;
+ void *addr = NULL;
int rc;
ENTRY;
}
wait_on_page_locked(page);
- (void)kmap(page);
if (!PageUptodate(page)) {
CERROR("%s: page not updated: "DFID" at %llu: rc %d\n",
exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
}
hash_collision:
- dp = page_address(page);
+ addr = kmap_local_page(page);
+ dp = addr;
if (BITS_PER_LONG == 32 && rp_param.rp_hash64) {
start = le64_to_cpu(dp->ldp_hash_start) >> 32;
end = le64_to_cpu(dp->ldp_hash_end) >> 32;
*
* XXX not yet.
*/
+ kunmap_local(addr);
goto fail;
}
+ kunmap_local(addr);
*ppage = page;
out_unlock:
ldlm_lock_decref(&lockh, it.it_lock_mode);
return rc;
fail:
- kunmap(page);
mdc_release_page(page, 1);
rc = -EIO;
goto out_unlock;
dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
dp->ldp_hash_end = cpu_to_le64(MDS_DIR_END_OFF);
dp->ldp_flags = cpu_to_le32(LDF_EMPTY);
- rdpg_page_put(rdpg, 0);
+ rdpg_page_put(rdpg, 0, dp);
GOTO(out_unlock, rc = LU_PAGE_SIZE);
}
rc = dt_index_walk(env, mdd_object_child(mdd_obj), rdpg,
mdd_dir_page_build, NULL);
if (rc >= 0) {
		struct lu_dirpage *dp;
+
dp = (struct lu_dirpage *)rdpg_page_get(rdpg, 0);
dp->ldp_hash_start = cpu_to_le64(rdpg->rp_hash);
dp->ldp_flags = cpu_to_le32(LDF_EMPTY);
		rc = min_t(unsigned int, LU_PAGE_SIZE, rdpg->rp_count);
+		rdpg_page_put(rdpg, 0, dp);
	}
-	rdpg_page_put(rdpg, 0);
}
GOTO(out_unlock, rc);
GOTO(buf_put, rc);
/* copy data to the buffer finally */
for (i = 0; i < nr_local; i++) {
- char *p = kmap(lnb[i].lnb_page);
+ char *p = kmap_local_page(lnb[i].lnb_page);
long off;
LASSERT(lnb[i].lnb_page_offset == 0);
memset(p + off, 0, PAGE_SIZE - off);
memcpy(buf + (i << PAGE_SHIFT), p, lnb[i].lnb_len);
- kunmap(lnb[i].lnb_page);
+ kunmap_local(p);
copied += lnb[i].lnb_len;
}
CDEBUG(D_INFO, "Read %i (wanted %u) bytes from %llu\n", copied,
int rc2;
union lu_page *ptr;
- ptr = kmap(pages[i]);
+ ptr = kmap_local_page(pages[i]);
rc2 = mgc_apply_recover_logs(obd, cld, res->mcr_offset, ptr,
min_t(int, ealen, PAGE_SIZE),
mne_swab);
- kunmap(pages[i]);
+ kunmap_local(ptr);
if (rc2 < 0) {
CWARN("%s: error processing %s log recovery: rc = %d\n",
obd->obd_name,
union lu_page *ptr;
int rc2;
- ptr = kmap(pages[i]);
+ ptr = kmap_local_page(pages[i]);
rc2 = nodemap_process_idx_pages(new_config, ptr,
&recent_nodemap);
- kunmap(pages[i]);
+ kunmap_local(ptr);
if (rc2 < 0) {
CWARN("%s: error processing %s log nodemap: rc = %d\n",
obd->obd_name,
__u64 version = res->mcr_offset;
bool nobuf = false;
void *buf = NULL;
+ void *vaddr = NULL;
int bytes_in_unit = 0;
int units_in_page = 0;
int index = 0;
/* destroy previous map */
if (index > 0)
- kunmap(pages[index - 1]);
+ kunmap_local(vaddr);
/* reassign buffer */
- buf = kmap(pages[index]);
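+		/* vaddr tracks the mapping start for the later kunmap_local() */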
+ buf = vaddr = kmap_local_page(pages[index]);
++index;
units_in_page = PAGE_SIZE / unit_size;
bytes_in_unit, index, nrpages, units_total);
}
if (index > 0)
- kunmap(pages[index - 1]);
+ kunmap_local(vaddr);
out:
LASSERT(version <= tbl->mn_version);
res->mcr_size = tbl->mn_version;
{
if (rdpg->rp_npages) {
LASSERT(index < rdpg->rp_npages);
- return kmap(rdpg->rp_pages[index]);
+ return kmap_local_page(rdpg->rp_pages[index]);
}
-	LASSERT(index * PAGE_SIZE < rdpg->rp_count);
-	return rdpg->rp_data + index * PAGE_SIZE;
+	LASSERT((index << PAGE_SHIFT) < rdpg->rp_count);
+	return rdpg->rp_data + (index << PAGE_SHIFT);
}
EXPORT_SYMBOL(rdpg_page_get);
-void rdpg_page_put(const struct lu_rdpg *rdpg, unsigned int index)
+void rdpg_page_put(const struct lu_rdpg *rdpg, unsigned int index, void *kaddr)
{
if (rdpg->rp_npages)
- kunmap(rdpg->rp_pages[index]);
+ kunmap_local(kaddr);
}
EXPORT_SYMBOL(rdpg_page_put);
* rc < 0 -> error.
*/
for (pageidx = 0; rc == 0 && bytes > 0; pageidx++) {
+ void *addr;
union lu_page *lp;
int i;
- lp = rdpg_page_get(rdpg, pageidx);
+ lp = addr = rdpg_page_get(rdpg, pageidx);
/* fill lu pages */
for (i = 0; i < LU_PAGE_COUNT; i++, lp++, bytes-=LU_PAGE_SIZE) {
rc = filler(env, obj, lp,
/* end of index */
break;
}
- rdpg_page_put(rdpg, pageidx);
+ rdpg_page_put(rdpg, pageidx, addr);
}
out:
int i;
if (nlupgs_mod) {
+ void *kaddr;
+
pgidx = nlupgs / LU_PAGE_COUNT;
LASSERT(pgidx < npages);
- lp = kmap(pages[pgidx]);
+ lp = kaddr = kmap_local_page(pages[pgidx]);
remain_nlupgs = LU_PAGE_COUNT - nlupgs_mod;
/* initialize the header for the remain lu_pages */
lip->lip_magic = LIP_MAGIC;
}
- kunmap(pages[pgidx]);
+ kunmap_local(kaddr);
}
}
#else
{
unsigned int off = start;
unsigned int end = start + length;
+ void *addr;
char *data_buf;
__be16 *guard_buf = guard_start;
unsigned int data_size;
int guard_used = 0;
int rc = 0;
- data_buf = kmap(page) + start;
+ addr = kmap_local_page(page);
+ data_buf = addr + start;
while (off < end) {
if (guard_used >= guard_number) {
rc = -E2BIG;
}
*used_number = guard_used;
out:
- kunmap(page);
+ kunmap_local(addr);
return rc;
}
GOTO(out, rc);
}
- buffer = kmap(__page);
+ buffer = kmap_local_page(__page);
guard_start = (__be16 *)buffer;
guard_number = PAGE_SIZE / sizeof(*guard_start);
for (i = 0; i < repeat_number; i++) {
used_number = 0;
}
}
- kunmap(__page);
+ kunmap_local(buffer);
if (rc)
GOTO(out_final, rc);
goto out;
}
- buf = kmap(page);
+ buf = kmap_local_page(page);
memset(buf, 0xAD, PAGE_SIZE);
- kunmap(page);
+ kunmap_local(buf);
for (start = jiffies, end = start + cfs_time_seconds(1) / 4,
bcount = 0; time_before(jiffies, end) && rc == 0; bcount++) {
if (bytes > PAGE_SIZE-offset)
bytes = PAGE_SIZE-offset;
- maddr = kmap(page);
+ maddr = kmap_local_page(page);
if (write) {
copy_to_user_page(vma, page, addr,
maddr + offset, buf, bytes);
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);
}
- kunmap(page);
+ kunmap_local(maddr);
put_page(page);
len -= bytes;
buf += bytes;
echo_page_debug_setup(struct page *page, int rw, u64 id,
__u64 offset, int len)
{
- int page_offset = offset & ~PAGE_MASK;
- char *addr = ((char *)kmap(page)) + page_offset;
+ int page_offset = offset & ~PAGE_MASK;
+ char *kaddr = kmap_local_page(page);
+ char *addr = kaddr + page_offset;
if (len % OBD_ECHO_BLOCK_SIZE != 0)
CERROR("Unexpected block size %d\n", len);
len -= OBD_ECHO_BLOCK_SIZE;
}
- kunmap(page);
+ kunmap_local(kaddr);
}
static int
echo_page_debug_check(struct page *page, u64 id,
__u64 offset, int len)
{
- int page_offset = offset & ~PAGE_MASK;
- char *addr = ((char *)kmap(page)) + page_offset;
- int rc = 0;
- int rc2;
+ int page_offset = offset & ~PAGE_MASK;
+ char *kaddr = kmap_local_page(page);
+ char *addr = kaddr + page_offset;
+ int rc = 0;
+ int rc2;
if (len % OBD_ECHO_BLOCK_SIZE != 0)
CERROR("Unexpected block size %d\n", len);
len -= OBD_ECHO_BLOCK_SIZE;
}
- kunmap(page);
+ kunmap_local(kaddr);
return rc;
}
for (i = 0; i < count; i++, (*pgs) ++, res++) {
struct page *page = res->lnb_page;
- void *addr;
+ void *addr;
if (!page) {
CERROR("null page objid %llu:%p, buf %d/%d\n",
return -EFAULT;
}
- addr = kmap(page);
+ addr = kmap_local_page(page);
CDEBUG(D_PAGE, "$$$$ use page %p, addr %p@%llu\n",
res->lnb_page, addr, res->lnb_file_offset);
rc = vrc;
}
- kunmap(page);
+ kunmap_local(addr);
/* NB see comment above regarding persistent pages */
__free_page(page);
}
*/
CERROR("cleaning up %u pages (%d obdos)\n", *pages, objcount);
for (i = 0; i < *pages; i++) {
- kunmap(res[i].lnb_page);
/*
* NB if this is a persistent page, __free_page() will just
* lose the extra ref gained above
int echo_persistent_pages_init(void)
{
struct page *pg;
- int i;
+ void *kaddr;
+ int i;
for (i = 0; i < ECHO_PERSISTENT_PAGES; i++) {
gfp_t gfp_mask = (i < ECHO_PERSISTENT_PAGES / 2) ?
return -ENOMEM;
}
- memset(kmap(pg), 0, PAGE_SIZE);
- kunmap(pg);
+ kaddr = kmap_local_page(pg);
+ memset(kaddr, 0, PAGE_SIZE);
+ kunmap_local(kaddr);
/* set mapping so page is not considered encrypted */
pg->mapping = ECHO_MAPPING_UNENCRYPTED;
/* no partial pages on the client */
LASSERT(count == PAGE_SIZE);
- addr = kmap(page);
+ addr = kmap_local_page(page);
for (delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
if (rw == OBD_BRW_WRITE) {
stripe_off, stripe_id);
}
- kunmap(page);
+ kunmap_local(addr);
}
static int
/* no partial pages on the client */
LASSERT(count == PAGE_SIZE);
- addr = kmap(page);
+ addr = kmap_local_page(page);
for (rc = delta = 0; delta < PAGE_SIZE; delta += OBD_ECHO_BLOCK_SIZE) {
stripe_off = offset + delta;
}
}
- kunmap(page);
+ kunmap_local(addr);
return rc;
}
static void handle_short_read(int nob_read, size_t page_count,
struct brw_page **pga)
{
+ void *kaddr;
char *ptr;
int i = 0;
LASSERT(page_count > 0);
if (pga[i]->bp_count > nob_read) {
+ kaddr = kmap_local_page(pga[i]->bp_page);
/* EOF inside this page */
- ptr = kmap(pga[i]->bp_page) +
- (pga[i]->bp_off & ~PAGE_MASK);
+ ptr = kaddr + (pga[i]->bp_off & ~PAGE_MASK);
memset(ptr + nob_read, 0, pga[i]->bp_count - nob_read);
- kunmap(pga[i]->bp_page);
+ kunmap_local(kaddr);
page_count--;
i++;
break;
/* zero remaining pages */
while (page_count-- > 0) {
- ptr = kmap(pga[i]->bp_page) + (pga[i]->bp_off & ~PAGE_MASK);
+ kaddr = kmap_local_page(pga[i]->bp_page);
+ ptr = kaddr + (pga[i]->bp_off & ~PAGE_MASK);
memset(ptr, 0, pga[i]->bp_count);
- kunmap(pga[i]->bp_page);
+ kunmap_local(kaddr);
i++;
}
}
GOTO(out, rc);
}
- buffer = kmap(__page);
+ buffer = kmap_local_page(__page);
guard_start = (__be16 *)buffer;
guard_number = PAGE_SIZE / sizeof(*guard_start);
CDEBUG(D_PAGE | (resend ? D_HA : 0),
*/
if (unlikely(i == 0 && opc == OST_READ &&
CFS_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE))) {
- unsigned char *ptr = kmap(pga[i]->bp_page);
+ void *ptr = kmap_local_page(pga[i]->bp_page);
memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
- kunmap(pga[i]->bp_page);
+ kunmap_local(ptr);
}
/*
pg_count--;
i++;
}
- kunmap(__page);
+ kunmap_local(buffer);
if (rc)
GOTO(out_hash, rc);
*/
if (i == 0 && opc == OST_READ &&
CFS_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
- unsigned char *ptr = kmap(pga[i]->bp_page);
+ void *ptr = kmap_local_page(pga[i]->bp_page);
int off = pga[i]->bp_off & ~PAGE_MASK;
memcpy(ptr + off, "bad1", min_t(typeof(nob), 4, nob));
- kunmap(pga[i]->bp_page);
+ kunmap_local(ptr);
}
cfs_crypto_hash_update_page(req, pga[i]->bp_page,
pga[i]->bp_off & ~PAGE_MASK,
LASSERT((pga[0]->bp_flag & OBD_BRW_SRVLOCK) ==
(pg->bp_flag & OBD_BRW_SRVLOCK));
if (short_io_size != 0 && opc == OST_WRITE) {
- unsigned char *ptr = kmap_atomic(pg->bp_page);
+ unsigned char *ptr = ll_kmap_local_page(pg->bp_page);
LASSERT(short_io_size >= requested_nob + pg->bp_count);
memcpy(short_io_buf + requested_nob,
ptr + poff,
pg->bp_count);
- kunmap_atomic(ptr);
+ ll_kunmap_local(ptr);
} else if (short_io_size == 0) {
desc->bd_frag_ops->add_kiov_frag(desc, pg->bp_page,
poff, pg->bp_count);
	for (i = 0; i < page_count; i++) {
+		void *addr = kmap_local_page(pga[i]->bp_page);
+
		len = pga[i]->bp_count;
-		buf = kmap(pga[i]->bp_page);
+		buf = addr;
		while (len != 0) {
			rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
			if (rc < 0) {
			len -= rc;
			buf += rc;
		}
-		kunmap(pga[i]->bp_page);
+		/* buf has been advanced by partial writes; unmap via addr */
+		kunmap_local(addr);
}
rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
CDEBUG(D_CACHE, "page %p count %d\n",
aa->aa_ppga[i]->bp_page, count);
- ptr = kmap_atomic(aa->aa_ppga[i]->bp_page);
+ ptr = ll_kmap_local_page(aa->aa_ppga[i]->bp_page);
memcpy(ptr + (aa->aa_ppga[i]->bp_off & ~PAGE_MASK), buf,
count);
- kunmap_atomic((void *) ptr);
+ ll_kunmap_local((void *) ptr);
buf += count;
nob -= count;
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
- kaddr = kmap(page);
+ kaddr = kmap_local_page(page);
addr = kaddr;
*addr = ~(*addr);
- kunmap(page);
+ kunmap_local(kaddr);
break;
}
}
nblocks = 1;
if (blocks[block_idx + i] == 0) { /* hole */
+ void *addr;
struct niobuf_local *lnb =
iobuf->dr_lnbs[page_idx];
+
CDEBUG(D_INODE,
"hole at page_idx %d, block_idx %d, at offset %llu\n",
page_idx, block_idx,
page_idx, block_idx, i,
(unsigned long long)start_blocks,
(unsigned long long)count, npages);
- memset(kmap(page) + page_offset, 0, blocksize);
- kunmap(page);
+ addr = kmap_local_page(page);
+ memset(addr + page_offset, 0, blocksize);
+ kunmap_local(addr);
continue;
}
osd_iobuf_add_page(iobuf, &lnb[i]);
} else {
long off;
- char *p = kmap(lnb[i].lnb_page);
+ char *p = kmap_local_page(lnb[i].lnb_page);
off = lnb[i].lnb_page_offset;
if (off)
~PAGE_MASK;
if (off)
memset(p + off, 0, PAGE_SIZE - off);
- kunmap(lnb[i].lnb_page);
+ kunmap_local(p);
}
}
end = ktime_get();
continue;
if (lnb[i].lnb_page->mapping == (void *)obj) {
+ void *addr = kmap_local_page(lnb[i].lnb_page);
+
osd_dmu_write(osd, obj->oo_dn, lnb[i].lnb_file_offset,
- lnb[i].lnb_len, kmap(lnb[i].lnb_page) +
+ lnb[i].lnb_len, addr +
lnb[i].lnb_page_offset, oh->ot_tx);
- kunmap(lnb[i].lnb_page);
+ kunmap_local(addr);
iosize += lnb[i].lnb_len;
abufsz = lnb[i].lnb_len; /* to drop cache below */
} else if (lnb[i].lnb_data) {
__u64 ooi_next;
struct dt_object *ooi_obj;
void *ooi_ent;
- struct page *ooi_cur_page;
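+	/* kmap_local address of the page the iterator currently has mapped */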
+ void *ooi_cur_kaddr;
struct lu_idxpage *ooi_cur_idxpage;
struct page **ooi_pages;
};
if (pages != NULL) {
for (i = 0; i < npages; i++) {
- if (pages[i] != NULL) {
- if (pages[i] == it->ooi_cur_page) {
- kunmap(pages[i]);
- it->ooi_cur_page = NULL;
+ if (pages[i]) {
+ if (it->ooi_cur_kaddr) {
+ kunmap_local(it->ooi_cur_kaddr);
+ it->ooi_cur_kaddr = NULL;
}
__free_page(pages[i]);
}
process_page:
if (it->ooi_pos_lu_page < LU_PAGE_COUNT) {
- it->ooi_cur_idxpage = (void *)it->ooi_cur_page +
+ it->ooi_cur_idxpage = (void *)it->ooi_cur_kaddr +
LU_PAGE_SIZE * it->ooi_pos_lu_page;
if (it->ooi_swab)
lustre_swab_lip_header(it->ooi_cur_idxpage);
goto process_idxpage;
}
- kunmap(it->ooi_cur_page);
- it->ooi_cur_page = NULL;
+ kunmap_local(it->ooi_cur_kaddr);
+ it->ooi_cur_kaddr = NULL;
it->ooi_pos_page++;
start:
pages = it->ooi_pages;
if (it->ooi_pos_page < it->ooi_valid_npages) {
- it->ooi_cur_page = kmap(pages[it->ooi_pos_page]);
+ it->ooi_cur_kaddr =
+ kmap_local_page(pages[it->ooi_pos_page]);
it->ooi_pos_lu_page = 0;
goto process_page;
}
it->ooi_valid_npages = 0;
it->ooi_swab = 0;
it->ooi_ent = NULL;
- it->ooi_cur_page = NULL;
+ it->ooi_cur_kaddr = NULL;
it->ooi_cur_idxpage = NULL;
it->ooi_pages = NULL;
}
if (desc->bd_vec[i].bv_len == 0)
continue;
- ptr = kmap(desc->bd_vec[i].bv_page);
+ ptr = kmap_local_page(desc->bd_vec[i].bv_page);
off = desc->bd_vec[i].bv_offset & ~PAGE_MASK;
ptr[off] ^= 0x1;
- kunmap(desc->bd_vec[i].bv_page);
+ kunmap_local(ptr);
return;
}
}
size = ii->ii_recsize + ii->ii_keysize;
for (i = 0; i < npages; i++) {
- union lu_page *lip = kmap(pages[i]);
+ void *kaddr = kmap_local_page(pages[i]);
+ union lu_page *lip = kaddr;
for (j = 0; j < LU_PAGE_COUNT; j++) {
if (need_swab)
lip++;
}
out:
- kunmap(pages[i]);
+ kunmap_local(kaddr);
if (rc)
break;
}
struct page *np = tgt_page_to_corrupt;
if (np) {
- char *ptr = kmap_atomic(local_nb[i].lnb_page);
- char *ptr2 = page_address(np);
+ char *ptr = ll_kmap_local_page(local_nb[i].lnb_page);
+ char *ptr2 = ll_kmap_local_page(np);
memcpy(ptr2 + off, ptr + off, len);
memcpy(ptr2 + off, "bad3", min(4, len));
- kunmap_atomic(ptr);
+ ll_kunmap_local(ptr2);
+ ll_kunmap_local(ptr);
/* LU-8376 to preserve original index for
* display in dump_all_bulk_pages() */
struct page *np = tgt_page_to_corrupt;
if (np) {
- char *ptr = kmap_atomic(local_nb[i].lnb_page);
- char *ptr2 = page_address(np);
+ char *ptr = ll_kmap_local_page(local_nb[i].lnb_page);
+ char *ptr2 = ll_kmap_local_page(np);
memcpy(ptr2 + off, ptr + off, len);
memcpy(ptr2 + off, "bad4", min(4, len));
- kunmap_atomic(ptr);
+ ll_kunmap_local(ptr2);
+ ll_kunmap_local(ptr);
/* LU-8376 to preserve original index for
* display in dump_all_bulk_pages() */
}
for (i = 0; i < count; i++) {
+ void *addr = kmap_local_page(local_nb[i].lnb_page);
+
len = local_nb[i].lnb_len;
- buf = kmap(local_nb[i].lnb_page);
+ buf = addr;
while (len != 0) {
rc = cfs_kernel_write(filp, buf, len, &filp->f_pos);
if (rc < 0) {
len -= rc;
buf += rc;
}
- kunmap(local_nb[i].lnb_page);
+ kunmap_local(addr);
}
rc = vfs_fsync_range(filp, 0, LLONG_MAX, 1);
if (len > size)
return -EINVAL;
- ptr = kmap_atomic(local[i].lnb_page);
+ ptr = ll_kmap_local_page(local[i].lnb_page);
memcpy(buf, ptr + off, len);
- kunmap_atomic(ptr);
+ ll_kunmap_local(ptr);
buf += len;
size -= len;
}
goto out;
}
- buffer = kmap(__page);
+ buffer = kmap_local_page(__page);
guard_start = (__be16 *)buffer;
guard_number = PAGE_SIZE / sizeof(*guard_start);
if (unlikely(resend))
struct page *np = tgt_page_to_corrupt;
if (np) {
- char *ptr = kmap_atomic(local_nb[i].lnb_page);
- char *ptr2 = page_address(np);
+ char *ptr = ll_kmap_local_page(local_nb[i].lnb_page);
+ char *ptr2 = ll_kmap_local_page(np);
memcpy(ptr2 + off, ptr + off, len);
memcpy(ptr2 + off, "bad3", min(4, len));
- kunmap_atomic(ptr);
+ ll_kunmap_local(ptr2);
+ ll_kunmap_local(ptr);
/* LU-8376 to preserve original index for
* display in dump_all_bulk_pages() */
struct page *np = tgt_page_to_corrupt;
if (np) {
- char *ptr = kmap_atomic(local_nb[i].lnb_page);
- char *ptr2 = page_address(np);
+ char *ptr = ll_kmap_local_page(local_nb[i].lnb_page);
+ char *ptr2 = ll_kmap_local_page(np);
memcpy(ptr2 + off, ptr + off, len);
memcpy(ptr2 + off, "bad4", min(4, len));
- kunmap_atomic(ptr);
+ ll_kunmap_local(ptr2);
+ ll_kunmap_local(ptr);
/* LU-8376 to preserve original index for
* display in dump_all_bulk_pages() */
}
}
}
- kunmap(__page);
+ kunmap_local(buffer);
if (rc)
GOTO(out_hash, rc);
CDEBUG(D_PAGE, "index %d offset = %d len = %d left = %d\n",
i, off, len, size);
- ptr = kmap_atomic(local[i].lnb_page);
- if (ptr == NULL)
- return -EINVAL;
+ ptr = ll_kmap_local_page(local[i].lnb_page);
memcpy(ptr + off, buf, len < size ? len : size);
- kunmap_atomic(ptr);
+ ll_kunmap_local(ptr);
buf += len;
size -= len;
}