#define DEBUG_SUBSYSTEM S_OSC
#include <linux/workqueue.h>
+#include <libcfs/libcfs.h>
+#include <linux/falloc.h>
#include <lprocfs_status.h>
#include <lustre_debug.h>
#include <lustre_dlm.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <lustre_osc.h>
#include "osc_internal.h"
sa->sa_upcall = upcall;
sa->sa_cookie = cookie;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
+ ptlrpc_set_add_req(rqset, req);
}
RETURN(0);
la->la_upcall = upcall;
la->la_cookie = cookie;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
+ ptlrpc_set_add_req(rqset, req);
RETURN(0);
}
}
EXPORT_SYMBOL(osc_punch_send);
+/**
+ * osc_fallocate_base() - Handles a fallocate request.
+ *
+ * @exp: Export structure
+ * @oa: Attributes passed to OSS from client (obdo structure)
+ * @upcall: Completion callback invoked once the reply is interpreted
+ * @cookie: Opaque argument passed back to @upcall
+ * @mode: Operation done on given range.
+ *
+ * Handles fallocate requests only. Only block allocation, i.e. the
+ * standard preallocate operation (mode == 0 or FALLOC_FL_KEEP_SIZE),
+ * is supported currently; other mode flags are not supported yet.
+ * ftruncate(2) and truncate(2) are handled via a SETATTR request
+ * instead.
+ *
+ * Return: Non-zero on failure and 0 on success.
+ */
+int osc_fallocate_base(struct obd_export *exp, struct obdo *oa,
+ obd_enqueue_update_f upcall, void *cookie, int mode)
+{
+ struct ptlrpc_request *req;
+ struct osc_setattr_args *sa;
+ struct ost_body *body;
+ struct obd_import *imp = class_exp2cliimp(exp);
+ int rc;
+ ENTRY;
+
+ /*
+ * Only mode == 0 (standard prealloc) or FALLOC_FL_KEEP_SIZE
+ * is supported for now. Punch is not supported yet.
+ */
+ if (mode & ~FALLOC_FL_KEEP_SIZE)
+ RETURN(-EOPNOTSUPP);
+ oa->o_falloc_mode = mode;
+
+ req = ptlrpc_request_alloc(imp, &RQF_OST_FALLOCATE);
+ if (req == NULL)
+ RETURN(-ENOMEM);
+
+ rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_FALLOCATE);
+ if (rc != 0) {
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+
+ body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
+ LASSERT(body);
+
+ lustre_set_wire_obdo(&imp->imp_connect_data, &body->oa, oa);
+
+ ptlrpc_request_set_replen(req);
+
+ req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret;
+ BUILD_BUG_ON(sizeof(*sa) > sizeof(req->rq_async_args));
+ sa = ptlrpc_req_async_args(sa, req);
+ sa->sa_oa = oa;
+ sa->sa_upcall = upcall;
+ sa->sa_cookie = cookie;
+
+ ptlrpcd_add_req(req);
+
+ RETURN(0);
+}
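+
+/*
+ * Example usage (a sketch, not part of this change): osc_fallocate_base()
+ * follows the same asynchronous pattern as the other OSC setattr-style
+ * helpers. A caller fills an obdo, supplies a completion upcall plus an
+ * opaque cookie, and the request is queued on ptlrpcd; the upcall fires
+ * once the OST_FALLOCATE reply has been interpreted. All "example_"
+ * names below are hypothetical.
+ *
+ *	static int example_fallocate_upcall(void *cookie, int rc)
+ *	{
+ *		complete((struct completion *)cookie);
+ *		return rc;
+ *	}
+ *
+ *	static int example_prealloc(struct obd_export *exp, struct obdo *oa)
+ *	{
+ *		DECLARE_COMPLETION_ONSTACK(done);
+ *		int rc;
+ *
+ *		rc = osc_fallocate_base(exp, oa, example_fallocate_upcall,
+ *					&done, FALLOC_FL_KEEP_SIZE);
+ *		if (rc == 0)
+ *			wait_for_completion(&done);
+ *		return rc;
+ *	}
+ */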
+
static int osc_sync_interpret(const struct lu_env *env,
struct ptlrpc_request *req, void *args, int rc)
{
fa->fa_upcall = upcall;
fa->fa_cookie = cookie;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
+ ptlrpc_set_add_req(rqset, req);
RETURN (0);
}
req->rq_interpret_reply = osc_destroy_interpret;
if (!osc_can_send_destroy(cli)) {
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
-
/*
* Wait until the number of on-going destroy RPCs drops
* under max_rpc_in_flight
*/
- rc = l_wait_event_exclusive(cli->cl_destroy_waitq,
- osc_can_send_destroy(cli), &lwi);
+ rc = l_wait_event_abortable_exclusive(
+ cli->cl_destroy_waitq,
+ osc_can_send_destroy(cli));
if (rc) {
ptlrpc_req_finished(req);
- RETURN(rc);
+ RETURN(-EINTR);
}
}
return 0;
if (!OCD_HAS_FLAG(&client->cl_import->imp_connect_data, GRANT_SHRINK) ||
- client->cl_import->imp_grant_shrink_disabled)
+ client->cl_import->imp_grant_shrink_disabled) {
+ osc_update_next_shrink(client);
return 0;
+ }
if (ktime_get_seconds() >= next_shrink - 5) {
/* Get the current RPC size directly, instead of going via:
}
spin_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
- "chunk bits: %d cl_max_extent_pages: %d\n",
- cli_name(cli),
- cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
- cli->cl_max_extent_pages);
+ CDEBUG(D_CACHE,
+ "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld. chunk bits: %d cl_max_extent_pages: %d\n",
+ cli_name(cli),
+ cli->cl_avail_grant, cli->cl_lost_grant, cli->cl_chunkbits,
+ cli->cl_max_extent_pages);
if (OCD_HAS_FLAG(ocd, GRANT_SHRINK) && list_empty(&cli->cl_grant_chain))
osc_add_grant_list(cli);
RETURN(rc);
}
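+/*
+ * Undo the llcrypt bounce-page substitution done on the write path: for
+ * every encrypted (unmapped, non-sync) page in @pga, free the bounce page
+ * and restore the clear-text count/offset saved in bp_count_diff and
+ * bp_off_diff before the transfer.
+ */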
+static inline void osc_release_bounce_pages(struct brw_page **pga,
+ u32 page_count)
+{
+#ifdef HAVE_LUSTRE_CRYPTO
+ int i;
+
+ for (i = 0; i < page_count; i++) {
+ if (pga[i]->pg->mapping)
+ /* bounce pages are unmapped */
+ continue;
+ if (pga[i]->flag & OBD_BRW_SYNC)
+ /* sync transfer cannot have encrypted pages */
+ continue;
+ llcrypt_finalize_bounce_page(&pga[i]->pg);
+ pga[i]->count -= pga[i]->bp_count_diff;
+ pga[i]->off += pga[i]->bp_off_diff;
+ }
+#endif
+}
+
static int
osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
u32 page_count, struct brw_page **pga,
struct ptlrpc_request **reqp, int resend)
{
- struct ptlrpc_request *req;
- struct ptlrpc_bulk_desc *desc;
- struct ost_body *body;
- struct obd_ioobj *ioobj;
- struct niobuf_remote *niobuf;
+ struct ptlrpc_request *req;
+ struct ptlrpc_bulk_desc *desc;
+ struct ost_body *body;
+ struct obd_ioobj *ioobj;
+ struct niobuf_remote *niobuf;
int niocount, i, requested_nob, opc, rc, short_io_size = 0;
- struct osc_brw_async_args *aa;
- struct req_capsule *pill;
- struct brw_page *pg_prev;
+ struct osc_brw_async_args *aa;
+ struct req_capsule *pill;
+ struct brw_page *pg_prev;
void *short_io_buf;
const char *obd_name = cli->cl_import->imp_obd->obd_name;
+ struct inode *inode;
- ENTRY;
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
- RETURN(-ENOMEM); /* Recoverable */
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
- RETURN(-EINVAL); /* Fatal */
+ ENTRY;
+ inode = page2inode(pga[0]->pg);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
+ RETURN(-ENOMEM); /* Recoverable */
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
+ RETURN(-EINVAL); /* Fatal */
if ((cmd & OBD_BRW_WRITE) != 0) {
opc = OST_WRITE;
if (req == NULL)
RETURN(-ENOMEM);
+ if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
+ for (i = 0; i < page_count; i++) {
+ struct brw_page *pg = pga[i];
+ struct page *data_page = NULL;
+ bool retried = false;
+ bool lockedbymyself;
+
+retry_encrypt:
+ /* The page can already be locked when we arrive here.
+ * This is possible when cl_page_assume/vvp_page_assume
+ * is stuck on wait_on_page_writeback with page lock
+ * held. In this case there is no risk for the lock to
+ * be released while we are doing our encryption
+ * processing, because writeback against that page will
+ * end in vvp_page_completion_write/cl_page_completion,
+ * which means only once the page is fully processed.
+ */
+ lockedbymyself = trylock_page(pg->pg);
+ data_page =
+ llcrypt_encrypt_pagecache_blocks(pg->pg,
+ PAGE_SIZE, 0,
+ GFP_NOFS);
+ if (lockedbymyself)
+ unlock_page(pg->pg);
+ if (IS_ERR(data_page)) {
+ rc = PTR_ERR(data_page);
+ if (rc == -ENOMEM && !retried) {
+ retried = true;
+ rc = 0;
+ goto retry_encrypt;
+ }
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+ /* len is forced to PAGE_SIZE, and poff to 0
+ * so store the old, clear text info
+ */
+ pg->pg = data_page;
+ pg->bp_count_diff = PAGE_SIZE - pg->count;
+ pg->count = PAGE_SIZE;
+ pg->bp_off_diff = pg->off & ~PAGE_MASK;
+ pg->off = pg->off & PAGE_MASK;
+ }
+ }
+
for (niocount = i = 1; i < page_count; i++) {
if (!can_merge_pages(pga[i - 1], pga[i]))
niocount++;
desc = ptlrpc_prep_bulk_imp(req, page_count,
cli->cl_import->imp_connect_data.ocd_brw_size >> LNET_MTU_BITS,
(opc == OST_WRITE ? PTLRPC_BULK_GET_SOURCE :
- PTLRPC_BULK_PUT_SINK) |
- PTLRPC_BULK_BUF_KIOV,
+ PTLRPC_BULK_PUT_SINK),
OST_BULK_PORTAL,
&ptlrpc_bulk_kiov_pin_ops);
LASSERT((pga[0]->flag & OBD_BRW_SRVLOCK) ==
(pg->flag & OBD_BRW_SRVLOCK));
if (short_io_size != 0 && opc == OST_WRITE) {
- unsigned char *ptr = ll_kmap_atomic(pg->pg, KM_USER0);
+ unsigned char *ptr = kmap_atomic(pg->pg);
LASSERT(short_io_size >= requested_nob + pg->count);
memcpy(short_io_buf + requested_nob,
ptr + poff,
pg->count);
- ll_kunmap_atomic(ptr, KM_USER0);
+ kunmap_atomic(ptr);
} else if (short_io_size == 0) {
desc->bd_frag_ops->add_kiov_frag(desc, pg->pg, poff,
pg->count);
&req->rq_import->imp_connection->c_peer;
struct ost_body *body;
u32 client_cksum = 0;
+ struct inode *inode;
ENTRY;
CDEBUG(D_CACHE, "page %p count %d\n",
aa->aa_ppga[i]->pg, count);
- ptr = ll_kmap_atomic(aa->aa_ppga[i]->pg, KM_USER0);
+ ptr = kmap_atomic(aa->aa_ppga[i]->pg);
memcpy(ptr + (aa->aa_ppga[i]->off & ~PAGE_MASK), buf,
count);
- ll_kunmap_atomic((void *) ptr, KM_USER0);
+ kunmap_atomic((void *) ptr);
buf += count;
nob -= count;
} else {
rc = 0;
}
+
+ inode = page2inode(aa->aa_ppga[0]->pg);
+ if (inode && IS_ENCRYPTED(inode)) {
+ int idx;
+
+ if (!llcrypt_has_encryption_key(inode)) {
+ CDEBUG(D_SEC, "no enc key for ino %lu\n", inode->i_ino);
+ GOTO(out, rc);
+ }
+ for (idx = 0; idx < aa->aa_page_count; idx++) {
+ struct brw_page *pg = aa->aa_ppga[idx];
+ __u64 *p, *q;
+
+ /* do not decrypt if page is all 0s: it is a hole
+ * (never written), hence not encrypted on disk
+ */
+ p = q = page_address(pg->pg);
+ while (p - q < PAGE_SIZE / sizeof(*p)) {
+ if (*p != 0)
+ break;
+ p++;
+ }
+ if (p - q == PAGE_SIZE / sizeof(*p))
+ continue;
+
+ rc = llcrypt_decrypt_pagecache_blocks(pg->pg,
+ PAGE_SIZE, 0);
+ if (rc)
+ GOTO(out, rc);
+ }
+ }
+
out:
if (rc >= 0)
lustre_get_wire_obdo(&req->rq_import->imp_connect_data,
RETURN(rc);
list_for_each_entry(oap, &aa->aa_oaps, oap_rpc_item) {
- if (oap->oap_request != NULL) {
- LASSERTF(request == oap->oap_request,
- "request %p != oap_request %p\n",
- request, oap->oap_request);
- if (oap->oap_interrupted) {
- ptlrpc_req_finished(new_req);
- RETURN(-EINTR);
- }
- }
- }
+ if (oap->oap_request != NULL) {
+ LASSERTF(request == oap->oap_request,
+ "request %p != oap_request %p\n",
+ request, oap->oap_request);
+ }
+ }
/*
* New request takes over pga and oaps from old request.
* Note that copying a list_head doesn't work, need to move it...
static void osc_release_ppga(struct brw_page **ppga, size_t count)
{
- LASSERT(ppga != NULL);
- OBD_FREE(ppga, sizeof(*ppga) * count);
+ LASSERT(ppga != NULL);
+ OBD_FREE_PTR_ARRAY(ppga, count);
}
static int brw_interpret(const struct lu_env *env,
rc = osc_brw_fini_request(req, rc);
CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
+
+ /* restore clear text pages */
+ osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
+
/*
* When server returns -EINPROGRESS, client should always retry
* regardless of the number of times the bulk was resent already.
struct cl_req_attr *crattr = NULL;
loff_t starting_offset = OBD_OBJECT_EOF;
loff_t ending_offset = 0;
- int mpflag = 0;
+ /* '1' for consistency with code that checks !mpflag to restore */
+ int mpflag = 1;
int mem_tight = 0;
int page_count = 0;
bool soft_sync = false;
- bool interrupted = false;
bool ndelay = false;
int i;
int grant = 0;
mem_tight |= ext->oe_memalloc;
grant += ext->oe_grants;
page_count += ext->oe_nr_pages;
- layout_version = MAX(layout_version, ext->oe_layout_version);
+ layout_version = max(layout_version, ext->oe_layout_version);
if (obj == NULL)
obj = ext->oe_obj;
}
soft_sync = osc_over_unstable_soft_limit(cli);
if (mem_tight)
- mpflag = cfs_memory_pressure_get_and_set();
+ mpflag = memalloc_noreclaim_save();
- OBD_ALLOC(pga, sizeof(*pga) * page_count);
+ OBD_ALLOC_PTR_ARRAY(pga, page_count);
if (pga == NULL)
GOTO(out, rc = -ENOMEM);
else
LASSERT(oap->oap_page_off + oap->oap_count ==
PAGE_SIZE);
- if (oap->oap_interrupted)
- interrupted = true;
}
if (ext->oe_ndelay)
ndelay = true;
req->rq_interpret_reply = brw_interpret;
req->rq_memalloc = mem_tight != 0;
oap->oap_request = ptlrpc_request_addref(req);
- if (interrupted && !req->rq_intr)
- ptlrpc_mark_interrupted(req);
if (ndelay) {
req->rq_no_resend = req->rq_no_delay = 1;
/* probably set a shorter timeout value.
EXIT;
out:
- if (mem_tight != 0)
- cfs_memory_pressure_restore(mpflag);
+ if (mem_tight)
+ memalloc_noreclaim_restore(mpflag);
if (rc != 0) {
LASSERT(req == NULL);
if (oa)
OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
- if (pga)
- OBD_FREE(pga, sizeof(*pga) * page_count);
+ if (pga) {
+ osc_release_bounce_pages(pga, page_count);
+ osc_release_ppga(pga, page_count);
+ }
/* this should happen rarely and is pretty bad, it makes the
* pending list not follow the dirty order */
while (!list_empty(ext_list)) {
RETURN(rc);
}
-struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
-
/* When enqueuing asynchronously, locks are not ordered, we can obtain a lock
* from the 2nd OSC before a lock from the 1st one. This does not deadlock with
* other synchronous requests, however keeping some locks and trying to obtain
}
req->rq_interpret_reply = osc_enqueue_interpret;
- if (rqset == PTLRPCD_SET)
- ptlrpcd_add_req(req);
- else
- ptlrpc_set_add_req(rqset, req);
+ ptlrpc_set_add_req(rqset, req);
} else if (intent) {
ptlrpc_req_finished(req);
}
default:
rc = -ENOTTY;
CDEBUG(D_INODE, "%s: unrecognised ioctl %#x by %s: rc = %d\n",
- obd->obd_name, cmd, current_comm(), rc);
+ obd->obd_name, cmd, current->comm, rc);
break;
}
.nr_to_scan = shrink_param(sc, nr_to_scan),
.gfp_mask = shrink_param(sc, gfp_mask)
};
-#if !defined(HAVE_SHRINKER_WANT_SHRINK_PTR) && !defined(HAVE_SHRINK_CONTROL)
- struct shrinker *shrinker = NULL;
-#endif
-
(void)osc_cache_shrink_scan(shrinker, &scv);
return osc_cache_shrink_count(shrinker, &scv);