#include <libcfs/libcfs.h>
#include <linux/falloc.h>
#include <lprocfs_status.h>
-#include <lustre_debug.h>
#include <lustre_dlm.h>
#include <lustre_fid.h>
#include <lustre_ha.h>
oa->o_valid |= bits;
spin_lock(&cli->cl_loi_list_lock);
- if (OCD_HAS_FLAG(&cli->cl_import->imp_connect_data, GRANT_PARAM))
+ if (cli->cl_ocd_grant_param)
oa->o_dirty = cli->cl_dirty_grant;
else
oa->o_dirty = cli->cl_dirty_pages << PAGE_SHIFT;
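+ /* ask for enough undirty grant to cover cl_max_rpcs_in_flight + 1
+ * full RPCs, but no less than the dirty page limit
+ */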
nrpages *= cli->cl_max_rpcs_in_flight + 1;
nrpages = max(nrpages, cli->cl_dirty_max_pages);
undirty = nrpages << PAGE_SHIFT;
- if (OCD_HAS_FLAG(&cli->cl_import->imp_connect_data,
- GRANT_PARAM)) {
+ if (cli->cl_ocd_grant_param) {
int nrextents;
/* take extent tax into account when asking for more
* grant space */
- nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
+ nrextents = (nrpages + cli->cl_max_extent_pages - 1) /
cli->cl_max_extent_pages;
undirty += nrextents * cli->cl_grant_extent_tax;
}
~(PTLRPC_MAX_BRW_SIZE * 4UL));
}
oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
- oa->o_dropped = cli->cl_lost_grant;
- cli->cl_lost_grant = 0;
+ /* o_dropped AKA o_misc is 32 bits, but cl_lost_grant is 64 bits */
+ if (cli->cl_lost_grant > INT_MAX) {
+ CDEBUG(D_CACHE,
+ "%s: avoided o_dropped overflow: cl_lost_grant %lu\n",
+ cli_name(cli), cli->cl_lost_grant);
+ oa->o_dropped = INT_MAX;
+ } else {
+ oa->o_dropped = cli->cl_lost_grant;
+ }
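+ /* only clear what was actually reported in o_dropped; any
+ * overflow remainder is kept for a later RPC
+ */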
+ cli->cl_lost_grant -= oa->o_dropped;
spin_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
- oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
+ CDEBUG(D_CACHE,
+ "%s: dirty: %llu undirty: %u dropped %u grant: %llu cl_lost_grant %lu\n",
+ cli_name(cli), oa->o_dirty, oa->o_undirty, oa->o_dropped,
+ oa->o_grant, cli->cl_lost_grant);
}
void osc_update_next_shrink(struct client_obd *cli)
spin_lock(&cli->cl_loi_list_lock);
cli->cl_avail_grant = ocd->ocd_grant;
if (cli->cl_import->imp_state != LUSTRE_IMP_EVICTED) {
- cli->cl_avail_grant -= cli->cl_reserved_grant;
+ unsigned long consumed = cli->cl_reserved_grant;
+
if (OCD_HAS_FLAG(ocd, GRANT_PARAM))
- cli->cl_avail_grant -= cli->cl_dirty_grant;
+ consumed += cli->cl_dirty_grant;
else
- cli->cl_avail_grant -=
- cli->cl_dirty_pages << PAGE_SHIFT;
+ consumed += cli->cl_dirty_pages << PAGE_SHIFT;
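+ /* do not let the available grant go negative if the server
+ * granted less than we have already consumed
+ */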
+ if (cli->cl_avail_grant < consumed) {
+ CERROR("%s: granted %ld but already consumed %lu\n",
+ cli_name(cli), cli->cl_avail_grant, consumed);
+ cli->cl_avail_grant = 0;
+ } else {
+ cli->cl_avail_grant -= consumed;
+ }
}
if (OCD_HAS_FLAG(ocd, GRANT_PARAM)) {
~chunk_mask) & chunk_mask;
/* determine maximum extent size, in #pages */
size = (u64)ocd->ocd_grant_max_blks << ocd->ocd_grant_blkbits;
- cli->cl_max_extent_pages = size >> PAGE_SHIFT;
- if (cli->cl_max_extent_pages == 0)
- cli->cl_max_extent_pages = 1;
+ cli->cl_max_extent_pages = (size >> PAGE_SHIFT) ?: 1;
+ cli->cl_ocd_grant_param = 1;
} else {
+ cli->cl_ocd_grant_param = 0;
cli->cl_grant_extent_tax = 0;
cli->cl_chunkbits = PAGE_SHIFT;
cli->cl_max_extent_pages = DT_MAX_BRW_PAGES;
int i;
for (i = 0; i < page_count; i++) {
- if (pga[i]->pg->mapping)
- /* bounce pages are unmapped */
- continue;
- if (pga[i]->flag & OBD_BRW_SYNC)
- /* sync transfer cannot have encrypted pages */
- continue;
- llcrypt_finalize_bounce_page(&pga[i]->pg);
+ /* Bounce pages allocated by a call to
+ * llcrypt_encrypt_pagecache_blocks() in osc_brw_prep_request()
+ * are identified by the PageChecked flag.
+ */
+ if (PageChecked(pga[i]->pg))
+ llcrypt_finalize_bounce_page(&pga[i]->pg);
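+ /* restore the clear text count/offset saved in
+ * osc_brw_prep_request()
+ */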
pga[i]->count -= pga[i]->bp_count_diff;
pga[i]->off += pga[i]->bp_off_diff;
}
void *short_io_buf;
const char *obd_name = cli->cl_import->imp_obd->obd_name;
struct inode *inode;
+ bool directio = false;
ENTRY;
inode = page2inode(pga[0]->pg);
+ if (inode == NULL) {
+ /* Try to get a reference to the inode from the cl_page
+ * if we are dealing with direct IO, as the pages handled
+ * here are not actual page cache pages.
+ */
+ struct osc_async_page *oap = brw_page2oap(pga[0]);
+ struct cl_page *clpage = oap2cl_page(oap);
+
+ inode = clpage->cp_inode;
+ if (inode)
+ directio = true;
+ }
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
RETURN(-ENOMEM); /* Recoverable */
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
struct page *data_page = NULL;
bool retried = false;
bool lockedbymyself;
+ u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
+ struct address_space *map_orig = NULL;
+ pgoff_t index_orig;
retry_encrypt:
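+ /* round nunits up to a whole multiple of
+ * LUSTRE_ENCRYPTION_UNIT_SIZE: encryption works on
+ * complete encryption units
+ */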
+ if (nunits & ~LUSTRE_ENCRYPTION_MASK)
+ nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
+ LUSTRE_ENCRYPTION_UNIT_SIZE;
/* The page can already be locked when we arrive here.
 * This is possible when cl_page_assume/vvp_page_assume
 * is stuck on wait_on_page_writeback with the page lock
 * held, which means it is only released once the page is
 * fully processed.
 */
lockedbymyself = trylock_page(pg->pg);
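+ /* Direct IO pages are not page cache pages, so they have
+ * no mapping and no meaningful index. Temporarily borrow
+ * the inode's mapping and derive the index from the file
+ * offset, so llcrypt_encrypt_pagecache_blocks() can find
+ * the inode and logical block number.
+ */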
+ if (directio) {
+ map_orig = pg->pg->mapping;
+ pg->pg->mapping = inode->i_mapping;
+ index_orig = pg->pg->index;
+ pg->pg->index = pg->off >> PAGE_SHIFT;
+ }
data_page =
llcrypt_encrypt_pagecache_blocks(pg->pg,
- PAGE_SIZE, 0,
+ nunits, 0,
GFP_NOFS);
+ if (directio) {
+ pg->pg->mapping = map_orig;
+ pg->pg->index = index_orig;
+ }
if (lockedbymyself)
unlock_page(pg->pg);
if (IS_ERR(data_page)) {
ptlrpc_request_free(req);
RETURN(rc);
}
+ /* Set PageChecked flag on bounce page for
+ * disambiguation in osc_release_bounce_pages().
+ */
+ SetPageChecked(data_page);
pg->pg = data_page;
/* there should be no gap in the middle of page array */
if (i == page_count - 1) {
oa->o_size = oap->oap_count +
oap->oap_obj_off + oap->oap_page_off;
}
- /* len is forced to PAGE_SIZE, and poff to 0
+ /* len is forced to nunits, and relative offset to 0
* so store the old, clear text info
*/
- pg->bp_count_diff = PAGE_SIZE - pg->count;
- pg->count = PAGE_SIZE;
+ pg->bp_count_diff = nunits - pg->count;
+ pg->count = nunits;
+ pg->bp_off_diff = pg->off & ~PAGE_MASK;
+ pg->off = pg->off & PAGE_MASK;
+ }
+ } else if (opc == OST_READ && inode && IS_ENCRYPTED(inode)) {
+ for (i = 0; i < page_count; i++) {
+ struct brw_page *pg = pga[i];
+ u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
+
+ if (nunits & ~LUSTRE_ENCRYPTION_MASK)
+ nunits = (nunits & LUSTRE_ENCRYPTION_MASK) +
+ LUSTRE_ENCRYPTION_UNIT_SIZE;
+ /* count/off are forced to cover the whole encryption
+ * unit size so that all encrypted data is stored on the
+ * OST, so adjust bp_{count,off}_diff for the size of
+ * the clear text.
+ */
+ pg->bp_count_diff = nunits - pg->count;
+ pg->count = nunits;
pg->bp_off_diff = pg->off & ~PAGE_MASK;
pg->off = pg->off & PAGE_MASK;
}
req_capsule_set_size(pill, &RMF_NIOBUF_REMOTE, RCL_CLIENT,
niocount * sizeof(*niobuf));
- for (i = 0; i < page_count; i++)
+ for (i = 0; i < page_count; i++) {
short_io_size += pga[i]->count;
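+ /* bp_count_diff/bp_off_diff are applied unconditionally
+ * in osc_release_bounce_pages(), so clear them when no
+ * encryption adjustment took place
+ */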
+ if (!inode || !IS_ENCRYPTED(inode)) {
+ pga[i]->bp_count_diff = 0;
+ pga[i]->bp_off_diff = 0;
+ }
+ }
/* Check if read/write is small enough to be a short io. */
if (short_io_size > cli->cl_max_short_io_bytes || niocount > 1 ||
* file/fid, not during the resends/retries. */
snprintf(dbgcksum_file_name, sizeof(dbgcksum_file_name),
"%s-checksum_dump-osc-"DFID":[%llu-%llu]-%x-%x",
- (strncmp(libcfs_debug_file_path_arr, "NONE", 4) != 0 ?
- libcfs_debug_file_path_arr :
- LIBCFS_DEBUG_FILE_PATH_DEFAULT),
+ (strncmp(libcfs_debug_file_path, "NONE", 4) != 0 ?
+ libcfs_debug_file_path : LIBCFS_DEBUG_FILE_PATH_DEFAULT),
oa->o_valid & OBD_MD_FLFID ? oa->o_parent_seq : 0ULL,
oa->o_valid & OBD_MD_FLFID ? oa->o_parent_oid : 0,
oa->o_valid & OBD_MD_FLFID ? oa->o_parent_ver : 0,
struct ost_body *body;
u32 client_cksum = 0;
struct inode *inode;
+ unsigned int blockbits = 0, blocksize = 0;
ENTRY;
}
inode = page2inode(aa->aa_ppga[0]->pg);
+ if (inode == NULL) {
+ /* Try to get a reference to the inode from the cl_page
+ * if we are dealing with direct IO, as the pages handled
+ * here are not actual page cache pages.
+ */
+ struct osc_async_page *oap = brw_page2oap(aa->aa_ppga[0]);
+
+ inode = oap2cl_page(oap)->cp_inode;
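+ /* a non-zero blockbits marks the direct IO case and
+ * selects the per-block in-place decryption path below
+ */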
+ if (inode) {
+ blockbits = inode->i_blkbits;
+ blocksize = 1 << blockbits;
+ }
+ }
if (inode && IS_ENCRYPTED(inode)) {
int idx;
}
for (idx = 0; idx < aa->aa_page_count; idx++) {
struct brw_page *pg = aa->aa_ppga[idx];
- __u64 *p, *q;
-
- /* do not decrypt if page is all 0s */
- p = q = page_address(pg->pg);
- while (p - q < PAGE_SIZE / sizeof(*p)) {
- if (*p != 0)
+ unsigned int offs = 0;
+
+ while (offs < PAGE_SIZE) {
+ /* do not decrypt if page is all 0s */
+ if (memchr_inv(page_address(pg->pg) + offs, 0,
+ LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
+ /* if the page is empty, forward this info
+ * to upper layers (ll_io_zero_page) by
+ * clearing PagePrivate2
+ */
+ if (!offs)
+ ClearPagePrivate2(pg->pg);
break;
- p++;
- }
- if (p - q == PAGE_SIZE / sizeof(*p))
- continue;
+ }
- rc = llcrypt_decrypt_pagecache_blocks(pg->pg,
- PAGE_SIZE, 0);
- if (rc)
- GOTO(out, rc);
+ if (blockbits) {
+ /* This is the direct IO case. Directly
+ * call the decrypt function that takes
+ * the inode as an input parameter. The
+ * page does not need to be locked.
+ */
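+ /* lblk_num is the file logical block number
+ * of this block: page index times blocks per
+ * page, plus the block offset within the page
+ */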
+ u64 lblk_num =
+ ((u64)(pg->off >> PAGE_SHIFT) <<
+ (PAGE_SHIFT - blockbits)) +
+ (offs >> blockbits);
+ unsigned int i;
+
+ for (i = offs;
+ i < offs +
+ LUSTRE_ENCRYPTION_UNIT_SIZE;
+ i += blocksize, lblk_num++) {
+ rc =
+ llcrypt_decrypt_block_inplace(
+ inode, pg->pg,
+ blocksize, i,
+ lblk_num);
+ if (rc)
+ break;
+ }
+ } else {
+ rc = llcrypt_decrypt_pagecache_blocks(
+ pg->pg,
+ LUSTRE_ENCRYPTION_UNIT_SIZE,
+ offs);
+ }
+ if (rc)
+ GOTO(out, rc);
+
+ offs += LUSTRE_ENCRYPTION_UNIT_SIZE;
+ }
}
}
list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
list_del_init(&ext->oe_link);
osc_extent_finish(env, ext, 1,
- rc && req->rq_no_delay ? -EWOULDBLOCK : rc);
+ rc && req->rq_no_delay ? -EAGAIN : rc);
}
LASSERT(list_empty(&aa->aa_exts));
LASSERT(list_empty(&aa->aa_oaps));
struct ost_lvb *lvb = aa->oa_lvb;
__u32 lvb_len = sizeof(*lvb);
__u64 flags = 0;
+ struct ldlm_enqueue_info einfo = {
+ .ei_type = aa->oa_type,
+ .ei_mode = mode,
+ };
ENTRY;
}
/* Complete obtaining the lock procedure. */
- rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_type, 1,
- aa->oa_mode, aa->oa_flags, lvb, lvb_len,
- lockh, rc);
+ rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, &einfo, 1, aa->oa_flags,
+ lvb, lvb_len, lockh, rc);
/* Complete osc stuff. */
rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
aa->oa_flags, aa->oa_speculative, rc);
if (intent != 0)
match_flags |= LDLM_FL_BLOCK_GRANTED;
mode = ldlm_lock_match(obd->obd_namespace, match_flags, res_id,
- einfo->ei_type, policy, mode, &lockh, 0);
+ einfo->ei_type, policy, mode, &lockh);
if (mode) {
struct ldlm_lock *matched;
if (*flags & (LDLM_FL_TEST_LOCK | LDLM_FL_MATCH_LOCK))
RETURN(-ENOLCK);
- if (intent) {
- req = ptlrpc_request_alloc(class_exp2cliimp(exp),
- &RQF_LDLM_ENQUEUE_LVB);
- if (req == NULL)
- RETURN(-ENOMEM);
-
- rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
- if (rc) {
- ptlrpc_request_free(req);
- RETURN(rc);
- }
-
- req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
- sizeof *lvb);
- ptlrpc_request_set_replen(req);
- }
-
/* users of osc_enqueue() can pass this flag for ldlm_lock_match() */
*flags &= ~LDLM_FL_BLOCK_GRANTED;
req->rq_interpret_reply = osc_enqueue_interpret;
ptlrpc_set_add_req(rqset, req);
- } else if (intent) {
- ptlrpc_req_finished(req);
}
RETURN(rc);
}
rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
flags, speculative, rc);
- if (intent)
- ptlrpc_req_finished(req);
RETURN(rc);
}
struct ldlm_res_id *res_id, enum ldlm_type type,
union ldlm_policy_data *policy, enum ldlm_mode mode,
__u64 *flags, struct osc_object *obj,
- struct lustre_handle *lockh, int unref)
+ struct lustre_handle *lockh, enum ldlm_match_flags match_flags)
{
struct obd_device *obd = exp->exp_obd;
__u64 lflags = *flags;
policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
policy->l_extent.end |= ~PAGE_MASK;
- /* Next, search for already existing extent locks that will cover us */
- /* If we're trying to read, we also search for an existing PW lock. The
- * VFS and page cache already protect us locally, so lots of readers/
- * writers can share a single PW lock. */
- rc = mode;
- if (mode == LCK_PR)
- rc |= LCK_PW;
- rc = ldlm_lock_match(obd->obd_namespace, lflags,
- res_id, type, policy, rc, lockh, unref);
+ /* Next, search for already existing extent locks that will cover us */
+ rc = ldlm_lock_match_with_skip(obd->obd_namespace, lflags, 0,
+ res_id, type, policy, mode, lockh,
+ match_flags);
if (rc == 0 || lflags & LDLM_FL_TEST_LOCK)
RETURN(rc);
struct obd_device *obd = class_exp2obd(exp);
struct obd_statfs *msfs;
struct ptlrpc_request *req;
- struct obd_import *imp = NULL;
+ struct obd_import *imp, *imp0;
int rc;
ENTRY;
-
- /*Since the request might also come from lprocfs, so we need
- *sync this with client_disconnect_export Bug15684*/
- down_read(&obd->u.cli.cl_sem);
- if (obd->u.cli.cl_import)
- imp = class_import_get(obd->u.cli.cl_import);
- up_read(&obd->u.cli.cl_sem);
- if (!imp)
- RETURN(-ENODEV);
+ /* Since the request might also come from lprocfs, we need to
+ * sync this with client_disconnect_export (bug 15684).
+ */
+ with_imp_locked(obd, imp0, rc)
+ imp = class_import_get(imp0);
+ if (rc)
+ RETURN(rc);
/* We could possibly pass max_age in the request (as an absolute
* timestamp or a "seconds.usec ago") so the target can avoid doing
.o_quotactl = osc_quotactl,
};
-static struct shrinker *osc_cache_shrinker;
LIST_HEAD(osc_shrink_list);
DEFINE_SPINLOCK(osc_shrink_lock);
-#ifndef HAVE_SHRINKER_COUNT
-static int osc_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
+#ifdef HAVE_SHRINKER_COUNT
+static struct shrinker osc_cache_shrinker = {
+ .count_objects = osc_cache_shrink_count,
+ .scan_objects = osc_cache_shrink_scan,
+ .seeks = DEFAULT_SEEKS,
+};
+#else
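+/* Older kernels have a single .shrink callback that must both
+ * scan and return the remaining object count; wrap the two
+ * modern callbacks to emulate it.
+ */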
+static int osc_cache_shrink(struct shrinker *shrinker,
+ struct shrink_control *sc)
{
- struct shrink_control scv = {
- .nr_to_scan = shrink_param(sc, nr_to_scan),
- .gfp_mask = shrink_param(sc, gfp_mask)
- };
- (void)osc_cache_shrink_scan(shrinker, &scv);
- return osc_cache_shrink_count(shrinker, &scv);
+ (void)osc_cache_shrink_scan(shrinker, sc);
+ return osc_cache_shrink_count(shrinker, sc);
}
+
+static struct shrinker osc_cache_shrinker = {
+ .shrink = osc_cache_shrink,
+ .seeks = DEFAULT_SEEKS,
+};
#endif
static int __init osc_init(void)
unsigned int reqpool_size;
unsigned int reqsize;
int rc;
- DEF_SHRINKER_VAR(osc_shvar, osc_cache_shrink,
- osc_cache_shrink_count, osc_cache_shrink_scan);
ENTRY;
/* print an address of _any_ initialized kernel symbol from this
if (rc)
RETURN(rc);
- rc = class_register_type(&osc_obd_ops, NULL, true, NULL,
+ rc = class_register_type(&osc_obd_ops, NULL, true,
LUSTRE_OSC_NAME, &osc_device_type);
if (rc)
GOTO(out_kmem, rc);
- osc_cache_shrinker = set_shrinker(DEFAULT_SEEKS, &osc_shvar);
+ rc = register_shrinker(&osc_cache_shrinker);
+ if (rc)
+ GOTO(out_type, rc);
/* This is obviously too much memory, only prevent overflow here */
if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0)
- GOTO(out_type, rc = -EINVAL);
+ GOTO(out_shrinker, rc = -EINVAL);
reqpool_size = osc_reqpool_mem_max << 20;
ptlrpc_add_rqs_to_pool);
if (osc_rq_pool == NULL)
- GOTO(out_type, rc = -ENOMEM);
+ GOTO(out_shrinker, rc = -ENOMEM);
rc = osc_start_grant_work();
if (rc != 0)
out_req_pool:
ptlrpc_free_rq_pool(osc_rq_pool);
+out_shrinker:
+ unregister_shrinker(&osc_cache_shrinker);
out_type:
class_unregister_type(LUSTRE_OSC_NAME);
out_kmem:
static void __exit osc_exit(void)
{
osc_stop_grant_work();
- remove_shrinker(osc_cache_shrinker);
+ unregister_shrinker(&osc_cache_shrinker);
class_unregister_type(LUSTRE_OSC_NAME);
lu_kmem_fini(osc_caches);
ptlrpc_free_rq_pool(osc_rq_pool);