static int ll_create_it(struct inode *dir, struct dentry *dentry,
struct lookup_intent *it,
- void *secctx, __u32 secctxlen);
+ void *secctx, __u32 secctxlen, bool encrypt);
/* called from iget5_locked->find_inode() under inode_lock spinlock */
static int ll_test_inode(struct inode *inode, void *opaque)
struct lookup_intent *it,
struct inode *parent, struct dentry **de,
void *secctx, __u32 secctxlen,
- ktime_t kstart)
+ ktime_t kstart, bool encrypt)
{
struct inode *inode = NULL;
__u64 bits = 0;
/* we have lookup look - unhide dentry */
if (bits & MDS_INODELOCK_LOOKUP)
d_lustre_revalidate(*de);
+
+ if (encrypt) {
+ rc = llcrypt_get_encryption_info(inode);
+ if (rc)
+ GOTO(out, rc);
+ if (!llcrypt_has_encryption_key(inode))
+ GOTO(out, rc = -ENOKEY);
+ }
} else if (!it_disposition(it, DISP_OPEN_CREATE)) {
/*
* If file was created on the server, the dentry is revalidated
static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
struct lookup_intent *it,
void **secctx, __u32 *secctxlen,
- struct pcc_create_attach *pca)
+ struct pcc_create_attach *pca,
+ bool encrypt)
{
ktime_t kstart = ktime_get();
struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
rc = ll_lookup_it_finish(req, it, parent, &dentry,
secctx != NULL ? *secctx : NULL,
secctxlen != NULL ? *secctxlen : 0,
- kstart);
- if (rc != 0) {
- ll_intent_release(it);
- GOTO(out, retval = ERR_PTR(rc));
- }
-
- if ((it->it_op & IT_OPEN) && dentry->d_inode &&
- !S_ISREG(dentry->d_inode->i_mode) &&
- !S_ISDIR(dentry->d_inode->i_mode)) {
- ll_release_openhandle(dentry, it);
- }
- ll_lookup_finish_locks(it, dentry);
+ kstart, encrypt);
+ if (rc != 0) {
+ ll_intent_release(it);
+ GOTO(out, retval = ERR_PTR(rc));
+ }
+
+ if ((it->it_op & IT_OPEN) && dentry->d_inode &&
+ !S_ISREG(dentry->d_inode->i_mode) &&
+ !S_ISDIR(dentry->d_inode->i_mode)) {
+ ll_release_openhandle(dentry, it);
+ }
+ ll_lookup_finish_locks(it, dentry);
GOTO(out, retval = (dentry == save) ? NULL : dentry);
itp = NULL;
else
itp = ⁢
- de = ll_lookup_it(parent, dentry, itp, NULL, NULL, NULL);
+ de = ll_lookup_it(parent, dentry, itp, NULL, NULL, NULL, false);
if (itp != NULL)
ll_intent_release(itp);
long long lookup_flags = LOOKUP_OPEN;
void *secctx = NULL;
__u32 secctxlen = 0;
- struct ll_sb_info *sbi;
+ struct ll_sb_info *sbi = NULL;
struct pcc_create_attach pca = { NULL, NULL };
+ bool encrypt = false;
int rc = 0;
ENTRY;
it->it_flags = (open_flags & ~O_ACCMODE) | OPEN_FMODE(open_flags);
it->it_flags &= ~MDS_OPEN_FL_INTERNAL;
+ if (IS_ENCRYPTED(dir)) {
+ /* we know that we are going to create a regular file because
+ * we set S_IFREG bit on it->it_create_mode above
+ */
+ rc = llcrypt_get_encryption_info(dir);
+ if (rc)
+ GOTO(out_release, rc);
+ if (!llcrypt_has_encryption_key(dir))
+ GOTO(out_release, rc = -ENOKEY);
+ encrypt = true;
+ rc = 0;
+ }
+
/* Dentry added to dcache tree in ll_lookup_it */
- de = ll_lookup_it(dir, dentry, it, &secctx, &secctxlen, &pca);
+ de = ll_lookup_it(dir, dentry, it, &secctx, &secctxlen, &pca, encrypt);
if (IS_ERR(de))
rc = PTR_ERR(de);
else if (de != NULL)
if (!rc) {
if (it_disposition(it, DISP_OPEN_CREATE)) {
/* Dentry instantiated in ll_create_it. */
- rc = ll_create_it(dir, dentry, it, secctx, secctxlen);
+ rc = ll_create_it(dir, dentry, it, secctx, secctxlen,
+ encrypt);
security_release_secctx(secctx, secctxlen);
if (rc) {
/* We dget in ll_splice_alias. */
*/
static int ll_create_it(struct inode *dir, struct dentry *dentry,
struct lookup_intent *it,
- void *secctx, __u32 secctxlen)
+ void *secctx, __u32 secctxlen, bool encrypt)
{
struct inode *inode;
__u64 bits = 0;
d_instantiate(dentry, inode);
+ if (encrypt) {
+ rc = llcrypt_inherit_context(dir, inode, dentry, true);
+ if (rc)
+ RETURN(rc);
+ }
+
if (!(ll_i2sbi(inode)->ll_flags & LL_SBI_FILE_SECCTX)) {
rc = ll_inode_init_security(dentry, inode, dir);
if (rc)
const char *tgt, umode_t mode, int rdev, __u32 opc)
{
struct qstr *name = &dchild->d_name;
- struct ptlrpc_request *request = NULL;
- struct md_op_data *op_data;
- struct inode *inode = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(dir);
- int tgt_len = 0;
- int err;
+ struct ptlrpc_request *request = NULL;
+ struct md_op_data *op_data = NULL;
+ struct inode *inode = NULL;
+ struct ll_sb_info *sbi = ll_i2sbi(dir);
+ int tgt_len = 0;
+ int encrypt = 0;
+ int err;
- ENTRY;
- if (unlikely(tgt != NULL))
- tgt_len = strlen(tgt) + 1;
+ ENTRY;
+ if (unlikely(tgt != NULL))
+ tgt_len = strlen(tgt) + 1;
again:
op_data = ll_prep_md_op_data(NULL, dir, NULL, name->name,
GOTO(err_exit, err);
}
+ if ((IS_ENCRYPTED(dir) &&
+ (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) ||
+ (unlikely(llcrypt_dummy_context_enabled(dir)) && S_ISDIR(mode))) {
+ err = llcrypt_get_encryption_info(dir);
+ if (err)
+ GOTO(err_exit, err);
+ if (!llcrypt_has_encryption_key(dir))
+ GOTO(err_exit, err = -ENOKEY);
+ encrypt = 1;
+ }
+
err = md_create(sbi->ll_md_exp, op_data, tgt, tgt_len, mode,
from_kuid(&init_user_ns, current_fsuid()),
from_kgid(&init_user_ns, current_fsgid()),
d_instantiate(dchild, inode);
+ if (encrypt) {
+ err = llcrypt_inherit_context(dir, inode, NULL, true);
+ if (err)
+ GOTO(err_exit, err);
+ }
+
if (!(sbi->ll_flags & LL_SBI_FILE_SECCTX)) {
err = ll_inode_init_security(dchild, inode, dir);
if (err)
if (!IS_ERR_OR_NULL(op_data))
ll_finish_md_op_data(op_data);
- return err;
+ RETURN(err);
}
static int ll_mknod(struct inode *dir, struct dentry *dchild, umode_t mode,
PFID(ll_inode2fid(src)), src,
PFID(ll_inode2fid(dir)), dir, new_dentry);
+ err = llcrypt_prepare_link(old_dentry, dir, new_dentry);
+ if (err)
+ RETURN(err);
+
op_data = ll_prep_md_op_data(NULL, src, dir, name->name, name->len,
0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
if (unlikely(d_mountpoint(src_dchild) || d_mountpoint(tgt_dchild)))
RETURN(-EBUSY);
+#ifdef HAVE_IOPS_RENAME_WITH_FLAGS
+ err = llcrypt_prepare_rename(src, src_dchild, tgt, tgt_dchild, flags);
+#else
+ err = llcrypt_prepare_rename(src, src_dchild, tgt, tgt_dchild, 0);
+#endif
+ if (err)
+ RETURN(err);
+
op_data = ll_prep_md_op_data(NULL, src, tgt, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
RETURN(rc);
}
+/*
+ * osc_release_bounce_pages() - undo encryption bounce-page substitution
+ * after a bulk write request has been processed.
+ *
+ * For every entry in @pga whose page was swapped for an encryption
+ * bounce page (the OST_WRITE encryption loop in osc_brw_prep_request()
+ * replaces pg->pg and records the clear-text deltas in bp_count_diff /
+ * bp_off_diff), free the bounce page with llcrypt_finalize_bounce_page()
+ * and restore the original clear-text count and offset.
+ *
+ * Pages with a non-NULL ->mapping are skipped: bounce pages are
+ * unmapped, so a still-mapped page was never substituted.  Pages with
+ * OBD_BRW_SYNC set are skipped too, since sync transfers cannot carry
+ * encrypted pages.
+ *
+ * Compiles to a no-op when HAVE_LUSTRE_CRYPTO is not defined.
+ */
+static inline void osc_release_bounce_pages(struct brw_page **pga,
+					    u32 page_count)
+{
+#ifdef HAVE_LUSTRE_CRYPTO
+	int i;
+
+	for (i = 0; i < page_count; i++) {
+		if (pga[i]->pg->mapping)
+			/* bounce pages are unmapped */
+			continue;
+		if (pga[i]->flag & OBD_BRW_SYNC)
+			/* sync transfer cannot have encrypted pages */
+			continue;
+		llcrypt_finalize_bounce_page(&pga[i]->pg);
+		pga[i]->count -= pga[i]->bp_count_diff;
+		pga[i]->off += pga[i]->bp_off_diff;
+	}
+#endif
+}
+
static int
osc_brw_prep_request(int cmd, struct client_obd *cli, struct obdo *oa,
u32 page_count, struct brw_page **pga,
struct ptlrpc_request **reqp, int resend)
{
- struct ptlrpc_request *req;
- struct ptlrpc_bulk_desc *desc;
- struct ost_body *body;
- struct obd_ioobj *ioobj;
- struct niobuf_remote *niobuf;
+ struct ptlrpc_request *req;
+ struct ptlrpc_bulk_desc *desc;
+ struct ost_body *body;
+ struct obd_ioobj *ioobj;
+ struct niobuf_remote *niobuf;
int niocount, i, requested_nob, opc, rc, short_io_size = 0;
- struct osc_brw_async_args *aa;
- struct req_capsule *pill;
- struct brw_page *pg_prev;
+ struct osc_brw_async_args *aa;
+ struct req_capsule *pill;
+ struct brw_page *pg_prev;
void *short_io_buf;
const char *obd_name = cli->cl_import->imp_obd->obd_name;
+ struct inode *inode;
- ENTRY;
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
- RETURN(-ENOMEM); /* Recoverable */
- if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
- RETURN(-EINVAL); /* Fatal */
+ ENTRY;
+ inode = page2inode(pga[0]->pg);
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
+ RETURN(-ENOMEM); /* Recoverable */
+ if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ2))
+ RETURN(-EINVAL); /* Fatal */
if ((cmd & OBD_BRW_WRITE) != 0) {
opc = OST_WRITE;
if (req == NULL)
RETURN(-ENOMEM);
+ if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
+ for (i = 0; i < page_count; i++) {
+ struct brw_page *pg = pga[i];
+ struct page *data_page = NULL;
+ bool retried = false;
+ bool lockedbymyself;
+
+retry_encrypt:
+ /* The page can already be locked when we arrive here.
+ * This is possible when cl_page_assume/vvp_page_assume
+ * is stuck on wait_on_page_writeback with page lock
+ * held. In this case there is no risk for the lock to
+ * be released while we are doing our encryption
+ * processing, because writeback against that page will
+ * end in vvp_page_completion_write/cl_page_completion,
+ * which means only once the page is fully processed.
+ */
+ lockedbymyself = trylock_page(pg->pg);
+ data_page =
+ llcrypt_encrypt_pagecache_blocks(pg->pg,
+ PAGE_SIZE, 0,
+ GFP_NOFS);
+ if (lockedbymyself)
+ unlock_page(pg->pg);
+ if (IS_ERR(data_page)) {
+ rc = PTR_ERR(data_page);
+ if (rc == -ENOMEM && !retried) {
+ retried = true;
+ rc = 0;
+ goto retry_encrypt;
+ }
+ ptlrpc_request_free(req);
+ RETURN(rc);
+ }
+ /* len is forced to PAGE_SIZE, and poff to 0
+ * so store the old, clear text info
+ */
+ pg->pg = data_page;
+ pg->bp_count_diff = PAGE_SIZE - pg->count;
+ pg->count = PAGE_SIZE;
+ pg->bp_off_diff = pg->off & ~PAGE_MASK;
+ pg->off = pg->off & PAGE_MASK;
+ }
+ }
+
for (niocount = i = 1; i < page_count; i++) {
if (!can_merge_pages(pga[i - 1], pga[i]))
niocount++;
rc = osc_brw_fini_request(req, rc);
CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc);
+
+ /* restore clear text pages */
+ osc_release_bounce_pages(aa->aa_ppga, aa->aa_page_count);
+
/*
* When server returns -EINPROGRESS, client should always retry
* regardless of the number of times the bulk was resent already.
if (oa)
OBD_SLAB_FREE_PTR(oa, osc_obdo_kmem);
- if (pga)
- OBD_FREE_PTR_ARRAY(pga, page_count);
+ if (pga) {
+ osc_release_bounce_pages(pga, page_count);
+ osc_release_ppga(pga, page_count);
+ }
/* this should happen rarely and is pretty bad, it makes the
* pending list not follow the dirty order */
while (!list_empty(ext_list)) {