Add an SBI flag so hybrid IO can be fully disabled.
Test-Parameters: trivial
Signed-off-by: Patrick Farrell <pfarrell@whamcloud.com>
Signed-off-by: Qian Yingjin <qian@ddn.com>
Change-Id: I2825b4cf261f98d71a18cd66d6fe3632dfabc37a
Reviewed-on: https://review.whamcloud.com/c/fs/lustre-release/+/52592
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Oleg Drokin <green@whamcloud.com>
Reviewed-by: Shaun Tancheff <shaun.tancheff@hpe.com>
 * because they're visible in userspace. So we check for IOCB_DIRECT
*/
#ifdef IOCB_DIRECT
+ struct inode *inode = file_inode(file);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
+
ENTRY;
/* it doesn't make sense to switch unless it's READ or WRITE */
if (iocb->ki_flags & IOCB_DIRECT)
RETURN(false);
+ if (!test_bit(LL_SBI_HYBRID_IO, sbi->ll_flags))
+ RETURN(false);
#endif
RETURN(false);
}
int ll_security_secctx_name_filter(struct ll_sb_info *sbi, int xattr_type,
const char *suffix);
+/* Return true if the server advertises unaligned DIO support.
+ *
+ * OBD_CONNECT_FLAGS2 must be checked first: only when it is set in
+ * ocd_connect_flags is the ocd_connect_flags2 field valid at all, so the
+ * OBD_CONNECT2_UNALIGNED_DIO bit may only be consulted behind that guard.
+ * (Note '&' binds tighter than '&&', so each side is a masked test.)
+ */
+static inline bool obd_connect_has_unaligned_dio(struct obd_connect_data *data)
+{
+	return data->ocd_connect_flags & OBD_CONNECT_FLAGS2 &&
+	       data->ocd_connect_flags2 & OBD_CONNECT2_UNALIGNED_DIO;
+}
+
static inline bool obd_connect_has_enc(struct obd_connect_data *data)
{
#ifdef HAVE_LUSTRE_CRYPTO
LL_SBI_PARALLEL_DIO, /* parallel (async) O_DIRECT RPCs */
LL_SBI_ENCRYPT_NAME, /* name encryption */
LL_SBI_UNALIGNED_DIO, /* unaligned DIO */
+ LL_SBI_HYBRID_IO, /* allow BIO as DIO */
LL_SBI_NUM_FLAGS
};
set_bit(LL_SBI_STATFS_PROJECT, sbi->ll_flags);
ll_sbi_set_encrypt(sbi, true);
ll_sbi_set_name_encrypt(sbi, true);
+ set_bit(LL_SBI_HYBRID_IO, sbi->ll_flags);
/* root squash */
sbi->ll_squash.rsi_uid = 0;
LCONSOLE_WARN("Test dummy encryption mode enabled\n");
}
+ /* If unaligned DIO is not supported, hybrid IO will result in EINVAL,
+ * so turn hybrid IO off by default. If the user turns it back on, they
+ * will get EINVAL, but should be able to figure out the cause.
+ */
+ if (test_bit(LL_SBI_HYBRID_IO, sbi->ll_flags) &&
+ !obd_connect_has_unaligned_dio(&sbi->ll_dt_obd->u.lov.lov_ocd))
+ clear_bit(LL_SBI_HYBRID_IO, sbi->ll_flags);
+
sbi->ll_dt_exp->exp_connect_data = *data;
/* Don't change value if it was specified in the config log */
{LL_SBI_TINY_WRITE, "tiny_write"},
{LL_SBI_FILE_HEAT, "file_heat"},
{LL_SBI_PARALLEL_DIO, "parallel_dio"},
+ {LL_SBI_HYBRID_IO, "hybrid_io"},
{LL_SBI_ENCRYPT_NAME, "name_encrypt"},
{LL_SBI_UNALIGNED_DIO, "unaligned_dio"},
};
}
LUSTRE_RW_ATTR(parallel_dio);
+/* Sysfs read handler for the "hybrid_io" tunable: prints 1 when the
+ * LL_SBI_HYBRID_IO flag is set for this mount (buffered IO may be issued
+ * as DIO), 0 otherwise.
+ */
+static ssize_t hybrid_io_show(struct kobject *kobj, struct attribute *attr,
+			      char *buf)
+{
+	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+					      ll_kset.kobj);
+
+	return snprintf(buf, PAGE_SIZE, "%u\n",
+			test_bit(LL_SBI_HYBRID_IO, sbi->ll_flags));
+}
+
+/* Sysfs write handler for the "hybrid_io" tunable: parses a boolean
+ * ("1"/"0"/"y"/"n"/...) and sets or clears LL_SBI_HYBRID_IO accordingly.
+ *
+ * Returns @count on success or the negative errno from kstrtobool() on a
+ * malformed value.
+ */
+static ssize_t hybrid_io_store(struct kobject *kobj, struct attribute *attr,
+			       const char *buffer, size_t count)
+{
+	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+					      ll_kset.kobj);
+	bool val;
+	int rc;
+
+	rc = kstrtobool(buffer, &val);
+	if (rc)
+		return rc;
+
+	/* NOTE(review): set_bit/clear_bit are already atomic; ll_lock is
+	 * presumably taken to serialize with other ll_flags updates done
+	 * under the same lock — confirm against other ll_lock users.
+	 */
+	spin_lock(&sbi->ll_lock);
+	if (val)
+		set_bit(LL_SBI_HYBRID_IO, sbi->ll_flags);
+	else
+		clear_bit(LL_SBI_HYBRID_IO, sbi->ll_flags);
+	spin_unlock(&sbi->ll_lock);
+
+	return count;
+}
+LUSTRE_RW_ATTR(hybrid_io);
+
static ssize_t max_read_ahead_async_active_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
&lustre_attr_tiny_write.attr,
&lustre_attr_parallel_dio.attr,
&lustre_attr_unaligned_dio.attr,
+ &lustre_attr_hybrid_io.attr,
&lustre_attr_file_heat.attr,
&lustre_attr_heat_decay_percentage.attr,
&lustre_attr_heat_period_second.attr,