-LPROC_SEQ_FOPS_WR_ONLY(zfs, osd_force_sync);
-
-LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_blksize);
-LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_kbytestotal);
-LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_kbytesfree);
-LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_kbytesavail);
-LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_filestotal);
-LPROC_SEQ_FOPS_RO_TYPE(zfs, dt_filesfree);
-
-struct lprocfs_vars lprocfs_osd_obd_vars[] = {
- { .name = "blocksize",
- .fops = &zfs_dt_blksize_fops },
- { .name = "kbytestotal",
- .fops = &zfs_dt_kbytestotal_fops },
- { .name = "kbytesfree",
- .fops = &zfs_dt_kbytesfree_fops },
- { .name = "kbytesavail",
- .fops = &zfs_dt_kbytesavail_fops },
- { .name = "filestotal",
- .fops = &zfs_dt_filestotal_fops },
- { .name = "filesfree",
- .fops = &zfs_dt_filesfree_fops },
- { .name = "fstype",
- .fops = &zfs_osd_fstype_fops },
- { .name = "mntdev",
- .fops = &zfs_osd_mntdev_fops },
- { .name = "force_sync",
- .fops = &zfs_osd_force_sync_fops },
+/*
+ * Write-only "force_sync" sysfs attribute; presumably pairs with a
+ * force_sync_store() handler defined earlier in this file (outside this
+ * hunk) — TODO confirm against the full file.
+ */
+LUSTRE_WO_ATTR(force_sync);
+
+/*
+ * nonrotational_show - report the osd_device's od_nonrotational flag.
+ *
+ * Resolves the osd_device from the embedding dt_device's kobject and
+ * prints od_nonrotational as an unsigned decimal followed by a newline.
+ * Returns -EINPROGRESS while od_os is still NULL (device not yet set up),
+ * otherwise the number of bytes written to @buf.
+ */
+static ssize_t nonrotational_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct dt_device *dt = container_of(kobj, struct dt_device,
+ dd_kobj);
+ struct osd_device *osd = osd_dt_dev(dt);
+
+ LASSERT(osd);
+ /* od_os NULL means the objset is not attached yet; tell the reader
+ * to retry rather than dereferencing an unready device. */
+ if (!osd->od_os)
+ return -EINPROGRESS;
+
+ return sprintf(buf, "%u\n", osd->od_nonrotational);
+}
+
+/*
+ * nonrotational_store - set the osd_device's od_nonrotational flag.
+ *
+ * Parses @buffer as a boolean via kstrtobool() (so "0"/"1"/"y"/"n" style
+ * inputs are accepted) and assigns the result to od_nonrotational.
+ * Returns @count on success, -EINPROGRESS while od_os is NULL (device
+ * not yet set up), or the kstrtobool() error for unparsable input.
+ */
+static ssize_t nonrotational_store(struct kobject *kobj,
+ struct attribute *attr, const char *buffer,
+ size_t count)
+{
+ struct dt_device *dt = container_of(kobj, struct dt_device,
+ dd_kobj);
+ struct osd_device *osd = osd_dt_dev(dt);
+ bool val;
+ int rc;
+
+ LASSERT(osd);
+ if (!osd->od_os)
+ return -EINPROGRESS;
+
+ rc = kstrtobool(buffer, &val);
+ if (rc)
+ return rc;
+
+ osd->od_nonrotational = val;
+ return count;
+}
+LUSTRE_RW_ATTR(nonrotational);
+
+/*
+ * index_backup_show - report the current od_index_backup_policy value.
+ *
+ * Prints the policy as a signed decimal followed by a newline.  Returns
+ * -EINPROGRESS while od_os is still NULL (device not yet set up),
+ * otherwise the number of bytes written to @buf.
+ */
+static ssize_t index_backup_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct dt_device *dt = container_of(kobj, struct dt_device,
+ dd_kobj);
+ struct osd_device *dev = osd_dt_dev(dt);
+
+ LASSERT(dev);
+ if (!dev->od_os)
+ return -EINPROGRESS;
+
+ return sprintf(buf, "%d\n", dev->od_index_backup_policy);
+}
+
+/*
+ * index_backup_store - set od_index_backup_policy from user input.
+ *
+ * Parses @buffer with kstrtoint(..., 0, ...), so decimal, octal and hex
+ * forms are accepted; the value is stored without range validation.
+ * Returns @count on success, -EINPROGRESS while od_os is NULL, or the
+ * kstrtoint() error for unparsable input.
+ *
+ * NOTE(review): unlike every sibling handler here this is not declared
+ * static — looks like an accidental global-namespace leak, but it may be
+ * referenced from another file; confirm before adding static.
+ */
+ssize_t index_backup_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ struct dt_device *dt = container_of(kobj, struct dt_device,
+ dd_kobj);
+ struct osd_device *dev = osd_dt_dev(dt);
+ int val;
+ int rc;
+
+ LASSERT(dev);
+ if (!dev->od_os)
+ return -EINPROGRESS;
+
+ rc = kstrtoint(buffer, 0, &val);
+ if (rc)
+ return rc;
+
+ dev->od_index_backup_policy = val;
+ return count;
+}
+LUSTRE_RW_ATTR(index_backup);
+
+/*
+ * zfs_osd_readcache_seq_show - print od_readcache_max_filesize (bytes).
+ *
+ * debugfs seq_file reader; m->private carries the dt_device pointer.
+ * Returns 0 on success or -EINPROGRESS while od_os is still NULL.
+ */
+static int zfs_osd_readcache_seq_show(struct seq_file *m, void *data)
+{
+ struct osd_device *osd = osd_dt_dev((struct dt_device *)m->private);
+
+ LASSERT(osd != NULL);
+ if (unlikely(osd->od_os == NULL))
+ return -EINPROGRESS;
+
+ seq_printf(m, "%llu\n", osd->od_readcache_max_filesize);
+ return 0;
+}
+
+/*
+ * zfs_osd_readcache_seq_write - set od_readcache_max_filesize.
+ *
+ * Copies at most sizeof(kernbuf)-1 bytes from userspace, NUL-terminates,
+ * and parses the value with sysfs_memparse() using "B" (bytes) as the
+ * default unit, so size suffixes are honoured.  The stored value is
+ * clamped to OSD_MAX_CACHE_SIZE.  Returns @count on success,
+ * -EINPROGRESS while od_os is NULL, -EINVAL for oversized input,
+ * -EFAULT on a failed user copy, or the sysfs_memparse() error.
+ */
+static ssize_t
+zfs_osd_readcache_seq_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *off)
+{
+ struct seq_file *m = file->private_data;
+ struct dt_device *dt = m->private;
+ struct osd_device *osd = osd_dt_dev(dt);
+ char kernbuf[22] = "";
+ u64 val;
+ int rc;
+
+ LASSERT(osd != NULL);
+ if (unlikely(osd->od_os == NULL))
+ return -EINPROGRESS;
+
+ /* reject input that would not fit with its terminating NUL */
+ if (count >= sizeof(kernbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kernbuf, buffer, count))
+ return -EFAULT;
+ kernbuf[count] = 0;
+
+ rc = sysfs_memparse(kernbuf, count, &val, "B");
+ if (rc < 0)
+ return rc;
+
+ /* silently clamp rather than error on values above the cap */
+ osd->od_readcache_max_filesize = val > OSD_MAX_CACHE_SIZE ?
+ OSD_MAX_CACHE_SIZE : val;
+ return count;
+}
+LDEBUGFS_SEQ_FOPS(zfs_osd_readcache);
+
+/*
+ * sysfs attributes exported for the ZFS OSD.  fstype, mntdev and
+ * auto_scrub are presumably defined elsewhere in this file (outside this
+ * hunk) — confirm against the full file.  NULL-terminated as required
+ * by the attribute-group API.
+ */
+static struct attribute *zfs_attrs[] = {
+ &lustre_attr_fstype.attr,
+ &lustre_attr_mntdev.attr,
+ &lustre_attr_force_sync.attr,
+ &lustre_attr_nonrotational.attr,
+ &lustre_attr_index_backup.attr,
+ &lustre_attr_auto_scrub.attr,
+ NULL,
+};
+
+struct ldebugfs_vars ldebugfs_osd_obd_vars[] = {
+ { .name = "oi_scrub",
+ .fops = &zfs_osd_oi_scrub_fops },
+ { .name = "readcache_max_filesize",
+ .fops = &zfs_osd_readcache_fops },