The "ofd.*.sync_on_lock_cancel" tunable was inadvertently replaced
during procfs->sysfs changes in 2.12 with "sync_lock_cancel". Restore
the "sync_on_lock_cancel" tunable since it has existed since the 2.0
release and is definitely in use with several systems.
It isn't just a matter of restoring the old tunable name, since the
"mdt.*.sync_lock_cancel" name is also used since 2.8 and the code for
the two tunables was recently consolidated in the server target code.
Instead, keep the common "sync_lock_cancel" tunable name, add backward
compatibility for "sync_on_lock_cancel" for a number of releases, and
print a deprecation warning if the old name is used.
Fix up sanity.sh test_80 to check for both the old and new names,
but only if we actually need to change this tunable for ZFS, along
with minor test script style cleanups.
Fixes: 7059644e9ad3 ("LU-8066 ofd: migrate from proc to sysfs")
Change-Id: Iffe65f6268d94075c71b96d42fe60ef11ac39448
Signed-off-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-on: https://review.whamcloud.com/36748
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Shaun Tancheff <stancheff@cray.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
ssize_t grant_compat_disable_store(struct kobject *kobj,
struct attribute *attr,
const char *buffer, size_t count);
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 16, 53, 0)
+/* Non-static so the deprecated "sync_on_lock_cancel" compatibility
+ * wrappers in the OFD code can delegate to the common handlers; this
+ * whole section is removed together with those wrappers at 2.16.53.
+ */
+ssize_t sync_lock_cancel_show(struct kobject *kobj,
+ struct attribute *attr, char *buf);
+ssize_t sync_lock_cancel_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count);
+#endif
/* FMD */
void tgt_fmd_update(struct obd_export *exp, const struct lu_fid *fid,
#define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */
#define OSC_DEFAULT_RESENDS 10
-/* possible values for fo_sync_lock_cancel */
+/* possible values for lut_sync_lock_cancel */
enum {
NEVER_SYNC_ON_CANCEL = 0,
BLOCKING_SYNC_ON_CANCEL = 1,
return count;
}
-
LPROC_SEQ_FOPS(ofd_brw_size);
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 16, 53, 0)
+/* Set once the deprecation notice below has been printed.  The flag is
+ * checked without locking, so concurrent first users could each print the
+ * notice - harmless, it is informational only.
+ */
+static bool sync_on_lock_cancel_warned;
+
+/* Read handler for the deprecated "sync_on_lock_cancel" tunable name.
+ * Prints a one-time deprecation notice pointing at the new name, then
+ * delegates to the common sync_lock_cancel_show() handler.
+ */
+static ssize_t sync_on_lock_cancel_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ if (!sync_on_lock_cancel_warned) {
+ sync_on_lock_cancel_warned = true;
+ pr_info("ofd: 'obdfilter.*.sync_on_lock_cancel' is deprecated, use 'obdfilter.*.sync_lock_cancel' instead\n");
+ }
+ return sync_lock_cancel_show(kobj, attr, buf);
+}
+
+/* Write handler for the deprecated "sync_on_lock_cancel" tunable name.
+ * Prints the same one-time deprecation notice, then delegates to the
+ * common sync_lock_cancel_store() handler.
+ */
+static ssize_t sync_on_lock_cancel_store(struct kobject *kobj,
+ struct attribute *attr,
+ const char *buffer, size_t count)
+{
+ if (!sync_on_lock_cancel_warned) {
+ sync_on_lock_cancel_warned = true;
+ pr_info("ofd: 'obdfilter.*.sync_on_lock_cancel' is deprecated, use 'obdfilter.*.sync_lock_cancel' instead\n");
+ }
+ return sync_lock_cancel_store(kobj, attr, buffer, count);
+}
+/* Expose the old name as a read-write sysfs attribute; registered in the
+ * attribute list under the same version guard.
+ */
+LUSTRE_RW_ATTR(sync_on_lock_cancel);
+#endif
+
/**
* Show the limit of soft sync RPCs.
*
&lustre_attr_fstype.attr,
&lustre_attr_no_precreate.attr,
&lustre_attr_sync_journal.attr,
+#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 16, 53, 0)
+ &lustre_attr_sync_on_lock_cancel.attr,
+#endif
&lustre_attr_soft_sync_limit.attr,
&lustre_attr_lfsck_speed_limit.attr,
&lustre_attr_job_cleanup_interval.attr,
* \retval 0 and buffer filled with data on success
* \retval negative value on error
*/
-static ssize_t sync_lock_cancel_show(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ssize_t sync_lock_cancel_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
{
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kset.kobj);
return sprintf(buf, "%s\n",
sync_on_cancel_states[tgt->lut_sync_lock_cancel]);
}
+EXPORT_SYMBOL(sync_lock_cancel_show);
/**
* Change policy for handling dirty data under a lock being cancelled.
* \retval \a count on success
* \retval negative value on error
*/
-static ssize_t sync_lock_cancel_store(struct kobject *kobj,
- struct attribute *attr,
- const char *buffer, size_t count)
+ssize_t sync_lock_cancel_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t count)
{
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kset.kobj);
spin_unlock(&tgt->lut_flags_lock);
return count;
}
+EXPORT_SYMBOL(sync_lock_cancel_store);
LUSTRE_RW_ATTR(sync_lock_cancel);
/**
[ $PARALLEL == "yes" ] && skip "skip parallel run"
# relax strong synchronous semantics for slow backends like ZFS
- local soc="obdfilter.*.sync_on_lock_cancel"
- local soc_old=$(do_facet ost1 lctl get_param -n $soc | head -n1)
- local hosts=
- if [ "$soc_old" != "never" ] &&
- [ "$ost1_FSTYPE" != "ldiskfs" ]; then
- hosts=$(for host in $(seq -f "ost%g" 1 $OSTCOUNT); do
- facet_active_host $host; done | sort -u)
- do_nodes $hosts lctl set_param $soc=never
- fi
+ if [ "$ost1_FSTYPE" != "ldiskfs" ]; then
+ local soc="obdfilter.*.sync_lock_cancel"
+ local save=$(do_facet ost1 $LCTL get_param -n $soc | head -n1)
- dd if=/dev/zero of=$DIR/$tfile bs=1M count=1 seek=1M
- sync; sleep 1; sync
- local BEFORE=`date +%s`
- cancel_lru_locks osc
- local AFTER=`date +%s`
- local DIFF=$((AFTER-BEFORE))
- if [ $DIFF -gt 1 ] ; then
- error "elapsed for 1M@1T = $DIFF"
- fi
+ # "sync_on_lock_cancel" was broken by v2_11_55_0-26-g7059644e9a
+ if [ -z "$save" ]; then
+ soc="obdfilter.*.sync_on_lock_cancel"
+ save=$(do_facet ost1 $LCTL get_param -n $soc | head -n1)
+ fi
- [ -n "$hosts" ] && do_nodes $hosts lctl set_param $soc=$soc_old
+ if [ "$save" != "never" ]; then
+ local hosts=$(comma_list $(osts_nodes))
- rm -f $DIR/$tfile
+ do_nodes $hosts $LCTL set_param $soc=never
+ stack_trap "do_nodes $hosts $LCTL set_param $soc=$save"
+ fi
+ fi
+
+ dd if=/dev/zero of=$DIR/$tfile bs=1M count=1 seek=1M
+ sync; sleep 1; sync
+ local before=$(date +%s)
+ cancel_lru_locks osc
+ local after=$(date +%s)
+ local diff=$((after - before))
+ [ $diff -le 1 ] || error "elapsed for 1M@1T = $diff"
+
+ rm -f $DIR/$tfile
}
-run_test 80 "Page eviction is equally fast at high offsets too ===="
+run_test 80 "Page eviction is equally fast at high offsets too"
test_81a() { # LU-456
[ $PARALLEL == "yes" ] && skip "skip parallel run"