Reading a 'long' (or unsigned long) is always an atomic operation.
There is never a need to take a spinlock to just read a single 'long'.
There are several procfs/debugfs/sysfs handlers which needlessly take
a spinlock for this purpose.
This patch:
- removes the taking of the spinlock
- changes the printf to scnprintf() as appropriate
- directly returns the value returned by scnprintf rather than
storing it in a variable
- accesses the 'long' as an arg to the scnprintf(), rather than
introducing a variable to hold it.
Test-Parameters: trivial
Signed-off-by: Mr NeilBrown <neilb@suse.de>
Change-Id: If4a6454b46844864e1177536a9c7b91e4c97de86
Reviewed-on: https://review.whamcloud.com/39743
Tested-by: jenkins <devops@whamcloud.com>
Tested-by: Maloo <maloo@whamcloud.com>
Reviewed-by: Andreas Dilger <adilger@whamcloud.com>
Reviewed-by: Arshad Hussain <arshad.super@gmail.com>
Reviewed-by: James Simmons <jsimmons@infradead.org>
{
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
{
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- unsigned long ra_max_mb;
- spin_lock(&sbi->ll_lock);
- ra_max_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages);
- spin_unlock(&sbi->ll_lock);
-
- return snprintf(buf, PAGE_SIZE, "%lu\n", ra_max_mb);
+ return scnprintf(buf, PAGE_SIZE, "%lu\n",
+ PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages));
}
static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
}
static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
{
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
{
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- unsigned long ra_max_file_mb;
- spin_lock(&sbi->ll_lock);
- ra_max_file_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file);
- spin_unlock(&sbi->ll_lock);
-
- return snprintf(buf, PAGE_SIZE, "%lu\n", ra_max_file_mb);
+ return scnprintf(buf, PAGE_SIZE, "%lu\n",
+ PAGES_TO_MiB(sbi->ll_ra_info.ra_max_pages_per_file));
}
static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj,
}
static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj,
{
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
{
struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
ll_kset.kobj);
- unsigned long ra_max_whole_mb;
-
- spin_lock(&sbi->ll_lock);
- ra_max_whole_mb = PAGES_TO_MiB(sbi->ll_ra_info.ra_max_read_ahead_whole_pages);
- spin_unlock(&sbi->ll_lock);
- return snprintf(buf, PAGE_SIZE, "%lu\n", ra_max_whole_mb);
+ return scnprintf(buf, PAGE_SIZE, "%lu\n",
+ PAGES_TO_MiB(sbi->ll_ra_info.ra_max_read_ahead_whole_pages));
}
static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
}
static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
{
struct obd_device *obd = m->private;
struct client_obd *cli = &obd->u.cli;
{
struct obd_device *obd = m->private;
struct client_obd *cli = &obd->u.cli;
- spin_lock(&cli->cl_loi_list_lock);
- val = PAGES_TO_MiB(cli->cl_dirty_max_pages);
- spin_unlock(&cli->cl_loi_list_lock);
-
- seq_printf(m, "%lu\n", val);
+ seq_printf(m, "%lu\n", PAGES_TO_MiB(cli->cl_dirty_max_pages));
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kset.kobj);
struct client_obd *cli = &obd->u.cli;
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kset.kobj);
struct client_obd *cli = &obd->u.cli;
- spin_lock(&cli->cl_loi_list_lock);
- len = sprintf(buf, "%u\n", cli->cl_max_rpcs_in_flight);
- spin_unlock(&cli->cl_loi_list_lock);
- return len;
+ return scnprintf(buf, PAGE_SIZE, "%u\n", cli->cl_max_rpcs_in_flight);
}
static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
}
static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kset.kobj);
struct client_obd *cli = &obd->u.cli;
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kset.kobj);
struct client_obd *cli = &obd->u.cli;
- spin_lock(&cli->cl_loi_list_lock);
- val = PAGES_TO_MiB(cli->cl_dirty_max_pages);
- spin_unlock(&cli->cl_loi_list_lock);
-
- return sprintf(buf, "%lu\n", val);
+ return scnprintf(buf, PAGE_SIZE, "%lu\n",
+ PAGES_TO_MiB(cli->cl_dirty_max_pages));
}
static ssize_t max_dirty_mb_store(struct kobject *kobj,
}
static ssize_t max_dirty_mb_store(struct kobject *kobj,
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kset.kobj);
struct client_obd *cli = &obd->u.cli;
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kset.kobj);
struct client_obd *cli = &obd->u.cli;
- ssize_t len;
-
- spin_lock(&cli->cl_loi_list_lock);
- len = sprintf(buf, "%lu\n", cli->cl_dirty_pages << PAGE_SHIFT);
- spin_unlock(&cli->cl_loi_list_lock);
+ return scnprintf(buf, PAGE_SIZE, "%lu\n",
+ cli->cl_dirty_pages << PAGE_SHIFT);
}
LUSTRE_RO_ATTR(cur_dirty_bytes);
}
LUSTRE_RO_ATTR(cur_dirty_bytes);
struct obd_device *obd = m->private;
struct client_obd *cli = &obd->u.cli;
struct obd_device *obd = m->private;
struct client_obd *cli = &obd->u.cli;
- spin_lock(&cli->cl_loi_list_lock);
seq_printf(m, "%lu\n", cli->cl_avail_grant);
seq_printf(m, "%lu\n", cli->cl_avail_grant);
- spin_unlock(&cli->cl_loi_list_lock);
return rc;
/* this is only for shrinking grant */
return rc;
/* this is only for shrinking grant */
- spin_lock(&cli->cl_loi_list_lock);
- if (val >= cli->cl_avail_grant) {
- spin_unlock(&cli->cl_loi_list_lock);
+ if (val >= cli->cl_avail_grant)
- }
-
- spin_unlock(&cli->cl_loi_list_lock);
with_imp_locked(obd, imp, rc)
if (imp->imp_state == LUSTRE_IMP_FULL)
with_imp_locked(obd, imp, rc)
if (imp->imp_state == LUSTRE_IMP_FULL)
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kset.kobj);
struct client_obd *cli = &obd->u.cli;
struct obd_device *obd = container_of(kobj, struct obd_device,
obd_kset.kobj);
struct client_obd *cli = &obd->u.cli;
- spin_lock(&cli->cl_loi_list_lock);
- len = sprintf(buf, "%lu\n", cli->cl_lost_grant);
- spin_unlock(&cli->cl_loi_list_lock);
- return len;
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", cli->cl_lost_grant);
}
LUSTRE_RO_ATTR(cur_lost_grant_bytes);
}
LUSTRE_RO_ATTR(cur_lost_grant_bytes);