From 79c00733d0b4f8662b630fa2c6c1d475eb7556d2 Mon Sep 17 00:00:00 2001 From: "John L. Hammond" Date: Tue, 4 Nov 2014 20:10:43 -0500 Subject: [PATCH] LU-2675 obd: remove client_obd_lock_t Remove the definition of client_obd_lock_t and the functions client_obd_list_{init,lock,unlock,done}(). Use spinlock_t for the cl_{loi,lru}_list_lock members of struct client_obd and call spin_{lock,unlock}() directly. Signed-off-by: John L. Hammond Change-Id: I3c4b9cf531b6d62c3481a40f4a1c448cf864beec Reviewed-on: http://review.whamcloud.com/12231 Tested-by: Jenkins Reviewed-by: Dmitry Eremin Tested-by: Maloo Reviewed-by: James Simmons Reviewed-by: Oleg Drokin --- lustre/include/obd.h | 9 ++---- lustre/ldlm/ldlm_lib.c | 4 +-- lustre/obdclass/genops.c | 22 +++++++-------- lustre/obdclass/lprocfs_status.c | 21 +++++++------- lustre/osc/lproc_osc.c | 49 ++++++++++++++++---------------- lustre/osc/osc_cache.c | 48 ++++++++++++++++---------------- lustre/osc/osc_page.c | 24 ++++++++-------- lustre/osc/osc_request.c | 60 ++++++++++++++++++++-------------------- 8 files changed, 118 insertions(+), 119 deletions(-) diff --git a/lustre/include/obd.h b/lustre/include/obd.h index fa96ced..88df7cb 100644 --- a/lustre/include/obd.h +++ b/lustre/include/obd.h @@ -37,6 +37,7 @@ #ifndef __OBD_H #define __OBD_H +#include #include #include @@ -273,14 +274,10 @@ struct client_obd { * blocking everywhere, but we don't want to slow down fast-path of * our main platform.) * - * Exact type of ->cl_loi_list_lock is defined in arch/obd.h together - * with client_obd_list_{un,}lock() and - * client_obd_list_lock_{init,done}() functions. - * * NB by Jinshan: though field names are still _loi_, but actually * osc_object{}s are in the list. */ - client_obd_lock_t cl_loi_list_lock; + spinlock_t cl_loi_list_lock; struct list_head cl_loi_ready_list; struct list_head cl_loi_hp_ready_list; struct list_head cl_loi_write_list; @@ -307,7 +304,7 @@ struct client_obd { atomic_long_t cl_lru_in_list; atomic_long_t cl_unstable_count; struct list_head cl_lru_list; /* lru page list */ - client_obd_lock_t cl_lru_list_lock; /* page list protector */ + spinlock_t cl_lru_list_lock; /* page list protector */ atomic_t cl_lru_shrinkers; /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */ diff --git a/lustre/ldlm/ldlm_lib.c b/lustre/ldlm/ldlm_lib.c index 089f4cb..4731969 100644 --- a/lustre/ldlm/ldlm_lib.c +++ b/lustre/ldlm/ldlm_lib.c @@ -353,7 +353,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list); INIT_LIST_HEAD(&cli->cl_loi_write_list); INIT_LIST_HEAD(&cli->cl_loi_read_list); - client_obd_list_lock_init(&cli->cl_loi_list_lock); + spin_lock_init(&cli->cl_loi_list_lock); atomic_set(&cli->cl_pending_w_pages, 0); atomic_set(&cli->cl_pending_r_pages, 0); cli->cl_r_in_flight = 0; @@ -372,7 +372,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg) atomic_long_set(&cli->cl_lru_busy, 0); atomic_long_set(&cli->cl_lru_in_list, 0); INIT_LIST_HEAD(&cli->cl_lru_list); - client_obd_list_lock_init(&cli->cl_lru_list_lock); + spin_lock_init(&cli->cl_lru_list_lock); atomic_long_set(&cli->cl_unstable_count, 0); init_waitqueue_head(&cli->cl_destroy_waitq); diff --git a/lustre/obdclass/genops.c b/lustre/obdclass/genops.c index 4398a92..37c11ad 100644 --- a/lustre/obdclass/genops.c +++ b/lustre/obdclass/genops.c @@ -1893,9 +1893,9 @@ static bool obd_request_slot_avail(struct client_obd *cli, { bool avail; - client_obd_list_lock(&cli->cl_loi_list_lock); + 
spin_lock(&cli->cl_loi_list_lock); avail = !!list_empty(&orsw->orsw_entry); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return avail; }; @@ -1913,17 +1913,17 @@ int obd_get_request_slot(struct client_obd *cli) struct l_wait_info lwi; int rc; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); if (cli->cl_r_in_flight < cli->cl_max_rpcs_in_flight) { cli->cl_r_in_flight++; - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return 0; } init_waitqueue_head(&orsw.orsw_waitq); list_add_tail(&orsw.orsw_entry, &cli->cl_loi_read_list); orsw.orsw_signaled = false; - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL); rc = l_wait_event(orsw.orsw_waitq, @@ -1933,7 +1933,7 @@ int obd_get_request_slot(struct client_obd *cli) /* Here, we must take the lock to avoid the on-stack 'orsw' to be * freed but other (such as obd_put_request_slot) is using it. */ - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); if (rc != 0) { if (!orsw.orsw_signaled) { if (list_empty(&orsw.orsw_entry)) @@ -1948,7 +1948,7 @@ int obd_get_request_slot(struct client_obd *cli) rc = -EINTR; } - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return rc; } @@ -1958,7 +1958,7 @@ void obd_put_request_slot(struct client_obd *cli) { struct obd_request_slot_waiter *orsw; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); cli->cl_r_in_flight--; /* If there is free slot, wakeup the first waiter. */ @@ -1970,7 +1970,7 @@ void obd_put_request_slot(struct client_obd *cli) cli->cl_r_in_flight++; wake_up(&orsw->orsw_waitq); } - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); } EXPORT_SYMBOL(obd_put_request_slot); @@ -1990,7 +1990,7 @@ int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max) if (max > OBD_MAX_RIF_MAX || max < 1) return -ERANGE; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); old = cli->cl_max_rpcs_in_flight; cli->cl_max_rpcs_in_flight = max; diff = max - old; @@ -2006,7 +2006,7 @@ int obd_set_max_rpcs_in_flight(struct client_obd *cli, __u32 max) cli->cl_r_in_flight++; wake_up(&orsw->orsw_waitq); } - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return 0; } diff --git a/lustre/obdclass/lprocfs_status.c b/lustre/obdclass/lprocfs_status.c index 4717196..b469fad 100644 --- a/lustre/obdclass/lprocfs_status.c +++ b/lustre/obdclass/lprocfs_status.c @@ -2049,16 +2049,17 @@ void lprocfs_oh_clear(struct obd_histogram *oh) EXPORT_SYMBOL(lprocfs_oh_clear); int lprocfs_obd_rd_max_pages_per_rpc(char *page, char **start, off_t off, - int count, int *eof, void *data) + int count, int *eof, void *data) { - struct obd_device *dev = data; - struct client_obd *cli = &dev->u.cli; - int rc; + struct obd_device *dev = data; + struct client_obd *cli = &dev->u.cli; + int rc; - client_obd_list_lock(&cli->cl_loi_list_lock); - rc = snprintf(page, count, "%d\n", cli->cl_max_pages_per_rpc); - client_obd_list_unlock(&cli->cl_loi_list_lock); - return rc; + spin_lock(&cli->cl_loi_list_lock); + rc = snprintf(page, count, "%d\n", cli->cl_max_pages_per_rpc); + spin_unlock(&cli->cl_loi_list_lock); + + return rc; } EXPORT_SYMBOL(lprocfs_obd_rd_max_pages_per_rpc); @@ -2068,9 +2069,9 @@ int lprocfs_obd_max_pages_per_rpc_seq_show(struct seq_file *m, void 
*data) struct client_obd *cli = &dev->u.cli; int rc; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); rc = seq_printf(m, "%d\n", cli->cl_max_pages_per_rpc); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return rc; } EXPORT_SYMBOL(lprocfs_obd_max_pages_per_rpc_seq_show); diff --git a/lustre/osc/lproc_osc.c b/lustre/osc/lproc_osc.c index 4fe5d48..7e0c550 100644 --- a/lustre/osc/lproc_osc.c +++ b/lustre/osc/lproc_osc.c @@ -84,9 +84,9 @@ static int osc_max_rpcs_in_flight_seq_show(struct seq_file *m, void *v) struct client_obd *cli = &dev->u.cli; int rc; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); rc = seq_printf(m, "%u\n", cli->cl_max_rpcs_in_flight); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return rc; } @@ -110,10 +110,10 @@ static ssize_t osc_max_rpcs_in_flight_seq_write(struct file *file, if (pool && val > cli->cl_max_rpcs_in_flight) pool->prp_populate(pool, val-cli->cl_max_rpcs_in_flight); - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); cli->cl_max_rpcs_in_flight = val; client_adjust_max_dirty(cli); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); LPROCFS_CLIMP_EXIT(dev); return count; @@ -127,9 +127,9 @@ static int osc_max_dirty_mb_seq_show(struct seq_file *m, void *v) long val; int mult; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); val = cli->cl_dirty_max_pages; - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); mult = 1 << (20 - PAGE_CACHE_SHIFT); return lprocfs_seq_read_frac_helper(m, val, mult); @@ -153,10 +153,10 @@ static ssize_t osc_max_dirty_mb_seq_write(struct file *file, pages_number > totalram_pages / 4) /* 1/4 of RAM */ return -ERANGE; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); cli->cl_dirty_max_pages = pages_number; osc_wake_cache_waiters(cli); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return count; } @@ -236,9 +236,9 @@ static int osc_cur_dirty_bytes_seq_show(struct seq_file *m, void *v) struct client_obd *cli = &dev->u.cli; int rc; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); rc = seq_printf(m, "%lu\n", cli->cl_dirty_pages << PAGE_CACHE_SHIFT); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return rc; } LPROC_SEQ_FOPS_RO(osc_cur_dirty_bytes); @@ -249,9 +249,9 @@ static int osc_cur_grant_bytes_seq_show(struct seq_file *m, void *v) struct client_obd *cli = &dev->u.cli; int rc; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); rc = seq_printf(m, "%lu\n", cli->cl_avail_grant); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return rc; } @@ -272,12 +272,13 @@ static ssize_t osc_cur_grant_bytes_seq_write(struct file *file, return rc; /* this is only for shrinking grant */ - client_obd_list_lock(&cli->cl_loi_list_lock); - if (val >= cli->cl_avail_grant) { - client_obd_list_unlock(&cli->cl_loi_list_lock); - return 0; - } - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); + if (val >= cli->cl_avail_grant) { + spin_unlock(&cli->cl_loi_list_lock); + return 0; + } + + spin_unlock(&cli->cl_loi_list_lock); LPROCFS_CLIMP_CHECK(obd); if (cli->cl_import->imp_state == LUSTRE_IMP_FULL) @@ -295,9 
+296,9 @@ static int osc_cur_lost_grant_bytes_seq_show(struct seq_file *m, void *v) struct client_obd *cli = &dev->u.cli; int rc; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); rc = seq_printf(m, "%lu\n", cli->cl_lost_grant); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return rc; } LPROC_SEQ_FOPS_RO(osc_cur_lost_grant_bytes); @@ -526,10 +527,10 @@ static ssize_t osc_obd_max_pages_per_rpc_seq_write(struct file *file, LPROCFS_CLIMP_EXIT(dev); return -ERANGE; } - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); cli->cl_max_pages_per_rpc = val; client_adjust_max_dirty(cli); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); LPROCFS_CLIMP_EXIT(dev); return count; @@ -649,7 +650,7 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v) do_gettimeofday(&now); - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n", now.tv_sec, now.tv_usec); @@ -731,7 +732,7 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v) break; } - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return 0; } diff --git a/lustre/osc/osc_cache.c b/lustre/osc/osc_cache.c index adfdba7..f2bad36 100644 --- a/lustre/osc/osc_cache.c +++ b/lustre/osc/osc_cache.c @@ -1355,7 +1355,7 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap, static void osc_consume_write_grant(struct client_obd *cli, struct brw_page *pga) { - assert_spin_locked(&cli->cl_loi_list_lock.lock); + assert_spin_locked(&cli->cl_loi_list_lock); LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT)); atomic_long_inc(&obd_dirty_pages); cli->cl_dirty_pages++; @@ -1372,7 +1372,7 @@ static void osc_release_write_grant(struct client_obd *cli, { ENTRY; - assert_spin_locked(&cli->cl_loi_list_lock.lock); + assert_spin_locked(&cli->cl_loi_list_lock); if (!(pga->flag & OBD_BRW_FROM_GRANT)) { EXIT; return; @@ -1426,11 +1426,11 @@ static void __osc_unreserve_grant(struct client_obd *cli, void osc_unreserve_grant(struct client_obd *cli, unsigned int reserved, unsigned int unused) { - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); __osc_unreserve_grant(cli, reserved, unused); if (unused > 0) osc_wake_cache_waiters(cli); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); } /** @@ -1451,7 +1451,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, { int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); atomic_long_sub(nr_pages, &obd_dirty_pages); cli->cl_dirty_pages -= nr_pages; cli->cl_lost_grant += lost_grant; @@ -1462,7 +1462,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, cli->cl_avail_grant += grant; } osc_wake_cache_waiters(cli); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n", lost_grant, cli->cl_lost_grant, cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_CACHE_SHIFT); @@ -1474,9 +1474,9 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages, */ static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap) { - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); 
osc_release_write_grant(cli, &oap->oap_brw_page); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); } /** @@ -1514,9 +1514,9 @@ static int osc_enter_cache_try(struct client_obd *cli, static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw) { int rc; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); rc = list_empty(&ocw->ocw_entry); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return rc; } @@ -1540,7 +1540,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes); - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); /* force the caller to try sync io. this can jump the list * of queued writes and create a discontiguous rpc stream */ @@ -1565,7 +1565,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, while (cli->cl_dirty_pages > 0 || cli->cl_w_in_flight > 0) { list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters); ocw.ocw_rc = 0; - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); osc_io_unplug_async(env, cli, NULL); @@ -1574,7 +1574,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi); - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); /* l_wait_event is interrupted by signal, or timed out */ if (rc < 0) { @@ -1610,7 +1610,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli, } EXIT; out: - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); OSC_DUMP_GRANT(D_CACHE, cli, "returned %d.\n", rc); RETURN(rc); } @@ -1771,9 +1771,9 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc) { int is_ready; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); is_ready = __osc_list_maint(cli, osc); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return is_ready; } @@ -1823,10 +1823,10 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli, oap->oap_interrupted = 0; if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) { - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); osc_process_ar(&cli->cl_ar, xid, rc); osc_process_ar(&loi->loi_ar, xid, rc); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); } rc = osc_completion(env, oap, oap->oap_cmd, rc); @@ -2141,7 +2141,7 @@ __must_hold(&cli->cl_loi_list_lock) } cl_object_get(obj); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); lu_object_ref_add_at(&obj->co_lu, &link, "check", current); /* attempt some read/write balancing by alternating between @@ -2186,7 +2186,7 @@ __must_hold(&cli->cl_loi_list_lock) lu_object_ref_del_at(&obj->co_lu, &link, "check", current); cl_object_put(env, obj); - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); } } @@ -2202,9 +2202,9 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli, /* disable osc_lru_shrink() temporarily to avoid * potential stack overrun problem. 
LU-2859 */ atomic_inc(&cli->cl_lru_shrinkers); - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); osc_check_rpcs(env, cli, pol); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); atomic_dec(&cli->cl_lru_shrinkers); } else { CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli); @@ -2340,9 +2340,9 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io, grants = 0; /* it doesn't need any grant to dirty this page */ - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); rc = osc_enter_cache_try(cli, oap, grants, 0); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); if (rc == 0) { /* try failed */ grants = 0; need_release = 1; diff --git a/lustre/osc/osc_page.c b/lustre/osc/osc_page.c index e405d4c..b543093 100644 --- a/lustre/osc/osc_page.c +++ b/lustre/osc/osc_page.c @@ -564,11 +564,11 @@ void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist) } if (npages > 0) { - client_obd_list_lock(&cli->cl_lru_list_lock); + spin_lock(&cli->cl_lru_list_lock); list_splice_tail(&lru, &cli->cl_lru_list); atomic_long_sub(npages, &cli->cl_lru_busy); atomic_long_add(npages, &cli->cl_lru_in_list); - client_obd_list_unlock(&cli->cl_lru_list_lock); + spin_unlock(&cli->cl_lru_list_lock); /* XXX: May set force to be true for better performance */ if (osc_cache_too_much(cli)) @@ -590,14 +590,14 @@ static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg) static void osc_lru_del(struct client_obd *cli, struct osc_page *opg) { if (opg->ops_in_lru) { - client_obd_list_lock(&cli->cl_lru_list_lock); + spin_lock(&cli->cl_lru_list_lock); if (!list_empty(&opg->ops_lru)) { __osc_lru_del(cli, opg); } else { LASSERT(atomic_long_read(&cli->cl_lru_busy) > 0); atomic_long_dec(&cli->cl_lru_busy); } - client_obd_list_unlock(&cli->cl_lru_list_lock); + spin_unlock(&cli->cl_lru_list_lock); atomic_long_inc(cli->cl_lru_left); /* this is a great place to release more LRU pages if @@ -619,9 +619,9 @@ static void osc_lru_use(struct client_obd *cli, struct osc_page *opg) /* If page is being transfered for the first time, * ops_lru should be empty */ if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) { - client_obd_list_lock(&cli->cl_lru_list_lock); + spin_lock(&cli->cl_lru_list_lock); __osc_lru_del(cli, opg); - client_obd_list_unlock(&cli->cl_lru_list_lock); + spin_unlock(&cli->cl_lru_list_lock); atomic_long_inc(&cli->cl_lru_busy); } } @@ -700,7 +700,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli, pvec = (struct cl_page **)osc_env_info(env)->oti_pvec; io = &osc_env_info(env)->oti_io; - client_obd_list_lock(&cli->cl_lru_list_lock); + spin_lock(&cli->cl_lru_list_lock); maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list)); while (!list_empty(&cli->cl_lru_list)) { struct cl_page *page; @@ -722,7 +722,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli, struct cl_object *tmp = page->cp_obj; cl_object_get(tmp); - client_obd_list_unlock(&cli->cl_lru_list_lock); + spin_unlock(&cli->cl_lru_list_lock); if (clobj != NULL) { discard_pagevec(env, io, pvec, index); @@ -738,7 +738,7 @@ long osc_lru_shrink(const struct lu_env *env, struct client_obd *cli, io->ci_ignore_layout = 1; rc = cl_io_init(env, io, CIT_MISC, clobj); - client_obd_list_lock(&cli->cl_lru_list_lock); + spin_lock(&cli->cl_lru_list_lock); if (rc != 0) break; @@ -769,17 +769,17 @@ long osc_lru_shrink(const struct lu_env *env, struct 
client_obd *cli, /* Don't discard and free the page with cl_lru_list held */ pvec[index++] = page; if (unlikely(index == OTI_PVEC_SIZE)) { - client_obd_list_unlock(&cli->cl_lru_list_lock); + spin_unlock(&cli->cl_lru_list_lock); discard_pagevec(env, io, pvec, index); index = 0; - client_obd_list_lock(&cli->cl_lru_list_lock); + spin_lock(&cli->cl_lru_list_lock); } if (++count >= target) break; } - client_obd_list_unlock(&cli->cl_lru_list_lock); + spin_unlock(&cli->cl_lru_list_lock); if (clobj != NULL) { discard_pagevec(env, io, pvec, index); diff --git a/lustre/osc/osc_request.c b/lustre/osc/osc_request.c index cc065cc..b71705e 100644 --- a/lustre/osc/osc_request.c +++ b/lustre/osc/osc_request.c @@ -697,7 +697,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, LASSERT(!(oa->o_valid & bits)); oa->o_valid |= bits; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); oa->o_dirty = cli->cl_dirty_pages << PAGE_CACHE_SHIFT; if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit > cli->cl_dirty_max_pages)) { @@ -732,7 +732,7 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa, oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant; oa->o_dropped = cli->cl_lost_grant; cli->cl_lost_grant = 0; - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n", oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant); @@ -748,9 +748,9 @@ void osc_update_next_shrink(struct client_obd *cli) static void __osc_update_grant(struct client_obd *cli, obd_size grant) { - client_obd_list_lock(&cli->cl_loi_list_lock); - cli->cl_avail_grant += grant; - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); + cli->cl_avail_grant += grant; + spin_unlock(&cli->cl_loi_list_lock); } static void osc_update_grant(struct client_obd *cli, struct ost_body *body) @@ -788,10 +788,10 @@ out: static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa) { - client_obd_list_lock(&cli->cl_loi_list_lock); - oa->o_grant = cli->cl_avail_grant / 4; - cli->cl_avail_grant -= oa->o_grant; - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); + oa->o_grant = cli->cl_avail_grant / 4; + cli->cl_avail_grant -= oa->o_grant; + spin_unlock(&cli->cl_loi_list_lock); if (!(oa->o_valid & OBD_MD_FLFLAGS)) { oa->o_valid |= OBD_MD_FLFLAGS; oa->o_flags = 0; @@ -809,10 +809,10 @@ static int osc_shrink_grant(struct client_obd *cli) __u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) * (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT); - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); if (cli->cl_avail_grant <= target_bytes) target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); return osc_shrink_grant_to_target(cli, target_bytes); } @@ -823,7 +823,7 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes) struct ost_body *body; ENTRY; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); /* Don't shrink if we are already above or below the desired limit * We don't want to shrink below a single RPC, as that will negatively * impact block allocation and long-term performance. 
*/ @@ -831,10 +831,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes) target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; if (target_bytes >= cli->cl_avail_grant) { - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); RETURN(0); } - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); OBD_ALLOC_PTR(body); if (!body) @@ -842,10 +842,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes) osc_announce_cached(cli, &body->oa, 0); - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); body->oa.o_grant = cli->cl_avail_grant - target_bytes; cli->cl_avail_grant = target_bytes; - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) { body->oa.o_valid |= OBD_MD_FLFLAGS; body->oa.o_flags = 0; @@ -933,7 +933,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) * race is tolerable here: if we're evicted, but imp_state already * left EVICTED state, then cl_dirty_pages must be 0 already. */ - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED) cli->cl_avail_grant = ocd->ocd_grant; else @@ -951,7 +951,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd) /* determine the appropriate chunk size used by osc_extent. */ cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld." "chunk bits: %d.\n", cli->cl_import->imp_obd->obd_name, @@ -1736,7 +1736,7 @@ static int brw_interpret(const struct lu_env *env, osc_release_ppga(aa->aa_ppga, aa->aa_page_count); ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred); - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); /* We need to decrement before osc_ap_completion->osc_wake_cache_waiters * is called so we know whether to go to sync BRWs or wait for more * RPCs to complete */ @@ -1745,7 +1745,7 @@ static int brw_interpret(const struct lu_env *env, else cli->cl_r_in_flight--; osc_wake_cache_waiters(cli); - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); osc_io_unplug(env, cli, NULL, PDL_POLICY_SAME); RETURN(rc); @@ -1920,7 +1920,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, if (tmp != NULL) tmp->oap_request = ptlrpc_request_addref(req); - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); starting_offset >>= PAGE_CACHE_SHIFT; if (cmd == OBD_BRW_READ) { cli->cl_r_in_flight++; @@ -1935,7 +1935,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli, lprocfs_oh_tally_log2(&cli->cl_write_offset_hist, starting_offset + 1); } - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_unlock(&cli->cl_loi_list_lock); DEBUG_REQ(D_INODE, req, "%d pages, aa %p. 
now %ur/%uw in flight", page_count, aa, cli->cl_r_in_flight, @@ -2763,13 +2763,13 @@ static int osc_reconnect(const struct lu_env *env, if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) { long lost_grant; - client_obd_list_lock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); data->ocd_grant = (cli->cl_avail_grant + (cli->cl_dirty_pages << PAGE_CACHE_SHIFT)) ?: 2 * cli_brw_size(obd); - lost_grant = cli->cl_lost_grant; - cli->cl_lost_grant = 0; - client_obd_list_unlock(&cli->cl_loi_list_lock); + lost_grant = cli->cl_lost_grant; + cli->cl_lost_grant = 0; + spin_unlock(&cli->cl_loi_list_lock); CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d" " ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags, @@ -2820,10 +2820,10 @@ static int osc_import_event(struct obd_device *obd, switch (event) { case IMP_EVENT_DISCON: { cli = &obd->u.cli; - client_obd_list_lock(&cli->cl_loi_list_lock); - cli->cl_avail_grant = 0; - cli->cl_lost_grant = 0; - client_obd_list_unlock(&cli->cl_loi_list_lock); + spin_lock(&cli->cl_loi_list_lock); + cli->cl_avail_grant = 0; + cli->cl_lost_grant = 0; + spin_unlock(&cli->cl_loi_list_lock); break; } case IMP_EVENT_INACTIVE: { -- 1.8.3.1
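
Editor's note for readers who have not seen the removed abstraction: client_obd_lock_t and its
client_obd_list_{init,lock,unlock,done}() helpers were defined in the arch-specific obd.h, which this
diff does not include. The sketch below is therefore an approximation inferred from the call sites in
the hunks above (the old assert_spin_locked(&cli->cl_loi_list_lock.lock) implies the wrapper carried a
spinlock_t in a member named "lock"; any other fields it may have had are not visible here). The
"after" side simply mirrors the spin_lock()/spin_unlock() calls the patch introduces.

#include <linux/spinlock.h>

/*
 * Illustrative sketch only -- not part of the patch.
 *
 * Before (approximate): an opaque wrapper type plus helper functions.
 * The inner spinlock member is implied by the old
 * assert_spin_locked(&cli->cl_loi_list_lock.lock) calls replaced above;
 * other fields the real type may have carried are omitted.
 */
typedef struct {
	spinlock_t	lock;
} client_obd_lock_t;

static inline void client_obd_list_lock_init(client_obd_lock_t *lock)
{
	spin_lock_init(&lock->lock);
}

static inline void client_obd_list_lock(client_obd_lock_t *lock)
{
	spin_lock(&lock->lock);
}

static inline void client_obd_list_unlock(client_obd_lock_t *lock)
{
	spin_unlock(&lock->lock);
}

static inline void client_obd_list_lock_done(client_obd_lock_t *lock)
{
	/* nothing to tear down in this sketch */
}

/*
 * After: struct client_obd embeds a plain spinlock_t, so call sites use
 * the kernel primitives directly and assert_spin_locked() operates on
 * the lock itself:
 *
 *	spin_lock(&cli->cl_loi_list_lock);
 *	... critical section over the cl_loi_*_list / cl_lru_list state ...
 *	spin_unlock(&cli->cl_loi_list_lock);
 */

Assuming the wrapper was a plain spinlock on the Linux client (as the old .lock dereference suggests),
dropping it removes one level of indirection without changing locking behavior, which is why every hunk
above is a mechanical one-for-one substitution.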