#ifndef __OBD_H
#define __OBD_H
+#include <linux/spinlock.h>
#include <linux/obd.h>
#include <lustre/lustre_idl.h>
* blocking everywhere, but we don't want to slow down fast-path of
* our main platform.)
*
- * Exact type of ->cl_loi_list_lock is defined in arch/obd.h together
- * with client_obd_list_{un,}lock() and
- * client_obd_list_lock_{init,done}() functions.
- *
* NB by Jinshan: although the field names still say _loi_, the lists
* actually contain osc_object{}s.
*/
- client_obd_lock_t cl_loi_list_lock;
+ spinlock_t cl_loi_list_lock;
struct list_head cl_loi_ready_list;
struct list_head cl_loi_hp_ready_list;
struct list_head cl_loi_write_list;
atomic_long_t cl_lru_in_list;
atomic_long_t cl_unstable_count;
struct list_head cl_lru_list; /* lru page list */
- client_obd_lock_t cl_lru_list_lock; /* page list protector */
+ spinlock_t cl_lru_list_lock; /* page list protector */
atomic_t cl_lru_shrinkers;
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
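For context, the client_obd_list_* wrappers removed by this patch lived in the per-arch linux/obd.h and, on Linux, boiled down to a spinlock (some versions also carried extra debugging bookkeeping such as the owning task). The following is a minimal sketch of that wrapper, reconstructed from its call sites rather than quoted from the tree; it shows why each client_obd_list_* call converts one-to-one into a plain spin_* call, and why assertions previously had to reach through the .lock member (assert_spin_locked(&cli->cl_loi_list_lock.lock) becomes assert_spin_locked(&cli->cl_loi_list_lock) in the hunks below).

/* Minimal reconstruction of the removed wrapper (illustrative only;
 * the real wrapper may have carried additional debug fields). */
typedef struct {
	spinlock_t	lock;
} client_obd_lock_t;

static inline void client_obd_list_lock_init(client_obd_lock_t *lock)
{
	spin_lock_init(&lock->lock);
}

static inline void client_obd_list_lock(client_obd_lock_t *lock)
{
	spin_lock(&lock->lock);
}

static inline void client_obd_list_unlock(client_obd_lock_t *lock)
{
	spin_unlock(&lock->lock);
}

static inline void client_obd_list_lock_done(client_obd_lock_t *lock)
{
}

With cl_loi_list_lock and cl_lru_list_lock declared directly as spinlock_t, this indirection and the extra structure member disappear, which is all the remaining hunks do.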
INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
INIT_LIST_HEAD(&cli->cl_loi_write_list);
INIT_LIST_HEAD(&cli->cl_loi_read_list);
- client_obd_list_lock_init(&cli->cl_loi_list_lock);
+ spin_lock_init(&cli->cl_loi_list_lock);
atomic_set(&cli->cl_pending_w_pages, 0);
atomic_set(&cli->cl_pending_r_pages, 0);
cli->cl_r_in_flight = 0;
atomic_long_set(&cli->cl_lru_busy, 0);
atomic_long_set(&cli->cl_lru_in_list, 0);
INIT_LIST_HEAD(&cli->cl_lru_list);
- client_obd_list_lock_init(&cli->cl_lru_list_lock);
+ spin_lock_init(&cli->cl_lru_list_lock);
atomic_long_set(&cli->cl_unstable_count, 0);
init_waitqueue_head(&cli->cl_destroy_waitq);
{
bool avail;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
avail = !!list_empty(&orsw->orsw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return avail;
};
struct l_wait_info lwi;
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_r_in_flight < cli->cl_max_rpcs_in_flight) {
cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return 0;
}
init_waitqueue_head(&orsw.orsw_waitq);
list_add_tail(&orsw.orsw_entry, &cli->cl_loi_read_list);
orsw.orsw_signaled = false;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(orsw.orsw_waitq,
/* Here, we must take the lock to prevent the on-stack 'orsw' from being
* freed while someone else (such as obd_put_request_slot) is still using it. */
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (rc != 0) {
if (!orsw.orsw_signaled) {
if (list_empty(&orsw.orsw_entry))
rc = -EINTR;
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
}
{
struct obd_request_slot_waiter *orsw;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_r_in_flight--;
/* If there is free slot, wakeup the first waiter. */
cli->cl_r_in_flight++;
wake_up(&orsw->orsw_waitq);
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
EXPORT_SYMBOL(obd_put_request_slot);
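Taken together, the request-slot fragments above implement a small counting scheme protected by cl_loi_list_lock: cl_r_in_flight counts slots in use, and when none are free the caller parks an on-stack obd_request_slot_waiter on cl_loi_read_list and sleeps until the put side hands a slot over. A condensed, hedged sketch of the put side (names and fields as used above; error paths elided, not the verbatim function):

/* Illustrative sketch: release a slot and, if one is now free, pass it
 * directly to the oldest waiter so the wakeup cannot be lost between
 * unlock and wake_up(). */
static void put_request_slot_sketch(struct client_obd *cli)
{
	struct obd_request_slot_waiter *orsw;

	spin_lock(&cli->cl_loi_list_lock);
	cli->cl_r_in_flight--;

	if (cli->cl_r_in_flight < cli->cl_max_rpcs_in_flight &&
	    !list_empty(&cli->cl_loi_read_list)) {
		orsw = list_first_entry(&cli->cl_loi_read_list,
					struct obd_request_slot_waiter,
					orsw_entry);
		list_del_init(&orsw->orsw_entry);

		cli->cl_r_in_flight++;	/* slot is transferred, not freed */
		wake_up(&orsw->orsw_waitq);
	}
	spin_unlock(&cli->cl_loi_list_lock);
}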
if (max > OBD_MAX_RIF_MAX || max < 1)
return -ERANGE;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
old = cli->cl_max_rpcs_in_flight;
cli->cl_max_rpcs_in_flight = max;
diff = max - old;
cli->cl_r_in_flight++;
wake_up(&orsw->orsw_waitq);
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return 0;
}
EXPORT_SYMBOL(lprocfs_oh_clear);
int lprocfs_obd_rd_max_pages_per_rpc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+ int count, int *eof, void *data)
{
- struct obd_device *dev = data;
- struct client_obd *cli = &dev->u.cli;
- int rc;
+ struct obd_device *dev = data;
+ struct client_obd *cli = &dev->u.cli;
+ int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
- rc = snprintf(page, count, "%d\n", cli->cl_max_pages_per_rpc);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- return rc;
+ spin_lock(&cli->cl_loi_list_lock);
+ rc = snprintf(page, count, "%d\n", cli->cl_max_pages_per_rpc);
+ spin_unlock(&cli->cl_loi_list_lock);
+
+ return rc;
}
EXPORT_SYMBOL(lprocfs_obd_rd_max_pages_per_rpc);
struct client_obd *cli = &dev->u.cli;
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = seq_printf(m, "%d\n", cli->cl_max_pages_per_rpc);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
}
EXPORT_SYMBOL(lprocfs_obd_max_pages_per_rpc_seq_show);
struct client_obd *cli = &dev->u.cli;
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = seq_printf(m, "%u\n", cli->cl_max_rpcs_in_flight);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
}
if (pool && val > cli->cl_max_rpcs_in_flight)
pool->prp_populate(pool, val-cli->cl_max_rpcs_in_flight);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_rpcs_in_flight = val;
client_adjust_max_dirty(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
LPROCFS_CLIMP_EXIT(dev);
return count;
long val;
int mult;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
val = cli->cl_dirty_max_pages;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
mult = 1 << (20 - PAGE_CACHE_SHIFT);
return lprocfs_seq_read_frac_helper(m, val, mult);
pages_number > totalram_pages / 4) /* 1/4 of RAM */
return -ERANGE;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_dirty_max_pages = pages_number;
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
struct client_obd *cli = &dev->u.cli;
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = seq_printf(m, "%lu\n", cli->cl_dirty_pages << PAGE_CACHE_SHIFT);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
}
LPROC_SEQ_FOPS_RO(osc_cur_dirty_bytes);
struct client_obd *cli = &dev->u.cli;
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = seq_printf(m, "%lu\n", cli->cl_avail_grant);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
}
return rc;
/* this is only for shrinking grant */
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (val >= cli->cl_avail_grant) {
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- return 0;
- }
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
+ if (val >= cli->cl_avail_grant) {
+ spin_unlock(&cli->cl_loi_list_lock);
+ return 0;
+ }
+
+ spin_unlock(&cli->cl_loi_list_lock);
LPROCFS_CLIMP_CHECK(obd);
if (cli->cl_import->imp_state == LUSTRE_IMP_FULL)
struct client_obd *cli = &dev->u.cli;
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = seq_printf(m, "%lu\n", cli->cl_lost_grant);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
}
LPROC_SEQ_FOPS_RO(osc_cur_lost_grant_bytes);
LPROCFS_CLIMP_EXIT(dev);
return -ERANGE;
}
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_pages_per_rpc = val;
client_adjust_max_dirty(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
LPROCFS_CLIMP_EXIT(dev);
return count;
do_gettimeofday(&now);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
seq_printf(seq, "snapshot_time: %lu.%lu (secs.usecs)\n",
now.tv_sec, now.tv_usec);
break;
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return 0;
}
static void osc_consume_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- assert_spin_locked(&cli->cl_loi_list_lock.lock);
+ assert_spin_locked(&cli->cl_loi_list_lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
atomic_long_inc(&obd_dirty_pages);
cli->cl_dirty_pages++;
{
ENTRY;
- assert_spin_locked(&cli->cl_loi_list_lock.lock);
+ assert_spin_locked(&cli->cl_loi_list_lock);
if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
EXIT;
return;
void osc_unreserve_grant(struct client_obd *cli,
unsigned int reserved, unsigned int unused)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
__osc_unreserve_grant(cli, reserved, unused);
if (unused > 0)
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
/**
{
int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
atomic_long_sub(nr_pages, &obd_dirty_pages);
cli->cl_dirty_pages -= nr_pages;
cli->cl_lost_grant += lost_grant;
cli->cl_avail_grant += grant;
}
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
lost_grant, cli->cl_lost_grant,
cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_CACHE_SHIFT);
*/
static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
osc_release_write_grant(cli, &oap->oap_brw_page);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
/**
static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
{
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = list_empty(&ocw->ocw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
}
OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* force the caller to try sync io. this can jump the list
* of queued writes and create a discontiguous rpc stream */
while (cli->cl_dirty_pages > 0 || cli->cl_w_in_flight > 0) {
list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
ocw.ocw_rc = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
osc_io_unplug_async(env, cli, NULL);
rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* l_wait_event is interrupted by signal, or timed out */
if (rc < 0) {
}
EXIT;
out:
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
OSC_DUMP_GRANT(D_CACHE, cli, "returned %d.\n", rc);
RETURN(rc);
}
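The osc_enter_cache() fragments above follow the usual sleep-with-the-lock-dropped idiom: the waiter is queued on cl_cache_waiters under cl_loi_list_lock, the lock is released across l_wait_event(), and ocw_granted() re-takes it only long enough to test whether osc_wake_cache_waiters() has unlinked the entry (i.e. grant was handed over). The shape of that loop, as a hedged sketch with the grant accounting, timeouts and most error handling elided (the real code uses a timed l_wait_info rather than the plain LWI_INTR shown here):

/* Condensed illustration of the cache-waiter loop; not the verbatim
 * osc_enter_cache(), which also retries osc_enter_cache_try() and
 * handles signals and timeouts in more detail. */
static int enter_cache_sketch(const struct lu_env *env,
			      struct client_obd *cli,
			      struct osc_cache_waiter *ocw)
{
	struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
	int rc = 0;

	spin_lock(&cli->cl_loi_list_lock);
	while (cli->cl_dirty_pages > 0 || cli->cl_w_in_flight > 0) {
		list_add_tail(&ocw->ocw_entry, &cli->cl_cache_waiters);
		ocw->ocw_rc = 0;
		spin_unlock(&cli->cl_loi_list_lock);

		/* kick writeback so grant can be freed, then sleep until
		 * osc_wake_cache_waiters() unlinks ocw_entry (the condition
		 * checked by ocw_granted() above) */
		osc_io_unplug_async(env, cli, NULL);
		rc = l_wait_event(ocw->ocw_waitq, ocw_granted(cli, ocw), &lwi);

		spin_lock(&cli->cl_loi_list_lock);
		if (rc < 0) {
			/* interrupted or timed out */
			list_del_init(&ocw->ocw_entry);
			break;
		}
		rc = ocw->ocw_rc;
		if (rc != -EDQUOT)
			break;
	}
	spin_unlock(&cli->cl_loi_list_lock);
	return rc;
}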
{
int is_ready;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
is_ready = __osc_list_maint(cli, osc);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return is_ready;
}
oap->oap_interrupted = 0;
if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
osc_process_ar(&cli->cl_ar, xid, rc);
osc_process_ar(&loi->loi_ar, xid, rc);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
rc = osc_completion(env, oap, oap->oap_cmd, rc);
}
cl_object_get(obj);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
lu_object_ref_add_at(&obj->co_lu, &link, "check", current);
/* attempt some read/write balancing by alternating between
lu_object_ref_del_at(&obj->co_lu, &link, "check", current);
cl_object_put(env, obj);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
}
}
/* disable osc_lru_shrink() temporarily to avoid
* potential stack overrun problem. LU-2859 */
atomic_inc(&cli->cl_lru_shrinkers);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
osc_check_rpcs(env, cli, pol);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
atomic_dec(&cli->cl_lru_shrinkers);
} else {
CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
grants = 0;
/* it doesn't need any grant to dirty this page */
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = osc_enter_cache_try(cli, oap, grants, 0);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (rc == 0) { /* try failed */
grants = 0;
need_release = 1;
}
if (npages > 0) {
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
list_splice_tail(&lru, &cli->cl_lru_list);
atomic_long_sub(npages, &cli->cl_lru_busy);
atomic_long_add(npages, &cli->cl_lru_in_list);
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
/* XXX: May set force to be true for better performance */
if (osc_cache_too_much(cli))
static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
{
if (opg->ops_in_lru) {
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
if (!list_empty(&opg->ops_lru)) {
__osc_lru_del(cli, opg);
} else {
LASSERT(atomic_long_read(&cli->cl_lru_busy) > 0);
atomic_long_dec(&cli->cl_lru_busy);
}
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
atomic_long_inc(cli->cl_lru_left);
/* this is a great place to release more LRU pages if
/* If page is being transferred for the first time,
* ops_lru should be empty */
if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) {
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
__osc_lru_del(cli, opg);
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
atomic_long_inc(&cli->cl_lru_busy);
}
}
pvec = (struct cl_page **)osc_env_info(env)->oti_pvec;
io = &osc_env_info(env)->oti_io;
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
maxscan = min(target << 1, atomic_long_read(&cli->cl_lru_in_list));
while (!list_empty(&cli->cl_lru_list)) {
struct cl_page *page;
struct cl_object *tmp = page->cp_obj;
cl_object_get(tmp);
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
if (clobj != NULL) {
discard_pagevec(env, io, pvec, index);
io->ci_ignore_layout = 1;
rc = cl_io_init(env, io, CIT_MISC, clobj);
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
if (rc != 0)
break;
/* Don't discard and free the page with cl_lru_list held */
pvec[index++] = page;
if (unlikely(index == OTI_PVEC_SIZE)) {
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
discard_pagevec(env, io, pvec, index);
index = 0;
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
}
if (++count >= target)
break;
}
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
if (clobj != NULL) {
discard_pagevec(env, io, pvec, index);
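The LRU shrinker above keeps cl_lru_list_lock held only while it walks the list: victim pages are batched into the preallocated pvec, and the lock is dropped around cl_io_init() and whenever the batch fills, so discard_pagevec() never runs under the spinlock. A minimal sketch of that batching shape (page-state checks, reference counting and the per-object cl_io switching are elided):

/* Illustration of the drop-lock-to-discard batching; OTI_PVEC_SIZE and
 * discard_pagevec() are taken as given from the surrounding code. */
static long lru_shrink_sketch(const struct lu_env *env, struct cl_io *io,
			      struct client_obd *cli,
			      struct cl_page **pvec, long target)
{
	long count = 0;
	int index = 0;

	spin_lock(&cli->cl_lru_list_lock);
	while (!list_empty(&cli->cl_lru_list) && count < target) {
		struct osc_page *opg;

		opg = list_first_entry(&cli->cl_lru_list, struct osc_page,
				       ops_lru);
		/* the real code skips busy pages, takes a cl_page reference
		 * and updates cl_lru_in_list/cl_lru_busy here */
		list_del_init(&opg->ops_lru);
		pvec[index++] = opg->ops_cl.cpl_page;
		count++;

		if (index == OTI_PVEC_SIZE) {
			/* never discard pages with cl_lru_list_lock held */
			spin_unlock(&cli->cl_lru_list_lock);
			discard_pagevec(env, io, pvec, index);
			index = 0;
			spin_lock(&cli->cl_lru_list_lock);
		}
	}
	spin_unlock(&cli->cl_lru_list_lock);

	if (index > 0)
		discard_pagevec(env, io, pvec, index);
	return count;
}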
LASSERT(!(oa->o_valid & bits));
oa->o_valid |= bits;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
oa->o_dirty = cli->cl_dirty_pages << PAGE_CACHE_SHIFT;
if (unlikely(cli->cl_dirty_pages - cli->cl_dirty_transit >
cli->cl_dirty_max_pages)) {
oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
oa->o_dropped = cli->cl_lost_grant;
cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE,"dirty: "LPU64" undirty: %u dropped %u grant: "LPU64"\n",
oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
static void __osc_update_grant(struct client_obd *cli, obd_size grant)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_avail_grant += grant;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
+ cli->cl_avail_grant += grant;
+ spin_unlock(&cli->cl_loi_list_lock);
}
static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
- oa->o_grant = cli->cl_avail_grant / 4;
- cli->cl_avail_grant -= oa->o_grant;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
+ oa->o_grant = cli->cl_avail_grant / 4;
+ cli->cl_avail_grant -= oa->o_grant;
+ spin_unlock(&cli->cl_loi_list_lock);
if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
oa->o_valid |= OBD_MD_FLFLAGS;
oa->o_flags = 0;
__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
(cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_avail_grant <= target_bytes)
target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return osc_shrink_grant_to_target(cli, target_bytes);
}
struct ost_body *body;
ENTRY;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* Don't shrink if we are already above or below the desired limit
* We don't want to shrink below a single RPC, as that will negatively
* impact block allocation and long-term performance. */
target_bytes = cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT;
if (target_bytes >= cli->cl_avail_grant) {
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
RETURN(0);
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
OBD_ALLOC_PTR(body);
if (!body)
osc_announce_cached(cli, &body->oa, 0);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
body->oa.o_grant = cli->cl_avail_grant - target_bytes;
cli->cl_avail_grant = target_bytes;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
body->oa.o_valid |= OBD_MD_FLFLAGS;
body->oa.o_flags = 0;
* race is tolerable here: if we're evicted, but imp_state already
* left EVICTED state, then cl_dirty_pages must be 0 already.
*/
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
cli->cl_avail_grant = ocd->ocd_grant;
else
/* determine the appropriate chunk size used by osc_extent. */
cli->cl_chunkbits = max_t(int, PAGE_CACHE_SHIFT, ocd->ocd_blocksize);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld."
"chunk bits: %d.\n", cli->cl_import->imp_obd->obd_name,
osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
* is called so we know whether to go to sync BRWs or wait for more
* RPCs to complete */
else
cli->cl_r_in_flight--;
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
osc_io_unplug(env, cli, NULL, PDL_POLICY_SAME);
RETURN(rc);
if (tmp != NULL)
tmp->oap_request = ptlrpc_request_addref(req);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
starting_offset >>= PAGE_CACHE_SHIFT;
if (cmd == OBD_BRW_READ) {
cli->cl_r_in_flight++;
lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
starting_offset + 1);
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%uw in flight",
page_count, aa, cli->cl_r_in_flight,
if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
long lost_grant;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
data->ocd_grant = (cli->cl_avail_grant +
(cli->cl_dirty_pages << PAGE_CACHE_SHIFT)) ?:
2 * cli_brw_size(obd);
- lost_grant = cli->cl_lost_grant;
- cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ lost_grant = cli->cl_lost_grant;
+ cli->cl_lost_grant = 0;
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_RPCTRACE, "ocd_connect_flags: "LPX64" ocd_version: %d"
" ocd_grant: %d, lost: %ld.\n", data->ocd_connect_flags,
switch (event) {
case IMP_EVENT_DISCON: {
cli = &obd->u.cli;
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_avail_grant = 0;
- cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
+ cli->cl_avail_grant = 0;
+ cli->cl_lost_grant = 0;
+ spin_unlock(&cli->cl_loi_list_lock);
break;
}
case IMP_EVENT_INACTIVE: {