* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011 Whamcloud, Inc.
+ * Copyright (c) 2012, Intel Corporation.
*
*/
/*
struct client_obd *cli = osc_cli(ext->oe_obj);
struct osc_async_page *oap;
struct osc_async_page *tmp;
- struct osc_async_page *last = NULL;
int nr_pages = ext->oe_nr_pages;
int lost_grant = 0;
int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
+ __u64 last_off = 0;
+ int last_count = -1;
ENTRY;
OSC_EXTENT_DUMP(D_CACHE, ext, "extent finished.\n");
oap_pending_item) {
cfs_list_del_init(&oap->oap_rpc_item);
cfs_list_del_init(&oap->oap_pending_item);
- if (last == NULL || last->oap_obj_off < oap->oap_obj_off)
- last = oap;
+ if (last_off <= oap->oap_obj_off) {
+ last_off = oap->oap_obj_off;
+ last_count = oap->oap_count;
+ }
--ext->oe_nr_pages;
osc_ap_completion(env, cli, oap, sent, rc);
if (!sent) {
lost_grant = ext->oe_grants;
} else if (blocksize < CFS_PAGE_SIZE &&
- last->oap_count != CFS_PAGE_SIZE) {
+ last_count != CFS_PAGE_SIZE) {
/* For short writes we shouldn't count parts of pages that
* span a whole chunk on the OST side, or our accounting goes
* wrong. Should match the code in filter_grant_check. */
rc = osc_make_ready(env, oap, OBD_BRW_WRITE);
switch (rc) {
case 0:
- cfs_spin_lock(&oap->oap_lock);
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_READY;
- cfs_spin_unlock(&oap->oap_lock);
+ spin_unlock(&oap->oap_lock);
break;
case -EALREADY:
LASSERT((oap->oap_async_flags & ASYNC_READY) != 0);
/* Clear opg->ops_transfer_pinned before VM lock is released. */
opg->ops_transfer_pinned = 0;
- cfs_spin_lock(&obj->oo_seatbelt);
+ spin_lock(&obj->oo_seatbelt);
LASSERT(opg->ops_submitter != NULL);
LASSERT(!cfs_list_empty(&opg->ops_inflight));
cfs_list_del_init(&opg->ops_inflight);
opg->ops_submitter = NULL;
- cfs_spin_unlock(&obj->oo_seatbelt);
+ spin_unlock(&obj->oo_seatbelt);
opg->ops_submit_time = 0;
srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
} else {
cli->cl_avail_grant += unused;
}
- if (unused > 0)
- osc_wake_cache_waiters(cli);
}
void osc_unreserve_grant(struct client_obd *cli,
{
client_obd_list_lock(&cli->cl_loi_list_lock);
__osc_unreserve_grant(cli, reserved, unused);
+ if (unused > 0)
+ osc_wake_cache_waiters(cli);
client_obd_list_unlock(&cli->cl_loi_list_lock);
}
cli->cl_avail_grant, cli->cl_dirty);
}
-/* The companion to osc_enter_cache(), called when @oap is no longer part of
- * the dirty accounting. Writeback completes or truncate happens before
- * writing starts. Must be called with the loi lock held. */
+/**
+ * The companion to osc_enter_cache(), called when @oap is no longer part of
+ * the dirty accounting due to an error.
+ */
static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
{
+ client_obd_list_lock(&cli->cl_loi_list_lock);
osc_release_write_grant(cli, &oap->oap_brw_page);
+ client_obd_list_unlock(&cli->cl_loi_list_lock);
}
/**
return rc;
}
-/* Caller must hold loi_list_lock - we drop/regain it if we need to wait for
- * grant or cache space. */
+/**
+ * The main entry point to reserve dirty page accounting. Usually the grant
+ * reserved in this function will be freed in bulk in osc_free_grant() unless
+ * it fails to add the page to the OSC cache; in that case, it will be freed
+ * in osc_exit_cache().
+ *
+ * The process will be put to sleep if it has already run out of grant.
+ */
static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
struct osc_async_page *oap, int bytes)
{
}
/* As the transfer for this page is being done, clear the flags */
- cfs_spin_lock(&oap->oap_lock);
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags = 0;
- cfs_spin_unlock(&oap->oap_lock);
+ spin_unlock(&oap->oap_lock);
oap->oap_interrupted = 0;
if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
CFS_INIT_LIST_HEAD(&oap->oap_pending_item);
CFS_INIT_LIST_HEAD(&oap->oap_rpc_item);
- cfs_spin_lock_init(&oap->oap_lock);
+ spin_lock_init(&oap->oap_lock);
CDEBUG(D_INFO, "oap %p page %p obj off "LPU64"\n",
oap, page, oap->oap_obj_off);
RETURN(0);
if (rc)
GOTO(out, rc);
- cfs_spin_lock(&oap->oap_lock);
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
- cfs_spin_unlock(&oap->oap_lock);
+ spin_unlock(&oap->oap_lock);
if (cfs_memory_pressure_get())
ext->oe_memalloc = 1;