*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2014, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*
*/
/*
#define DEBUG_SUBSYSTEM S_OSC
-#include "osc_cl_internal.h"
+#include <lustre_osc.h>
+#include <lustre_dlm.h>
+
#include "osc_internal.h"
static int extent_debug; /* set it to be true for more debug */
-static int osc_io_unplug_async(const struct lu_env *env,
-			       struct client_obd *cli, struct osc_object *osc);
static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
- unsigned int lost_grant);
+ unsigned int lost_grant, unsigned int dirty_grant);
-static void osc_extent_tree_dump0(int level, struct osc_object *obj,
+static void osc_extent_tree_dump0(int mask, struct osc_object *obj,
const char *func, int line);
-#define osc_extent_tree_dump(lvl, obj) \
- osc_extent_tree_dump0(lvl, obj, __func__, __LINE__)
+#define osc_extent_tree_dump(mask, obj) \
+ osc_extent_tree_dump0(mask, obj, __func__, __LINE__)
+
+static void osc_unreserve_grant(struct client_obd *cli, unsigned int reserved,
+ unsigned int unused);
/** \addtogroup osc
* @{
{
char *buf = flags;
*buf++ = ext->oe_rw ? 'r' : 'w';
- if (ext->oe_intree)
+ if (!RB_EMPTY_NODE(&ext->oe_node))
*buf++ = 'i';
if (ext->oe_sync)
*buf++ = 'S';
return flags;
}
-static inline char list_empty_marker(struct list_head *list)
-{
- return list_empty(list) ? '-' : '+';
-}
-
#define EXTSTR "[%lu -> %lu/%lu]"
#define EXTPARA(ext) (ext)->oe_start, (ext)->oe_end, (ext)->oe_max_end
static const char *oes_strings[] = {
"inv", "active", "cache", "locking", "lockdone", "rpc", "trunc", NULL };
-#define OSC_EXTENT_DUMP(lvl, extent, fmt, ...) do { \
+#define OSC_EXTENT_DUMP_WITH_LOC(file, func, line, mask, extent, fmt, ...) do {\
+ static struct cfs_debug_limit_state cdls; \
struct osc_extent *__ext = (extent); \
char __buf[16]; \
\
- CDEBUG(lvl, \
+ __CDEBUG_WITH_LOC(file, func, line, mask, &cdls, \
"extent %p@{" EXTSTR ", " \
"[%d|%d|%c|%s|%s|%p], [%d|%d|%c|%c|%p|%u|%p]} " fmt, \
/* ----- extent part 0 ----- */ \
__ext, EXTPARA(__ext), \
/* ----- part 1 ----- */ \
- atomic_read(&__ext->oe_refc), \
- atomic_read(&__ext->oe_users), \
+ kref_read(&__ext->oe_refc), \
+ atomic_read(&__ext->oe_users), \
list_empty_marker(&__ext->oe_link), \
oes_strings[__ext->oe_state], ext_flags(__ext, __buf), \
__ext->oe_obj, \
__ext->oe_dlmlock, __ext->oe_mppr, __ext->oe_owner, \
/* ----- part 4 ----- */ \
## __VA_ARGS__); \
- if (lvl == D_ERROR && __ext->oe_dlmlock != NULL) \
- LDLM_ERROR(__ext->oe_dlmlock, "extent: %p\n", __ext); \
+ if (mask == D_ERROR && __ext->oe_dlmlock != NULL) \
+ LDLM_ERROR(__ext->oe_dlmlock, "extent: %p", __ext); \
else \
- LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p\n", __ext); \
+ LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p", __ext); \
} while (0)
+#define OSC_EXTENT_DUMP(mask, ext, fmt, ...) \
+ OSC_EXTENT_DUMP_WITH_LOC(__FILE__, __func__, __LINE__, \
+ mask, ext, fmt, ## __VA_ARGS__)
+
#undef EASSERTF
#define EASSERTF(expr, ext, fmt, args...) do { \
if (!(expr)) { \
static inline struct osc_extent *rb_extent(struct rb_node *n)
{
- if (n == NULL)
- return NULL;
-
- return container_of(n, struct osc_extent, oe_node);
+ return rb_entry_safe(n, struct osc_extent, oe_node);
}
static inline struct osc_extent *next_extent(struct osc_extent *ext)
if (ext == NULL)
return NULL;
- LASSERT(ext->oe_intree);
+ LASSERT(!RB_EMPTY_NODE(&ext->oe_node));
return rb_extent(rb_next(&ext->oe_node));
}
if (ext == NULL)
return NULL;
- LASSERT(ext->oe_intree);
+ LASSERT(!RB_EMPTY_NODE(&ext->oe_node));
return rb_extent(rb_prev(&ext->oe_node));
}
size_t page_count;
int rc = 0;
- if (!osc_object_is_locked(obj))
- GOTO(out, rc = 9);
+ assert_osc_object_is_locked(obj);
if (ext->oe_state >= OES_STATE_MAX)
GOTO(out, rc = 10);
- if (atomic_read(&ext->oe_refc) <= 0)
+ if (kref_read(&ext->oe_refc) <= 0)
GOTO(out, rc = 20);
- if (atomic_read(&ext->oe_refc) < atomic_read(&ext->oe_users))
+ if (kref_read(&ext->oe_refc) < atomic_read(&ext->oe_users))
GOTO(out, rc = 30);
switch (ext->oe_state) {
GOTO(out, rc = 60);
if (ext->oe_fsync_wait && !ext->oe_urgent && !ext->oe_hp)
GOTO(out, rc = 65);
+ /* fallthrough */
default:
if (atomic_read(&ext->oe_users) > 0)
GOTO(out, rc = 70);
if (ext->oe_sync && ext->oe_grants > 0)
GOTO(out, rc = 90);
- if (ext->oe_dlmlock != NULL) {
+ if (ext->oe_dlmlock != NULL &&
+ ext->oe_dlmlock->l_resource->lr_type == LDLM_EXTENT &&
+ !ldlm_is_failed(ext->oe_dlmlock)) {
struct ldlm_extent *extent;
extent = &ext->oe_dlmlock->l_policy_data.l_extent;
out:
if (rc != 0)
- OSC_EXTENT_DUMP(D_ERROR, ext,
- "%s:%d sanity check %p failed with rc = %d\n",
- func, line, ext, rc);
+ OSC_EXTENT_DUMP_WITH_LOC(__FILE__, func, line, D_ERROR, ext,
+ "sanity check %p failed: rc = %d\n",
+ ext, rc);
return rc;
}
__res; \
})
+static inline bool
+overlapped(const struct osc_extent *ex1, const struct osc_extent *ex2)
+{
+ return !(ex1->oe_end < ex2->oe_start || ex2->oe_end < ex1->oe_start);
+}
/**
* sanity check - to make sure there is no overlapped extent in the tree.
{
struct osc_extent *tmp;
- LASSERT(osc_object_is_locked(obj));
+ assert_osc_object_is_locked(obj);
if (!extent_debug)
return 0;
for (tmp = first_extent(obj); tmp != NULL; tmp = next_extent(tmp)) {
if (tmp == ext)
continue;
- if (tmp->oe_end >= ext->oe_start &&
- tmp->oe_start <= ext->oe_end)
+ if (overlapped(tmp, ext))
return 1;
}
return 0;
static void osc_extent_state_set(struct osc_extent *ext, int state)
{
- LASSERT(osc_object_is_locked(ext->oe_obj));
+ assert_osc_object_is_locked(ext->oe_obj);
LASSERT(state >= OES_INV && state < OES_STATE_MAX);
/* Never try to sanity check a state changing extent :-) */
/* LASSERT(sanity_check_nolock(ext) == 0); */
/* TODO: validate the state machine */
- ext->oe_state = state;
- wake_up_all(&ext->oe_waitq);
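+	/* the release pairs with the smp_load_acquire() of oe_state in
+	 * osc_extent_wait(), so a waiter that observes the new state also
+	 * observes all writes made to the extent before the transition */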
+ smp_store_release(&ext->oe_state, state);
+ wake_up(&ext->oe_waitq);
}
static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
{
struct osc_extent *ext;
- OBD_SLAB_ALLOC_PTR_GFP(ext, osc_extent_kmem, GFP_IOFS);
+ OBD_SLAB_ALLOC_PTR_GFP(ext, osc_extent_kmem, GFP_NOFS);
if (ext == NULL)
return NULL;
RB_CLEAR_NODE(&ext->oe_node);
ext->oe_obj = obj;
- atomic_set(&ext->oe_refc, 1);
+ cl_object_get(osc2cl(obj));
+ kref_init(&ext->oe_refc);
atomic_set(&ext->oe_users, 0);
INIT_LIST_HEAD(&ext->oe_link);
ext->oe_state = OES_INV;
return ext;
}
-static void osc_extent_free(struct osc_extent *ext)
+static void osc_extent_free(struct kref *kref)
{
+ struct osc_extent *ext = container_of(kref, struct osc_extent,
+ oe_refc);
+
+ LASSERT(list_empty(&ext->oe_link));
+ LASSERT(atomic_read(&ext->oe_users) == 0);
+ LASSERT(ext->oe_state == OES_INV);
+ LASSERT(RB_EMPTY_NODE(&ext->oe_node));
+
+ if (ext->oe_dlmlock) {
+ lu_ref_del(&ext->oe_dlmlock->l_reference,
+ "osc_extent", ext);
+ LDLM_LOCK_PUT(ext->oe_dlmlock);
+ ext->oe_dlmlock = NULL;
+ }
+#if 0
+ /* If/When cl_object_put drops the need for 'env',
+ * this code can be enabled, and matching code in
+ * osc_extent_put removed.
+ */
+ cl_object_put(osc2cl(ext->oe_obj));
+
OBD_SLAB_FREE_PTR(ext, osc_extent_kmem);
+#endif
}
static struct osc_extent *osc_extent_get(struct osc_extent *ext)
{
- LASSERT(atomic_read(&ext->oe_refc) >= 0);
- atomic_inc(&ext->oe_refc);
+ LASSERT(kref_read(&ext->oe_refc) >= 0);
+ kref_get(&ext->oe_refc);
return ext;
}
static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
{
- LASSERT(atomic_read(&ext->oe_refc) > 0);
- if (atomic_dec_and_test(&ext->oe_refc)) {
- LASSERT(list_empty(&ext->oe_link));
- LASSERT(atomic_read(&ext->oe_users) == 0);
- LASSERT(ext->oe_state == OES_INV);
- LASSERT(!ext->oe_intree);
-
- if (ext->oe_dlmlock != NULL) {
- lu_ref_add(&ext->oe_dlmlock->l_reference,
- "osc_extent", ext);
- LDLM_LOCK_PUT(ext->oe_dlmlock);
- ext->oe_dlmlock = NULL;
- }
- osc_extent_free(ext);
+ LASSERT(kref_read(&ext->oe_refc) > 0);
+ if (kref_put(&ext->oe_refc, osc_extent_free)) {
+ /* This should be in osc_extent_free(), but
+ * while we need to pass 'env' it cannot be.
+ */
+ cl_object_put(env, osc2cl(ext->oe_obj));
+
+ OBD_SLAB_FREE_PTR(ext, osc_extent_kmem);
}
}
*/
static void osc_extent_put_trust(struct osc_extent *ext)
{
- LASSERT(atomic_read(&ext->oe_refc) > 1);
- LASSERT(osc_object_is_locked(ext->oe_obj));
- atomic_dec(&ext->oe_refc);
+ LASSERT(kref_read(&ext->oe_refc) > 1);
+ assert_osc_object_is_locked(ext->oe_obj);
+ osc_extent_put(NULL, ext);
}
/**
struct rb_node *n = obj->oo_root.rb_node;
struct osc_extent *tmp, *p = NULL;
- LASSERT(osc_object_is_locked(obj));
+ assert_osc_object_is_locked(obj);
while (n != NULL) {
tmp = rb_extent(n);
if (index < tmp->oe_start) {
struct rb_node *parent = NULL;
struct osc_extent *tmp;
- LASSERT(ext->oe_intree == 0);
+ LASSERT(RB_EMPTY_NODE(&ext->oe_node));
LASSERT(ext->oe_obj == obj);
- LASSERT(osc_object_is_locked(obj));
+ assert_osc_object_is_locked(obj);
while (*n != NULL) {
tmp = rb_extent(*n);
parent = *n;
rb_link_node(&ext->oe_node, parent, n);
rb_insert_color(&ext->oe_node, &obj->oo_root);
osc_extent_get(ext);
- ext->oe_intree = 1;
}
/* caller must have held object lock. */
static void osc_extent_erase(struct osc_extent *ext)
{
struct osc_object *obj = ext->oe_obj;
- LASSERT(osc_object_is_locked(obj));
- if (ext->oe_intree) {
+ assert_osc_object_is_locked(obj);
+ if (!RB_EMPTY_NODE(&ext->oe_node)) {
rb_erase(&ext->oe_node, &obj->oo_root);
- ext->oe_intree = 0;
+ RB_CLEAR_NODE(&ext->oe_node);
/* rbtree held a refcount */
osc_extent_put_trust(ext);
}
{
struct osc_object *obj = ext->oe_obj;
- LASSERT(osc_object_is_locked(obj));
+ assert_osc_object_is_locked(obj);
LASSERT(ext->oe_state == OES_ACTIVE || ext->oe_state == OES_CACHE);
if (ext->oe_state == OES_CACHE) {
osc_extent_state_set(ext, OES_ACTIVE);
static void __osc_extent_remove(struct osc_extent *ext)
{
- LASSERT(osc_object_is_locked(ext->oe_obj));
+ assert_osc_object_is_locked(ext->oe_obj);
LASSERT(list_empty(&ext->oe_pages));
osc_extent_erase(ext);
list_del_init(&ext->oe_link);
/**
* This function is used to merge extents to get better performance. It checks
 * if @cur and @victim are contiguous at chunk level.
*/
static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
struct osc_extent *victim)
{
- struct osc_object *obj = cur->oe_obj;
- pgoff_t chunk_start;
- pgoff_t chunk_end;
- int ppc_bits;
+ struct osc_object *obj = cur->oe_obj;
+ struct client_obd *cli = osc_cli(obj);
+ pgoff_t chunk_start;
+ pgoff_t chunk_end;
+ int ppc_bits;
LASSERT(cur->oe_state == OES_CACHE);
- LASSERT(osc_object_is_locked(obj));
+ assert_osc_object_is_locked(obj);
if (victim == NULL)
return -EINVAL;
if (cur->oe_max_end != victim->oe_max_end)
return -ERANGE;
+	/*
+	 * In the rare case max_pages_per_rpc (mppr) is changed, don't
+	 * merge extents until after the old ones have been sent, or the
+	 * "extents are aligned to RPCs" sanity checks will fail.
+	 */
+ if (cur->oe_mppr != victim->oe_mppr)
+ return -ERANGE;
+
LASSERT(cur->oe_dlmlock == victim->oe_dlmlock);
- ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_CACHE_SHIFT;
+ ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
chunk_start = cur->oe_start >> ppc_bits;
chunk_end = cur->oe_end >> ppc_bits;
if (chunk_start != (victim->oe_end >> ppc_bits) + 1 &&
chunk_end + 1 != victim->oe_start >> ppc_bits)
return -ERANGE;
+ /* overall extent size should not exceed the max supported limit
+ * reported by the server */
+ if (cur->oe_end - cur->oe_start + 1 +
+ victim->oe_end - victim->oe_start + 1 > cli->cl_max_extent_pages)
+ return -ERANGE;
+
OSC_EXTENT_DUMP(D_CACHE, victim, "will be merged by %p.\n", cur);
cur->oe_start = min(cur->oe_start, victim->oe_start);
cur->oe_end = max(cur->oe_end, victim->oe_end);
- cur->oe_grants += victim->oe_grants;
+ /* per-extent tax should be accounted only once for the whole extent */
+ cur->oe_grants += victim->oe_grants - cli->cl_grant_extent_tax;
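+	/* e.g. two extents that each carried one extent tax merge into a
+	 * single extent needing only one; callers reclaim the surplus tax
+	 * (see osc_extent_release() and osc_extent_find()) */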
cur->oe_nr_pages += victim->oe_nr_pages;
/* only the following bits are needed to merge */
cur->oe_urgent |= victim->oe_urgent;
cur->oe_memalloc |= victim->oe_memalloc;
list_splice_init(&victim->oe_pages, &cur->oe_pages);
- list_del_init(&victim->oe_link);
victim->oe_nr_pages = 0;
osc_extent_get(victim);
int osc_extent_release(const struct lu_env *env, struct osc_extent *ext)
{
struct osc_object *obj = ext->oe_obj;
+ struct client_obd *cli = osc_cli(obj);
int rc = 0;
ENTRY;
* osc_cache_truncate_start(). */
osc_extent_state_set(ext, OES_TRUNC);
ext->oe_trunc_pending = 0;
+ osc_object_unlock(obj);
} else {
+ int grant = 0;
+
osc_extent_state_set(ext, OES_CACHE);
osc_update_pending(obj, OBD_BRW_WRITE,
ext->oe_nr_pages);
/* try to merge the previous and next extent. */
- osc_extent_merge(env, ext, prev_extent(ext));
- osc_extent_merge(env, ext, next_extent(ext));
+ if (osc_extent_merge(env, ext, prev_extent(ext)) == 0)
+ grant += cli->cl_grant_extent_tax;
+ if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
+ grant += cli->cl_grant_extent_tax;
- if (ext->oe_urgent)
+ if (ext->oe_hp)
+ list_move_tail(&ext->oe_link,
+ &obj->oo_hp_exts);
+ else if (ext->oe_urgent)
list_move_tail(&ext->oe_link,
&obj->oo_urgent_exts);
+ else if (ext->oe_nr_pages == ext->oe_mppr) {
+ list_move_tail(&ext->oe_link,
+ &obj->oo_full_exts);
+ }
+ osc_object_unlock(obj);
+ if (grant > 0)
+ osc_unreserve_grant(cli, 0, grant);
}
- osc_object_unlock(obj);
- osc_io_unplug_async(env, osc_cli(obj), obj);
+ osc_io_unplug_async(env, cli, obj);
}
osc_extent_put(env, ext);
RETURN(rc);
}
-static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
-{
- return !(ex1->oe_end < ex2->oe_start || ex2->oe_end < ex1->oe_start);
-}
-
/**
* Find or create an extent which includes @index, core function to manage
* extent tree.
descr = &olck->ols_cl.cls_lock->cll_descr;
LASSERT(descr->cld_mode >= CLM_WRITE);
- LASSERT(cli->cl_chunkbits >= PAGE_CACHE_SHIFT);
- ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+ LASSERTF(cli->cl_chunkbits >= PAGE_SHIFT,
+ "chunkbits: %u\n", cli->cl_chunkbits);
+ ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
chunk_mask = ~((1 << ppc_bits) - 1);
chunksize = 1 << cli->cl_chunkbits;
chunk = index >> ppc_bits;
- /* align end to rpc edge, rpc size may not be a power 2 integer. */
+ /* align end to RPC edge. */
max_pages = cli->cl_max_pages_per_rpc;
- LASSERT((max_pages & ~chunk_mask) == 0);
+ if ((max_pages & ~chunk_mask) != 0) {
+ CERROR("max_pages: %#x chunkbits: %u chunk_mask: %#lx\n",
+ max_pages, cli->cl_chunkbits, chunk_mask);
+ RETURN(ERR_PTR(-EINVAL));
+ }
max_end = index - (index % max_pages) + max_pages - 1;
max_end = min_t(pgoff_t, max_end, descr->cld_end);
cur->oe_start = descr->cld_start;
if (cur->oe_end > max_end)
cur->oe_end = max_end;
- cur->oe_grants = 0;
+ cur->oe_grants = chunksize + cli->cl_grant_extent_tax;
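+	/* enough grant for the first chunk this extent covers plus the
+	 * fixed per-extent tax; further chunks are paid for on expansion */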
cur->oe_mppr = max_pages;
if (olck->ols_dlmlock != NULL) {
LASSERT(olck->ols_hold);
}
/* grants has been allocated by caller */
- LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
- "%u/%u/%u.\n", *grants, chunksize, cli->cl_extent_tax);
+ LASSERTF(*grants >= chunksize + cli->cl_grant_extent_tax,
+ "%u/%u/%u.\n", *grants, chunksize, cli->cl_grant_extent_tax);
LASSERTF((max_end - cur->oe_start) < max_pages, EXTSTR"\n",
EXTPARA(cur));
restart:
osc_object_lock(obj);
ext = osc_extent_search(obj, cur->oe_start);
- if (ext == NULL)
+ if (!ext)
ext = first_extent(obj);
- while (ext != NULL) {
+ for (; ext; ext = next_extent(ext)) {
pgoff_t ext_chk_start = ext->oe_start >> ppc_bits;
- pgoff_t ext_chk_end = ext->oe_end >> ppc_bits;
+ pgoff_t ext_chk_end = ext->oe_end >> ppc_bits;
LASSERT(sanity_check_nolock(ext) == 0);
- if (chunk > ext_chk_end + 1)
+ if (chunk > ext_chk_end + 1 || chunk < ext_chk_start)
break;
/* if covering by different locks, no chance to match */
EASSERTF(!overlapped(ext, cur), ext,
EXTSTR"\n", EXTPARA(cur));
- ext = next_extent(ext);
continue;
}
/* discontiguous chunks? */
- if (chunk + 1 < ext_chk_start) {
- ext = next_extent(ext);
+ if (chunk + 1 < ext_chk_start)
continue;
- }
/* ok, from now on, ext and cur have these attrs:
* 1. covered by the same lock
}
/* non-overlapped extent */
- if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait) {
+ if (ext->oe_state != OES_CACHE || ext->oe_fsync_wait)
/* we can't do anything for a non OES_CACHE extent, or
* if there is someone waiting for this extent to be
* flushed, try next one. */
- ext = next_extent(ext);
- continue;
- }
-
- /* check if they belong to the same rpc slot before trying to
- * merge. the extents are not overlapped and contiguous at
- * chunk level to get here. */
- if (ext->oe_max_end != max_end) {
- /* if they don't belong to the same RPC slot or
- * max_pages_per_rpc has ever changed, do not merge. */
- ext = next_extent(ext);
continue;
- }
-
- /* it's required that an extent must be contiguous at chunk
- * level so that we know the whole extent is covered by grant
- * (the pages in the extent are NOT required to be contiguous).
- * Otherwise, it will be too much difficult to know which
- * chunks have grants allocated. */
- /* try to do front merge - extend ext's start */
- if (chunk + 1 == ext_chk_start) {
- /* ext must be chunk size aligned */
- EASSERT((ext->oe_start & ~chunk_mask) == 0, ext);
-
- /* pull ext's start back to cover cur */
- ext->oe_start = cur->oe_start;
- ext->oe_grants += chunksize;
+ if (osc_extent_merge(env, ext, cur) == 0) {
+ LASSERT(*grants >= chunksize);
*grants -= chunksize;
-
found = osc_extent_hold(ext);
- } else if (chunk == ext_chk_end + 1) {
- /* rear merge */
- ext->oe_end = cur->oe_end;
- ext->oe_grants += chunksize;
- *grants -= chunksize;
- /* try to merge with the next one because we just fill
- * in a gap */
+ /*
+ * Try to merge with the next one too because we
+ * might have just filled in a gap.
+ */
if (osc_extent_merge(env, ext, next_extent(ext)) == 0)
/* we can save extent tax from next extent */
- *grants += cli->cl_extent_tax;
+ *grants += cli->cl_grant_extent_tax;
- found = osc_extent_hold(ext);
- }
- if (found != NULL)
break;
-
- ext = next_extent(ext);
+ }
}
osc_extent_tree_dump(D_CACHE, obj);
} else if (conflict == NULL) {
/* create a new extent */
EASSERT(osc_extent_is_overlapped(obj, cur) == 0, cur);
- cur->oe_grants = chunksize + cli->cl_extent_tax;
+ LASSERT(*grants >= cur->oe_grants);
*grants -= cur->oe_grants;
- LASSERT(*grants >= 0);
cur->oe_state = OES_CACHE;
found = osc_extent_hold(cur);
out:
osc_extent_put(env, cur);
- LASSERT(*grants >= 0);
return found;
}
int nr_pages = ext->oe_nr_pages;
int lost_grant = 0;
int blocksize = cli->cl_import->imp_obd->obd_osfs.os_bsize ? : 4096;
- __u64 last_off = 0;
+ loff_t last_off = 0;
int last_count = -1;
ENTRY;
if (!sent) {
lost_grant = ext->oe_grants;
- } else if (blocksize < PAGE_CACHE_SIZE &&
- last_count != PAGE_CACHE_SIZE) {
+ } else if (blocksize < PAGE_SIZE &&
+ last_count != PAGE_SIZE) {
/* For short writes we shouldn't count parts of pages that
* span a whole chunk on the OST side, or our accounting goes
* wrong. Should match the code in filter_grant_check. */
- int offset = last_off & ~CFS_PAGE_MASK;
+ int offset = last_off & ~PAGE_MASK;
int count = last_count + (offset & (blocksize - 1));
int end = (offset + last_count) & (blocksize - 1);
if (end)
count += blocksize - end;
- lost_grant = PAGE_CACHE_SIZE - count;
+ lost_grant = PAGE_SIZE - count;
}
if (ext->oe_grants > 0)
- osc_free_grant(cli, nr_pages, lost_grant);
+ osc_free_grant(cli, nr_pages, lost_grant, ext->oe_grants);
osc_extent_remove(ext);
/* put the refcount for RPC */
RETURN(0);
}
-static int extent_wait_cb(struct osc_extent *ext, enum osc_extent_state state)
-{
- int ret;
-
- osc_object_lock(ext->oe_obj);
- ret = ext->oe_state == state;
- osc_object_unlock(ext->oe_obj);
-
- return ret;
-}
-
/**
* Wait for the extent's state to become @state.
*/
enum osc_extent_state state)
{
struct osc_object *obj = ext->oe_obj;
- struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
int rc = 0;
ENTRY;
osc_extent_release(env, ext);
/* wait for the extent until its state becomes @state */
- rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state), &lwi);
- if (rc == -ETIMEDOUT) {
+ rc = wait_event_idle_timeout(ext->oe_waitq,
+ smp_load_acquire(&ext->oe_state) == state,
+ cfs_time_seconds(600));
+ if (rc == 0) {
OSC_EXTENT_DUMP(D_ERROR, ext,
"%s: wait ext to %u timedout, recovery in progress?\n",
- osc_export(obj)->exp_obd->obd_name, state);
+ cli_name(osc_cli(obj)), state);
- lwi = LWI_INTR(NULL, NULL);
- rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
- &lwi);
+ wait_event_idle(ext->oe_waitq,
+ smp_load_acquire(&ext->oe_state) == state);
}
- if (rc == 0 && ext->oe_rc < 0)
+ if (ext->oe_rc < 0)
rc = ext->oe_rc;
+ else
+ rc = 0;
RETURN(rc);
}
static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
bool partial)
{
- struct cl_env_nest nest;
struct lu_env *env;
struct cl_io *io;
struct osc_object *obj = ext->oe_obj;
struct client_obd *cli = osc_cli(obj);
struct osc_async_page *oap;
struct osc_async_page *tmp;
+ struct pagevec *pvec;
int pages_in_chunk = 0;
int ppc_bits = cli->cl_chunkbits -
- PAGE_CACHE_SHIFT;
+ PAGE_SHIFT;
__u64 trunc_chunk = trunc_index >> ppc_bits;
int grants = 0;
int nr_pages = 0;
int rc = 0;
+ __u16 refcheck;
ENTRY;
LASSERT(sanity_check(ext) == 0);
/* Request new lu_env.
* We can't use that env from osc_cache_truncate_start() because
* it's from lov_io_sub and not fully initialized. */
- env = cl_env_nested_get(&nest);
- io = &osc_env_info(env)->oti_io;
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ RETURN(PTR_ERR(env));
+
+ io = osc_env_thread_io(env);
io->ci_obj = cl_object_top(osc2cl(obj));
+ io->ci_ignore_layout = 1;
+ pvec = &osc_env_info(env)->oti_pagevec;
+ ll_pagevec_init(pvec, 0);
rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
if (rc < 0)
GOTO(out, rc);
- /* discard all pages with index greater then trunc_index */
+ /* discard all pages with index greater than trunc_index */
list_for_each_entry_safe(oap, tmp, &ext->oe_pages,
oap_pending_item) {
pgoff_t index = osc_index(oap2osc(oap));
}
lu_ref_del(&page->cp_reference, "truncate", current);
- cl_page_put(env, page);
+ cl_pagevec_put(env, page, pvec);
--ext->oe_nr_pages;
++nr_pages;
}
+ pagevec_release(pvec);
+
EASSERTF(ergo(ext->oe_start >= trunc_index + !!partial,
ext->oe_nr_pages == 0),
ext, "trunc_index %lu, partial %d\n", trunc_index, partial);
osc_object_unlock(obj);
if (grants > 0 || nr_pages > 0)
- osc_free_grant(cli, nr_pages, grants);
+ osc_free_grant(cli, nr_pages, grants, grants);
out:
cl_io_fini(env, io);
- cl_env_nested_put(&nest, env);
+ cl_env_put(env, &refcheck);
RETURN(rc);
}
if (!(last->oap_async_flags & ASYNC_COUNT_STABLE)) {
int last_oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
LASSERT(last_oap_count > 0);
- LASSERT(last->oap_page_off + last_oap_count <= PAGE_CACHE_SIZE);
+ LASSERT(last->oap_page_off + last_oap_count <= PAGE_SIZE);
last->oap_count = last_oap_count;
spin_lock(&last->oap_lock);
last->oap_async_flags |= ASYNC_COUNT_STABLE;
* because it's known they are not the last page */
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
- oap->oap_count = PAGE_CACHE_SIZE - oap->oap_page_off;
+ oap->oap_count = PAGE_SIZE - oap->oap_page_off;
spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
spin_unlock(&oap->oap_lock);
struct osc_object *obj = ext->oe_obj;
struct client_obd *cli = osc_cli(obj);
struct osc_extent *next;
- int ppc_bits = cli->cl_chunkbits - PAGE_CACHE_SHIFT;
+ int ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
pgoff_t chunk = index >> ppc_bits;
pgoff_t end_chunk;
pgoff_t end_index;
GOTO(out, rc = 0);
LASSERT(end_chunk + 1 == chunk);
+
/* try to expand this extent to cover @index */
end_index = min(ext->oe_max_end, ((chunk + 1) << ppc_bits) - 1);
+ /* don't go over the maximum extent size reported by server */
+ if (end_index - ext->oe_start + 1 > cli->cl_max_extent_pages)
+ GOTO(out, rc = -ERANGE);
+
next = next_extent(ext);
if (next != NULL && next->oe_start <= end_index)
/* complex mode - overlapped with the next extent,
ext->oe_end = end_index;
ext->oe_grants += chunksize;
+ LASSERT(*grants >= chunksize);
*grants -= chunksize;
- LASSERT(*grants >= 0);
EASSERTF(osc_extent_is_overlapped(obj, ext) == 0, ext,
"overlapped after expanding for %lu.\n", index);
EXIT;
RETURN(rc);
}
-static void osc_extent_tree_dump0(int level, struct osc_object *obj,
+static void osc_extent_tree_dump0(int mask, struct osc_object *obj,
const char *func, int line)
{
struct osc_extent *ext;
int cnt;
- CDEBUG(level, "Dump object %p extents at %s:%d, mppr: %u.\n",
+ if (!cfs_cdebug_show(mask, DEBUG_SUBSYSTEM))
+ return;
+
+ CDEBUG(mask, "Dump object %p extents at %s:%d, mppr: %u.\n",
obj, func, line, osc_cli(obj)->cl_max_pages_per_rpc);
/* osc_object_lock(obj); */
cnt = 1;
for (ext = first_extent(obj); ext != NULL; ext = next_extent(ext))
- OSC_EXTENT_DUMP(level, ext, "in tree %d.\n", cnt++);
+ OSC_EXTENT_DUMP(mask, ext, "in tree %d.\n", cnt++);
cnt = 1;
list_for_each_entry(ext, &obj->oo_hp_exts, oe_link)
- OSC_EXTENT_DUMP(level, ext, "hp %d.\n", cnt++);
+ OSC_EXTENT_DUMP(mask, ext, "hp %d.\n", cnt++);
cnt = 1;
list_for_each_entry(ext, &obj->oo_urgent_exts, oe_link)
- OSC_EXTENT_DUMP(level, ext, "urgent %d.\n", cnt++);
+ OSC_EXTENT_DUMP(mask, ext, "urgent %d.\n", cnt++);
cnt = 1;
list_for_each_entry(ext, &obj->oo_reading_exts, oe_link)
- OSC_EXTENT_DUMP(level, ext, "reading %d.\n", cnt++);
+ OSC_EXTENT_DUMP(mask, ext, "reading %d.\n", cnt++);
/* osc_object_unlock(obj); */
}
ENTRY;
result = cl_page_make_ready(env, page, CRT_WRITE);
if (result == 0)
- opg->ops_submit_time = cfs_time_current();
+ opg->ops_submit_time = ktime_get();
RETURN(result);
}
pgoff_t index = osc_index(oap2osc(oap));
struct cl_object *obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
-
int result;
loff_t kms;
return 0;
else if (cl_offset(obj, index + 1) > kms)
/* catch sub-page write at end of file */
- return kms % PAGE_CACHE_SIZE;
+ return kms & ~PAGE_MASK;
else
- return PAGE_CACHE_SIZE;
+ return PAGE_SIZE;
}
static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
{
struct osc_page *opg = oap2osc_page(oap);
struct cl_page *page = oap2cl_page(oap);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
enum cl_req_type crt;
int srvlock;
"cp_state:%u, cmd:%d\n", page->cp_state, cmd);
LASSERT(opg->ops_transfer_pinned);
- /*
- * page->cp_req can be NULL if io submission failed before
- * cl_req was allocated.
- */
- if (page->cp_req != NULL)
- cl_req_page_done(env, page);
- LASSERT(page->cp_req == NULL);
-
crt = cmd == OBD_BRW_READ ? CRT_READ : CRT_WRITE;
/* Clear opg->ops_transfer_pinned before VM lock is released. */
opg->ops_transfer_pinned = 0;
- spin_lock(&obj->oo_seatbelt);
- LASSERT(opg->ops_submitter != NULL);
- LASSERT(!list_empty(&opg->ops_inflight));
- list_del_init(&opg->ops_inflight);
- opg->ops_submitter = NULL;
- spin_unlock(&obj->oo_seatbelt);
-
- opg->ops_submit_time = 0;
+ opg->ops_submit_time = ktime_set(0, 0);
srvlock = oap->oap_brw_flags & OBD_BRW_SRVLOCK;
/* statistic */
lu_ref_del(&page->cp_reference, "transfer", page);
cl_page_completion(env, page, crt, rc);
+ cl_page_put(env, page);
RETURN(0);
}
-#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
+#define OSC_DUMP_GRANT(mask, cli, fmt, args...) do { \
struct client_obd *__tmp = (cli); \
- CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \
- "dropped: %ld avail: %ld, reserved: %ld, flight: %d }" \
- "lru {in list: %ld, left: %ld, waiters: %d }"fmt"\n", \
- __tmp->cl_import->imp_obd->obd_name, \
+ CDEBUG(mask, "%s: grant { dirty: %ld/%ld dirty_pages: %ld/%lu " \
+ "dropped: %ld avail: %ld, dirty_grant: %ld, " \
+ "reserved: %ld, flight: %d } lru {in list: %ld, " \
+ "left: %ld, waiters: %d }" fmt "\n", \
+ cli_name(__tmp), \
__tmp->cl_dirty_pages, __tmp->cl_dirty_max_pages, \
atomic_long_read(&obd_dirty_pages), obd_max_dirty_pages, \
__tmp->cl_lost_grant, __tmp->cl_avail_grant, \
+ __tmp->cl_dirty_grant, \
__tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \
atomic_long_read(&__tmp->cl_lru_in_list), \
atomic_long_read(&__tmp->cl_lru_busy), \
{
assert_spin_locked(&cli->cl_loi_list_lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
- atomic_long_inc(&obd_dirty_pages);
cli->cl_dirty_pages++;
pga->flag |= OBD_BRW_FROM_GRANT;
CDEBUG(D_CACHE, "using %lu grant credits for brw %p page %p\n",
- PAGE_CACHE_SIZE, pga, pga->pg);
+ PAGE_SIZE, pga, pga->pg);
osc_update_next_shrink(cli);
}
pga->flag &= ~OBD_BRW_FROM_GRANT;
atomic_long_dec(&obd_dirty_pages);
cli->cl_dirty_pages--;
- if (pga->flag & OBD_BRW_NOCACHE) {
- pga->flag &= ~OBD_BRW_NOCACHE;
- atomic_long_dec(&obd_dirty_transit_pages);
- cli->cl_dirty_transit--;
- }
EXIT;
}
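+	/* "reserved" is the grant set aside up front and "unused" is what
+	 * the IO did not consume: min(reserved, unused) returns to the
+	 * avail pool, the difference is booked to lost or dirty grant */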
if (unused > reserved) {
cli->cl_avail_grant += reserved;
cli->cl_lost_grant += unused - reserved;
+ cli->cl_dirty_grant -= unused - reserved;
} else {
cli->cl_avail_grant += unused;
+ cli->cl_dirty_grant += reserved - unused;
}
}
-static void osc_unreserve_grant(struct client_obd *cli,
- unsigned int reserved, unsigned int unused)
+static void osc_unreserve_grant_nolock(struct client_obd *cli,
+ unsigned int reserved,
+ unsigned int unused)
{
- spin_lock(&cli->cl_loi_list_lock);
__osc_unreserve_grant(cli, reserved, unused);
if (unused > 0)
osc_wake_cache_waiters(cli);
+}
+
+static void osc_unreserve_grant(struct client_obd *cli,
+ unsigned int reserved, unsigned int unused)
+{
+ spin_lock(&cli->cl_loi_list_lock);
+ osc_unreserve_grant_nolock(cli, reserved, unused);
spin_unlock(&cli->cl_loi_list_lock);
}
* used, we should return these grants to OST. There're two cases where grants
* can be lost:
* 1. truncate;
- * 2. blocksize at OST is less than PAGE_CACHE_SIZE and a partial page was
+ * 2. blocksize at OST is less than PAGE_SIZE and a partial page was
* written. In this case OST may use less chunks to serve this partial
* write. OSTs don't actually know the page size on the client side. so
* clients have to calculate lost grant by the blocksize on the OST.
* See filter_grant_check() for details.
*/
static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
- unsigned int lost_grant)
+ unsigned int lost_grant, unsigned int dirty_grant)
{
- unsigned long grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
+ unsigned long grant;
+
+ grant = (1 << cli->cl_chunkbits) + cli->cl_grant_extent_tax;
spin_lock(&cli->cl_loi_list_lock);
atomic_long_sub(nr_pages, &obd_dirty_pages);
cli->cl_dirty_pages -= nr_pages;
cli->cl_lost_grant += lost_grant;
+ cli->cl_dirty_grant -= dirty_grant;
if (cli->cl_avail_grant < grant && cli->cl_lost_grant >= grant) {
/* borrow some grant from truncate to avoid the case that
* truncate uses up all avail grant */
}
osc_wake_cache_waiters(cli);
spin_unlock(&cli->cl_loi_list_lock);
- CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
+ CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu/%lu\n",
lost_grant, cli->cl_lost_grant,
- cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_CACHE_SHIFT);
+ cli->cl_avail_grant, cli->cl_dirty_pages << PAGE_SHIFT,
+ cli->cl_dirty_grant);
}
/**
*/
static int osc_enter_cache_try(struct client_obd *cli,
struct osc_async_page *oap,
- int bytes, int transient)
+ int bytes)
{
int rc;
- OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
+ OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
rc = osc_reserve_grant(cli, bytes);
if (rc < 0)
return 0;
- if (cli->cl_dirty_pages < cli->cl_dirty_max_pages &&
- 1 + atomic_long_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
- osc_consume_write_grant(cli, &oap->oap_brw_page);
- if (transient) {
- cli->cl_dirty_transit++;
- atomic_long_inc(&obd_dirty_transit_pages);
- oap->oap_brw_flags |= OBD_BRW_NOCACHE;
- }
- rc = 1;
- } else {
- __osc_unreserve_grant(cli, bytes, bytes);
- rc = 0;
+ if (cli->cl_dirty_pages < cli->cl_dirty_max_pages) {
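+		/* optimistically charge the page to the global dirty
+		 * counter; if that overshoots the limit, undo the charge
+		 * and fail the try */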
+ if (atomic_long_add_return(1, &obd_dirty_pages) <=
+ obd_max_dirty_pages) {
+ osc_consume_write_grant(cli, &oap->oap_brw_page);
+ rc = 1;
+ goto out;
+ } else
+ atomic_long_dec(&obd_dirty_pages);
}
+ __osc_unreserve_grant(cli, bytes, bytes);
+
+out:
return rc;
}
-static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
+/* The following two inlines exist to pass code fragments
+ * to wait_event_idle_exclusive_timeout_cmd(). Passing
+ * code fragments as macro args can look confusing, so
+ * we provide inlines to encapsulate them.
+ */
+static inline void cli_unlock_and_unplug(const struct lu_env *env,
+ struct client_obd *cli,
+ struct osc_async_page *oap)
{
- int rc;
- spin_lock(&cli->cl_loi_list_lock);
- rc = list_empty(&ocw->ocw_entry);
spin_unlock(&cli->cl_loi_list_lock);
- return rc;
+ osc_io_unplug_async(env, cli, NULL);
+ CDEBUG(D_CACHE,
+ "%s: sleeping for cache space for %p\n",
+ cli_name(cli), oap);
}
+static inline void cli_lock_after_unplug(struct client_obd *cli)
+{
+ spin_lock(&cli->cl_loi_list_lock);
+}
/**
* The main entry to reserve dirty page accounting. Usually the grant reserved
* in this function will be freed in bulk in osc_free_grant() unless it fails
struct osc_async_page *oap, int bytes)
{
struct osc_object *osc = oap->oap_obj;
- struct lov_oinfo *loi = osc->oo_oinfo;
- struct osc_cache_waiter ocw;
- struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
- LWI_ON_SIGNAL_NOOP, NULL);
+ struct lov_oinfo *loi = osc->oo_oinfo;
int rc = -EDQUOT;
+ int remain;
+ bool entered = false;
+	/* We cannot wait for a long time here since we are holding an ldlm
+	 * lock across the actual IO. If no requests complete quickly (e.g.
+	 * due to an overloaded OST that takes a long time to process
+	 * everything), we'd get evicted if we wait for a normal obd_timeout
+	 * or some such. So we try to wait half the time it would take the
+	 * client to be evicted by the server, which is half obd_timeout
+	 * when AT is off, or at least ldlm_enqueue_min with AT on.
+	 * See LU-13131 */
+ unsigned long timeout = cfs_time_seconds(AT_OFF ? obd_timeout / 2 :
+ ldlm_enqueue_min / 2);
+
ENTRY;
- OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
+ OSC_DUMP_GRANT(D_CACHE, cli, "need:%d\n", bytes);
spin_lock(&cli->cl_loi_list_lock);
* of queued writes and create a discontiguous rpc stream */
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_NO_GRANT) ||
cli->cl_dirty_max_pages == 0 ||
- cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync)
+ cli->cl_ar.ar_force_sync || loi->loi_ar.ar_force_sync) {
+ OSC_DUMP_GRANT(D_CACHE, cli, "forced sync i/o\n");
GOTO(out, rc = -EDQUOT);
+ }
- /* Hopefully normal case - cache space and write credits available */
- if (osc_enter_cache_try(cli, oap, bytes, 0))
- GOTO(out, rc = 0);
-
- /* We can get here for two reasons: too many dirty pages in cache, or
+ /*
+	 * We can wait here for two reasons: too many dirty pages in the cache,
+	 * or we have run out of grant. In both cases we should write dirty
+	 * pages out.
* Adding a cache waiter will trigger urgent write-out no matter what
* RPC size will be.
- * The exiting condition is no avail grants and no dirty pages caching,
- * that really means there is no space on the OST. */
- init_waitqueue_head(&ocw.ocw_waitq);
- ocw.ocw_oap = oap;
- ocw.ocw_grant = bytes;
- while (cli->cl_dirty_pages > 0 || cli->cl_w_in_flight > 0) {
- list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
- ocw.ocw_rc = 0;
- spin_unlock(&cli->cl_loi_list_lock);
-
- osc_io_unplug_async(env, cli, NULL);
-
- CDEBUG(D_CACHE, "%s: sleeping for cache space @ %p for %p\n",
- cli->cl_import->imp_obd->obd_name, &ocw, oap);
-
- rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
-
- spin_lock(&cli->cl_loi_list_lock);
-
- /* l_wait_event is interrupted by signal, or timed out */
- if (rc < 0) {
- switch (rc) {
- case -ETIMEDOUT:
- OSC_DUMP_GRANT(D_ERROR, cli,
- "try to reserve %d.\n", bytes);
- osc_extent_tree_dump(D_ERROR, osc);
- rc = -EDQUOT;
- break;
- case -EINTR:
- /* Ensures restartability - LU-3581 */
- rc = -ERESTARTSYS;
- break;
- default:
- CDEBUG(D_CACHE, "%s: event for cache space @"
- " %p never arrived due to %d\n",
- cli->cl_import->imp_obd->obd_name,
- &ocw, rc);
- break;
- }
- list_del_init(&ocw.ocw_entry);
- GOTO(out, rc);
- }
-
- LASSERT(list_empty(&ocw.ocw_entry));
- rc = ocw.ocw_rc;
-
- if (rc != -EDQUOT)
- GOTO(out, rc);
- if (osc_enter_cache_try(cli, oap, bytes, 0))
- GOTO(out, rc = 0);
+	 * The exit condition (other than success) is no available grant and
+	 * no dirty pages in cache, which really means there is no space on
+	 * the OST.
+ */
+ remain = wait_event_idle_exclusive_timeout_cmd(
+ cli->cl_cache_waiters,
+ (entered = osc_enter_cache_try(cli, oap, bytes)) ||
+ (cli->cl_dirty_pages == 0 && cli->cl_w_in_flight == 0),
+ timeout,
+ cli_unlock_and_unplug(env, cli, oap),
+ cli_lock_after_unplug(cli));
+
+ if (entered) {
+ if (remain == timeout)
+ OSC_DUMP_GRANT(D_CACHE, cli, "granted from cache\n");
+ else
+ OSC_DUMP_GRANT(D_CACHE, cli,
+ "finally got grant space\n");
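+		/* waiters sleep on the queue exclusively, so each entry
+		 * that succeeds passes the wakeup on to the next waiter */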
+ wake_up(&cli->cl_cache_waiters);
+ rc = 0;
+ } else if (remain == 0) {
+ OSC_DUMP_GRANT(D_CACHE, cli,
+ "timeout, fall back to sync i/o\n");
+ osc_extent_tree_dump(D_CACHE, osc);
+ /* fall back to synchronous I/O */
+ } else {
+ OSC_DUMP_GRANT(D_CACHE, cli,
+ "no grant space, fall back to sync i/o\n");
+ wake_up_all(&cli->cl_cache_waiters);
}
EXIT;
out:
spin_unlock(&cli->cl_loi_list_lock);
- OSC_DUMP_GRANT(D_CACHE, cli, "returned %d.\n", rc);
RETURN(rc);
}
-/* caller must hold loi_list_lock */
-void osc_wake_cache_waiters(struct client_obd *cli)
-{
- struct list_head *l, *tmp;
- struct osc_cache_waiter *ocw;
-
- ENTRY;
- list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
- ocw = list_entry(l, struct osc_cache_waiter, ocw_entry);
- list_del_init(&ocw->ocw_entry);
-
- ocw->ocw_rc = -EDQUOT;
- /* we can't dirty more */
- if ((cli->cl_dirty_pages >= cli->cl_dirty_max_pages) ||
- (1 + atomic_long_read(&obd_dirty_pages) >
- obd_max_dirty_pages)) {
- CDEBUG(D_CACHE, "no dirty room: dirty: %ld "
- "osc max %ld, sys max %ld\n",
- cli->cl_dirty_pages, cli->cl_dirty_max_pages,
- obd_max_dirty_pages);
- goto wakeup;
- }
-
- ocw->ocw_rc = 0;
- if (!osc_enter_cache_try(cli, ocw->ocw_oap, ocw->ocw_grant, 0))
- ocw->ocw_rc = -EDQUOT;
-
-wakeup:
- CDEBUG(D_CACHE, "wake up %p for oap %p, avail grant %ld, %d\n",
- ocw, ocw->ocw_oap, cli->cl_avail_grant, ocw->ocw_rc);
-
- wake_up(&ocw->ocw_waitq);
- }
-
- EXIT;
-}
-
static int osc_max_rpc_in_flight(struct client_obd *cli, struct osc_object *osc)
{
int hprpc = !!list_empty(&osc->oo_hp_exts);
}
/* trigger a write rpc stream as long as there are dirtiers
* waiting for space. as they're waiting, they're not going to
- * create more pages to coalesce with what's waiting.. */
- if (!list_empty(&cli->cl_cache_waiters)) {
+ * create more pages to coalesce with what's waiting..
+ */
+ if (waitqueue_active(&cli->cl_cache_waiters)) {
CDEBUG(D_CACHE, "cache waiters forcing RPC\n");
RETURN(1);
}
- if (atomic_read(&osc->oo_nr_writes) >=
- cli->cl_max_pages_per_rpc)
+ if (!list_empty(&osc->oo_full_exts)) {
+ CDEBUG(D_CACHE, "full extent ready, make an RPC\n");
RETURN(1);
+ }
} else {
if (atomic_read(&osc->oo_nr_reads) == 0)
RETURN(0);
spin_lock(&oap->oap_lock);
oap->oap_async_flags = 0;
spin_unlock(&oap->oap_lock);
- oap->oap_interrupted = 0;
if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
spin_lock(&cli->cl_loi_list_lock);
EXIT;
}
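+/* bookkeeping for the set of extents gathered into a single RPC */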
+struct extent_rpc_data {
+ struct list_head *erd_rpc_list;
+ unsigned int erd_page_count;
+ unsigned int erd_max_pages;
+ unsigned int erd_max_chunks;
+ unsigned int erd_max_extents;
+};
+
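+/* number of chunks (grant allocation units) spanned by an extent */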
+static inline unsigned osc_extent_chunks(const struct osc_extent *ext)
+{
+ struct client_obd *cli = osc_cli(ext->oe_obj);
+ unsigned ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
+
+ return (ext->oe_end >> ppc_bits) - (ext->oe_start >> ppc_bits) + 1;
+}
+
+static inline bool
+can_merge(const struct osc_extent *ext, const struct osc_extent *in_rpc)
+{
+ if (ext->oe_no_merge || in_rpc->oe_no_merge)
+ return false;
+
+ if (ext->oe_srvlock != in_rpc->oe_srvlock)
+ return false;
+
+ if (ext->oe_ndelay != in_rpc->oe_ndelay)
+ return false;
+
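+	/* grant-backed extents cannot share an RPC with grant-less ones */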
+ if (!ext->oe_grants != !in_rpc->oe_grants)
+ return false;
+
+ if (ext->oe_dio != in_rpc->oe_dio)
+ return false;
+
+ /* It's possible to have overlap on DIO */
+ if (in_rpc->oe_dio && overlapped(ext, in_rpc))
+ return false;
+
+ if (ext->oe_is_rdma_only != in_rpc->oe_is_rdma_only)
+ return false;
+
+ return true;
+}
+
/**
* Try to add extent to one RPC. We need to think about the following things:
* - # of pages must not be over max_pages_per_rpc
*/
static int try_to_add_extent_for_io(struct client_obd *cli,
struct osc_extent *ext,
- struct list_head *rpclist,
- unsigned int *pc, unsigned int *max_pages)
+ struct extent_rpc_data *data)
{
struct osc_extent *tmp;
- struct osc_async_page *oap = list_first_entry(&ext->oe_pages,
- struct osc_async_page,
- oap_pending_item);
+ unsigned int chunk_count;
ENTRY;
EASSERT((ext->oe_state == OES_CACHE || ext->oe_state == OES_LOCK_DONE),
ext);
+ OSC_EXTENT_DUMP(D_CACHE, ext, "trying to add this extent\n");
+
+ if (data->erd_max_extents == 0)
+ RETURN(0);
+
+ chunk_count = osc_extent_chunks(ext);
+ EASSERTF(data->erd_page_count != 0 ||
+ chunk_count <= data->erd_max_chunks, ext,
+		 "The first extent to fit in an RPC contains %u chunks, "
+ "which is over the limit %u.\n", chunk_count,
+ data->erd_max_chunks);
+ if (chunk_count > data->erd_max_chunks)
+ RETURN(0);
- *max_pages = max(ext->oe_mppr, *max_pages);
- if (*pc + ext->oe_nr_pages > *max_pages)
+ data->erd_max_pages = max(ext->oe_mppr, data->erd_max_pages);
+ EASSERTF(data->erd_page_count != 0 ||
+ ext->oe_nr_pages <= data->erd_max_pages, ext,
+		 "The first extent to fit in an RPC contains %u pages, "
+ "which is over the limit %u.\n", ext->oe_nr_pages,
+ data->erd_max_pages);
+ if (data->erd_page_count + ext->oe_nr_pages > data->erd_max_pages)
RETURN(0);
- list_for_each_entry(tmp, rpclist, oe_link) {
- struct osc_async_page *oap2;
- oap2 = list_first_entry(&tmp->oe_pages, struct osc_async_page,
- oap_pending_item);
+ list_for_each_entry(tmp, data->erd_rpc_list, oe_link) {
EASSERT(tmp->oe_owner == current, tmp);
-#if 0
- if (overlapped(tmp, ext)) {
- OSC_EXTENT_DUMP(D_ERROR, tmp, "overlapped %p.\n", ext);
- EASSERT(0, ext);
- }
-#endif
- if (oap2cl_page(oap)->cp_type != oap2cl_page(oap2)->cp_type) {
- CDEBUG(D_CACHE, "Do not permit different type of IO"
- " for a same RPC\n");
- RETURN(0);
- }
- if (tmp->oe_srvlock != ext->oe_srvlock ||
- !tmp->oe_grants != !ext->oe_grants)
+ if (!can_merge(ext, tmp))
RETURN(0);
-
- /* remove break for strict check */
- break;
}
- *pc += ext->oe_nr_pages;
- list_move_tail(&ext->oe_link, rpclist);
+ data->erd_max_extents--;
+ data->erd_max_chunks -= chunk_count;
+ data->erd_page_count += ext->oe_nr_pages;
+ list_move_tail(&ext->oe_link, data->erd_rpc_list);
ext->oe_owner = current;
RETURN(1);
}
{
struct client_obd *cli = osc_cli(obj);
struct osc_extent *ext;
- unsigned int page_count = 0;
- unsigned int max_pages = cli->cl_max_pages_per_rpc;
-
- LASSERT(osc_object_is_locked(obj));
+ struct extent_rpc_data data = {
+ .erd_rpc_list = rpclist,
+ .erd_page_count = 0,
+ .erd_max_pages = cli->cl_max_pages_per_rpc,
+ .erd_max_chunks = osc_max_write_chunks(cli),
+ .erd_max_extents = 256,
+ };
+
+ assert_osc_object_is_locked(obj);
while (!list_empty(&obj->oo_hp_exts)) {
ext = list_entry(obj->oo_hp_exts.next, struct osc_extent,
oe_link);
- LASSERT(ext->oe_state == OES_CACHE);
- if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
- &max_pages))
- return page_count;
- EASSERT(ext->oe_nr_pages <= max_pages, ext);
+ if (!try_to_add_extent_for_io(cli, ext, &data))
+ return data.erd_page_count;
+ EASSERT(ext->oe_nr_pages <= data.erd_max_pages, ext);
}
- if (page_count == max_pages)
- return page_count;
+ if (data.erd_page_count == data.erd_max_pages)
+ return data.erd_page_count;
while (!list_empty(&obj->oo_urgent_exts)) {
ext = list_entry(obj->oo_urgent_exts.next,
struct osc_extent, oe_link);
- if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
- &max_pages))
- return page_count;
-
- if (!ext->oe_intree)
- continue;
-
- while ((ext = next_extent(ext)) != NULL) {
- if ((ext->oe_state != OES_CACHE) ||
- (!list_empty(&ext->oe_link) &&
- ext->oe_owner != NULL))
- continue;
-
- if (!try_to_add_extent_for_io(cli, ext, rpclist,
- &page_count, &max_pages))
- return page_count;
- }
+ if (!try_to_add_extent_for_io(cli, ext, &data))
+ return data.erd_page_count;
+ }
+ if (data.erd_page_count == data.erd_max_pages)
+ return data.erd_page_count;
+
+ /* One key difference between full extents and other extents: full
+ * extents can usually only be added if the rpclist was empty, so if we
+ * can't add one, we continue on to trying to add normal extents. This
+ * is so we don't miss adding extra extents to an RPC containing high
+ * priority or urgent extents. */
+ while (!list_empty(&obj->oo_full_exts)) {
+ ext = list_entry(obj->oo_full_exts.next,
+ struct osc_extent, oe_link);
+ if (!try_to_add_extent_for_io(cli, ext, &data))
+ break;
}
- if (page_count == max_pages)
- return page_count;
+ if (data.erd_page_count == data.erd_max_pages)
+ return data.erd_page_count;
- ext = first_extent(obj);
- while (ext != NULL) {
+ for (ext = first_extent(obj);
+ ext;
+ ext = next_extent(ext)) {
if ((ext->oe_state != OES_CACHE) ||
/* this extent may be already in current rpclist */
- (!list_empty(&ext->oe_link) && ext->oe_owner != NULL)) {
- ext = next_extent(ext);
+ (!list_empty(&ext->oe_link) && ext->oe_owner))
continue;
- }
- if (!try_to_add_extent_for_io(cli, ext, rpclist, &page_count,
- &max_pages))
- return page_count;
-
- ext = next_extent(ext);
+ if (!try_to_add_extent_for_io(cli, ext, &data))
+ return data.erd_page_count;
}
- return page_count;
+ return data.erd_page_count;
}
static int
osc_send_write_rpc(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc, pdl_policy_t pol)
+ struct osc_object *osc)
__must_hold(osc)
{
- struct list_head rpclist = LIST_HEAD_INIT(rpclist);
+ LIST_HEAD(rpclist);
struct osc_extent *ext;
struct osc_extent *tmp;
struct osc_extent *first = NULL;
int rc = 0;
ENTRY;
- LASSERT(osc_object_is_locked(osc));
+ assert_osc_object_is_locked(osc);
page_count = get_write_extents(osc, &rpclist);
LASSERT(equi(page_count == 0, list_empty(&rpclist)));
if (!list_empty(&rpclist)) {
LASSERT(page_count > 0);
- rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE, pol);
+ rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_WRITE);
LASSERT(list_empty(&rpclist));
}
*/
static int
osc_send_read_rpc(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc, pdl_policy_t pol)
+ struct osc_object *osc)
__must_hold(osc)
{
struct osc_extent *ext;
struct osc_extent *next;
- struct list_head rpclist = LIST_HEAD_INIT(rpclist);
- unsigned int page_count = 0;
- unsigned int max_pages = cli->cl_max_pages_per_rpc;
+ LIST_HEAD(rpclist);
+ struct extent_rpc_data data = {
+ .erd_rpc_list = &rpclist,
+ .erd_page_count = 0,
+ .erd_max_pages = cli->cl_max_pages_per_rpc,
+ .erd_max_chunks = UINT_MAX,
+ .erd_max_extents = UINT_MAX,
+ };
int rc = 0;
ENTRY;
- LASSERT(osc_object_is_locked(osc));
- list_for_each_entry_safe(ext, next,
- &osc->oo_reading_exts, oe_link) {
+ assert_osc_object_is_locked(osc);
+ list_for_each_entry_safe(ext, next, &osc->oo_reading_exts, oe_link) {
EASSERT(ext->oe_state == OES_LOCK_DONE, ext);
- if (!try_to_add_extent_for_io(cli, ext, &rpclist, &page_count,
- &max_pages))
+ if (!try_to_add_extent_for_io(cli, ext, &data))
break;
osc_extent_state_set(ext, OES_RPC);
- EASSERT(ext->oe_nr_pages <= max_pages, ext);
+ EASSERT(ext->oe_nr_pages <= data.erd_max_pages, ext);
}
- LASSERT(page_count <= max_pages);
+ LASSERT(data.erd_page_count <= data.erd_max_pages);
- osc_update_pending(osc, OBD_BRW_READ, -page_count);
+ osc_update_pending(osc, OBD_BRW_READ, -data.erd_page_count);
if (!list_empty(&rpclist)) {
osc_object_unlock(osc);
- LASSERT(page_count > 0);
- rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ, pol);
+ rc = osc_build_rpc(env, cli, &rpclist, OBD_BRW_READ);
LASSERT(list_empty(&rpclist));
osc_object_lock(osc);
/* then if we have cache waiters, return all objects with queued
* writes. This is especially important when many small files
* have filled up the cache and not been fired into rpcs because
- * they don't pass the nr_pending/object threshhold */
- if (!list_empty(&cli->cl_cache_waiters) &&
+	 * they don't pass the nr_pending/object threshold
+ */
+ if (waitqueue_active(&cli->cl_cache_waiters) &&
!list_empty(&cli->cl_loi_write_list))
RETURN(list_to_obj(&cli->cl_loi_write_list, write_item));
}
/* called with the loi list lock held */
-static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli,
- pdl_policy_t pol)
+static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
__must_hold(&cli->cl_loi_list_lock)
{
struct osc_object *osc;
OSC_IO_DEBUG(osc, "%lu in flight\n", rpcs_in_flight(cli));
- if (osc_max_rpc_in_flight(cli, osc)) {
+	/* even if we have reached our max in-flight RPCs, we still allow
+	 * all high-priority RPCs through to prevent their starvation, which
+	 * could lead to the server evicting us for not writing out pages
+	 * in a timely manner. See LU-13131 */
+ if (osc_max_rpc_in_flight(cli, osc) &&
+ list_empty(&osc->oo_hp_exts)) {
__osc_list_maint(cli, osc);
break;
}
* do io on writes while there are cache waiters */
osc_object_lock(osc);
if (osc_makes_rpc(cli, osc, OBD_BRW_WRITE)) {
- rc = osc_send_write_rpc(env, cli, osc, pol);
+ rc = osc_send_write_rpc(env, cli, osc);
if (rc < 0) {
CERROR("Write request failed with %d\n", rc);
}
}
if (osc_makes_rpc(cli, osc, OBD_BRW_READ)) {
- rc = osc_send_read_rpc(env, cli, osc, pol);
+ rc = osc_send_read_rpc(env, cli, osc);
if (rc < 0)
CERROR("Read request failed with %d\n", rc);
}
}
}
-static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc, pdl_policy_t pol, int async)
+int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
+ struct osc_object *osc, int async)
{
int rc = 0;
return 0;
if (!async) {
- /* disable osc_lru_shrink() temporarily to avoid
- * potential stack overrun problem. LU-2859 */
- atomic_inc(&cli->cl_lru_shrinkers);
spin_lock(&cli->cl_loi_list_lock);
- osc_check_rpcs(env, cli, pol);
+ osc_check_rpcs(env, cli);
spin_unlock(&cli->cl_loi_list_lock);
- atomic_dec(&cli->cl_lru_shrinkers);
} else {
CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
LASSERT(cli->cl_writeback_work != NULL);
}
return rc;
}
-
-static int osc_io_unplug_async(const struct lu_env *env,
- struct client_obd *cli, struct osc_object *osc)
-{
- /* XXX: policy is no use actually. */
- return osc_io_unplug0(env, cli, osc, PDL_POLICY_ROUND, 1);
-}
-
-void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
- struct osc_object *osc, pdl_policy_t pol)
-{
- (void)osc_io_unplug0(env, cli, osc, pol, 0);
-}
+EXPORT_SYMBOL(osc_io_unplug0);
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
struct page *page, loff_t offset)
oap->oap_page = page;
oap->oap_obj_off = offset;
- LASSERT(!(offset & ~CFS_PAGE_MASK));
-
- if (!client_is_remote(exp) && cfs_capable(CFS_CAP_SYS_RESOURCE))
- oap->oap_brw_flags = OBD_BRW_NOQUOTA;
+ LASSERT(!(offset & ~PAGE_MASK));
INIT_LIST_HEAD(&oap->oap_pending_item);
INIT_LIST_HEAD(&oap->oap_rpc_item);
spin_lock_init(&oap->oap_lock);
- CDEBUG(D_INFO, "oap %p page %p obj off "LPU64"\n",
+ CDEBUG(D_INFO, "oap %p page %p obj off %llu\n",
oap, page, oap->oap_obj_off);
RETURN(0);
}
+EXPORT_SYMBOL(osc_prep_async_page);
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops)
+ struct osc_page *ops, cl_commit_cbt cb)
{
struct osc_io *oio = osc_env_io(env);
struct osc_extent *ext = NULL;
struct osc_async_page *oap = &ops->ops_oap;
struct client_obd *cli = oap->oap_cli;
struct osc_object *osc = oap->oap_obj;
+ struct pagevec *pvec = &osc_env_info(env)->oti_pagevec;
pgoff_t index;
unsigned int tmp;
unsigned int grants = 0;
- int brw_flags = OBD_BRW_ASYNC;
+ u32 brw_flags = OBD_BRW_ASYNC;
int cmd = OBD_BRW_WRITE;
int need_release = 0;
int rc = 0;
/* Set the OBD_BRW_SRVLOCK before the page is queued. */
brw_flags |= ops->ops_srvlock ? OBD_BRW_SRVLOCK : 0;
- if (!client_is_remote(osc_export(osc)) &&
- cfs_capable(CFS_CAP_SYS_RESOURCE)) {
+ if (oio->oi_cap_sys_resource || io->ci_noquota) {
brw_flags |= OBD_BRW_NOQUOTA;
cmd |= OBD_BRW_NOQUOTA;
}
if (!(cmd & OBD_BRW_NOQUOTA)) {
struct cl_object *obj;
struct cl_attr *attr;
- unsigned int qid[MAXQUOTAS];
+ unsigned int qid[LL_MAXQUOTAS];
obj = cl_object_top(&osc->oo_cl);
attr = &osc_env_info(env)->oti_attr;
qid[USRQUOTA] = attr->cat_uid;
qid[GRPQUOTA] = attr->cat_gid;
- if (rc == 0 && osc_quota_chkdq(cli, qid) == NO_QUOTA)
+ qid[PRJQUOTA] = attr->cat_projid;
+ if (rc == 0 && osc_quota_chkdq(cli, qid) == -EDQUOT)
rc = -EDQUOT;
if (rc)
RETURN(rc);
oap->oap_cmd = cmd;
oap->oap_page_off = ops->ops_from;
- oap->oap_count = ops->ops_to - ops->ops_from;
+ oap->oap_count = ops->ops_to - ops->ops_from + 1;
/* No need to hold a lock here,
* since this page is not in any list yet. */
oap->oap_async_flags = 0;
if (ext != NULL && ext->oe_start <= index && ext->oe_max_end >= index) {
/* one chunk plus extent overhead must be enough to write this
* page */
- grants = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
+ grants = (1 << cli->cl_chunkbits) + cli->cl_grant_extent_tax;
if (ext->oe_end >= index)
grants = 0;
/* it doesn't need any grant to dirty this page */
spin_lock(&cli->cl_loi_list_lock);
- rc = osc_enter_cache_try(cli, oap, grants, 0);
- spin_unlock(&cli->cl_loi_list_lock);
+ rc = osc_enter_cache_try(cli, oap, grants);
if (rc == 0) { /* try failed */
grants = 0;
need_release = 1;
} else {
OSC_EXTENT_DUMP(D_CACHE, ext,
"expanded for %lu.\n", index);
- osc_unreserve_grant(cli, grants, tmp);
+ osc_unreserve_grant_nolock(cli, grants, tmp);
grants = 0;
}
}
+ spin_unlock(&cli->cl_loi_list_lock);
rc = 0;
} else if (ext != NULL) {
/* index is located outside of active extent */
}
if (ext == NULL) {
- tmp = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
+ tmp = (1 << cli->cl_chunkbits) + cli->cl_grant_extent_tax;
/* try to find new extent to cover this page */
LASSERT(oio->oi_active == NULL);
rc = 0;
if (grants == 0) {
- /* we haven't allocated grant for this page. */
+ /* We haven't allocated grant for this page, and we
+ * must not hold a page lock while we do enter_cache,
+ * so we must mark dirty & unlock any pages in the
+ * write commit pagevec. */
+ if (pagevec_count(pvec)) {
+ cb(env, io, pvec);
+ pagevec_reinit(pvec);
+ }
rc = osc_enter_cache(env, cli, oap, tmp);
if (rc == 0)
grants = tmp;
++ext->oe_nr_pages;
list_add_tail(&oap->oap_pending_item, &ext->oe_pages);
osc_object_unlock(osc);
+
+ if (!ext->oe_layout_version)
+ ext->oe_layout_version = io->ci_layout_version;
}
+
RETURN(rc);
}
struct osc_object *obj, struct osc_page *ops)
{
struct osc_async_page *oap = &ops->ops_oap;
- struct osc_extent *ext = NULL;
int rc = 0;
ENTRY;
CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
oap, ops, osc_index(oap2osc(oap)));
- osc_object_lock(obj);
if (!list_empty(&oap->oap_rpc_item)) {
CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
rc = -EBUSY;
} else if (!list_empty(&oap->oap_pending_item)) {
+ struct osc_extent *ext = NULL;
+
+ osc_object_lock(obj);
ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
+ osc_object_unlock(obj);
/* only truncated pages are allowed to be taken out.
* See osc_extent_truncate() and osc_cache_truncate_start()
* for details. */
osc_index(oap2osc(oap)));
rc = -EBUSY;
}
+ if (ext != NULL)
+ osc_extent_put(env, ext);
}
- osc_object_unlock(obj);
- if (ext != NULL)
- osc_extent_put(env, ext);
RETURN(rc);
}
oap->oap_async_flags |= ASYNC_READY|ASYNC_URGENT;
spin_unlock(&oap->oap_lock);
- if (memory_pressure_get())
+ if (current->flags & PF_MEMALLOC)
ext->oe_memalloc = 1;
ext->oe_urgent = 1;
return rc;
}
-/**
- * this is called when a sync waiter receives an interruption. Its job is to
- * get the caller woken as soon as possible. If its page hasn't been put in an
- * rpc yet it can dequeue immediately. Otherwise it has to mark the rpc as
- * desiring interruption which will forcefully complete the rpc once the rpc
- * has timed out.
- */
-int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
-{
- struct osc_async_page *oap = &ops->ops_oap;
- struct osc_object *obj = oap->oap_obj;
- struct client_obd *cli = osc_cli(obj);
- struct osc_extent *ext;
- struct osc_extent *found = NULL;
- struct list_head *plist;
- pgoff_t index = osc_index(ops);
- int rc = -EBUSY;
- int cmd;
- ENTRY;
-
- LASSERT(!oap->oap_interrupted);
- oap->oap_interrupted = 1;
-
- /* Find out the caching extent */
- osc_object_lock(obj);
- if (oap->oap_cmd & OBD_BRW_WRITE) {
- plist = &obj->oo_urgent_exts;
- cmd = OBD_BRW_WRITE;
- } else {
- plist = &obj->oo_reading_exts;
- cmd = OBD_BRW_READ;
- }
- list_for_each_entry(ext, plist, oe_link) {
- if (ext->oe_start <= index && ext->oe_end >= index) {
- LASSERT(ext->oe_state == OES_LOCK_DONE);
- /* For OES_LOCK_DONE state extent, it has already held
- * a refcount for RPC. */
- found = osc_extent_get(ext);
- break;
- }
- }
- if (found != NULL) {
- list_del_init(&found->oe_link);
- osc_update_pending(obj, cmd, -found->oe_nr_pages);
- osc_object_unlock(obj);
-
- osc_extent_finish(env, found, 0, -EINTR);
- osc_extent_put(env, found);
- rc = 0;
- } else {
- osc_object_unlock(obj);
- /* ok, it's been put in an rpc. only one oap gets a request
- * reference */
- if (oap->oap_request != NULL) {
- ptlrpc_mark_interrupted(oap->oap_request);
- ptlrpcd_wake(oap->oap_request);
- ptlrpc_req_finished(oap->oap_request);
- oap->oap_request = NULL;
- }
- }
-
- osc_list_maint(cli, obj);
- RETURN(rc);
-}
-
-int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
- struct list_head *list, int cmd, int brw_flags)
+int osc_queue_sync_pages(const struct lu_env *env, const struct cl_io *io,
+ struct osc_object *obj, struct list_head *list,
+ int brw_flags)
{
struct client_obd *cli = osc_cli(obj);
struct osc_extent *ext;
struct osc_async_page *oap;
int page_count = 0;
int mppr = cli->cl_max_pages_per_rpc;
+ bool can_merge = true;
pgoff_t start = CL_PAGE_EOF;
pgoff_t end = 0;
ENTRY;
list_for_each_entry(oap, list, oap_pending_item) {
- pgoff_t index = osc_index(oap2osc(oap));
+ struct osc_page *opg = oap2osc_page(oap);
+ pgoff_t index = osc_index(opg);
+
if (index > end)
end = index;
if (index < start)
start = index;
++page_count;
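+		/* keep mppr a power-of-two bound >= page_count: double it
+		 * whenever the page count outgrows it */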
mppr <<= (page_count > mppr);
+
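+		/* a page that does not cover [0, PAGE_SIZE - 1] in full
+		 * cannot share an RPC with other extents; such an extent
+		 * is flagged oe_no_merge below */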
+ if (unlikely(opg->ops_from > 0 ||
+ opg->ops_to < PAGE_SIZE - 1))
+ can_merge = false;
}
ext = osc_extent_alloc(obj);
if (ext == NULL) {
- list_for_each_entry(oap, list, oap_pending_item) {
+ struct osc_async_page *tmp;
+
+ list_for_each_entry_safe(oap, tmp, list, oap_pending_item) {
list_del_init(&oap->oap_pending_item);
osc_ap_completion(env, cli, oap, 0, -ENOMEM);
}
RETURN(-ENOMEM);
}
- ext->oe_rw = !!(cmd & OBD_BRW_READ);
+ ext->oe_rw = !!(brw_flags & OBD_BRW_READ);
ext->oe_sync = 1;
+ ext->oe_no_merge = !can_merge;
ext->oe_urgent = 1;
ext->oe_start = start;
ext->oe_end = ext->oe_max_end = end;
ext->oe_obj = obj;
ext->oe_srvlock = !!(brw_flags & OBD_BRW_SRVLOCK);
+ ext->oe_ndelay = !!(brw_flags & OBD_BRW_NDELAY);
+ ext->oe_dio = !!(brw_flags & OBD_BRW_NOCACHE);
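+	/* A direct IO write bypasses osc_enter_cache(), so its grant is
+	 * reserved here instead: the per-extent tax plus one chunk of
+	 * grant for each chunk-sized group of pages. */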
+ if (ext->oe_dio && !ext->oe_rw) { /* direct io write */
+ int grants;
+ int ppc;
+
+ ppc = 1 << (cli->cl_chunkbits - PAGE_SHIFT);
+ grants = cli->cl_grant_extent_tax;
+ grants += (1 << cli->cl_chunkbits) *
+ ((page_count + ppc - 1) / ppc);
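+		/* e.g. with 1 MiB chunks (cl_chunkbits == 20) and 4 KiB
+		 * pages, ppc == 256, so a 300-page write reserves
+		 * cl_grant_extent_tax plus two chunks (2 MiB) of grant */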
+
+ spin_lock(&cli->cl_loi_list_lock);
+ if (osc_reserve_grant(cli, grants) == 0) {
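+			/* reservation succeeded: account each page as dirty
+			 * and keep the whole amount in oe_grants; passing
+			 * unused == 0 below means none of the reservation
+			 * is returned to cl_avail_grant */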
+ list_for_each_entry(oap, list, oap_pending_item) {
+ osc_consume_write_grant(cli,
+ &oap->oap_brw_page);
+ atomic_long_inc(&obd_dirty_pages);
+ }
+ osc_unreserve_grant_nolock(cli, grants, 0);
+ ext->oe_grants = grants;
+ }
+ spin_unlock(&cli->cl_loi_list_lock);
+ }
+
+ ext->oe_is_rdma_only = !!(brw_flags & OBD_BRW_RDMA_ONLY);
ext->oe_nr_pages = page_count;
ext->oe_mppr = mppr;
list_splice_init(list, &ext->oe_pages);
+ ext->oe_layout_version = io->ci_layout_version;
osc_object_lock(obj);
/* Reuse the initial refcount for RPC, don't drop it */
osc_extent_state_set(ext, OES_LOCK_DONE);
- if (cmd & OBD_BRW_WRITE) {
- list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
+ if (!ext->oe_rw) { /* write */
+ if (!ext->oe_srvlock && !ext->oe_dio) {
+			/* The most likely case here is lack of grants,
+			 * meaning we are either out of quota or out of
+			 * space. Since this implies we are holding locks
+			 * across potentially multi-striped IO, we must
+			 * send everything out instantly to avoid
+			 * prolonged waits resulting in lock eviction
+			 * (likely, since the extended wait in
+			 * osc_enter_cache() did not yield any additional
+			 * grant due to a timeout). LU-13131 */
+ ext->oe_hp = 1;
+ list_add_tail(&ext->oe_link, &obj->oo_hp_exts);
+ } else {
+ list_add_tail(&ext->oe_link, &obj->oo_urgent_exts);
+ }
osc_update_pending(obj, OBD_BRW_WRITE, page_count);
} else {
list_add_tail(&ext->oe_link, &obj->oo_reading_exts);
/**
* Called by osc_io_setattr_start() to freeze and destroy covering extents.
*/
-int osc_cache_truncate_start(const struct lu_env *env, struct osc_io *oio,
- struct osc_object *obj, __u64 size)
+int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
+ __u64 size, struct osc_extent **extp)
{
struct client_obd *cli = osc_cli(obj);
struct osc_extent *ext;
struct osc_extent *waiting = NULL;
pgoff_t index;
- struct list_head list = LIST_HEAD_INIT(list);
+ LIST_HEAD(list);
int result = 0;
bool partial;
ENTRY;
* a page already having been flushed by write_page().
* We have to wait for this extent because we can't
* truncate that page. */
- LASSERT(!ext->oe_hp);
OSC_EXTENT_DUMP(D_CACHE, ext,
"waiting for busy extent\n");
waiting = osc_extent_get(ext);
break;
}
- OSC_EXTENT_DUMP(D_CACHE, ext, "try to trunc:"LPU64".\n", size);
+ OSC_EXTENT_DUMP(D_CACHE, ext, "try to trunc:%llu.\n", size);
osc_extent_get(ext);
if (ext->oe_state == OES_ACTIVE) {
osc_update_pending(obj, OBD_BRW_WRITE,
-ext->oe_nr_pages);
}
- EASSERT(list_empty(&ext->oe_link), ext);
- list_add_tail(&ext->oe_link, &list);
+		/* This extent could be on the full extents list; that's OK */
+ EASSERT(!ext->oe_hp && !ext->oe_urgent, ext);
+ if (!list_empty(&ext->oe_link))
+ list_move_tail(&ext->oe_link, &list);
+ else
+ list_add_tail(&ext->oe_link, &list);
ext = next_extent(ext);
}
/* we need to hold this extent in OES_TRUNC state so
* that no writeback will happen. This is to avoid
- * BUG 17397. */
- LASSERT(oio->oi_trunc == NULL);
- oio->oi_trunc = osc_extent_get(ext);
+ * BUG 17397.
+	 * Only a partial truncate can reach here; if @size is
+	 * not zero, the caller must provide a valid @extp. */
+ LASSERT(*extp == NULL);
+ *extp = osc_extent_get(ext);
OSC_EXTENT_DUMP(D_CACHE, ext,
- "trunc at "LPU64"\n", size);
+ "trunc at %llu\n", size);
}
osc_extent_put(env, ext);
}
}
RETURN(result);
}
+EXPORT_SYMBOL(osc_cache_truncate_start);
/**
* Called after osc_io_setattr_end to add oio->oi_trunc back to cache.
*/
-void osc_cache_truncate_end(const struct lu_env *env, struct osc_io *oio,
- struct osc_object *obj)
+void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext)
{
- struct osc_extent *ext = oio->oi_trunc;
-
- oio->oi_trunc = NULL;
if (ext != NULL) {
+ struct osc_object *obj = ext->oe_obj;
bool unplug = false;
EASSERT(ext->oe_nr_pages > 0, ext);
OSC_IO_DEBUG(obj, "sync file range.\n");
RETURN(result);
}
+EXPORT_SYMBOL(osc_cache_wait_range);
/**
 * Called to write out a range of an osc object.
pgoff_t start, pgoff_t end, int hp, int discard)
{
struct osc_extent *ext;
- struct list_head discard_list = LIST_HEAD_INIT(discard_list);
+ LIST_HEAD(discard_list);
bool unplug = false;
int result = 0;
ENTRY;
EASSERT(!ext->oe_hp, ext);
ext->oe_hp = 1;
list = &obj->oo_hp_exts;
- } else if (!ext->oe_urgent) {
+ } else if (!ext->oe_urgent && !ext->oe_hp) {
ext->oe_urgent = 1;
list = &obj->oo_urgent_exts;
}
list_move_tail(&ext->oe_link, list);
unplug = true;
} else {
+ struct client_obd *cli = osc_cli(obj);
+ int pcc_bits = cli->cl_chunkbits - PAGE_SHIFT;
+ pgoff_t align_by = (1 << pcc_bits);
+ pgoff_t a_start = round_down(start, align_by);
+ pgoff_t a_end = round_up(end, align_by);
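+		/* e.g. 64 KiB chunks with 4 KiB pages give align_by == 16,
+		 * so start == 18, end == 45 widen to a_start == 16,
+		 * a_end == 48 */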
+
+		/* round_up() can wrap to zero when end is near the top of
+		 * the pgoff_t range; treat that overflow as EOF */
+ if (end && !a_end)
+ a_end = CL_PAGE_EOF;
/* the only discarder is lock cancelling, so
- * [start, end] must contain this extent */
- EASSERT(ext->oe_start >= start &&
- ext->oe_max_end <= end, ext);
+ * [start, end], aligned by chunk size, must
+ * contain this extent */
+ LASSERTF(ext->oe_start >= a_start &&
+ ext->oe_end <= a_end,
+			 "ext [%lu, %lu] reg [%lu, %lu] orig [%lu %lu] align %lu bits %d\n",
+			 ext->oe_start, ext->oe_end, a_start, a_end,
+			 start, end, align_by, pcc_bits);
osc_extent_state_set(ext, OES_LOCKING);
ext->oe_owner = current;
list_move_tail(&ext->oe_link,
}
if (unplug)
- osc_io_unplug(env, osc_cli(obj), obj, PDL_POLICY_ROUND);
+ osc_io_unplug(env, osc_cli(obj), obj);
if (hp || discard) {
int rc;
OSC_IO_DEBUG(obj, "pageout [%lu, %lu], %d.\n", start, end, result);
RETURN(result);
}
+EXPORT_SYMBOL(osc_cache_writeback_range);
/**
 * Invoke @cb on every cached page in the range [start, end] of \a obj.
*
- * \param resched If not NULL, then we give up before hogging CPU for too
- * long and set *resched = 1, in that case caller should implement a retry
- * logic.
- *
* Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
* crucial in the face of [offset, EOF] locks.
*
 * At least one covered page is visited unless the range contains none.
 *
 * \retval true if the scan completed over the whole range
 * \retval false if @cb terminated the scan early
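+ * For example, osc_discard_cb() below always returns true, so the whole
+ * range is traversed; check_and_discard_cb() behaves the same way.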
*/
-int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
- struct osc_object *osc, pgoff_t start, pgoff_t end,
- osc_page_gang_cbt cb, void *cbdata)
+bool osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
+ struct osc_object *osc, pgoff_t start, pgoff_t end,
+ osc_page_gang_cbt cb, void *cbdata)
{
struct osc_page *ops;
+ struct pagevec *pagevec;
void **pvec;
pgoff_t idx;
unsigned int nr;
unsigned int i;
unsigned int j;
- int res = CLP_GANG_OKAY;
+ bool res = true;
bool tree_lock = true;
ENTRY;
idx = start;
pvec = osc_env_info(env)->oti_pvec;
+ pagevec = &osc_env_info(env)->oti_pagevec;
+ ll_pagevec_init(pagevec, 0);
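+	/* cl_pagevec_put() below parks each vmpage on this pagevec so the
+	 * final page references are dropped in batches by
+	 * pagevec_release(), instead of one put_page() at a time */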
spin_lock(&osc->oo_tree_lock);
while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec,
idx, OTI_PVEC_SIZE)) > 0) {
for (i = 0; i < j; ++i) {
ops = pvec[i];
- if (res == CLP_GANG_OKAY)
+ if (res)
res = (*cb)(env, io, ops, cbdata);
page = ops->ops_cl.cpl_page;
lu_ref_del(&page->cp_reference, "gang_lookup", current);
- cl_page_put(env, page);
+ cl_pagevec_put(env, page, pagevec);
}
+ pagevec_release(pagevec);
+
if (nr < OTI_PVEC_SIZE || end_of_region)
break;
- if (res == CLP_GANG_OKAY && need_resched())
- res = CLP_GANG_RESCHED;
- if (res != CLP_GANG_OKAY)
+ if (!res)
break;
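+		/* oo_tree_lock is not held here (it is re-taken just
+		 * below), so it is safe to yield the CPU */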
+ if (need_resched())
+ cond_resched();
spin_lock(&osc->oo_tree_lock);
tree_lock = true;
spin_unlock(&osc->oo_tree_lock);
RETURN(res);
}
+EXPORT_SYMBOL(osc_page_gang_lookup);
/**
 * Check if page @page is covered by an additional lock; discard it if not.
*/
-static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
+static bool check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
struct osc_page *ops, void *cbdata)
{
struct osc_thread_info *info = osc_env_info(env);
struct osc_object *osc = cbdata;
+ struct cl_page *page = ops->ops_cl.cpl_page;
pgoff_t index;
+ bool discard = false;
index = osc_index(ops);
- if (index >= info->oti_fn_index) {
- struct ldlm_lock *tmp;
- struct cl_page *page = ops->ops_cl.cpl_page;
+	/* negative lock caching: pages below oti_ng_index are already
+	 * known to have no lock covering them, so they can be discarded
+	 * right away */
+ if (index < info->oti_ng_index) {
+ discard = true;
+ } else if (index >= info->oti_fn_index) {
+ struct ldlm_lock *tmp;
/* refresh non-overlapped index */
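+		/* with OSC_DAP_FL_RIGHT the lookup may return a lock whose
+		 * extent starts beyond @index; the branches below use that
+		 * either to cache a negative result (oti_ng_index) or the
+		 * first non-overlapped index (oti_fn_index) */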
- tmp = osc_dlmlock_at_pgoff(env, osc, index, 0, 0);
+ tmp = osc_dlmlock_at_pgoff(env, osc, index,
+ OSC_DAP_FL_TEST_LOCK |
+ OSC_DAP_FL_AST | OSC_DAP_FL_RIGHT);
if (tmp != NULL) {
__u64 end = tmp->l_policy_data.l_extent.end;
- /* Cache the first-non-overlapped index so as to skip
- * all pages within [index, oti_fn_index). This is safe
- * because if tmp lock is canceled, it will discard
- * these pages. */
- info->oti_fn_index = cl_index(osc2cl(osc), end + 1);
- if (end == OBD_OBJECT_EOF)
- info->oti_fn_index = CL_PAGE_EOF;
+ __u64 start = tmp->l_policy_data.l_extent.start;
+
+ /* no lock covering this page */
+ if (index < cl_index(osc2cl(osc), start)) {
+ /* no lock at @index, first lock at @start */
+ info->oti_ng_index = cl_index(osc2cl(osc),
+ start);
+ discard = true;
+ } else {
+ /* Cache the first-non-overlapped index so as to
+ * skip all pages within [index, oti_fn_index).
+ * This is safe because if tmp lock is canceled,
+ * it will discard these pages.
+ */
+ info->oti_fn_index = cl_index(osc2cl(osc),
+ end + 1);
+ if (end == OBD_OBJECT_EOF)
+ info->oti_fn_index = CL_PAGE_EOF;
+ }
LDLM_LOCK_PUT(tmp);
- } else if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
+ } else {
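+			/* no lock at or to the right of @index: nothing in
+			 * the rest of the range is protected, so discard
+			 * from here on */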
+ info->oti_ng_index = CL_PAGE_EOF;
+ discard = true;
+ }
+ }
+
+ if (discard) {
+ if (cl_page_own(env, io, page) == 0) {
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
} else {
}
info->oti_next_index = index + 1;
- return CLP_GANG_OKAY;
+ return true;
}
-static int discard_cb(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops, void *cbdata)
+bool osc_discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops, void *cbdata)
{
struct osc_thread_info *info = osc_env_info(env);
struct cl_page *page = ops->ops_cl.cpl_page;
/* page is top page. */
info->oti_next_index = osc_index(ops) + 1;
if (cl_page_own(env, io, page) == 0) {
- KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
- !PageDirty(cl_page_vmpage(page))));
+ if (!ergo(page->cp_type == CPT_CACHEABLE,
+ !PageDirty(cl_page_vmpage(page))))
+ CL_PAGE_DEBUG(D_ERROR, env, page,
+ "discard dirty page?\n");
/* discard the page */
cl_page_discard(env, io, page);
LASSERT(page->cp_state == CPS_FREEING);
}
- return CLP_GANG_OKAY;
+ return true;
}
+EXPORT_SYMBOL(osc_discard_cb);
/**
* Discard pages protected by the given lock. This function traverses radix
 * tree to find all covering pages and discard them. If a page is being
 * covered by other locks, it should remain in the cache.
 *
 * If an error occurs at any step, the process continues anyway (the reasoning
 * behind this being that lock cancellation cannot be delayed indefinitely).
*/
int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
- pgoff_t start, pgoff_t end, enum cl_lock_mode mode)
+ pgoff_t start, pgoff_t end, bool discard)
{
struct osc_thread_info *info = osc_env_info(env);
- struct cl_io *io = &info->oti_io;
+ struct cl_io *io = osc_env_thread_io(env);
osc_page_gang_cbt cb;
- int res;
int result;
ENTRY;
if (result != 0)
GOTO(out, result);
- cb = mode == CLM_READ ? check_and_discard_cb : discard_cb;
+ cb = discard ? osc_discard_cb : check_and_discard_cb;
info->oti_fn_index = info->oti_next_index = start;
- do {
- res = osc_page_gang_lookup(env, io, osc,
- info->oti_next_index, end, cb, osc);
- if (info->oti_next_index > end)
- break;
+ info->oti_ng_index = 0;
- if (res == CLP_GANG_RESCHED)
- cond_resched();
- } while (res != CLP_GANG_OKAY);
+ osc_page_gang_lookup(env, io, osc,
+ info->oti_next_index, end, cb, osc);
out:
cl_io_fini(env, io);
RETURN(result);