* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2016, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
} od_stats;
/* configuration item(s) */
- int od_contention_time;
+ time64_t od_contention_time;
int od_lockless_truncate;
};
/** true if this io is lockless. */
unsigned int oi_lockless:1,
/** true if this io is counted as active IO */
- oi_is_active:1;
+ oi_is_active:1,
+ /** true if this io has CAP_SYS_RESOURCE */
+ oi_cap_sys_resource:1;
/** how many LRU pages are reserved for this IO */
unsigned long oi_lru_reserved;
struct osc_thread_info {
struct ldlm_res_id oti_resname;
union ldlm_policy_data oti_policy;
- struct cl_lock_descr oti_descr;
struct cl_attr oti_attr;
- struct lustre_handle oti_handle;
- struct cl_page_list oti_plist;
struct cl_io oti_io;
+ struct pagevec oti_pagevec;
void *oti_pvec[OTI_PVEC_SIZE];
/**
* Fields used by cl_lock_discard_pages().
* True if locking against this stripe got -EUSERS.
*/
int oo_contended;
- cfs_time_t oo_contention_time;
+ ktime_t oo_contention_time;
#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
/**
* IO context used for invariant checks in osc_lock_has_pages().
static inline void osc_object_set_contended(struct osc_object *obj)
{
- obj->oo_contention_time = cfs_time_current();
+ /* Record contention start time; ktime_get() replaces the removed
+ * cfs_time_t/cfs_time_current() wrappers with a monotonic ktime_t,
+ * matching the oo_contention_time type change above. */
+ obj->oo_contention_time = ktime_get();
/* mb(); */
obj->oo_contended = 1;
}
/**
* Set if the page must be transferred with OBD_BRW_SRVLOCK.
*/
- ops_srvlock:1;
+ ops_srvlock:1,
+ /**
+ * If the page is in osc_object::oo_tree.
+ */
+ ops_intree:1;
/**
* lru page list. See osc_lru_{del|use}() in osc_page.c for usage.
*/
/**
* Submit time - the time when the page is starting RPC. For debugging.
*/
- cfs_time_t ops_submit_time;
+ ktime_t ops_submit_time;
+};
+
+struct osc_brw_async_args {
+ /* Per-request state for an asynchronous bulk read/write (BRW) RPC.
+ * NOTE(review): field roles inferred from names — aa_oa is the request
+ * obdo, aa_requested_nob the byte count requested, aa_page_count the
+ * number of brw pages in aa_ppga, aa_resends a retry counter, and
+ * aa_oaps/aa_exts the pending async-page and extent lists; confirm
+ * against the osc_brw RPC completion path. */
+ struct obdo *aa_oa;
+ int aa_requested_nob;
+ int aa_nio_count;
+ u32 aa_page_count;
+ int aa_resends;
+ struct brw_page **aa_ppga;
+ struct client_obd *aa_cli;
+ struct list_head aa_oaps;
+ struct list_head aa_exts;
};
extern struct kmem_cache *osc_lock_kmem;
extern struct kmem_cache *osc_session_kmem;
extern struct kmem_cache *osc_extent_kmem;
extern struct kmem_cache *osc_quota_kmem;
+extern struct kmem_cache *osc_obdo_kmem;
extern struct lu_context_key osc_key;
extern struct lu_context_key osc_session_key;
int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
struct page *page, loff_t offset);
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
- struct osc_page *ops);
-int osc_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
+ struct osc_page *ops, cl_commit_cbt cb);
+int osc_page_cache_add(const struct lu_env *env, struct osc_page *opg,
+ struct cl_io *io, cl_commit_cbt cb);
int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
struct osc_page *ops);
int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
struct osc_page *ops);
-int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
- struct list_head *list, int cmd, int brw_flags);
+int osc_queue_sync_pages(const struct lu_env *env, const struct cl_io *io,
+ struct osc_object *obj, struct list_head *list,
+ int brw_flags);
int osc_cache_truncate_start(const struct lu_env *env, struct osc_object *obj,
__u64 size, struct osc_extent **extp);
void osc_cache_truncate_end(const struct lu_env *env, struct osc_extent *ext);
int osc_io_iter_init(const struct lu_env *env, const struct cl_io_slice *ios);
void osc_io_iter_fini(const struct lu_env *env,
const struct cl_io_slice *ios);
-int osc_io_write_iter_init(const struct lu_env *env,
- const struct cl_io_slice *ios);
-void osc_io_write_iter_fini(const struct lu_env *env,
+int osc_io_rw_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios);
+void osc_io_rw_iter_fini(const struct lu_env *env,
const struct cl_io_slice *ios);
int osc_io_fault_start(const struct lu_env *env, const struct cl_io_slice *ios);
void osc_io_setattr_end(const struct lu_env *env,
const struct cl_lock_slice *slice);
void osc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data);
+unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
/*****************************************************************************
*
oe_hp:1,
/** this extent should be written back asap. set if one of pages is
* called by page WB daemon, or sync write or reading requests. */
- oe_urgent:1;
+ oe_urgent:1,
+ /** Non-delay RPC should be used for this extent. */
+ oe_ndelay:1;
/** how many grants allocated for this extent.
* Grant allocated for this extent. There is no grant allocated
* for reading extents and sync write extents. */
unsigned int oe_nr_pages;
/** list of pending oap pages. Pages in this list are NOT sorted. */
struct list_head oe_pages;
- /** Since an extent has to be written out in atomic, this is used to
- * remember the next page need to be locked to write this extent out.
- * Not used right now.
- */
- struct osc_page *oe_next_page;
/** start and end index of this extent, include start and end
* themselves. Page offset here is the page index of osc_pages.
* oe_start is used as keyword for red-black tree. */
int oe_rc;
/** max pages per rpc when this extent was created */
unsigned int oe_mppr;
+ /** FLR: layout version when this osc_extent is published */
+ __u32 oe_layout_version;
};
/** @} osc */