* run-time if a larger observed size is advertised by the MDT. */
__u32 cl_max_mds_easize;
+ /* Data-on-MDT specific value to set a larger reply buffer for possible
+ * data read along with open/stat requests. By default it tries to use
+ * unused space in the reply buffer.
+ * This value is used to ensure that the reply buffer has at least as
+ * much free space as this value indicates. That free space is gained
+ * from the LOV EA buffer, which is small for DoM files and on big
+ * systems can provide up to 32KB of extra space in the reply buffer.
+ * The default value is 8KB.
+ */
+ __u32 cl_dom_min_inline_repsize;
+
enum lustre_sec_part cl_sp_me;
enum lustre_sec_part cl_sp_to;
struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */
long cl_reserved_grant;
struct list_head cl_cache_waiters; /* waiting for cache/grant */
time64_t cl_next_shrink_grant; /* seconds */
- struct list_head cl_grant_shrink_list; /* Timeout event list */
+ struct list_head cl_grant_chain;
time64_t cl_grant_shrink_interval; /* seconds */
/* A chunk is an optimal size used by osc_extent to determine
struct proc_dir_entry *obd_proc_exports_entry;
struct dentry *obd_svc_debugfs_entry;
struct lprocfs_stats *obd_svc_stats;
- struct attribute_group obd_attrs_group;
- struct attribute **obd_attrs;
+ const struct attribute **obd_attrs;
struct lprocfs_vars *obd_vars;
atomic_t obd_evict_inprogress;
wait_queue_head_t obd_evict_inprogress_waitq;
return LCK_PR;
else if (it->it_op & IT_GETXATTR)
return LCK_PR;
- else if (it->it_op & IT_SETXATTR)
- return LCK_PW;
LASSERTF(0, "Invalid it_op: %d\n", it->it_op);
return -EINVAL;
*/
static inline bool it_has_reply_body(const struct lookup_intent *it)
{
- return it->it_op & (IT_OPEN | IT_UNLINK | IT_LOOKUP | IT_GETATTR);
+ return it->it_op & (IT_OPEN | IT_LOOKUP | IT_GETATTR);
}
struct md_op_data {
return obd->u.cli.cl_max_pages_per_rpc << PAGE_SHIFT;
}
-/* when RPC size or the max RPCs in flight is increased, the max dirty pages
+/*
+ * When RPC size or the max RPCs in flight is increased, the max dirty pages
* of the client should be increased accordingly to avoid sending fragmented
* RPCs over the network when the client runs out of the maximum dirty space
* when so many RPCs are being generated.
static inline void client_adjust_max_dirty(struct client_obd *cli)
{
/* initializing */
- if (cli->cl_dirty_max_pages <= 0)
- cli->cl_dirty_max_pages = (OSC_MAX_DIRTY_DEFAULT * 1024 * 1024)
- >> PAGE_SHIFT;
- else {
+ if (cli->cl_dirty_max_pages <= 0) {
+ cli->cl_dirty_max_pages =
+ (OSC_MAX_DIRTY_DEFAULT * 1024 * 1024) >> PAGE_SHIFT;
+ } else {
unsigned long dirty_max = cli->cl_max_rpcs_in_flight *
cli->cl_max_pages_per_rpc;
if (cli->cl_dirty_max_pages > totalram_pages / 8)
cli->cl_dirty_max_pages = totalram_pages / 8;
+
+ /* This value is exported to userspace through the max_dirty_mb
+ * parameter, so round the number of pages up to a whole number
+ * of megabytes.
+ */
+ cli->cl_dirty_max_pages = round_up(cli->cl_dirty_max_pages,
+ 1 << (20 - PAGE_SHIFT));
}
#endif /* __OBD_H */