X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Fobd.h;h=d7eb8b0824d36cd0a17c346717c51e3fa0925703;hp=3abcc4d899d90944e48c0984463fbe4d7251f01d;hb=01def2b635ff0b7bacde158d9124334c42cd5d2b;hpb=5303c139e3cef8ed09b85cf828afbbe24ea0d7db diff --git a/lustre/include/obd.h b/lustre/include/obd.h index 3abcc4d..d7eb8b0 100644 --- a/lustre/include/obd.h +++ b/lustre/include/obd.h @@ -42,10 +42,6 @@ #if defined(__linux__) #include -#elif defined(__APPLE__) -#include -#elif defined(__WINNT__) -#include #else #error Unsupported operating system. #endif @@ -163,8 +159,6 @@ struct obd_info { - while setattr, the flags used for distinguish punch operation */ __u64 oi_flags; - /* Lock handle specific for every OSC lock. */ - struct lustre_handle *oi_lockh; /* lsm data specific for every OSC. */ struct lov_stripe_md *oi_md; /* obdo data specific for every OSC, if needed at all. */ @@ -179,21 +173,19 @@ struct obd_info { /* oss capability, its type is obd_capa in client to avoid copy. * in contrary its type is lustre_capa in OSS. */ void *oi_capa; - /* transfer jobid from ost_sync() to filter_sync()... */ - char *oi_jobid; }; struct obd_type { - struct list_head typ_chain; - struct obd_ops *typ_dt_ops; - struct md_ops *typ_md_ops; - struct proc_dir_entry *typ_procroot; - struct proc_dir_entry *typ_procsym; - __u32 typ_sym_filter; - char *typ_name; - int typ_refcnt; - struct lu_device_type *typ_lu; - spinlock_t obd_type_lock; + struct list_head typ_chain; + struct obd_ops *typ_dt_ops; + struct md_ops *typ_md_ops; + struct proc_dir_entry *typ_procroot; + struct proc_dir_entry *typ_procsym; + __u32 typ_sym_filter; + char *typ_name; + int typ_refcnt; + struct lu_device_type *typ_lu; + spinlock_t obd_type_lock; }; struct brw_page { @@ -203,34 +195,13 @@ struct brw_page { obd_flag flag; }; -/* llog contexts */ -enum llog_ctxt_id { - LLOG_CONFIG_ORIG_CTXT = 0, - LLOG_CONFIG_REPL_CTXT, - LLOG_MDS_OST_ORIG_CTXT, - LLOG_MDS_OST_REPL_CTXT, - LLOG_SIZE_ORIG_CTXT, - LLOG_SIZE_REPL_CTXT, - LLOG_RD1_ORIG_CTXT, - LLOG_RD1_REPL_CTXT, - LLOG_TEST_ORIG_CTXT, - LLOG_TEST_REPL_CTXT, - LLOG_LOVEA_ORIG_CTXT, - LLOG_LOVEA_REPL_CTXT, - LLOG_CHANGELOG_ORIG_CTXT, /**< changelog generation on mdd */ - LLOG_CHANGELOG_REPL_CTXT, /**< changelog access on clients */ - LLOG_CHANGELOG_USER_ORIG_CTXT, /**< for multiple changelog consumers */ - LLOG_AGENT_ORIG_CTXT, /**< agent requests generation on cdt */ - LLOG_MAX_CTXTS -}; - struct timeout_item { - enum timeout_event ti_event; - cfs_time_t ti_timeout; - timeout_cb_t ti_cb; - void *ti_cb_data; - cfs_list_t ti_obd_list; - cfs_list_t ti_chain; + enum timeout_event ti_event; + cfs_time_t ti_timeout; + timeout_cb_t ti_cb; + void *ti_cb_data; + struct list_head ti_obd_list; + struct list_head ti_chain; }; #define OBD_MAX_RIF_DEFAULT 8 @@ -251,7 +222,7 @@ enum { struct mdc_rpc_lock; struct obd_import; struct client_obd { - struct rw_semaphore cl_sem; + struct rw_semaphore cl_sem; struct obd_uuid cl_target_uuid; struct obd_import *cl_import; /* ptlrpc connection state */ int cl_conn_count; @@ -266,28 +237,27 @@ struct client_obd { enum lustre_sec_part cl_sp_to; struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */ - /* the grant values are protected by loi_list_lock below */ - long cl_dirty; /* all _dirty_ in bytes */ - long cl_dirty_max; /* allowed w/o rpc */ - long cl_dirty_transit; /* dirty synchronous */ - long cl_avail_grant; /* bytes of credit for ost */ - long cl_lost_grant; /* lost credits (trunc) */ + /* the grant values are 
protected by loi_list_lock below */ + unsigned long cl_dirty_pages; /* all _dirty_ in pages */ + unsigned long cl_dirty_max_pages; /* allowed w/o rpc */ + unsigned long cl_dirty_transit; /* dirty synchronous */ + unsigned long cl_avail_grant; /* bytes of credit for ost */ + unsigned long cl_lost_grant; /* lost credits (trunc) */ /* since we allocate grant by blocks, we don't know how many grant will * be used to add a page into cache. As a solution, we reserve maximum * grant before trying to dirty a page and unreserve the rest. * See osc_{reserve|unreserve}_grant for details. */ - long cl_reserved_grant; - cfs_list_t cl_cache_waiters; /* waiting for cache/grant */ - cfs_time_t cl_next_shrink_grant; /* jiffies */ - cfs_list_t cl_grant_shrink_list; /* Timeout event list */ - int cl_grant_shrink_interval; /* seconds */ + long cl_reserved_grant; + struct list_head cl_cache_waiters; /* waiting for cache/grant */ + cfs_time_t cl_next_shrink_grant; /* jiffies */ + struct list_head cl_grant_shrink_list; /* Timeout event list */ + int cl_grant_shrink_interval; /* seconds */ /* A chunk is an optimal size used by osc_extent to determine * the extent size. A chunk is max(PAGE_CACHE_SIZE, OST block size) */ - int cl_chunkbits; - int cl_chunk; - int cl_extent_tax; /* extent overhead, by bytes */ + int cl_chunkbits; + int cl_extent_tax; /* extent overhead, by bytes */ /* keep track of objects that have lois that contain pages which * have been queued for async brw. this lock also protects the @@ -310,49 +280,49 @@ struct client_obd { * NB by Jinshan: though field names are still _loi_, but actually * osc_object{}s are in the list. */ - client_obd_lock_t cl_loi_list_lock; - cfs_list_t cl_loi_ready_list; - cfs_list_t cl_loi_hp_ready_list; - cfs_list_t cl_loi_write_list; - cfs_list_t cl_loi_read_list; - int cl_r_in_flight; - int cl_w_in_flight; + client_obd_lock_t cl_loi_list_lock; + struct list_head cl_loi_ready_list; + struct list_head cl_loi_hp_ready_list; + struct list_head cl_loi_write_list; + struct list_head cl_loi_read_list; + __u32 cl_r_in_flight; + __u32 cl_w_in_flight; /* just a sum of the loi/lop pending numbers to be exported by /proc */ - atomic_t cl_pending_w_pages; - atomic_t cl_pending_r_pages; - __u32 cl_max_pages_per_rpc; - int cl_max_rpcs_in_flight; - struct obd_histogram cl_read_rpc_hist; - struct obd_histogram cl_write_rpc_hist; - struct obd_histogram cl_read_page_hist; - struct obd_histogram cl_write_page_hist; - struct obd_histogram cl_read_offset_hist; - struct obd_histogram cl_write_offset_hist; + atomic_t cl_pending_w_pages; + atomic_t cl_pending_r_pages; + __u32 cl_max_pages_per_rpc; + __u32 cl_max_rpcs_in_flight; + struct obd_histogram cl_read_rpc_hist; + struct obd_histogram cl_write_rpc_hist; + struct obd_histogram cl_read_page_hist; + struct obd_histogram cl_write_page_hist; + struct obd_histogram cl_read_offset_hist; + struct obd_histogram cl_write_offset_hist; /* lru for osc caching pages */ struct cl_client_cache *cl_cache; - cfs_list_t cl_lru_osc; /* member of cl_cache->ccc_lru */ + struct list_head cl_lru_osc; /* member of cl_cache->ccc_lru */ atomic_t *cl_lru_left; atomic_t cl_lru_busy; atomic_t cl_lru_shrinkers; atomic_t cl_lru_in_list; - cfs_list_t cl_lru_list; /* lru page list */ + struct list_head cl_lru_list; /* lru page list */ client_obd_lock_t cl_lru_list_lock; /* page list protector */ atomic_t cl_unstable_count; /* number of in flight destroy rpcs is limited to max_rpcs_in_flight */ - atomic_t cl_destroy_in_flight; - wait_queue_head_t cl_destroy_waitq; + 
atomic_t cl_destroy_in_flight; + wait_queue_head_t cl_destroy_waitq; struct mdc_rpc_lock *cl_rpc_lock; struct mdc_rpc_lock *cl_close_lock; /* mgc datastruct */ - struct mutex cl_mgc_mutex; + struct mutex cl_mgc_mutex; struct local_oid_storage *cl_mgc_los; - struct dt_object *cl_mgc_configs_dir; - atomic_t cl_mgc_refcount; - struct obd_export *cl_mgc_mgsexp; + struct dt_object *cl_mgc_configs_dir; + atomic_t cl_mgc_refcount; + struct obd_export *cl_mgc_mgsexp; /* checksumming for data sent over the network */ unsigned int cl_checksum:1; /* 0 = disabled, 1 = enabled */ @@ -390,12 +360,12 @@ struct obd_id_info { }; struct echo_client_obd { - struct obd_export *ec_exp; /* the local connection to osc/lov */ + struct obd_export *ec_exp; /* the local connection to osc/lov */ spinlock_t ec_lock; - cfs_list_t ec_objects; - cfs_list_t ec_locks; - int ec_nstripes; - __u64 ec_unique; + struct list_head ec_objects; + struct list_head ec_locks; + int ec_nstripes; + __u64 ec_unique; }; /* Generic subset of OSTs */ @@ -411,7 +381,7 @@ struct ost_pool { #define OBD_STATFS_CACHE_SECONDS 1 struct lov_tgt_desc { - cfs_list_t ltd_kill; + struct list_head ltd_kill; struct obd_uuid ltd_uuid; struct obd_device *ltd_obd; struct obd_export *ltd_exp; @@ -490,12 +460,11 @@ struct lmv_obd { struct niobuf_local { __u64 lnb_file_offset; __u32 lnb_page_offset; - __u32 len; - __u32 flags; - struct page *page; - struct dentry *dentry; - int lnb_grant_used; - int rc; + __u32 lnb_len; + __u32 lnb_flags; + struct page *lnb_page; + void *lnb_data; + int lnb_rc; }; #define LUSTRE_FLD_NAME "fld" @@ -533,119 +502,21 @@ struct niobuf_local { #define LUSTRE_MGS_OBDNAME "MGS" #define LUSTRE_MGC_OBDNAME "MGC" -static inline int is_osp_on_mdt(char *name) -{ - char *ptr; - - ptr = strrchr(name, '-'); - if (ptr == NULL) { - CERROR("%s is not a obdname\n", name); - return 0; - } - - /* 1.8 OSC/OSP name on MDT is fsname-OSTxxxx-osc */ - if (strncmp(ptr + 1, "osc", 3) == 0) - return 1; - - if (strncmp(ptr + 1, "MDT", 3) != 0) - return 0; - - while (*(--ptr) != '-' && ptr != name); - - if (ptr == name) - return 0; - - if (strncmp(ptr + 1, LUSTRE_OSP_NAME, strlen(LUSTRE_OSP_NAME)) != 0 && - strncmp(ptr + 1, LUSTRE_OSC_NAME, strlen(LUSTRE_OSC_NAME)) != 0) - return 0; - - return 1; -} - -/* Don't conflict with on-wire flags OBD_BRW_WRITE, etc */ -#define N_LOCAL_TEMP_PAGE 0x10000000 - struct obd_trans_info { - __u64 oti_transno; - __u64 oti_xid; - /* Only used on the server side for tracking acks. */ - struct oti_req_ack_lock { - struct lustre_handle lock; - __u32 mode; - } oti_ack_locks[4]; - void *oti_handle; - struct llog_cookie oti_onecookie; - struct llog_cookie *oti_logcookies; - int oti_numcookies; - /** synchronous write is needed */ - unsigned long oti_sync_write:1; - - /* initial thread handling transaction */ - struct ptlrpc_thread * oti_thread; - __u32 oti_conn_cnt; - /** VBR: versions */ - __u64 oti_pre_version; - /** JobID */ - char *oti_jobid; - - struct obd_uuid *oti_ost_uuid; + __u64 oti_xid; + /* Only used on the server side for tracking acks. 
*/ + struct oti_req_ack_lock { + struct lustre_handle lock; + __u32 mode; + } oti_ack_locks[4]; + void *oti_handle; + struct llog_cookie oti_onecookie; + struct llog_cookie *oti_logcookies; + + /** VBR: versions */ + __u64 oti_pre_version; }; -static inline void oti_init(struct obd_trans_info *oti, - struct ptlrpc_request *req) -{ - if (oti == NULL) - return; - memset(oti, 0, sizeof(*oti)); - - if (req == NULL) - return; - - oti->oti_xid = req->rq_xid; - /** VBR: take versions from request */ - if (req->rq_reqmsg != NULL && - lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) { - __u64 *pre_version = lustre_msg_get_versions(req->rq_reqmsg); - oti->oti_pre_version = pre_version ? pre_version[0] : 0; - oti->oti_transno = lustre_msg_get_transno(req->rq_reqmsg); - } - - /** called from mds_create_objects */ - if (req->rq_repmsg != NULL) - oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg); - oti->oti_thread = req->rq_svc_thread; - if (req->rq_reqmsg != NULL) - oti->oti_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg); -} - -static inline void oti_alloc_cookies(struct obd_trans_info *oti,int num_cookies) -{ - if (!oti) - return; - - if (num_cookies == 1) - oti->oti_logcookies = &oti->oti_onecookie; - else - OBD_ALLOC_LARGE(oti->oti_logcookies, - num_cookies * sizeof(oti->oti_onecookie)); - - oti->oti_numcookies = num_cookies; -} - -static inline void oti_free_cookies(struct obd_trans_info *oti) -{ - if (!oti || !oti->oti_logcookies) - return; - - if (oti->oti_logcookies == &oti->oti_onecookie) - LASSERT(oti->oti_numcookies == 1); - else - OBD_FREE_LARGE(oti->oti_logcookies, - oti->oti_numcookies*sizeof(oti->oti_onecookie)); - oti->oti_logcookies = NULL; - oti->oti_numcookies = 0; -} - /* * Events signalled through obd_notify() upcall-chain. */ @@ -698,7 +569,6 @@ struct target_recovery_data { }; struct obd_llog_group { - int olg_seq; struct llog_ctxt *olg_ctxts[LLOG_MAX_CTXTS]; wait_queue_head_t olg_waitq; spinlock_t olg_lock; @@ -733,7 +603,6 @@ struct obd_device { obd_starting:1, /* started setup */ obd_force:1, /* cleanup with > 0 obd refcount */ obd_fail:1, /* cleanup with failover */ - obd_async_recov:1, /* allow asynchronous orphan cleanup */ obd_no_conn:1, /* deny new connections */ obd_inactive:1, /* device active/inactive * (for /proc/status only!!) 
*/ @@ -750,11 +619,11 @@ struct obd_device { cfs_hash_t *obd_nid_hash; /* nid stats body */ cfs_hash_t *obd_nid_stats_hash; - cfs_list_t obd_nid_stats; - atomic_t obd_refcount; - cfs_list_t obd_exports; - cfs_list_t obd_unlinked_exports; - cfs_list_t obd_delayed_exports; + struct list_head obd_nid_stats; + atomic_t obd_refcount; + struct list_head obd_exports; + struct list_head obd_unlinked_exports; + struct list_head obd_delayed_exports; struct list_head obd_lwp_list; int obd_num_exports; spinlock_t obd_nid_lock; @@ -774,38 +643,39 @@ struct obd_device { struct obd_notify_upcall obd_upcall; struct obd_export *obd_self_export; struct obd_export *obd_lwp_export; - /* list of exports in LRU order, for ping evictor, with obd_dev_lock */ - cfs_list_t obd_exports_timed; - time_t obd_eviction_timer; /* for ping evictor */ + /* list of exports in LRU order, for ping evictor, with obd_dev_lock */ + struct list_head obd_exports_timed; + time_t obd_eviction_timer; /* for ping evictor */ int obd_max_recoverable_clients; atomic_t obd_connected_clients; int obd_stale_clients; - int obd_delayed_clients; /* this lock protects all recovery list_heads, timer and * obd_next_recovery_transno value */ - spinlock_t obd_recovery_task_lock; - __u64 obd_next_recovery_transno; - int obd_replayed_requests; - int obd_requests_queued_for_recovery; - wait_queue_head_t obd_next_transno_waitq; + spinlock_t obd_recovery_task_lock; + __u64 obd_next_recovery_transno; + int obd_replayed_requests; + int obd_requests_queued_for_recovery; + wait_queue_head_t obd_next_transno_waitq; /* protected by obd_recovery_task_lock */ - struct timer_list obd_recovery_timer; - time_t obd_recovery_start; /* seconds */ - time_t obd_recovery_end; /* seconds, for lprocfs_status */ - int obd_recovery_time_hard; - int obd_recovery_timeout; - int obd_recovery_ir_factor; + struct timer_list obd_recovery_timer; + /* seconds */ + time_t obd_recovery_start; + /* seconds, for lprocfs_status */ + time_t obd_recovery_end; + int obd_recovery_time_hard; + int obd_recovery_timeout; + int obd_recovery_ir_factor; /* new recovery stuff from CMD2 */ - struct target_recovery_data obd_recovery_data; - int obd_replayed_locks; - atomic_t obd_req_replay_clients; - atomic_t obd_lock_replay_clients; + struct target_recovery_data obd_recovery_data; + int obd_replayed_locks; + atomic_t obd_req_replay_clients; + atomic_t obd_lock_replay_clients; /* all lists are protected by obd_recovery_task_lock */ - cfs_list_t obd_req_replay_queue; - cfs_list_t obd_lock_replay_queue; - cfs_list_t obd_final_req_queue; + struct list_head obd_req_replay_queue; + struct list_head obd_lock_replay_queue; + struct list_head obd_final_req_queue; union { #ifdef HAVE_SERVER_SUPPORT @@ -833,14 +703,14 @@ struct obd_device { struct lprocfs_seq_vars *obd_vars; atomic_t obd_evict_inprogress; wait_queue_head_t obd_evict_inprogress_waitq; - cfs_list_t obd_evict_list; /* protected with pet_lock */ + struct list_head obd_evict_list; /* protected with pet_lock */ /** * Ldlm pool part. Save last calculated SLV and Limit. */ rwlock_t obd_pool_lock; - int obd_pool_limit; - __u64 obd_pool_slv; + int obd_pool_limit; + __u64 obd_pool_slv; /** * A list of outstanding class_incref()'s against this obd. 
For @@ -851,9 +721,6 @@ struct obd_device { int obd_conn_inprogress; }; -#define OBD_LLOG_FL_SENDNOW 0x0001 -#define OBD_LLOG_FL_EXIT 0x0002 - enum obd_cleanup_stage { /* Special case hack for MDS LOVs */ OBD_CLEANUP_EARLY, @@ -863,8 +730,6 @@ enum obd_cleanup_stage { /* get/set_info keys */ #define KEY_ASYNC "async" -#define KEY_BLOCKSIZE_BITS "blocksize_bits" -#define KEY_BLOCKSIZE "blocksize" #define KEY_CAPA_KEY "capa_key" #define KEY_CHANGELOG_CLEAR "changelog_clear" #define KEY_FID2PATH "fid2path" @@ -877,13 +742,11 @@ enum obd_cleanup_stage { #define KEY_GRANT_SHRINK "grant_shrink" #define KEY_HSM_COPYTOOL_SEND "hsm_send" #define KEY_INIT_RECOV_BACKUP "init_recov_bk" -#define KEY_INIT_RECOV "initial_recov" #define KEY_INTERMDS "inter_mds" #define KEY_LAST_ID "last_id" #define KEY_LAST_FID "last_fid" #define KEY_LOCK_TO_STRIPE "lock_to_stripe" #define KEY_LOVDESC "lovdesc" -#define KEY_LOV_IDX "lov_idx" #define KEY_MAX_EASIZE "max_easize" #define KEY_DEFAULT_EASIZE "default_easize" #define KEY_MAX_COOKIESIZE "max_cookiesize" @@ -898,11 +761,9 @@ enum obd_cleanup_stage { /* KEY_SET_INFO in lustre_idl.h */ #define KEY_SPTLRPC_CONF "sptlrpc_conf" #define KEY_CONNECT_FLAG "connect_flags" -#define KEY_SYNC_LOCK_CANCEL "sync_lock_cancel" #define KEY_CACHE_SET "cache_set" #define KEY_CACHE_LRU_SHRINK "cache_lru_shrink" -#define KEY_CHANGELOG_INDEX "changelog_index" #define KEY_OSP_CONNECTED "osp_connected" struct lu_context; @@ -964,9 +825,6 @@ struct md_op_data { /* iattr fields and blocks. */ struct iattr op_attr; -#ifdef __KERNEL__ - unsigned int op_attr_flags; -#endif __u64 op_valid; loff_t op_attr_blocks; @@ -982,9 +840,6 @@ struct md_op_data { enum mds_op_bias op_bias; /* Used by readdir */ - __u64 op_hash_offset; - - /* Used by readdir */ __u32 op_npages; /* used to transfer info between the stacks of MD client @@ -1032,8 +887,6 @@ struct obd_ops { __u32 keylen, void *key, __u32 vallen, void *val, struct ptlrpc_request_set *set); - int (*o_attach)(struct obd_device *dev, obd_count len, void *data); - int (*o_detach)(struct obd_device *dev); int (*o_setup) (struct obd_device *dev, struct lustre_cfg *cfg); int (*o_precleanup)(struct obd_device *dev, enum obd_cleanup_stage cleanup_stage); @@ -1083,9 +936,6 @@ struct obd_ops { int (*o_create)(const struct lu_env *env, struct obd_export *exp, struct obdo *oa, struct lov_stripe_md **ea, struct obd_trans_info *oti); - int (*o_create_async)(struct obd_export *exp, struct obd_info *oinfo, - struct lov_stripe_md **ea, - struct obd_trans_info *oti); int (*o_destroy)(const struct lu_env *env, struct obd_export *exp, struct obdo *oa, struct lov_stripe_md *ea, struct obd_trans_info *oti, struct obd_export *md_exp, @@ -1121,12 +971,6 @@ struct obd_ops { int (*o_llog_init)(struct obd_device *obd, struct obd_llog_group *grp, struct obd_device *disk_obd, int *idx); int (*o_llog_finish)(struct obd_device *obd, int count); - int (*o_llog_connect)(struct obd_export *, struct llogd_conn_body *); - - /* metadata-only methods */ - int (*o_pin)(struct obd_export *, const struct lu_fid *fid, - struct obd_capa *, struct obd_client_handle *, int flag); - int (*o_unpin)(struct obd_export *, struct obd_client_handle *, int); int (*o_import_event)(struct obd_device *, struct obd_import *, enum obd_import_event); @@ -1171,7 +1015,6 @@ struct lustre_md { struct mdt_remote_perm *remote_perm; struct obd_capa *mds_capa; struct obd_capa *oss_capa; - __u64 lm_flags; }; struct md_open_data { @@ -1232,12 +1075,9 @@ struct md_ops { int (*m_fsync)(struct obd_export *, const 
struct lu_fid *, struct obd_capa *, struct ptlrpc_request **); - int (*m_readpage)(struct obd_export *, struct md_op_data *, - struct page **, struct ptlrpc_request **); - - int (*m_read_entry)(struct obd_export *, struct md_op_data *, - struct md_callback *cb_op, struct lu_dirent **ld, - struct page **ppage); + int (*m_read_page)(struct obd_export *, struct md_op_data *, + struct md_callback *cb_op, __u64 hash_offset, + struct page **ppage); int (*m_unlink)(struct obd_export *, struct md_op_data *, struct ptlrpc_request **); @@ -1258,6 +1098,7 @@ struct md_ops { int (*m_revalidate_lock)(struct obd_export *, struct lookup_intent *, struct lu_fid *, __u64 *bits); + #define MD_STATS_LAST_OP m_revalidate_lock int (*m_getstatus)(struct obd_export *, struct lu_fid *, @@ -1274,10 +1115,6 @@ struct md_ops { int (*m_getattr_name)(struct obd_export *, struct md_op_data *, struct ptlrpc_request **); - int (*m_is_subdir)(struct obd_export *, const struct lu_fid *, - const struct lu_fid *, - struct ptlrpc_request **); - int (*m_init_ea_size)(struct obd_export *, int, int, int, int); int (*m_get_lustre_md)(struct obd_export *, struct ptlrpc_request *, @@ -1320,6 +1157,11 @@ struct md_ops { int (*m_get_remote_perm)(struct obd_export *, const struct lu_fid *, struct obd_capa *, __u32, struct ptlrpc_request **); + + int (*m_get_fid_from_lsm)(struct obd_export *, + const struct lmv_stripe_md *, + const char *name, int namelen, + struct lu_fid *fid); }; struct lsm_operations { @@ -1351,15 +1193,6 @@ static inline const struct lsm_operations *lsm_op_find(int magic) } } -/* Requests for obd_extent_calc() */ -#define OBD_CALC_STRIPE_START 1 -#define OBD_CALC_STRIPE_END 2 - -static inline struct lustre_capa *oinfo_capa(struct obd_info *oinfo) -{ - return oinfo->oi_capa; -} - static inline struct md_open_data *obd_mod_alloc(void) { struct md_open_data *mod; @@ -1450,16 +1283,27 @@ static inline int cli_brw_size(struct obd_device *obd) return obd->u.cli.cl_max_pages_per_rpc << PAGE_CACHE_SHIFT; } +/* when RPC size or the max RPCs in flight is increased, the max dirty pages + * of the client should be increased accordingly to avoid sending fragmented + * RPCs over the network when the client runs out of the maximum dirty space + * when so many RPCs are being generated. + */ static inline void client_adjust_max_dirty(struct client_obd *cli) { /* initializing */ - if (cli->cl_dirty_max <= 0) - cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024; - else - cli->cl_dirty_max = cli->cl_max_rpcs_in_flight * - (cli->cl_max_pages_per_rpc << PAGE_CACHE_SHIFT); - if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8) - cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3); + if (cli->cl_dirty_max_pages <= 0) + cli->cl_dirty_max_pages = (OSC_MAX_DIRTY_DEFAULT * 1024 * 1024) + >> PAGE_CACHE_SHIFT; + else { + unsigned long dirty_max = cli->cl_max_rpcs_in_flight * + cli->cl_max_pages_per_rpc; + + if (dirty_max > cli->cl_dirty_max_pages) + cli->cl_dirty_max_pages = dirty_max; + } + + if (cli->cl_dirty_max_pages > totalram_pages / 8) + cli->cl_dirty_max_pages = totalram_pages / 8; } #endif /* __OBD_H */
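
The hunk above switches the client dirty-cache accounting from bytes (cl_dirty / cl_dirty_max) to pages (cl_dirty_pages / cl_dirty_max_pages), and client_adjust_max_dirty() now raises the per-client cap as max_rpcs_in_flight or max_pages_per_rpc grows, bounded by one eighth of RAM. As a rough illustration of that arithmetic only (not part of obd.h), the following user-space sketch mirrors the logic, assuming 4 KiB pages and stand-in values for the kernel's OSC_MAX_DIRTY_DEFAULT and totalram_pages:

/*
 * Illustrative sketch of the page-based client_adjust_max_dirty() logic.
 * PAGE_SHIFT, OSC_MAX_DIRTY_DEFAULT and the totalram_pages argument are
 * assumed stand-ins for the corresponding kernel symbols.
 */
#include <stdio.h>

#define PAGE_SHIFT              12      /* 4 KiB pages assumed */
#define OSC_MAX_DIRTY_DEFAULT   32      /* MiB, stand-in default */

static unsigned long adjust_max_dirty(unsigned long dirty_max_pages,
                                      unsigned long max_rpcs_in_flight,
                                      unsigned long max_pages_per_rpc,
                                      unsigned long totalram_pages)
{
        if (dirty_max_pages == 0) {
                /* initializing: fall back to the compile-time default */
                dirty_max_pages = (OSC_MAX_DIRTY_DEFAULT * 1024 * 1024)
                                  >> PAGE_SHIFT;
        } else {
                /* grow the cap to cover a full window of in-flight RPCs */
                unsigned long dirty_max = max_rpcs_in_flight *
                                          max_pages_per_rpc;

                if (dirty_max > dirty_max_pages)
                        dirty_max_pages = dirty_max;
        }

        /* never let one client dirty more than 1/8 of total RAM */
        if (dirty_max_pages > totalram_pages / 8)
                dirty_max_pages = totalram_pages / 8;

        return dirty_max_pages;
}

int main(void)
{
        /* e.g. 8 RPCs in flight x 256 pages/RPC on a 4 GiB (1048576-page) node */
        unsigned long pages = adjust_max_dirty(1, 8, 256, 1048576);

        printf("cl_dirty_max_pages = %lu (%lu MiB)\n",
               pages, pages >> (20 - PAGE_SHIFT));
        return 0;
}

With those example inputs the cap works out to 2048 pages (8 MiB), well below the 1/8-of-RAM ceiling, matching the intent stated in the new comment: enough dirty space for a full set of maximally sized RPCs so the client does not fall back to fragmented writes.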