struct super_block *obt_sb;
atomic_t obt_quotachecking;
struct lustre_quota_ctxt obt_qctxt;
+ lustre_quota_version_t obt_qfmt;
+ struct rw_semaphore obt_rwsem;
};
/* llog contexts */
LLOG_TEST_REPL_CTXT,
LLOG_LOVEA_ORIG_CTXT,
LLOG_LOVEA_REPL_CTXT,
+ LLOG_CHANGELOG_ORIG_CTXT, /**< changelog generation on mdd */
+ LLOG_CHANGELOG_REPL_CTXT, /**< changelog access on clients */
+ LLOG_CHANGELOG_USER_ORIG_CTXT, /**< for multiple changelog consumers */
LLOG_MAX_CTXTS
};
obd_size fo_tot_dirty; /* protected by obd_osfs_lock */
obd_size fo_tot_granted; /* all values in bytes */
obd_size fo_tot_pending;
+ int fo_tot_granted_clients;
obd_size fo_readcache_max_filesize;
int fo_read_cache;
struct list_head fo_capa_keys;
struct hlist_head *fo_capa_hash;
struct llog_commit_master *fo_lcm;
+ int fo_sec_level;
};
+/* One registered timeout event: when ti_timeout expires, ti_cb is invoked
+ * with ti_cb_data.  NOTE(review): the ownership/scope of the two linkages
+ * (per-obd vs. global chain) is not visible in this hunk -- confirm against
+ * the code that walks them. */
+struct timeout_item {
+ enum timeout_event ti_event; /* which timeout class this entry handles */
+ cfs_time_t ti_timeout; /* expiry time, cfs_time_t units */
+ timeout_cb_t ti_cb; /* callback run when the timeout fires */
+ void *ti_cb_data; /* opaque argument handed to ti_cb */
+ struct list_head ti_obd_list; /* linkage -- presumably per-obd list; verify */
+ struct list_head ti_chain; /* linkage -- presumably timeout chain; verify */
+};
#define OSC_MAX_RIF_DEFAULT 8
#define OSC_MAX_RIF_MAX 256
#define OSC_MAX_DIRTY_DEFAULT (OSC_MAX_RIF_DEFAULT * 4)
int cl_max_mds_easize;
int cl_max_mds_cookiesize;
- /* security configuration */
- struct sptlrpc_rule_set cl_sptlrpc_rset;
- enum lustre_sec_part cl_sec_part;
+ enum lustre_sec_part cl_sp_me;
+ enum lustre_sec_part cl_sp_to;
+ struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */
//struct llog_canceld_ctxt *cl_llcd; /* it's included by obd_llog_ctxt */
void *cl_llcd_offset;
long cl_avail_grant; /* bytes of credit for ost */
long cl_lost_grant; /* lost credits (trunc) */
struct list_head cl_cache_waiters; /* waiting for cache/grant */
+ cfs_time_t cl_next_shrink_grant; /* jiffies */
+ struct list_head cl_grant_shrink_list; /* Timeout event list */
+ struct semaphore cl_grant_sem; /*grant shrink list semaphore*/
/* keep track of objects that have lois that contain pages which
* have been queued for async brw. this lock also protects the
__u32 mds_id;
/* mark pages dirty for write. */
- bitmap_t *mds_lov_page_dirty;
+ bitmap_t *mds_lov_page_dirty;
/* array for store pages with obd_id */
- void **mds_lov_page_array;
+ void **mds_lov_page_array;
/* file for store objid */
struct file *mds_lov_objid_filp;
__u32 mds_lov_objid_count;
+ __u32 mds_lov_objid_max_index;
__u32 mds_lov_objid_lastpage;
__u32 mds_lov_objid_lastidx;
mds_fl_acl:1,
mds_evict_ost_nids:1,
mds_fl_cfglog:1,
- mds_fl_synced:1;
+ mds_fl_synced:1,
+ mds_quota:1,
+ mds_fl_target:1; /* mds have one or
+ * more targets */
struct upcall_cache *mds_identity_cache;
/* for capability keys update */
struct lustre_capa_key *mds_capa_keys;
- struct rw_semaphore mds_notify_lock;
+ struct rw_semaphore mds_notify_lock;
};
/* lov objid */
struct lov_qos_oss {
struct obd_uuid lqo_uuid; /* ptlrpc's c_remote_uuid */
struct list_head lqo_oss_list; /* link to lov_qos */
- __u32 lqo_ost_count; /* number of osts on this oss */
__u64 lqo_bavail; /* total bytes avail on OSS */
__u64 lqo_penalty; /* current penalty */
__u64 lqo_penalty_per_obj; /* penalty decrease every obj*/
+ time_t lqo_used; /* last used time, seconds */
+ __u32 lqo_ost_count; /* number of osts on this oss */
};
struct ltd_qos {
__u64 ltq_penalty; /* current penalty */
__u64 ltq_penalty_per_obj; /* penalty decrease every obj*/
__u64 ltq_weight; /* net weighting */
+ time_t ltq_used; /* last used time, seconds */
unsigned int ltq_usable:1; /* usable for striping */
};
lov_obd->lov_tgts */
unsigned int op_count; /* number of OSTs in the array */
unsigned int op_size; /* allocated size of lp_array */
- rwlock_t op_rwlock; /* to protect lov_pool use */
+ struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
};
/* Round-robin allocator data */
struct rw_semaphore lq_rw_sem;
__u32 lq_active_oss_count;
unsigned int lq_prio_free; /* priority for free space */
+ unsigned int lq_threshold_rr;/* priority for rr */
struct lov_qos_rr lq_rr; /* round robin qos data */
unsigned long lq_dirty:1, /* recalc qos data */
lq_same_space:1,/* the ost's all have approx.
};
struct lov_tgt_desc {
+ struct list_head ltd_kill;
struct obd_uuid ltd_uuid;
struct obd_export *ltd_exp;
struct ltd_qos ltd_qos; /* qos info per target */
#define pool_tgt_size(_p) _p->pool_obds.op_size
#define pool_tgt_count(_p) _p->pool_obds.op_count
#define pool_tgt_array(_p) _p->pool_obds.op_array
-#define pool_tgt_rwlock(_p) _p->pool_obds.op_rwlock
+#define pool_tgt_rw_sem(_p) _p->pool_obds.op_rw_sem
#define pool_tgt(_p, _i) _p->pool_lov->lov_tgts[_p->pool_obds.op_array[_i]]
struct pool_desc {
char pool_name[LOV_MAXPOOLNAME + 1]; /* name of pool */
struct ost_pool pool_obds; /* pool members */
+ atomic_t pool_refcount; /* pool ref. counter */
struct lov_qos_rr pool_rr; /* round robin qos */
struct hlist_node pool_hash; /* access by poolname */
struct list_head pool_list; /* serial access */
lustre_hash_t *lov_pools_hash_body; /* used for key access */
struct list_head lov_pool_list; /* used for sequential access */
cfs_proc_dir_entry_t *lov_pool_proc_entry;
+ enum lustre_sec_part lov_sp_me;
};
struct lmv_tgt_desc {
int oti_numcookies;
/* initial thread handling transaction */
- int oti_thread_id;
+ struct ptlrpc_thread * oti_thread;
__u32 oti_conn_cnt;
struct obd_uuid *oti_ost_uuid;
if (req->rq_repmsg != NULL)
oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
- oti->oti_thread_id = req->rq_svc_thread ? req->rq_svc_thread->t_id : -1;
+ oti->oti_thread = req->rq_svc_thread;
if (req->rq_reqmsg != NULL)
oti->oti_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg);
}
* Events signalled through obd_notify() upcall-chain.
*/
enum obd_notify_event {
+ /* Device connect start */
+ OBD_NOTIFY_CONNECT,
/* Device activated */
OBD_NOTIFY_ACTIVE,
/* Device deactivated */
/* bit-mask flags for config events */
enum config_flags {
- CONFIG_LOG = 0x1, /* finished processing config log */
- CONFIG_SYNC = 0x2 /* mdt synced 1 ost */
+ CONFIG_LOG = 0x1, /* finished processing config log */
+ CONFIG_SYNC = 0x2, /* mdt synced 1 ost */
+ CONFIG_TARGET = 0x4 /* one target is added */
};
/*
};
enum filter_groups {
+ FILTER_GROUP_MDS0 = 0, /* MDS0 objects (also the 1.8 interop layout) */
 FILTER_GROUP_LLOG = 1,
- FILTER_GROUP_ECHO,
- FILTER_GROUP_MDS0
+ FILTER_GROUP_ECHO = 2,
+ FILTER_GROUP_MDS1_N_BASE = 3 /* groups for MDS1..N start here */
};
+/* Map an obdo's object group back to an MDS index: group 0 maps to MDS 0
+ * (interop / single-MDS layout), group FILTER_GROUP_MDS1_N_BASE + i maps
+ * to MDS i -- the inverse of mdt_to_obd_objgrp().
+ * NOTE(review): a non-zero o_gr below FILTER_GROUP_MDS1_N_BASE (the LLOG
+ * and ECHO groups) underflows here since the result is unsigned (__u64);
+ * callers appear to be expected to pass MDS groups only (see
+ * LASSERT_MDS_GROUP) -- confirm at the call sites. */
+static inline __u64 obdo_mdsno(struct obdo *oa)
+{
+ if (oa->o_gr)
+ return oa->o_gr - FILTER_GROUP_MDS1_N_BASE;
+ return 0;
+}
+
+/* Map an MDS index to its object group: MDS 0 keeps group 0 (interop /
+ * single-MDS layout), MDS i (i > 0) uses FILTER_GROUP_MDS1_N_BASE + i. */
+static inline int mdt_to_obd_objgrp(int mdtid)
+{
+ return mdtid ? FILTER_GROUP_MDS1_N_BASE + mdtid : 0;
+}
+
+/**
+ * In HEAD for CMD, objects are created in group numbers >= 3 (i.e. group
+ * indexing starts at 3), and assertions disallow group 0 to enforce this.
+ * However, to run a 2.0 MDS server on the 1.8.x disk format (interop mode),
+ * objects in group 0 must be allowed.
+ * So for interop mode the following changes are needed:
+ * 1. Do not assert on group 0; allow group 0.
+ * 2. Group number indexing starts from 0 instead of 3.
+ */
+
+/* True iff 'group' is a valid MDS object group: either MDS0's group 0 or a
+ * group above the MDS1..N base.  'group' is parenthesized so that expression
+ * arguments (e.g. 'a + b') expand safely. */
+#define CHECK_MDS_GROUP(group) ((group) == FILTER_GROUP_MDS0 || \
+ (group) > FILTER_GROUP_MDS1_N_BASE)
+#define LASSERT_MDS_GROUP(group) LASSERT(CHECK_MDS_GROUP(group))
+
struct obd_llog_group {
struct list_head olg_list;
int olg_group;
/* corresponds to one of the obd's */
#define MAX_OBD_NAME 128
#define OBD_DEVICE_MAGIC 0XAB5CD6EF
+#define OBD_DEV_BY_DEVNAME 0xffffd0de
struct obd_device {
struct obd_type *obd_type;
__u32 obd_magic;
struct lu_ref obd_reference;
};
-#define OBD_OPT_FORCE 0x0001
-#define OBD_OPT_FAILOVER 0x0002
-
#define OBD_LLOG_FL_SENDNOW 0x0001
enum obd_cleanup_stage {
};
/* get/set_info keys */
-#define KEY_READ_ONLY "read-only"
-#define KEY_MDS_CONN "mds_conn"
-#define KEY_NEXT_ID "next_id"
-#define KEY_LOVDESC "lovdesc"
-#define KEY_INIT_RECOV "initial_recov"
-#define KEY_INIT_RECOV_BACKUP "init_recov_bk"
-#define KEY_FLUSH_CTX "flush_ctx"
+#define KEY_BLOCKSIZE_BITS "blocksize_bits"
+#define KEY_BLOCKSIZE "blocksize"
#define KEY_CAPA_KEY "capa_key"
+#define KEY_CHANGELOG_CLEAR "changelog_clear"
+#define KEY_CHECKSUM "checksum"
+#define KEY_CLEAR_FS "clear_fs"
#define KEY_CONN_DATA "conn_data"
-#define KEY_MAX_EASIZE "max_easize"
-#define KEY_REVIMP_UPD "revimp_update"
-#define KEY_LOV_IDX "lov_idx"
+#define KEY_EVICT_BY_NID "evict_by_nid"
+#define KEY_FIEMAP "fiemap"
+#define KEY_FLUSH_CTX "flush_ctx"
+#define KEY_INIT_RECOV_BACKUP "init_recov_bk"
+#define KEY_INIT_RECOV "initial_recov"
#define KEY_LAST_ID "last_id"
-#define KEY_READONLY "read-only"
#define KEY_LOCK_TO_STRIPE "lock_to_stripe"
-#define KEY_CHECKSUM "checksum"
-#define KEY_UNLINKED "unlinked"
-#define KEY_EVICT_BY_NID "evict_by_nid"
+#define KEY_LOVDESC "lovdesc"
+#define KEY_LOV_IDX "lov_idx"
+#define KEY_MAX_EASIZE "max_easize"
+#define KEY_MDS_CONN "mds_conn"
+#define KEY_MGSSEC "mgssec"
+#define KEY_NEXT_ID "next_id"
+#define KEY_READ_ONLY "read-only"
#define KEY_REGISTER_TARGET "register_target"
+#define KEY_REVIMP_UPD "revimp_update"
#define KEY_SET_FS "set_fs"
-#define KEY_CLEAR_FS "clear_fs"
-#define KEY_BLOCKSIZE "blocksize"
-#define KEY_BLOCKSIZE_BITS "blocksize_bits"
-#define KEY_FIEMAP "FIEMAP"
+#define KEY_SPTLRPC_CONF "sptlrpc_conf"
+#define KEY_UNLINKED "unlinked"
/* XXX unused ?*/
#define KEY_INTERMDS "inter_mds"
#define KEY_ASYNC "async"
+#define KEY_GRANT_SHRINK "grant_shrink"
struct lu_context;
* granted by the target, which are guaranteed to be a subset of flags
* asked for. If @ocd == NULL, use default parameters. */
int (*o_connect)(const struct lu_env *env,
- struct lustre_handle *conn, struct obd_device *src,
+ struct obd_export **exp, struct obd_device *src,
struct obd_uuid *cluuid, struct obd_connect_data *ocd,
void *localdata);
int (*o_reconnect)(const struct lu_env *env,
struct lov_stripe_md **ea, struct obd_trans_info *oti);
int (*o_destroy)(struct obd_export *exp, struct obdo *oa,
struct lov_stripe_md *ea, struct obd_trans_info *oti,
- struct obd_export *md_exp);
+ struct obd_export *md_exp, void *capa);
int (*o_setattr)(struct obd_export *exp, struct obd_info *oinfo,
struct obd_trans_info *oti);
int (*o_setattr_async)(struct obd_export *exp, struct obd_info *oinfo,
struct obd_uuid *(*o_get_uuid) (struct obd_export *exp);
/* quota methods */
- int (*o_quotacheck)(struct obd_export *, struct obd_quotactl *);
- int (*o_quotactl)(struct obd_export *, struct obd_quotactl *);
+ int (*o_quotacheck)(struct obd_device *, struct obd_export *,
+ struct obd_quotactl *);
+ int (*o_quotactl)(struct obd_device *, struct obd_export *,
+ struct obd_quotactl *);
+ int (*o_quota_adjust_qunit)(struct obd_export *exp,
+ struct quota_adjust_qunit *oqaq,
+ struct lustre_quota_ctxt *qctxt);
+
int (*o_ping)(struct obd_export *exp);
* Also, add a wrapper function in include/linux/obd_class.h. */
};
-/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
-struct lmv_stripe_md {
- __u32 mea_magic;
- __u32 mea_count;
- __u32 mea_master;
- __u32 mea_padding;
- struct lu_fid mea_ids[0];
-};
-
enum {
LUSTRE_OPC_MKDIR = (1 << 0),
LUSTRE_OPC_SYMLINK = (1 << 1),
void *opaque);
int (*m_renew_capa)(struct obd_export *, struct obd_capa *oc,
renew_capa_cb_t cb);
+ int (*m_unpack_capa)(struct obd_export *, struct ptlrpc_request *,
+ const struct req_msg_field *, struct obd_capa **);
int (*m_get_remote_perm)(struct obd_export *, const struct lu_fid *,
struct obd_capa *, __u32,
LASSERT(obd_ops);
obd_ops->o_quotacheck = QUOTA_OP(interface, check);
obd_ops->o_quotactl = QUOTA_OP(interface, ctl);
+ obd_ops->o_quota_adjust_qunit = QUOTA_OP(interface, adjust_qunit);
}
static inline __u64 oinfo_mdsno(struct obd_info *oinfo)
{
- return oinfo->oi_oa->o_gr - FILTER_GROUP_MDS0;
+ return obdo_mdsno(oinfo->oi_oa);
}
static inline struct lustre_capa *oinfo_capa(struct obd_info *oinfo)