#include <lustre/lustre_idl.h>
#include <lu_object.h>
+#include <lu_ref.h>
#include <lustre_lib.h>
#include <lustre_export.h>
#include <lustre_quota.h>
struct obd_info;
-typedef int (*obd_enqueue_update_f)(struct obd_info *oinfo, int rc);
+typedef int (*obd_enqueue_update_f)(void *cookie, int rc);
/* obd info for a particular level (lov, osc). */
struct obd_info {
obd_flag flag;
};
-enum async_flags {
- ASYNC_READY = 0x1, /* ap_make_ready will not be called before this
- page is added to an rpc */
- ASYNC_URGENT = 0x2, /* page must be put into an RPC before return */
- ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called
- to give the caller a chance to update
- or cancel the size of the io */
- ASYNC_GROUP_SYNC = 0x8, /* ap_completion will not be called, instead
- the page is accounted for in the
- obd_io_group given to
- obd_queue_group_io */
-};
-
-struct obd_async_page_ops {
- int (*ap_make_ready)(void *data, int cmd);
- int (*ap_refresh_count)(void *data, int cmd);
- void (*ap_fill_obdo)(void *data, int cmd, struct obdo *oa);
- void (*ap_update_obdo)(void *data, int cmd, struct obdo *oa,
- obd_valid valid);
- int (*ap_completion)(void *data, int cmd, struct obdo *oa, int rc);
- struct obd_capa *(*ap_lookup_capa)(void *data, int cmd);
-};
-
-/* the `oig' is passed down from a caller of obd rw methods. the callee
- * records enough state such that the caller can sleep on the oig and
- * be woken when all the callees have finished their work */
-struct obd_io_group {
- spinlock_t oig_lock;
- atomic_t oig_refcount;
- int oig_pending;
- int oig_rc;
- struct list_head oig_occ_list;
- cfs_waitq_t oig_waitq;
-};
-
-/* the oig callback context lets the callee of obd rw methods register
- * for callbacks from the caller. */
-struct oig_callback_context {
- struct list_head occ_oig_item;
- /* called when the caller has received a signal while sleeping.
- * callees of this method are encouraged to abort their state
- * in the oig. This may be called multiple times. */
- void (*occ_interrupted)(struct oig_callback_context *occ);
- unsigned long interrupted:1;
-};
-
/* Individual type definitions */
struct ost_server_data;
struct super_block *obt_sb;
atomic_t obt_quotachecking;
struct lustre_quota_ctxt obt_qctxt;
+ lustre_quota_version_t obt_qfmt;
+ struct rw_semaphore obt_rwsem;
};
-typedef void (*obd_pin_extent_cb)(void *data);
-typedef int (*obd_page_removal_cb_t)(void *data, int discard);
-typedef int (*obd_lock_cancel_cb)(struct ldlm_lock *,struct ldlm_lock_desc *,
- void *, int);
-
/* llog contexts */
enum llog_ctxt_id {
LLOG_CONFIG_ORIG_CTXT = 0,
LLOG_TEST_REPL_CTXT,
LLOG_LOVEA_ORIG_CTXT,
LLOG_LOVEA_REPL_CTXT,
+ LLOG_CHANGELOG_ORIG_CTXT, /**< changelog generation on mdd */
+ LLOG_CHANGELOG_REPL_CTXT, /**< changelog access on clients */
+ LLOG_CHANGELOG_USER_ORIG_CTXT, /**< for multiple changelog consumers */
LLOG_MAX_CTXTS
};
obd_size fo_tot_dirty; /* protected by obd_osfs_lock */
obd_size fo_tot_granted; /* all values in bytes */
obd_size fo_tot_pending;
+ int fo_tot_granted_clients;
obd_size fo_readcache_max_filesize;
int fo_read_cache;
struct list_head fo_capa_keys;
struct hlist_head *fo_capa_hash;
struct llog_commit_master *fo_lcm;
+ int fo_sec_level;
};
+/* One registered timeout: presumably ti_cb(ti_cb_data) is invoked when
+ * ti_timeout expires.  NOTE(review): the list semantics of ti_obd_list /
+ * ti_chain are not visible in this hunk -- confirm against the code that
+ * manages timeout_item lists before relying on the guesses below. */
+struct timeout_item {
+        enum timeout_event ti_event;    /* event class this timer belongs to */
+        cfs_time_t         ti_timeout;  /* expiry time */
+        timeout_cb_t       ti_cb;       /* callback run on expiry */
+        void              *ti_cb_data;  /* opaque argument passed to ti_cb */
+        struct list_head   ti_obd_list; /* per-obd linkage? -- TODO confirm */
+        struct list_head   ti_chain;    /* global chain linkage? -- TODO confirm */
+};
#define OSC_MAX_RIF_DEFAULT 8
#define OSC_MAX_RIF_MAX 256
#define OSC_MAX_DIRTY_DEFAULT (OSC_MAX_RIF_DEFAULT * 4)
struct mdc_rpc_lock;
struct obd_import;
-struct lustre_cache;
struct client_obd {
struct rw_semaphore cl_sem;
struct obd_uuid cl_target_uuid;
int cl_max_mds_easize;
int cl_max_mds_cookiesize;
- /* security configuration */
- struct sptlrpc_rule_set cl_sptlrpc_rset;
- enum lustre_sec_part cl_sec_part;
+ enum lustre_sec_part cl_sp_me;
+ enum lustre_sec_part cl_sp_to;
+ struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */
//struct llog_canceld_ctxt *cl_llcd; /* it's included by obd_llog_ctxt */
void *cl_llcd_offset;
/* the grant values are protected by loi_list_lock below */
long cl_dirty; /* all _dirty_ in bytes */
long cl_dirty_max; /* allowed w/o rpc */
+ long cl_dirty_transit; /* dirty synchronous */
long cl_avail_grant; /* bytes of credit for ost */
long cl_lost_grant; /* lost credits (trunc) */
struct list_head cl_cache_waiters; /* waiting for cache/grant */
+ cfs_time_t cl_next_shrink_grant; /* jiffies */
+ struct list_head cl_grant_shrink_list; /* Timeout event list */
+ struct semaphore cl_grant_sem; /*grant shrink list semaphore*/
/* keep track of objects that have lois that contain pages which
* have been queued for async brw. this lock also protects the
__u32 cl_supp_cksum_types;
/* checksum algorithm to be used */
cksum_type_t cl_cksum_type;
-
+
/* also protected by the poorly named _loi_list_lock lock above */
struct osc_async_rc cl_ar;
struct lu_client_seq *cl_seq;
atomic_t cl_resends; /* resend count */
-
- /* Cache of triples */
- struct lustre_cache *cl_cache;
- obd_lock_cancel_cb cl_ext_lock_cancel_cb;
};
#define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
__u32 mds_id;
/* mark pages dirty for write. */
- bitmap_t *mds_lov_page_dirty;
+ bitmap_t *mds_lov_page_dirty;
/* array for store pages with obd_id */
- void **mds_lov_page_array;
+ void **mds_lov_page_array;
/* file for store objid */
struct file *mds_lov_objid_filp;
__u32 mds_lov_objid_count;
+ __u32 mds_lov_objid_max_index;
__u32 mds_lov_objid_lastpage;
__u32 mds_lov_objid_lastidx;
mds_fl_acl:1,
mds_evict_ost_nids:1,
mds_fl_cfglog:1,
- mds_fl_synced:1;
+ mds_fl_synced:1,
+ mds_quota:1,
+ mds_fl_target:1; /* mds have one or
+ * more targets */
struct upcall_cache *mds_identity_cache;
/* for capability keys update */
struct lustre_capa_key *mds_capa_keys;
- struct rw_semaphore mds_notify_lock;
+ struct rw_semaphore mds_notify_lock;
};
/* lov objid */
struct obd_export *ec_exp; /* the local connection to osc/lov */
spinlock_t ec_lock;
struct list_head ec_objects;
+ struct list_head ec_locks;
int ec_nstripes;
__u64 ec_unique;
};
struct lov_qos_oss {
struct obd_uuid lqo_uuid; /* ptlrpc's c_remote_uuid */
struct list_head lqo_oss_list; /* link to lov_qos */
- __u32 lqo_ost_count; /* number of osts on this oss */
__u64 lqo_bavail; /* total bytes avail on OSS */
__u64 lqo_penalty; /* current penalty */
__u64 lqo_penalty_per_obj; /* penalty decrease every obj*/
+ time_t lqo_used; /* last used time, seconds */
+ __u32 lqo_ost_count; /* number of osts on this oss */
};
struct ltd_qos {
__u64 ltq_penalty; /* current penalty */
__u64 ltq_penalty_per_obj; /* penalty decrease every obj*/
__u64 ltq_weight; /* net weighting */
+ time_t ltq_used; /* last used time, seconds */
unsigned int ltq_usable:1; /* usable for striping */
};
lov_obd->lov_tgts */
unsigned int op_count; /* number of OSTs in the array */
unsigned int op_size; /* allocated size of lp_array */
- rwlock_t op_rwlock; /* to protect lov_pool use */
+ struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
};
/* Round-robin allocator data */
struct rw_semaphore lq_rw_sem;
__u32 lq_active_oss_count;
unsigned int lq_prio_free; /* priority for free space */
+ unsigned int lq_threshold_rr;/* priority for rr */
struct lov_qos_rr lq_rr; /* round robin qos data */
unsigned long lq_dirty:1, /* recalc qos data */
lq_same_space:1,/* the ost's all have approx.
};
struct lov_tgt_desc {
+ struct list_head ltd_kill;
struct obd_uuid ltd_uuid;
struct obd_export *ltd_exp;
struct ltd_qos ltd_qos; /* qos info per target */
#define pool_tgt_size(_p) _p->pool_obds.op_size
#define pool_tgt_count(_p) _p->pool_obds.op_count
#define pool_tgt_array(_p) _p->pool_obds.op_array
-#define pool_tgt_rwlock(_p) _p->pool_obds.op_rwlock
+#define pool_tgt_rw_sem(_p) _p->pool_obds.op_rw_sem
#define pool_tgt(_p, _i) _p->pool_lov->lov_tgts[_p->pool_obds.op_array[_i]]
struct pool_desc {
char pool_name[LOV_MAXPOOLNAME + 1]; /* name of pool */
struct ost_pool pool_obds; /* pool members */
+ atomic_t pool_refcount; /* pool ref. counter */
struct lov_qos_rr pool_rr; /* round robin qos */
struct hlist_node pool_hash; /* access by poolname */
struct list_head pool_list; /* serial access */
__u32 lov_death_row;/* tgts scheduled to be deleted */
__u32 lov_tgt_size; /* size of tgts array */
int lov_connects;
- obd_page_removal_cb_t lov_page_removal_cb;
- obd_pin_extent_cb lov_page_pin_cb;
- obd_lock_cancel_cb lov_lock_cancel_cb;
int lov_pool_count;
lustre_hash_t *lov_pools_hash_body; /* used for key access */
struct list_head lov_pool_list; /* used for sequential access */
cfs_proc_dir_entry_t *lov_pool_proc_entry;
+ enum lustre_sec_part lov_sp_me;
};
struct lmv_tgt_desc {
#define LUSTRE_CMM_NAME "cmm"
#define LUSTRE_MDD_NAME "mdd"
#define LUSTRE_OSD_NAME "osd"
+#define LUSTRE_VVP_NAME "vvp"
#define LUSTRE_LMV_NAME "lmv"
#define LUSTRE_CMM_MDC_NAME "cmm-mdc"
+#define LUSTRE_SLP_NAME "slp"
/* obd device type names */
/* FIXME all the references to LUSTRE_MDS_NAME should be swapped with LUSTRE_MDT_NAME */
int oti_numcookies;
/* initial thread handling transaction */
- int oti_thread_id;
+ struct ptlrpc_thread * oti_thread;
__u32 oti_conn_cnt;
struct obd_uuid *oti_ost_uuid;
if (req->rq_repmsg != NULL)
oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg);
- oti->oti_thread_id = req->rq_svc_thread ? req->rq_svc_thread->t_id : -1;
+ oti->oti_thread = req->rq_svc_thread;
if (req->rq_reqmsg != NULL)
oti->oti_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg);
}
* Events signalled through obd_notify() upcall-chain.
*/
enum obd_notify_event {
+ /* Device connect start */
+ OBD_NOTIFY_CONNECT,
/* Device activated */
OBD_NOTIFY_ACTIVE,
/* Device deactivated */
/* bit-mask flags for config events */
enum config_flags {
- CONFIG_LOG = 0x1, /* finished processing config log */
- CONFIG_SYNC = 0x2 /* mdt synced 1 ost */
+ CONFIG_LOG = 0x1, /* finished processing config log */
+ CONFIG_SYNC = 0x2, /* mdt synced 1 ost */
+ CONFIG_TARGET = 0x4 /* one target is added */
};
/*
};
enum filter_groups {
+        FILTER_GROUP_MDS0 = 0,        /* objects of MDS0 (also interop mode) */
        FILTER_GROUP_LLOG = 1,
-        FILTER_GROUP_ECHO,
-        FILTER_GROUP_MDS0
+        FILTER_GROUP_ECHO = 2,
+        FILTER_GROUP_MDS1_N_BASE = 3  /* base group for MDS1..N objects,
+                                       * see mdt_to_obd_objgrp() */
};
+/* Return the MDS index encoded in @oa's object group.  Group 0 maps to
+ * MDS 0; groups >= FILTER_GROUP_MDS1_N_BASE map back to their MDT index
+ * (inverse of mdt_to_obd_objgrp()). */
+static inline __u64 obdo_mdsno(struct obdo *oa)
+{
+        if (oa->o_gr)
+                return oa->o_gr - FILTER_GROUP_MDS1_N_BASE;
+        return 0;
+}
+
+/* Map an MDT index to the object group its objects live in: MDT0 uses
+ * group 0, MDT i (i > 0) uses FILTER_GROUP_MDS1_N_BASE + i. */
+static inline int mdt_to_obd_objgrp(int mdtid)
+{
+        if (mdtid)
+                return FILTER_GROUP_MDS1_N_BASE + mdtid;
+        return 0;
+}
+
+/**
+ * In HEAD for CMD, objects are created in group numbers >= 3, i.e. the
+ * group indexing starts from 3.  Assertions were added to disallow group 0
+ * in order to verify this.  But to run a 2.0 MDS server on the 1.8.x disk
+ * format (i.e. interop_mode), objects in group 0 need to be allowed.
+ * So for interop mode the following changes need to be made:
+ * 1. Do not assert on group 0; allow group 0
+ * 2. The group number indexing starts from 0 instead of 3
+ */
+
+/* A valid MDS object group is either group 0 (MDS0 / interop mode) or a
+ * group strictly above FILTER_GROUP_MDS1_N_BASE (MDS1..N, see
+ * mdt_to_obd_objgrp()).  The argument is parenthesized so expressions such
+ * as CHECK_MDS_GROUP(a + b) expand correctly. */
+#define CHECK_MDS_GROUP(group)   ((group) == FILTER_GROUP_MDS0 || \
+                                  (group) > FILTER_GROUP_MDS1_N_BASE)
+#define LASSERT_MDS_GROUP(group) LASSERT(CHECK_MDS_GROUP(group))
+
struct obd_llog_group {
struct list_head olg_list;
int olg_group;
/* corresponds to one of the obd's */
#define MAX_OBD_NAME 128
#define OBD_DEVICE_MAGIC 0XAB5CD6EF
+#define OBD_DEV_BY_DEVNAME 0xffffd0de
struct obd_device {
struct obd_type *obd_type;
__u32 obd_magic;
/* XXX encapsulate all this recovery data into one struct */
svc_handler_t obd_recovery_handler;
pid_t obd_processing_task;
-
+
int obd_max_recoverable_clients;
int obd_connected_clients;
int obd_recoverable_clients;
time_t obd_recovery_end; /* seconds, for lprocfs_status */
time_t obd_recovery_max_time; /* seconds, bz13079 */
int obd_recovery_timeout;
-
+
/* new recovery stuff from CMD2 */
struct target_recovery_data obd_recovery_data;
int obd_replayed_locks;
atomic_t obd_evict_inprogress;
cfs_waitq_t obd_evict_inprogress_waitq;
- /**
- * Ldlm pool part. Save last calculated SLV and Limit.
+ /**
+ * Ldlm pool part. Save last calculated SLV and Limit.
*/
rwlock_t obd_pool_lock;
int obd_pool_limit;
__u64 obd_pool_slv;
-};
-#define OBD_OPT_FORCE 0x0001
-#define OBD_OPT_FAILOVER 0x0002
+ /**
+ * A list of outstanding class_incref()'s against this obd. For
+ * debugging.
+ */
+ struct lu_ref obd_reference;
+};
#define OBD_LLOG_FL_SENDNOW 0x0001
};
/* get/set_info keys */
-#define KEY_READ_ONLY "read-only"
-#define KEY_MDS_CONN "mds_conn"
-#define KEY_NEXT_ID "next_id"
-#define KEY_LOVDESC "lovdesc"
-#define KEY_INIT_RECOV "initial_recov"
-#define KEY_INIT_RECOV_BACKUP "init_recov_bk"
-#define KEY_FLUSH_CTX "flush_ctx"
+#define KEY_BLOCKSIZE_BITS "blocksize_bits"
+#define KEY_BLOCKSIZE "blocksize"
#define KEY_CAPA_KEY "capa_key"
+#define KEY_CHANGELOG_CLEAR "changelog_clear"
+#define KEY_CHECKSUM "checksum"
+#define KEY_CLEAR_FS "clear_fs"
#define KEY_CONN_DATA "conn_data"
-#define KEY_MAX_EASIZE "max_easize"
-#define KEY_REVIMP_UPD "revimp_update"
-#define KEY_LOV_IDX "lov_idx"
+#define KEY_EVICT_BY_NID "evict_by_nid"
+#define KEY_FIEMAP "fiemap"
+#define KEY_FLUSH_CTX "flush_ctx"
+#define KEY_INIT_RECOV_BACKUP "init_recov_bk"
+#define KEY_INIT_RECOV "initial_recov"
#define KEY_LAST_ID "last_id"
-#define KEY_READONLY "read-only"
#define KEY_LOCK_TO_STRIPE "lock_to_stripe"
-#define KEY_CHECKSUM "checksum"
-#define KEY_UNLINKED "unlinked"
-#define KEY_EVICT_BY_NID "evict_by_nid"
+#define KEY_LOVDESC "lovdesc"
+#define KEY_LOV_IDX "lov_idx"
+#define KEY_MAX_EASIZE "max_easize"
+#define KEY_MDS_CONN "mds_conn"
+#define KEY_MGSSEC "mgssec"
+#define KEY_NEXT_ID "next_id"
+#define KEY_READ_ONLY "read-only"
#define KEY_REGISTER_TARGET "register_target"
+#define KEY_REVIMP_UPD "revimp_update"
#define KEY_SET_FS "set_fs"
-#define KEY_CLEAR_FS "clear_fs"
-#define KEY_BLOCKSIZE "blocksize"
-#define KEY_BLOCKSIZE_BITS "blocksize_bits"
-#define KEY_FIEMAP "FIEMAP"
+#define KEY_SPTLRPC_CONF "sptlrpc_conf"
+#define KEY_UNLINKED "unlinked"
/* XXX unused ?*/
#define KEY_INTERMDS "inter_mds"
#define KEY_ASYNC "async"
+#define KEY_GRANT_SHRINK "grant_shrink"
struct lu_context;
return LCK_CW;
else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP))
return LCK_CR;
-
+
LASSERTF(0, "Invalid it_op: %d\n", it->it_op);
return -EINVAL;
}
* granted by the target, which are guaranteed to be a subset of flags
* asked for. If @ocd == NULL, use default parameters. */
int (*o_connect)(const struct lu_env *env,
- struct lustre_handle *conn, struct obd_device *src,
+ struct obd_export **exp, struct obd_device *src,
struct obd_uuid *cluuid, struct obd_connect_data *ocd,
void *localdata);
int (*o_reconnect)(const struct lu_env *env,
struct obd_export *exp, struct obd_device *src,
struct obd_uuid *cluuid,
- struct obd_connect_data *ocd);
+ struct obd_connect_data *ocd,
+ void *localdata);
int (*o_disconnect)(struct obd_export *exp);
/* Initialize/finalize fids infrastructure. */
int (*o_fid_alloc)(struct obd_export *exp, struct lu_fid *fid,
struct md_op_data *op_data);
- /*
+ /*
* Object with @fid is getting deleted, we may want to do something
* about this.
*/
struct lov_stripe_md **ea, struct obd_trans_info *oti);
int (*o_destroy)(struct obd_export *exp, struct obdo *oa,
struct lov_stripe_md *ea, struct obd_trans_info *oti,
- struct obd_export *md_exp);
+ struct obd_export *md_exp, void *capa);
int (*o_setattr)(struct obd_export *exp, struct obd_info *oinfo,
struct obd_trans_info *oti);
int (*o_setattr_async)(struct obd_export *exp, struct obd_info *oinfo,
int (*o_brw)(int rw, struct obd_export *exp, struct obd_info *oinfo,
obd_count oa_bufs, struct brw_page *pgarr,
struct obd_trans_info *oti);
- int (*o_brw_async)(int rw, struct obd_export *exp,
- struct obd_info *oinfo, obd_count oa_bufs,
- struct brw_page *pgarr, struct obd_trans_info *oti,
- struct ptlrpc_request_set *);
- int (*o_prep_async_page)(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- struct lov_oinfo *loi,
- cfs_page_t *page, obd_off offset,
- struct obd_async_page_ops *ops, void *data,
- void **res, int nocache,
- struct lustre_handle *lockh);
- int (*o_reget_short_lock)(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- void **res, int rw,
- obd_off start, obd_off end,
- void **cookie);
- int (*o_release_short_lock)(struct obd_export *exp,
- struct lov_stripe_md *lsm, obd_off end,
- void *cookie, int rw);
- int (*o_queue_async_io)(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- struct lov_oinfo *loi, void *cookie,
- int cmd, obd_off off, int count,
- obd_flag brw_flags, obd_flag async_flags);
- int (*o_queue_group_io)(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- struct lov_oinfo *loi,
- struct obd_io_group *oig,
- void *cookie, int cmd, obd_off off, int count,
- obd_flag brw_flags, obd_flag async_flags);
- int (*o_trigger_group_io)(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- struct lov_oinfo *loi,
- struct obd_io_group *oig);
- int (*o_set_async_flags)(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- struct lov_oinfo *loi, void *cookie,
- obd_flag async_flags);
- int (*o_teardown_async_page)(struct obd_export *exp,
- struct lov_stripe_md *lsm,
- struct lov_oinfo *loi, void *cookie);
int (*o_merge_lvb)(struct obd_export *exp, struct lov_stripe_md *lsm,
struct ost_lvb *lvb, int kms_only);
int (*o_adjust_kms)(struct obd_export *exp, struct lov_stripe_md *lsm,
int (*o_enqueue)(struct obd_export *, struct obd_info *oinfo,
struct ldlm_enqueue_info *einfo,
struct ptlrpc_request_set *rqset);
- int (*o_match)(struct obd_export *, struct lov_stripe_md *, __u32 type,
- ldlm_policy_data_t *, __u32 mode, int *flags, void *data,
- struct lustre_handle *lockh);
int (*o_change_cbdata)(struct obd_export *, struct lov_stripe_md *,
ldlm_iterator_t it, void *data);
int (*o_cancel)(struct obd_export *, struct lov_stripe_md *md,
struct obd_uuid *(*o_get_uuid) (struct obd_export *exp);
/* quota methods */
- int (*o_quotacheck)(struct obd_export *, struct obd_quotactl *);
- int (*o_quotactl)(struct obd_export *, struct obd_quotactl *);
+ int (*o_quotacheck)(struct obd_device *, struct obd_export *,
+ struct obd_quotactl *);
+ int (*o_quotactl)(struct obd_device *, struct obd_export *,
+ struct obd_quotactl *);
+ int (*o_quota_adjust_qunit)(struct obd_export *exp,
+ struct quota_adjust_qunit *oqaq,
+ struct lustre_quota_ctxt *qctxt);
+
int (*o_ping)(struct obd_export *exp);
- int (*o_register_page_removal_cb)(struct obd_export *exp,
- obd_page_removal_cb_t cb,
- obd_pin_extent_cb pin_cb);
- int (*o_unregister_page_removal_cb)(struct obd_export *exp,
- obd_page_removal_cb_t cb);
- int (*o_register_lock_cancel_cb)(struct obd_export *exp,
- obd_lock_cancel_cb cb);
- int (*o_unregister_lock_cancel_cb)(struct obd_export *exp,
- obd_lock_cancel_cb cb);
/* pools methods */
int (*o_pool_new)(struct obd_device *obd, char *poolname);
int (*o_pool_del)(struct obd_device *obd, char *poolname);
* Also, add a wrapper function in include/linux/obd_class.h. */
};
-/* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
-struct lmv_stripe_md {
- __u32 mea_magic;
- __u32 mea_count;
- __u32 mea_master;
- __u32 mea_padding;
- struct lu_fid mea_ids[0];
-};
-
enum {
LUSTRE_OPC_MKDIR = (1 << 0),
LUSTRE_OPC_SYMLINK = (1 << 1),
void *opaque);
int (*m_renew_capa)(struct obd_export *, struct obd_capa *oc,
renew_capa_cb_t cb);
+ int (*m_unpack_capa)(struct obd_export *, struct ptlrpc_request *,
+ const struct req_msg_field *, struct obd_capa **);
int (*m_get_remote_perm)(struct obd_export *, const struct lu_fid *,
struct obd_capa *, __u32,
struct lov_mds_md *lmm);
};
-extern struct lsm_operations lsm_v1_ops;
-extern struct lsm_operations lsm_join_ops;
-extern struct lsm_operations lsm_v3_ops;
-static inline struct lsm_operations *lsm_op_find(int magic)
+extern const struct lsm_operations lsm_v1_ops;
+extern const struct lsm_operations lsm_join_ops;
+extern const struct lsm_operations lsm_v3_ops;
+static inline const struct lsm_operations *lsm_op_find(int magic)
{
switch(magic) {
case LOV_MAGIC_V1:
LASSERT(obd_ops);
obd_ops->o_quotacheck = QUOTA_OP(interface, check);
obd_ops->o_quotactl = QUOTA_OP(interface, ctl);
+ obd_ops->o_quota_adjust_qunit = QUOTA_OP(interface, adjust_qunit);
}
static inline __u64 oinfo_mdsno(struct obd_info *oinfo)
{
-        return oinfo->oi_oa->o_gr - FILTER_GROUP_MDS0;
+        /* Delegate to obdo_mdsno(), which handles both the group 0 (MDS0 /
+         * interop) case and the MDS1..N base-offset groups in one place. */
+        return obdo_mdsno(oinfo->oi_oa);
}
static inline struct lustre_capa *oinfo_capa(struct obd_info *oinfo)