* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#define IOC_MDC_TYPE 'i'
#define IOC_MDC_MIN_NR 20
-/* Moved to lustre_user.h
-#define IOC_MDC_LOOKUP _IOWR(IOC_MDC_TYPE, 20, struct obd_ioctl_data *)
-#define IOC_MDC_GETSTRIPE _IOWR(IOC_MDC_TYPE, 21, struct lov_mds_md *) */
#define IOC_MDC_MAX_NR 50
#include <lustre/lustre_idl.h>
#include <lustre_quota.h>
#include <lustre_fld.h>
#include <lustre_capa.h>
-#include <class_hash.h>
#include <libcfs/bitmap.h>
/* this is really local to the OSC */
struct loi_oap_pages {
- struct list_head lop_pending;
- struct list_head lop_urgent;
- struct list_head lop_pending_group;
+ cfs_list_t lop_pending;
+ cfs_list_t lop_urgent;
+ cfs_list_t lop_pending_group;
int lop_num_pending;
};
};
struct lov_oinfo { /* per-stripe data structure */
- __u64 loi_id; /* object ID on the target OST */
- __u64 loi_gr; /* object group on the target OST */
+ struct ost_id loi_oi; /* object ID/Sequence on the target OST */
int loi_ost_idx; /* OST stripe index in lov_tgt_desc->tgts */
int loi_ost_gen; /* generation of this loi_ost_idx */
/* used by the osc to keep track of what objects to build into rpcs */
struct loi_oap_pages loi_read_lop;
struct loi_oap_pages loi_write_lop;
- struct list_head loi_ready_item;
- struct list_head loi_hp_ready_item;
- struct list_head loi_write_item;
- struct list_head loi_read_item;
+ cfs_list_t loi_ready_item;
+ cfs_list_t loi_hp_ready_item;
+ cfs_list_t loi_write_item;
+ cfs_list_t loi_read_item;
unsigned long loi_kms_valid:1;
__u64 loi_kms; /* known minimum size */
struct ost_lvb loi_lvb;
struct osc_async_rc loi_ar;
};
+#define loi_id loi_oi.oi_id
+#define loi_seq loi_oi.oi_seq
static inline void loi_kms_set(struct lov_oinfo *oinfo, __u64 kms)
{
CFS_INIT_LIST_HEAD(&loi->loi_read_item);
}
-/*extent array item for describing the joined file extent info*/
-struct lov_extent {
- __u64 le_start; /* extent start */
- __u64 le_len; /* extent length */
- int le_loi_idx; /* extent #1 loi's index in lsm loi array */
- int le_stripe_count; /* extent stripe count*/
-};
-
-/*Lov array info for describing joined file array EA info*/
-struct lov_array_info {
- struct llog_logid lai_array_id; /* MDS med llog object id */
- unsigned lai_ext_count; /* number of extent count */
- struct lov_extent *lai_ext_array; /* extent desc array */
-};
-
struct lov_stripe_md {
- spinlock_t lsm_lock;
+ cfs_spinlock_t lsm_lock;
pid_t lsm_lock_owner; /* debugging */
struct {
/* Public members. */
__u64 lw_object_id; /* lov object id */
- __u64 lw_object_gr; /* lov object group */
+ __u64 lw_object_seq; /* lov object seq */
__u64 lw_maxbytes; /* maximum possible file size */
/* LOV-private members start here -- only for use in lov/. */
char lw_pool_name[LOV_MAXPOOLNAME]; /* pool name */
} lsm_wire;
- struct lov_array_info *lsm_array; /*Only for joined file array info*/
struct lov_oinfo *lsm_oinfo[0];
};
#define lsm_object_id lsm_wire.lw_object_id
-#define lsm_object_gr lsm_wire.lw_object_gr
+#define lsm_object_seq lsm_wire.lw_object_seq
#define lsm_maxbytes lsm_wire.lw_maxbytes
#define lsm_magic lsm_wire.lw_magic
#define lsm_stripe_size lsm_wire.lw_stripe_size
void lov_stripe_unlock(struct lov_stripe_md *md);
struct obd_type {
- struct list_head typ_chain;
+ cfs_list_t typ_chain;
struct obd_ops *typ_dt_ops;
struct md_ops *typ_md_ops;
cfs_proc_dir_entry_t *typ_procroot;
char *typ_name;
int typ_refcnt;
struct lu_device_type *typ_lu;
- spinlock_t obd_type_lock;
+ cfs_spinlock_t obd_type_lock;
};
struct brw_page {
struct ost_server_data;
+#define OBT_MAGIC 0xBDDECEAE
/* hold common fields for "target" device */
struct obd_device_target {
+ __u32 obt_magic;
struct super_block *obt_sb;
/** last_rcvd file */
struct file *obt_rcvd_filp;
- /** server data in last_rcvd file */
- struct lr_server_data *obt_lsd;
- /** Lock protecting client bitmap */
- spinlock_t obt_client_bitmap_lock;
- /** Bitmap of known clients */
- unsigned long *obt_client_bitmap;
- /** Server last transaction number */
- __u64 obt_last_transno;
- /** Lock protecting last transaction number */
- spinlock_t obt_translock;
- /** Number of mounts */
+ struct lu_target *obt_lut;
__u64 obt_mount_count;
- struct semaphore obt_quotachecking;
+ cfs_semaphore_t obt_quotachecking;
struct lustre_quota_ctxt obt_qctxt;
lustre_quota_version_t obt_qfmt;
- struct rw_semaphore obt_rwsem;
+ cfs_rw_semaphore_t obt_rwsem;
+ struct vfsmount *obt_vfsmnt;
+ struct file *obt_health_check_filp;
};
/* llog contexts */
struct filter_obd {
/* NB this field MUST be first */
struct obd_device_target fo_obt;
- struct lu_target fo_lut;
const char *fo_fstype;
- struct vfsmount *fo_vfsmnt;
int fo_group_count;
cfs_dentry_t *fo_dentry_O;
cfs_dentry_t **fo_dentry_O_groups;
struct filter_subdirs *fo_dentry_O_sub;
- struct semaphore fo_init_lock; /* group initialization lock */
+ cfs_semaphore_t fo_init_lock; /* group initialization lock */
int fo_committed_group;
-
- spinlock_t fo_objidlock; /* protect fo_lastobjid */
- struct file *fo_health_check_filp;
+ cfs_spinlock_t fo_objidlock; /* protect fo_lastobjid */
unsigned long fo_destroys_in_progress;
- struct semaphore fo_create_locks[FILTER_SUBDIR_COUNT];
+ cfs_semaphore_t fo_create_locks[FILTER_SUBDIR_COUNT];
- struct list_head fo_export_list;
+ cfs_list_t fo_export_list;
int fo_subdir_count;
obd_size fo_tot_dirty; /* protected by obd_osfs_lock */
__u64 *fo_last_objids; /* last created objid for groups,
* protected by fo_objidlock */
- struct semaphore fo_alloc_lock;
+ cfs_semaphore_t fo_alloc_lock;
- atomic_t fo_r_in_flight;
- atomic_t fo_w_in_flight;
+ cfs_atomic_t fo_r_in_flight;
+ cfs_atomic_t fo_w_in_flight;
/*
* per-filter pool of kiobuf's allocated by filter_common_setup() and
struct filter_iobuf **fo_iobuf_pool;
int fo_iobuf_count;
- struct list_head fo_llog_list;
- spinlock_t fo_llog_list_lock;
+ cfs_list_t fo_llog_list;
+ cfs_spinlock_t fo_llog_list_lock;
struct brw_stats fo_filter_stats;
struct lustre_quota_ctxt fo_quota_ctxt;
- spinlock_t fo_quotacheck_lock;
- atomic_t fo_quotachecking;
+ cfs_spinlock_t fo_quotacheck_lock;
+ cfs_atomic_t fo_quotachecking;
int fo_fmd_max_num; /* per exp filter_mod_data */
int fo_fmd_max_age; /* jiffies to fmd expiry */
+ unsigned long fo_syncjournal:1, /* sync journal on writes */
+ fo_sync_lock_cancel:2;/* sync on lock cancel */
+
/* sptlrpc stuff */
- rwlock_t fo_sptlrpc_lock;
+ cfs_rwlock_t fo_sptlrpc_lock;
struct sptlrpc_rule_set fo_sptlrpc_rset;
/* capability related */
unsigned int fo_fl_oss_capa;
- struct list_head fo_capa_keys;
- struct hlist_head *fo_capa_hash;
+ cfs_list_t fo_capa_keys;
+ cfs_hlist_head_t *fo_capa_hash;
struct llog_commit_master *fo_lcm;
int fo_sec_level;
};
-#define fo_translock fo_obt.obt_translock
-#define fo_rcvd_filp fo_obt.obt_rcvd_filp
-#define fo_fsd fo_obt.obt_lsd
-#define fo_last_rcvd_slots fo_obt.obt_client_bitmap
-#define fo_mount_count fo_obt.obt_mount_count
-
struct timeout_item {
enum timeout_event ti_event;
cfs_time_t ti_timeout;
timeout_cb_t ti_cb;
void *ti_cb_data;
- struct list_head ti_obd_list;
- struct list_head ti_chain;
+ cfs_list_t ti_obd_list;
+ cfs_list_t ti_chain;
};
#define OSC_MAX_RIF_DEFAULT 8
#define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */
#define OSC_DEFAULT_RESENDS 10
+/* possible values for fo_sync_lock_cancel */
+enum {
+ NEVER_SYNC_ON_CANCEL = 0,
+ BLOCKING_SYNC_ON_CANCEL = 1,
+ ALWAYS_SYNC_ON_CANCEL = 2,
+ NUM_SYNC_ON_CANCEL_STATES
+};
+
#define MDC_MAX_RIF_DEFAULT 8
#define MDC_MAX_RIF_MAX 512
struct mdc_rpc_lock;
struct obd_import;
struct client_obd {
- struct rw_semaphore cl_sem;
+ cfs_rw_semaphore_t cl_sem;
struct obd_uuid cl_target_uuid;
struct obd_import *cl_import; /* ptlrpc connection state */
int cl_conn_count;
long cl_dirty_transit; /* dirty synchronous */
long cl_avail_grant; /* bytes of credit for ost */
long cl_lost_grant; /* lost credits (trunc) */
- struct list_head cl_cache_waiters; /* waiting for cache/grant */
+ cfs_list_t cl_cache_waiters; /* waiting for cache/grant */
cfs_time_t cl_next_shrink_grant; /* jiffies */
- struct list_head cl_grant_shrink_list; /* Timeout event list */
- struct semaphore cl_grant_sem; /*grant shrink list semaphore*/
+ cfs_list_t cl_grant_shrink_list; /* Timeout event list */
+        cfs_semaphore_t          cl_grant_sem;   /*grant shrink list semaphore*/
int cl_grant_shrink_interval; /* seconds */
/* keep track of objects that have lois that contain pages which
* client_obd_list_lock_{init,done}() functions.
*/
client_obd_lock_t cl_loi_list_lock;
- struct list_head cl_loi_ready_list;
- struct list_head cl_loi_hp_ready_list;
- struct list_head cl_loi_write_list;
- struct list_head cl_loi_read_list;
+ cfs_list_t cl_loi_ready_list;
+ cfs_list_t cl_loi_hp_ready_list;
+ cfs_list_t cl_loi_write_list;
+ cfs_list_t cl_loi_read_list;
int cl_r_in_flight;
int cl_w_in_flight;
/* just a sum of the loi/lop pending numbers to be exported by /proc */
struct obd_histogram cl_write_offset_hist;
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
- atomic_t cl_destroy_in_flight;
+ cfs_atomic_t cl_destroy_in_flight;
cfs_waitq_t cl_destroy_waitq;
struct mdc_rpc_lock *cl_rpc_lock;
struct osc_creator cl_oscc;
/* mgc datastruct */
- struct semaphore cl_mgc_sem;
+ cfs_semaphore_t cl_mgc_sem;
struct vfsmount *cl_mgc_vfsmnt;
struct dentry *cl_mgc_configs_dir;
- atomic_t cl_mgc_refcount;
+ cfs_atomic_t cl_mgc_refcount;
struct obd_export *cl_mgc_mgsexp;
/* checksumming for data sent over the network */
/* sequence manager */
struct lu_client_seq *cl_seq;
- atomic_t cl_resends; /* resend count */
+ cfs_atomic_t cl_resends; /* resend count */
};
#define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
#define CL_NOT_QUOTACHECKED 1 /* client->cl_qchk_stat init value */
struct mgs_obd {
+ struct obd_device_target mgs_obt;
struct ptlrpc_service *mgs_service;
struct vfsmount *mgs_vfsmnt;
struct super_block *mgs_sb;
struct dentry *mgs_configs_dir;
struct dentry *mgs_fid_de;
- struct list_head mgs_fs_db_list;
- struct semaphore mgs_sem;
+ cfs_list_t mgs_fs_db_list;
+ cfs_semaphore_t mgs_sem;
cfs_proc_dir_entry_t *mgs_proc_live;
};
struct ptlrpc_service *mds_service;
struct ptlrpc_service *mds_setattr_service;
struct ptlrpc_service *mds_readpage_service;
- struct vfsmount *mds_vfsmnt;
cfs_dentry_t *mds_fid_de;
int mds_max_mdsize;
int mds_max_cookiesize;
__u64 mds_io_epoch;
unsigned long mds_atime_diff;
- struct semaphore mds_epoch_sem;
+ cfs_semaphore_t mds_epoch_sem;
struct ll_fid mds_rootfid;
cfs_dentry_t *mds_pending_dir;
cfs_dentry_t *mds_logs_dir;
cfs_dentry_t *mds_objects_dir;
struct llog_handle *mds_cfg_llh;
- struct obd_device *mds_osc_obd; /* XXX lov_obd */
+ struct obd_device *mds_lov_obd;
struct obd_uuid mds_lov_uuid;
char *mds_profile;
- struct obd_export *mds_osc_exp; /* XXX lov_exp */
+ struct obd_export *mds_lov_exp;
struct lov_desc mds_lov_desc;
__u32 mds_id;
/* mark pages dirty for write. */
- bitmap_t *mds_lov_page_dirty;
+ cfs_bitmap_t *mds_lov_page_dirty;
/* array for store pages with obd_id */
void **mds_lov_page_array;
/* file for store objid */
__u32 mds_lov_objid_lastpage;
__u32 mds_lov_objid_lastidx;
- struct file *mds_health_check_filp;
struct lustre_quota_info mds_quota_info;
- struct semaphore mds_qonoff_sem;
- struct semaphore mds_health_sem;
+ cfs_rw_semaphore_t mds_qonoff_sem;
+ cfs_semaphore_t mds_health_sem;
unsigned long mds_fl_user_xattr:1,
mds_fl_acl:1,
mds_evict_ost_nids:1,
/* for capability keys update */
struct lustre_capa_key *mds_capa_keys;
- struct rw_semaphore mds_notify_lock;
+ cfs_rw_semaphore_t mds_notify_lock;
};
-#define mds_transno_lock mds_obt.obt_translock
-#define mds_rcvd_filp mds_obt.obt_rcvd_filp
-#define mds_server_data mds_obt.obt_lsd
-#define mds_client_bitmap mds_obt.obt_client_bitmap
-#define mds_mount_count mds_obt.obt_mount_count
-#define mds_last_transno mds_obt.obt_last_transno
-
/* lov objid */
extern __u32 mds_max_ost_index;
struct echo_obd {
struct obdo eo_oa;
- spinlock_t eo_lock;
+ cfs_spinlock_t eo_lock;
__u64 eo_lastino;
struct lustre_handle eo_nl_lock;
- atomic_t eo_prep;
+ cfs_atomic_t eo_prep;
};
struct ost_obd {
struct ptlrpc_service *ost_service;
struct ptlrpc_service *ost_create_service;
struct ptlrpc_service *ost_io_service;
- struct semaphore ost_health_sem;
+ cfs_semaphore_t ost_health_sem;
};
struct echo_client_obd {
struct obd_export *ec_exp; /* the local connection to osc/lov */
- spinlock_t ec_lock;
- struct list_head ec_objects;
- struct list_head ec_locks;
+ cfs_spinlock_t ec_lock;
+ cfs_list_t ec_objects;
+ cfs_list_t ec_locks;
int ec_nstripes;
__u64 ec_unique;
};
struct lov_qos_oss {
struct obd_uuid lqo_uuid; /* ptlrpc's c_remote_uuid */
- struct list_head lqo_oss_list; /* link to lov_qos */
+ cfs_list_t lqo_oss_list; /* link to lov_qos */
__u64 lqo_bavail; /* total bytes avail on OSS */
__u64 lqo_penalty; /* current penalty */
- __u64 lqo_penalty_per_obj; /* penalty decrease every obj*/
+ __u64 lqo_penalty_per_obj;/* penalty decrease every obj*/
time_t lqo_used; /* last used time, seconds */
__u32 lqo_ost_count; /* number of osts on this oss */
};
/* Generic subset of OSTs */
struct ost_pool {
- __u32 *op_array; /* array of index of
- lov_obd->lov_tgts */
- unsigned int op_count; /* number of OSTs in the array */
- unsigned int op_size; /* allocated size of lp_array */
- struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
+ __u32 *op_array; /* array of index of
+ lov_obd->lov_tgts */
+ unsigned int op_count; /* number of OSTs in the array */
+ unsigned int op_size; /* allocated size of lp_array */
+ cfs_rw_semaphore_t op_rw_sem; /* to protect ost_pool use */
};
/* Round-robin allocator data */
unsigned long lqr_dirty:1; /* recalc round-robin list */
};
+/* allow statfs data caching for 1 second */
+#define OBD_STATFS_CACHE_SECONDS 1
+
struct lov_statfs_data {
struct obd_info lsd_oi;
struct obd_statfs lsd_statfs;
};
/* Stripe placement optimization */
struct lov_qos {
- struct list_head lq_oss_list; /* list of OSSs that targets use */
- struct rw_semaphore lq_rw_sem;
+ cfs_list_t lq_oss_list; /* list of OSSs that targets use */
+ cfs_rw_semaphore_t lq_rw_sem;
__u32 lq_active_oss_count;
unsigned int lq_prio_free; /* priority for free space */
unsigned int lq_threshold_rr;/* priority for rr */
lq_same_space:1,/* the ost's all have approx.
the same space avail */
lq_reset:1, /* zero current penalties */
- lq_statfs_in_progress:1; /* statfs op in progress */
+ lq_statfs_in_progress:1; /* statfs op in
+ progress */
/* qos statfs data */
struct lov_statfs_data *lq_statfs_data;
cfs_waitq_t lq_statfs_waitq; /* waitqueue to notify statfs
};
struct lov_tgt_desc {
- struct list_head ltd_kill;
+ cfs_list_t ltd_kill;
struct obd_uuid ltd_uuid;
struct obd_device *ltd_obd;
struct obd_export *ltd_exp;
__u32 ltd_gen;
__u32 ltd_index; /* index in lov_obd->tgts */
unsigned long ltd_active:1,/* is this target up for requests */
- ltd_activate:1,/* should this target be activated */
+ ltd_activate:1,/* should target be activated */
ltd_reap:1; /* should this target be deleted */
};
struct pool_desc {
char pool_name[LOV_MAXPOOLNAME + 1]; /* name of pool */
struct ost_pool pool_obds; /* pool members */
- atomic_t pool_refcount; /* pool ref. counter */
+ cfs_atomic_t pool_refcount; /* pool ref. counter */
struct lov_qos_rr pool_rr; /* round robin qos */
- struct hlist_node pool_hash; /* access by poolname */
- struct list_head pool_list; /* serial access */
+ cfs_hlist_node_t pool_hash; /* access by poolname */
+ cfs_list_t pool_list; /* serial access */
cfs_proc_dir_entry_t *pool_proc_entry; /* file in /proc */
struct lov_obd *pool_lov; /* lov obd to which this
pool belong */
struct lov_tgt_desc **lov_tgts; /* sparse array */
struct ost_pool lov_packed; /* all OSTs in a packed
array */
- struct semaphore lov_lock;
+ cfs_semaphore_t lov_lock;
struct obd_connect_data lov_ocd;
struct lov_qos lov_qos; /* qos info per lov */
- atomic_t lov_refcount;
+ cfs_atomic_t lov_refcount;
__u32 lov_tgt_count; /* how many OBD's */
__u32 lov_active_tgt_count; /* how many active */
__u32 lov_death_row;/* tgts scheduled to be deleted */
__u32 lov_tgt_size; /* size of tgts array */
int lov_connects;
int lov_pool_count;
- lustre_hash_t *lov_pools_hash_body; /* used for key access */
- struct list_head lov_pool_list; /* used for sequential access */
+ cfs_hash_t *lov_pools_hash_body; /* used for key access */
+ cfs_list_t lov_pool_list; /* used for sequential access */
cfs_proc_dir_entry_t *lov_pool_proc_entry;
enum lustre_sec_part lov_sp_me;
};
struct lmv_tgt_desc {
struct obd_uuid ltd_uuid;
struct obd_export *ltd_exp;
- int ltd_active; /* is this target up for requests */
+ int ltd_active; /* is this target up for requests */
int ltd_idx;
- struct semaphore ltd_fid_sem;
+ cfs_semaphore_t ltd_fid_sem;
};
enum placement_policy {
struct lmv_obd {
int refcount;
struct lu_client_fld lmv_fld;
- spinlock_t lmv_lock;
+ cfs_spinlock_t lmv_lock;
placement_policy_t lmv_placement;
struct lmv_desc desc;
struct obd_uuid cluuid;
int max_def_easize;
int max_cookiesize;
int server_timeout;
- struct semaphore init_sem;
+ cfs_semaphore_t init_sem;
struct lmv_tgt_desc *tgts;
int tgts_size;
#define LUSTRE_CMM_NAME "cmm"
#define LUSTRE_MDD_NAME "mdd"
-#define LUSTRE_OSD_NAME "osd"
+#define LUSTRE_OSD_NAME "osd-ldiskfs"
#define LUSTRE_VVP_NAME "vvp"
#define LUSTRE_LMV_NAME "lmv"
#define LUSTRE_CMM_MDC_NAME "cmm-mdc"
struct target_recovery_data {
svc_handler_t trd_recovery_handler;
pid_t trd_processing_task;
- struct completion trd_starting;
- struct completion trd_finishing;
+ cfs_completion_t trd_starting;
+ cfs_completion_t trd_finishing;
};
-enum filter_groups {
- FILTER_GROUP_MDS0 = 0,
- FILTER_GROUP_LLOG = 1,
- FILTER_GROUP_ECHO = 2 ,
- FILTER_GROUP_MDS1_N_BASE = 3
-};
-
-static inline __u64 obdo_mdsno(struct obdo *oa)
-{
- if (oa->o_gr) {
- LASSERT(oa->o_gr >= FILTER_GROUP_MDS1_N_BASE);
- return oa->o_gr - FILTER_GROUP_MDS1_N_BASE + 1;
- }
- return 0;
-}
-
-static inline int mdt_to_obd_objgrp(int mdtid)
-{
- /**
- * MDS0 uses group 0 always, other MDSes will use groups from
- * FILTER_GROUP_MDS1_N_BASE
- */
- if (mdtid)
- return FILTER_GROUP_MDS1_N_BASE + mdtid - 1;
- return 0;
-}
-
/**
* In HEAD for CMD, the object is created in group number which is 3>=
* or indexing starts from 3. To test this assertions are added to disallow
* 2. The group number indexing starts from 0 instead of 3
*/
-#define CHECK_MDS_GROUP(group) (group == FILTER_GROUP_MDS0 || \
- group > FILTER_GROUP_MDS1_N_BASE)
-#define LASSERT_MDS_GROUP(group) LASSERT(CHECK_MDS_GROUP(group))
+#define LASSERT_SEQ_IS_MDT(seq) LASSERT(fid_seq_is_mdt(seq))
+
+static inline __u64 objseq_to_mdsno(obd_seq seq)
+{
+ LASSERT_SEQ_IS_MDT(seq);
+ if (seq == FID_SEQ_OST_MDT0)
+ return 0;
+ return seq - FID_SEQ_OST_MDT1 + 1;
+}
+
+static inline int mdt_to_obd_objseq(int mdtid)
+{
+ /**
+ * MDS0 uses seq 0 pre FID-on-OST, other MDSes will use seq from
+ * FID_SEQ_OST_MDT1
+ */
+ if (mdtid)
+ return FID_SEQ_OST_MDT1 + mdtid - 1;
+ return 0;
+}
struct obd_llog_group {
- struct list_head olg_list;
- int olg_group;
+ cfs_list_t olg_list;
+ int olg_seq;
struct llog_ctxt *olg_ctxts[LLOG_MAX_CTXTS];
cfs_waitq_t olg_waitq;
- spinlock_t olg_lock;
+ cfs_spinlock_t olg_lock;
struct obd_export *olg_exp;
int olg_initializing;
- struct semaphore olg_cat_processing;
+ cfs_semaphore_t olg_cat_processing;
};
/* corresponds to one of the obd's */
obd_starting:1, /* started setup */
obd_force:1, /* cleanup with > 0 obd refcount */
obd_fail:1, /* cleanup with failover */
- obd_async_recov:1, /* allow asyncronous orphan cleanup */
+ obd_async_recov:1, /* allow asynchronous orphan cleanup */
obd_no_conn:1, /* deny new connections */
obd_inactive:1, /* device active/inactive
* (for /proc/status only!!) */
obd_process_conf:1; /* device is processing mgs config */
/* uuid-export hash body */
- struct lustre_hash *obd_uuid_hash;
+ cfs_hash_t *obd_uuid_hash;
/* nid-export hash body */
- struct lustre_hash *obd_nid_hash;
+ cfs_hash_t *obd_nid_hash;
/* nid stats body */
- struct lustre_hash *obd_nid_stats_hash;
- struct list_head obd_nid_stats;
- atomic_t obd_refcount;
+ cfs_hash_t *obd_nid_stats_hash;
+ cfs_list_t obd_nid_stats;
+ cfs_atomic_t obd_refcount;
cfs_waitq_t obd_refcount_waitq;
- struct list_head obd_exports;
- struct list_head obd_unlinked_exports;
- struct list_head obd_delayed_exports;
+ cfs_list_t obd_exports;
+ cfs_list_t obd_unlinked_exports;
+ cfs_list_t obd_delayed_exports;
int obd_num_exports;
- spinlock_t obd_nid_lock;
+ cfs_spinlock_t obd_nid_lock;
struct ldlm_namespace *obd_namespace;
struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
/* a spinlock is OK for what we do now, may need a semaphore later */
- spinlock_t obd_dev_lock;
- struct semaphore obd_dev_sem;
+ cfs_spinlock_t obd_dev_lock;
+ cfs_semaphore_t obd_dev_sem;
__u64 obd_last_committed;
struct fsfilt_operations *obd_fsops;
- spinlock_t obd_osfs_lock;
+ cfs_spinlock_t obd_osfs_lock;
struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */
__u64 obd_osfs_age;
struct lvfs_run_ctxt obd_lvfs_ctxt;
struct obd_llog_group obd_olg; /* default llog group */
- struct obd_device *obd_observer;
+ struct obd_device *obd_observer;
+ cfs_rw_semaphore_t obd_observer_link_sem;
struct obd_notify_upcall obd_upcall;
struct obd_export *obd_self_export;
/* list of exports in LRU order, for ping evictor, with obd_dev_lock */
- struct list_head obd_exports_timed;
+ cfs_list_t obd_exports_timed;
time_t obd_eviction_timer; /* for ping evictor */
- /* XXX encapsulate all this recovery data into one struct */
- svc_handler_t obd_recovery_handler;
- pid_t obd_processing_task;
-
int obd_max_recoverable_clients;
int obd_connected_clients;
int obd_stale_clients;
int obd_delayed_clients;
- spinlock_t obd_processing_task_lock; /* BH lock (timer) */
+ cfs_spinlock_t obd_processing_task_lock; /* BH lock (timer) */
__u64 obd_next_recovery_transno;
int obd_replayed_requests;
int obd_requests_queued_for_recovery;
cfs_timer_t obd_recovery_timer;
time_t obd_recovery_start; /* seconds */
time_t obd_recovery_end; /* seconds, for lprocfs_status */
- time_t obd_recovery_max_time; /* seconds, bz13079 */
+ time_t obd_recovery_time_hard;
int obd_recovery_timeout;
/* new recovery stuff from CMD2 */
struct target_recovery_data obd_recovery_data;
int obd_replayed_locks;
- atomic_t obd_req_replay_clients;
- atomic_t obd_lock_replay_clients;
- struct list_head obd_req_replay_queue;
- struct list_head obd_lock_replay_queue;
- struct list_head obd_final_req_queue;
+ cfs_atomic_t obd_req_replay_clients;
+ cfs_atomic_t obd_lock_replay_clients;
+ cfs_list_t obd_req_replay_queue;
+ cfs_list_t obd_lock_replay_queue;
+ cfs_list_t obd_final_req_queue;
int obd_recovery_stage;
union {
cfs_proc_dir_entry_t *obd_proc_exports_entry;
cfs_proc_dir_entry_t *obd_svc_procroot;
struct lprocfs_stats *obd_svc_stats;
- atomic_t obd_evict_inprogress;
+ cfs_atomic_t obd_evict_inprogress;
cfs_waitq_t obd_evict_inprogress_waitq;
+ cfs_list_t obd_evict_list; /* protected with pet_lock */
/**
* Ldlm pool part. Save last calculated SLV and Limit.
*/
- rwlock_t obd_pool_lock;
+ cfs_rwlock_t obd_pool_lock;
int obd_pool_limit;
__u64 obd_pool_slv;
};
/* get/set_info keys */
+#define KEY_ASYNC "async"
#define KEY_BLOCKSIZE_BITS "blocksize_bits"
#define KEY_BLOCKSIZE "blocksize"
#define KEY_CAPA_KEY "capa_key"
#define KEY_EVICT_BY_NID "evict_by_nid"
#define KEY_FIEMAP "fiemap"
#define KEY_FLUSH_CTX "flush_ctx"
+#define KEY_GRANT_SHRINK "grant_shrink"
+#define KEY_HSM_COPYTOOL_SEND "hsm_send"
#define KEY_INIT_RECOV_BACKUP "init_recov_bk"
#define KEY_INIT_RECOV "initial_recov"
+#define KEY_INTERMDS "inter_mds"
#define KEY_LAST_ID "last_id"
#define KEY_LOCK_TO_STRIPE "lock_to_stripe"
#define KEY_LOVDESC "lovdesc"
#define KEY_REGISTER_TARGET "register_target"
#define KEY_REVIMP_UPD "revimp_update"
#define KEY_SET_FS "set_fs"
+/* KEY_SET_INFO in lustre_idl.h */
#define KEY_SPTLRPC_CONF "sptlrpc_conf"
-#define KEY_UNLINKED "unlinked"
-/* XXX unused ?*/
-#define KEY_INTERMDS "inter_mds"
-#define KEY_ASYNC "async"
-#define KEY_GRANT_SHRINK "grant_shrink"
+#define KEY_CONNECT_FLAG "connect_flags"
+#define KEY_SYNC_LOCK_CANCEL "sync_lock_cancel"
+
struct lu_context;
unsigned int op_attr_flags;
#endif
#endif
+ __u64 op_valid;
loff_t op_attr_blocks;
/* Size-on-MDS epoch and flags. */
struct lookup_intent mi_it;
struct lustre_handle mi_lockh;
struct dentry *mi_dentry;
+ struct inode *mi_dir;
md_enqueue_cb_t mi_cb;
unsigned int mi_generation;
void *mi_cbdata;
};
struct obd_ops {
- struct module *o_owner;
+ cfs_module_t *o_owner;
int (*o_iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void *uarg);
int (*o_get_info)(struct obd_export *, __u32 keylen, void *key,
struct lov_stripe_md *mem_src);
int (*o_unpackmd)(struct obd_export *exp,struct lov_stripe_md **mem_tgt,
struct lov_mds_md *disk_src, int disk_len);
- int (*o_checkmd)(struct obd_export *exp, struct obd_export *md_exp,
- struct lov_stripe_md *mem_tgt);
int (*o_preallocate)(struct lustre_handle *, obd_count *req,
obd_id *ids);
/* FIXME: add fid capability support for create & destroy! */
struct lustre_handle *srconn, struct lov_stripe_md *src,
obd_size start, obd_size end, struct obd_trans_info *);
int (*o_iterate)(struct lustre_handle *conn,
- int (*)(obd_id, obd_gr, void *),
- obd_id *startid, obd_gr group, void *data);
+ int (*)(obd_id, obd_seq, void *),
+ obd_id *startid, obd_seq seq, void *data);
int (*o_preprw)(int cmd, struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
struct niobuf_remote *remote, int *nr_pages,
struct ptlrpc_request_set *rqset);
int (*o_change_cbdata)(struct obd_export *, struct lov_stripe_md *,
ldlm_iterator_t it, void *data);
+ int (*o_find_cbdata)(struct obd_export *, struct lov_stripe_md *,
+ ldlm_iterator_t it, void *data);
int (*o_cancel)(struct obd_export *, struct lov_stripe_md *md,
__u32 mode, struct lustre_handle *);
int (*o_cancel_unused)(struct obd_export *, struct lov_stripe_md *,
- int flags, void *opaque);
+ ldlm_cancel_flags_t flags, void *opaque);
int (*o_init_export)(struct obd_export *exp);
int (*o_destroy_export)(struct obd_export *exp);
int (*o_extent_calc)(struct obd_export *, struct lov_stripe_md *,
struct obd_client_handle *mod_och;
struct ptlrpc_request *mod_open_req;
struct ptlrpc_request *mod_close_req;
+ cfs_atomic_t mod_refcount;
};
struct lookup_intent;
struct obd_capa **);
int (*m_change_cbdata)(struct obd_export *, const struct lu_fid *,
ldlm_iterator_t, void *);
+ int (*m_find_cbdata)(struct obd_export *, const struct lu_fid *,
+ ldlm_iterator_t, void *);
int (*m_close)(struct obd_export *, struct md_op_data *,
struct md_open_data *, struct ptlrpc_request **);
int (*m_create)(struct obd_export *, struct md_op_data *,
struct lookup_intent *, struct md_op_data *,
struct lustre_handle *, void *, int,
struct ptlrpc_request **, int);
- int (*m_getattr)(struct obd_export *, const struct lu_fid *,
- struct obd_capa *, obd_valid, int,
+ int (*m_getattr)(struct obd_export *, struct md_op_data *,
struct ptlrpc_request **);
- int (*m_getattr_name)(struct obd_export *, const struct lu_fid *,
- struct obd_capa *, const char *, int, obd_valid,
- int, __u32, struct ptlrpc_request **);
+ int (*m_getattr_name)(struct obd_export *, struct md_op_data *,
+ struct ptlrpc_request **);
int (*m_intent_lock)(struct obd_export *, struct md_op_data *,
void *, int, struct lookup_intent *, int,
struct ptlrpc_request **,
struct lustre_handle *);
int (*m_cancel_unused)(struct obd_export *, const struct lu_fid *,
- ldlm_policy_data_t *, ldlm_mode_t, int flags,
- void *opaque);
+ ldlm_policy_data_t *, ldlm_mode_t,
+ ldlm_cancel_flags_t flags, void *opaque);
int (*m_renew_capa)(struct obd_export *, struct obd_capa *oc,
renew_capa_cb_t cb);
int (*m_unpack_capa)(struct obd_export *, struct ptlrpc_request *,
struct md_enqueue_info *,
struct ldlm_enqueue_info *);
- int (*m_revalidate_lock)(struct obd_export *,
- struct lookup_intent *,
- struct lu_fid *);
+ int (*m_revalidate_lock)(struct obd_export *, struct lookup_intent *,
+ struct lu_fid *, __u32 *);
/*
* NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to
obd_off *);
void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, obd_off *,
obd_off *);
- obd_off (*lsm_stripe_offset_by_index)(struct lov_stripe_md *, int);
- obd_off (*lsm_stripe_offset_by_offset)(struct lov_stripe_md *, obd_off);
- int (*lsm_stripe_index_by_offset)(struct lov_stripe_md *, obd_off);
- int (*lsm_revalidate) (struct lov_stripe_md *, struct obd_device *obd);
int (*lsm_lmm_verify) (struct lov_mds_md *lmm, int lmm_bytes,
int *stripe_count);
int (*lsm_unpackmd) (struct lov_obd *lov, struct lov_stripe_md *lsm,
};
extern const struct lsm_operations lsm_v1_ops;
-extern const struct lsm_operations lsm_join_ops;
extern const struct lsm_operations lsm_v3_ops;
static inline const struct lsm_operations *lsm_op_find(int magic)
{
switch(magic) {
case LOV_MAGIC_V1:
return &lsm_v1_ops;
- case LOV_MAGIC_JOIN:
- return &lsm_join_ops;
case LOV_MAGIC_V3:
return &lsm_v3_ops;
default:
- CERROR("Cannot recognize lsm_magic %d\n", magic);
+ CERROR("Cannot recognize lsm_magic %08x\n", magic);
return NULL;
}
}
obd_ops->o_quota_adjust_qunit = QUOTA_OP(interface, adjust_qunit);
}
-static inline __u64 oinfo_mdsno(struct obd_info *oinfo)
+static inline struct lustre_capa *oinfo_capa(struct obd_info *oinfo)
{
- return obdo_mdsno(oinfo->oi_oa);
+ return oinfo->oi_capa;
}
-static inline struct lustre_capa *oinfo_capa(struct obd_info *oinfo)
+static inline struct md_open_data *obd_mod_alloc(void)
{
- return oinfo->oi_capa;
+ struct md_open_data *mod;
+ OBD_ALLOC_PTR(mod);
+ if (mod == NULL)
+ return NULL;
+ cfs_atomic_set(&mod->mod_refcount, 1);
+ return mod;
}
+#define obd_mod_get(mod) cfs_atomic_inc(&(mod)->mod_refcount)
+#define obd_mod_put(mod) \
+({ \
+ if (cfs_atomic_dec_and_test(&(mod)->mod_refcount)) { \
+ if ((mod)->mod_open_req) \
+ ptlrpc_req_finished((mod)->mod_open_req); \
+ OBD_FREE_PTR(mod); \
+ } \
+})
+
+extern void obdo_from_inode(struct obdo *dst, struct inode *src,
+ struct lu_fid *parent, obd_flag valid);
+
#endif /* __OBD_H */