-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define IOC_MDC_TYPE 'i'
#define IOC_MDC_MIN_NR 20
-/* Moved to lustre_user.h
-#define IOC_MDC_LOOKUP _IOWR(IOC_MDC_TYPE, 20, struct obd_ioctl_data *)
-#define IOC_MDC_GETSTRIPE _IOWR(IOC_MDC_TYPE, 21, struct lov_mds_md *) */
#define IOC_MDC_MAX_NR 50
#include <lustre/lustre_idl.h>
-#include <lu_target.h>
+#ifdef HAVE_SERVER_SUPPORT
+# include <lu_target.h>
+#endif
#include <lu_ref.h>
#include <lustre_lib.h>
#include <lustre_export.h>
-#include <lustre_quota.h>
#include <lustre_fld.h>
#include <lustre_capa.h>
#define MAX_OBD_DEVICES 8192
-/* this is really local to the OSC */
-struct loi_oap_pages {
- cfs_list_t lop_pending;
- cfs_list_t lop_urgent;
- cfs_list_t lop_pending_group;
- int lop_num_pending;
-};
-
struct osc_async_rc {
int ar_rc;
int ar_force_sync;
int loi_ost_idx; /* OST stripe index in lov_tgt_desc->tgts */
int loi_ost_gen; /* generation of this loi_ost_idx */
- /* used by the osc to keep track of what objects to build into rpcs */
- struct loi_oap_pages loi_read_lop;
- struct loi_oap_pages loi_write_lop;
- cfs_list_t loi_ready_item;
- cfs_list_t loi_hp_ready_item;
- cfs_list_t loi_write_item;
- cfs_list_t loi_read_item;
-
unsigned long loi_kms_valid:1;
__u64 loi_kms; /* known minimum size */
struct ost_lvb loi_lvb;
static inline void loi_init(struct lov_oinfo *loi)
{
-        CFS_INIT_LIST_HEAD(&loi->loi_read_lop.lop_pending);
-        CFS_INIT_LIST_HEAD(&loi->loi_read_lop.lop_urgent);
-        CFS_INIT_LIST_HEAD(&loi->loi_read_lop.lop_pending_group);
-        CFS_INIT_LIST_HEAD(&loi->loi_write_lop.lop_pending);
-        CFS_INIT_LIST_HEAD(&loi->loi_write_lop.lop_urgent);
-        CFS_INIT_LIST_HEAD(&loi->loi_write_lop.lop_pending_group);
-        CFS_INIT_LIST_HEAD(&loi->loi_ready_item);
-        CFS_INIT_LIST_HEAD(&loi->loi_hp_ready_item);
-        CFS_INIT_LIST_HEAD(&loi->loi_write_item);
-        CFS_INIT_LIST_HEAD(&loi->loi_read_item);
+	/* NOTE(review): all per-object page-list initialization was removed
+	 * together with struct loi_oap_pages above, so this is now an empty
+	 * stub kept for existing callers; presumably the page tracking moved
+	 * into the OSC layer — confirm before removing the stub entirely. */
}
struct lov_stripe_md {
- cfs_spinlock_t lsm_lock;
+ cfs_atomic_t lsm_refc;
+ spinlock_t lsm_lock;
pid_t lsm_lock_owner; /* debugging */
+ /* maximum possible file size, might change as OSTs status changes,
+ * e.g. disconnected, deactivated */
+ __u64 lsm_maxbytes;
struct {
/* Public members. */
__u64 lw_object_id; /* lov object id */
__u64 lw_object_seq; /* lov object seq */
- __u64 lw_maxbytes; /* maximum possible file size */
/* LOV-private members start here -- only for use in lov/. */
__u32 lw_magic;
__u32 lw_stripe_size; /* size of the stripe */
__u32 lw_pattern; /* striping pattern (RAID0, RAID1) */
- unsigned lw_stripe_count; /* number of objects being striped over */
+ __u16 lw_stripe_count; /* number of objects being striped over */
+ __u16 lw_layout_gen; /* generation of the layout */
char lw_pool_name[LOV_MAXPOOLNAME]; /* pool name */
} lsm_wire;
#define lsm_object_id lsm_wire.lw_object_id
#define lsm_object_seq lsm_wire.lw_object_seq
-#define lsm_maxbytes lsm_wire.lw_maxbytes
#define lsm_magic lsm_wire.lw_magic
+#define lsm_layout_gen lsm_wire.lw_layout_gen
#define lsm_stripe_size lsm_wire.lw_stripe_size
#define lsm_pattern lsm_wire.lw_pattern
#define lsm_stripe_count lsm_wire.lw_stripe_count
- while lock handling, the flags obtained on the enqueue
request are set here.
- while stats, the flags used for control delay/resend.
+ - while setattr, the flags used to distinguish the punch operation
*/
- int oi_flags;
+ __u64 oi_flags;
/* Lock handle specific for every OSC lock. */
struct lustre_handle *oi_lockh;
/* lsm data specific for every OSC. */
/* oss capability, its type is obd_capa in client to avoid copy.
* in contrary its type is lustre_capa in OSS. */
void *oi_capa;
+ /* transfer jobid from ost_sync() to filter_sync()... */
+ char *oi_jobid;
};
/* compare all relevant fields. */
return memcmp(&m1->lsm_wire, &m2->lsm_wire, sizeof m1->lsm_wire);
}
+/* Compare a userspace layout request (lov_user_md) against an existing
+ * in-memory stripe layout (lov_stripe_md).
+ *
+ * A field is only compared when both sides carry a non-zero value, so a
+ * zero ("use default") value in either descriptor matches anything.
+ *
+ * Returns 0 when the layouts are compatible, otherwise a small positive
+ * code identifying the first mismatching field:
+ *   1 = magic, 2 = stripe count, 3 = stripe size, 4 = pattern,
+ *   5 = pool name (V3 layouts only).
+ */
+static inline int lov_lum_lsm_cmp(struct lov_user_md *lum,
+                                  struct lov_stripe_md *lsm)
+{
+        if (lsm->lsm_magic != lum->lmm_magic)
+                return 1;
+        if ((lsm->lsm_stripe_count != 0) && (lum->lmm_stripe_count != 0) &&
+            (lsm->lsm_stripe_count != lum->lmm_stripe_count))
+                return 2;
+        if ((lsm->lsm_stripe_size != 0) && (lum->lmm_stripe_size != 0) &&
+            (lsm->lsm_stripe_size != lum->lmm_stripe_size))
+                return 3;
+        if ((lsm->lsm_pattern != 0) && (lum->lmm_pattern != 0) &&
+            (lsm->lsm_pattern != lum->lmm_pattern))
+                return 4;
+        /* only V3 layouts carry a pool name to compare */
+        if ((lsm->lsm_magic == LOV_MAGIC_V3) &&
+            (strncmp(lsm->lsm_pool_name,
+                     ((struct lov_user_md_v3 *)lum)->lmm_pool_name,
+                     LOV_MAXPOOLNAME) != 0))
+                return 5;
+        return 0;
+}
+
+/* Copy a lov_user_md descriptor from userspace into *lumv3 and byte-swap
+ * it to host endianness when the magic indicates the opposite byte order.
+ *
+ * First the V1-sized prefix is copied to read the magic; for (possibly
+ * swabbed) V3 magics the full V3 structure is re-copied before swabbing.
+ * If @lum is NULL no copy is performed — the caller is assumed to have
+ * already filled *lumv3 (NOTE(review): confirm against callers).
+ *
+ * On success *lmm_magic holds the host-endian magic (LOV_USER_MAGIC_V1 or
+ * LOV_USER_MAGIC_V3).  Returns 0 on success, -EFAULT if a userspace copy
+ * fails, or -EINVAL for an unrecognized magic.
+ */
+static inline int lov_lum_swab_if_needed(struct lov_user_md_v3 *lumv3,
+                                         int *lmm_magic,
+                                         struct lov_user_md *lum)
+{
+        if (lum && cfs_copy_from_user(lumv3, lum,sizeof(struct lov_user_md_v1)))
+                return -EFAULT;
+
+        *lmm_magic = lumv3->lmm_magic;
+
+        if (*lmm_magic == __swab32(LOV_USER_MAGIC_V1)) {
+                lustre_swab_lov_user_md_v1((struct lov_user_md_v1 *)lumv3);
+                *lmm_magic = LOV_USER_MAGIC_V1;
+        } else if (*lmm_magic == LOV_USER_MAGIC_V3) {
+                if (lum && cfs_copy_from_user(lumv3, lum, sizeof(*lumv3)))
+                        return -EFAULT;
+        } else if (*lmm_magic == __swab32(LOV_USER_MAGIC_V3)) {
+                if (lum && cfs_copy_from_user(lumv3, lum, sizeof(*lumv3)))
+                        return -EFAULT;
+                lustre_swab_lov_user_md_v3(lumv3);
+                *lmm_magic = LOV_USER_MAGIC_V3;
+        } else if (*lmm_magic != LOV_USER_MAGIC_V1) {
+                CDEBUG(D_IOCTL,
+                       "bad userland LOV MAGIC: %#08x != %#08x nor %#08x\n",
+                       *lmm_magic, LOV_USER_MAGIC_V1, LOV_USER_MAGIC_V3);
+                return -EINVAL;
+        }
+        return 0;
+}
+
void lov_stripe_lock(struct lov_stripe_md *md);
void lov_stripe_unlock(struct lov_stripe_md *md);
char *typ_name;
int typ_refcnt;
struct lu_device_type *typ_lu;
- cfs_spinlock_t obd_type_lock;
+ spinlock_t obd_type_lock;
};
struct brw_page {
struct ost_server_data;
+struct osd_properties {
+ size_t osd_max_ea_size;
+};
+
+#define OBT_MAGIC 0xBDDECEAE
/* hold common fields for "target" device */
struct obd_device_target {
+ __u32 obt_magic;
+ __u32 obt_instance;
struct super_block *obt_sb;
/** last_rcvd file */
struct file *obt_rcvd_filp;
+#ifdef HAVE_SERVER_SUPPORT
struct lu_target *obt_lut;
+#endif
__u64 obt_mount_count;
- cfs_semaphore_t obt_quotachecking;
- struct lustre_quota_ctxt obt_qctxt;
- lustre_quota_version_t obt_qfmt;
- cfs_rw_semaphore_t obt_rwsem;
+ struct rw_semaphore obt_rwsem;
struct vfsmount *obt_vfsmnt;
struct file *obt_health_check_filp;
+ struct osd_properties obt_osd_properties;
+ struct obd_job_stats obt_jobstats;
};
/* llog contexts */
cfs_dentry_t *fo_dentry_O;
cfs_dentry_t **fo_dentry_O_groups;
struct filter_subdirs *fo_dentry_O_sub;
- cfs_semaphore_t fo_init_lock; /* group initialization lock */
- int fo_committed_group;
+ struct mutex fo_init_lock; /* group initialization lock*/
+ int fo_committed_group;
- cfs_spinlock_t fo_objidlock; /* protect fo_lastobjid */
+ spinlock_t fo_objidlock; /* protect fo_lastobjid */
- unsigned long fo_destroys_in_progress;
- cfs_semaphore_t fo_create_locks[FILTER_SUBDIR_COUNT];
+ unsigned long fo_destroys_in_progress;
+ struct mutex fo_create_locks[FILTER_SUBDIR_COUNT];
cfs_list_t fo_export_list;
int fo_subdir_count;
int fo_tot_granted_clients;
obd_size fo_readcache_max_filesize;
- int fo_read_cache:1, /**< enable read-only cache */
+ spinlock_t fo_flags_lock;
+ unsigned int fo_read_cache:1, /**< enable read-only cache */
fo_writethrough_cache:1,/**< read cache writes */
fo_mds_ost_sync:1, /**< MDS-OST orphan recovery*/
fo_raid_degraded:1;/**< RAID device degraded */
__u64 *fo_last_objids; /* last created objid for groups,
* protected by fo_objidlock */
- cfs_semaphore_t fo_alloc_lock;
+ struct mutex fo_alloc_lock;
cfs_atomic_t fo_r_in_flight;
cfs_atomic_t fo_w_in_flight;
- /*
- * per-filter pool of kiobuf's allocated by filter_common_setup() and
- * torn down by filter_cleanup(). Contains OST_NUM_THREADS elements of
- * which ->fo_iobuf_count were allocated.
- *
- * This pool contains kiobuf used by
- * filter_{prep,commit}rw_{read,write}() and is shared by all OST
- * threads.
- *
- * Locking: none, each OST thread uses only one element, determined by
- * its "ordinal number", ->t_id.
- */
- struct filter_iobuf **fo_iobuf_pool;
- int fo_iobuf_count;
-
- cfs_list_t fo_llog_list;
- cfs_spinlock_t fo_llog_list_lock;
+ /*
+ * per-filter pool of kiobuf's allocated by filter_common_setup() and
+ * torn down by filter_cleanup().
+ *
+ * This pool contains kiobuf used by
+ * filter_{prep,commit}rw_{read,write}() and is shared by all OST
+ * threads.
+ *
+ * Locking: protected by internal lock of cfs_hash, pool can be
+ * found from this hash table by t_id of ptlrpc_thread.
+ */
+ struct cfs_hash *fo_iobuf_hash;
+
+ cfs_list_t fo_llog_list;
+ spinlock_t fo_llog_list_lock;
struct brw_stats fo_filter_stats;
- struct lustre_quota_ctxt fo_quota_ctxt;
- cfs_spinlock_t fo_quotacheck_lock;
- cfs_atomic_t fo_quotachecking;
int fo_fmd_max_num; /* per exp filter_mod_data */
int fo_fmd_max_age; /* jiffies to fmd expiry */
+ unsigned long fo_syncjournal:1, /* sync journal on writes */
+ fo_sync_lock_cancel:2;/* sync on lock cancel */
+
/* sptlrpc stuff */
- cfs_rwlock_t fo_sptlrpc_lock;
+ rwlock_t fo_sptlrpc_lock;
struct sptlrpc_rule_set fo_sptlrpc_rset;
/* capability related */
};
#define OSC_MAX_RIF_DEFAULT 8
+#define MDS_OSC_MAX_RIF_DEFAULT 50
#define OSC_MAX_RIF_MAX 256
#define OSC_MAX_DIRTY_DEFAULT (OSC_MAX_RIF_DEFAULT * 4)
#define OSC_MAX_DIRTY_MB_MAX 2048 /* arbitrary, but < MAX_LONG bytes */
#define OSC_DEFAULT_RESENDS 10
+/* possible values for fo_sync_lock_cancel (the filter's sync-on-lock-cancel
+ * policy; fo_sync_lock_cancel is a 2-bit field, see struct filter_obd) */
+enum {
+        NEVER_SYNC_ON_CANCEL = 0,       /* never sync on lock cancel */
+        BLOCKING_SYNC_ON_CANCEL = 1,    /* sync only for blocking cancels */
+        ALWAYS_SYNC_ON_CANCEL = 2,      /* sync on every lock cancel */
+        NUM_SYNC_ON_CANCEL_STATES       /* sentinel: count of valid states */
+};
+
#define MDC_MAX_RIF_DEFAULT 8
#define MDC_MAX_RIF_MAX 512
struct mdc_rpc_lock;
struct obd_import;
struct client_obd {
- cfs_rw_semaphore_t cl_sem;
+ struct rw_semaphore cl_sem;
struct obd_uuid cl_target_uuid;
struct obd_import *cl_import; /* ptlrpc connection state */
int cl_conn_count;
long cl_dirty_transit; /* dirty synchronous */
long cl_avail_grant; /* bytes of credit for ost */
long cl_lost_grant; /* lost credits (trunc) */
- cfs_list_t cl_cache_waiters; /* waiting for cache/grant */
- cfs_time_t cl_next_shrink_grant; /* jiffies */
- cfs_list_t cl_grant_shrink_list; /* Timeout event list */
- cfs_semaphore_t cl_grant_sem; /*grant shrink list cfs_semaphore*/
- int cl_grant_shrink_interval; /* seconds */
+
+ /* since we allocate grant by blocks, we don't know how many grant will
+ * be used to add a page into cache. As a solution, we reserve maximum
+ * grant before trying to dirty a page and unreserve the rest.
+ * See osc_{reserve|unreserve}_grant for details. */
+ long cl_reserved_grant;
+ cfs_list_t cl_cache_waiters; /* waiting for cache/grant */
+ cfs_time_t cl_next_shrink_grant; /* jiffies */
+ cfs_list_t cl_grant_shrink_list; /* Timeout event list */
+ int cl_grant_shrink_interval; /* seconds */
+
+ /* A chunk is an optimal size used by osc_extent to determine
+ * the extent size. A chunk is max(CFS_PAGE_SIZE, OST block size) */
+ int cl_chunkbits;
+ int cl_chunk;
+ int cl_extent_tax; /* extent overhead, by bytes */
/* keep track of objects that have lois that contain pages which
* have been queued for async brw. this lock also protects the
* Exact type of ->cl_loi_list_lock is defined in arch/obd.h together
* with client_obd_list_{un,}lock() and
* client_obd_list_lock_{init,done}() functions.
- */
+ *
+ * NB by Jinshan: though field names are still _loi_, but actually
+ * osc_object{}s are in the list.
+ */
client_obd_lock_t cl_loi_list_lock;
cfs_list_t cl_loi_ready_list;
cfs_list_t cl_loi_hp_ready_list;
int cl_r_in_flight;
int cl_w_in_flight;
/* just a sum of the loi/lop pending numbers to be exported by /proc */
- int cl_pending_w_pages;
- int cl_pending_r_pages;
- int cl_max_pages_per_rpc;
+ cfs_atomic_t cl_pending_w_pages;
+ cfs_atomic_t cl_pending_r_pages;
+ int cl_max_pages_per_rpc;
int cl_max_rpcs_in_flight;
struct obd_histogram cl_read_rpc_hist;
struct obd_histogram cl_write_rpc_hist;
struct obd_histogram cl_read_offset_hist;
struct obd_histogram cl_write_offset_hist;
+ /* lru for osc caching pages */
+ struct cl_client_cache *cl_cache;
+ cfs_list_t cl_lru_osc; /* member of cl_cache->ccc_lru */
+ cfs_atomic_t *cl_lru_left;
+ cfs_atomic_t cl_lru_busy;
+ cfs_atomic_t cl_lru_shrinkers;
+ cfs_atomic_t cl_lru_in_list;
+ cfs_list_t cl_lru_list; /* lru page list */
+ client_obd_lock_t cl_lru_list_lock; /* page list protector */
+
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
cfs_atomic_t cl_destroy_in_flight;
cfs_waitq_t cl_destroy_waitq;
struct mdc_rpc_lock *cl_rpc_lock;
- struct mdc_rpc_lock *cl_setattr_lock;
struct mdc_rpc_lock *cl_close_lock;
- struct osc_creator cl_oscc;
/* mgc datastruct */
- cfs_semaphore_t cl_mgc_sem;
+ struct semaphore cl_mgc_sem;
struct vfsmount *cl_mgc_vfsmnt;
struct dentry *cl_mgc_configs_dir;
cfs_atomic_t cl_mgc_refcount;
/* also protected by the poorly named _loi_list_lock lock above */
struct osc_async_rc cl_ar;
- /* used by quotacheck */
- int cl_qchk_stat; /* quotacheck stat of the peer */
+ /* used by quotacheck when the servers are older than 2.4 */
+ int cl_qchk_stat; /* quotacheck stat of the peer */
+#define CL_NOT_QUOTACHECKED 1 /* client->cl_qchk_stat init value */
+#if LUSTRE_VERSION_CODE >= OBD_OCD_VERSION(2, 7, 50, 0)
+#warning "please consider removing quotacheck compatibility code"
+#endif
/* sequence manager */
struct lu_client_seq *cl_seq;
cfs_atomic_t cl_resends; /* resend count */
-};
-#define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
-
-#define CL_NOT_QUOTACHECKED 1 /* client->cl_qchk_stat init value */
-struct mgs_obd {
- struct ptlrpc_service *mgs_service;
- struct vfsmount *mgs_vfsmnt;
- struct super_block *mgs_sb;
- struct dentry *mgs_configs_dir;
- struct dentry *mgs_fid_de;
- cfs_list_t mgs_fs_db_list;
- cfs_semaphore_t mgs_sem;
- cfs_proc_dir_entry_t *mgs_proc_live;
+ /* ptlrpc work for writeback in ptlrpcd context */
+ void *cl_writeback_work;
+ /* hash tables for osc_quota_info */
+ cfs_hash_t *cl_quota_hash[MAXQUOTAS];
};
-
-struct mds_obd {
- /* NB this field MUST be first */
- struct obd_device_target mds_obt;
- struct ptlrpc_service *mds_service;
- struct ptlrpc_service *mds_setattr_service;
- struct ptlrpc_service *mds_readpage_service;
- cfs_dentry_t *mds_fid_de;
- int mds_max_mdsize;
- int mds_max_cookiesize;
- __u64 mds_io_epoch;
- unsigned long mds_atime_diff;
- cfs_semaphore_t mds_epoch_sem;
- struct ll_fid mds_rootfid;
- cfs_dentry_t *mds_pending_dir;
- cfs_dentry_t *mds_logs_dir;
- cfs_dentry_t *mds_objects_dir;
- struct llog_handle *mds_cfg_llh;
- struct obd_device *mds_lov_obd;
- struct obd_uuid mds_lov_uuid;
- char *mds_profile;
- struct obd_export *mds_lov_exp;
- struct lov_desc mds_lov_desc;
- __u32 mds_id;
-
- /* mark pages dirty for write. */
- cfs_bitmap_t *mds_lov_page_dirty;
- /* array for store pages with obd_id */
- void **mds_lov_page_array;
- /* file for store objid */
- struct file *mds_lov_objid_filp;
- __u32 mds_lov_objid_count;
- __u32 mds_lov_objid_max_index;
- __u32 mds_lov_objid_lastpage;
- __u32 mds_lov_objid_lastidx;
-
-
- struct lustre_quota_info mds_quota_info;
- cfs_rw_semaphore_t mds_qonoff_sem;
- cfs_semaphore_t mds_health_sem;
- unsigned long mds_fl_user_xattr:1,
- mds_fl_acl:1,
- mds_evict_ost_nids:1,
- mds_fl_cfglog:1,
- mds_fl_synced:1,
- mds_quota:1,
- mds_fl_target:1; /* mds have one or
- * more targets */
-
- struct upcall_cache *mds_identity_cache;
-
- /* for capability keys update */
- struct lustre_capa_key *mds_capa_keys;
- cfs_rw_semaphore_t mds_notify_lock;
-};
-
-/* lov objid */
-extern __u32 mds_max_ost_index;
-
-#define MDS_LOV_ALLOC_SIZE (CFS_PAGE_SIZE)
-
-#define OBJID_PER_PAGE() (MDS_LOV_ALLOC_SIZE / sizeof(obd_id))
-
-#define MDS_LOV_OBJID_PAGES_COUNT (mds_max_ost_index/OBJID_PER_PAGE())
-
-extern int mds_lov_init_objids(struct obd_device *obd);
-extern void mds_lov_destroy_objids(struct obd_device *obd);
+#define obd2cli_tgt(obd) ((char *)(obd)->u.cli.cl_target_uuid.uuid)
struct obd_id_info {
__u32 idx;
/* */
struct echo_obd {
- struct obdo eo_oa;
- cfs_spinlock_t eo_lock;
- __u64 eo_lastino;
- struct lustre_handle eo_nl_lock;
- cfs_atomic_t eo_prep;
+ struct obd_device_target eo_obt;
+ struct obdo eo_oa;
+ spinlock_t eo_lock;
+ __u64 eo_lastino;
+ struct lustre_handle eo_nl_lock;
+ cfs_atomic_t eo_prep;
};
struct ost_obd {
- struct ptlrpc_service *ost_service;
- struct ptlrpc_service *ost_create_service;
- struct ptlrpc_service *ost_io_service;
- cfs_semaphore_t ost_health_sem;
+ struct ptlrpc_service *ost_service;
+ struct ptlrpc_service *ost_create_service;
+ struct ptlrpc_service *ost_io_service;
+ struct ptlrpc_service *ost_seq_service;
+ struct mutex ost_health_mutex;
};
struct echo_client_obd {
- struct obd_export *ec_exp; /* the local connection to osc/lov */
- cfs_spinlock_t ec_lock;
+ struct obd_export *ec_exp; /* the local connection to osc/lov */
+ spinlock_t ec_lock;
cfs_list_t ec_objects;
cfs_list_t ec_locks;
int ec_nstripes;
lov_obd->lov_tgts */
unsigned int op_count; /* number of OSTs in the array */
unsigned int op_size; /* allocated size of lp_array */
- cfs_rw_semaphore_t op_rw_sem; /* to protect ost_pool use */
+ struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
};
/* Round-robin allocator data */
unsigned long lqr_dirty:1; /* recalc round-robin list */
};
+/* allow statfs data caching for 1 second */
+#define OBD_STATFS_CACHE_SECONDS 1
+
struct lov_statfs_data {
struct obd_info lsd_oi;
struct obd_statfs lsd_statfs;
/* Stripe placement optimization */
struct lov_qos {
cfs_list_t lq_oss_list; /* list of OSSs that targets use */
- cfs_rw_semaphore_t lq_rw_sem;
+ struct rw_semaphore lq_rw_sem;
__u32 lq_active_oss_count;
unsigned int lq_prio_free; /* priority for free space */
unsigned int lq_threshold_rr;/* priority for rr */
#define pool_tgt_count(_p) _p->pool_obds.op_count
#define pool_tgt_array(_p) _p->pool_obds.op_array
#define pool_tgt_rw_sem(_p) _p->pool_obds.op_rw_sem
-#define pool_tgt(_p, _i) _p->pool_lov->lov_tgts[_p->pool_obds.op_array[_i]]
struct pool_desc {
char pool_name[LOV_MAXPOOLNAME + 1]; /* name of pool */
cfs_hlist_node_t pool_hash; /* access by poolname */
cfs_list_t pool_list; /* serial access */
cfs_proc_dir_entry_t *pool_proc_entry; /* file in /proc */
- struct lov_obd *pool_lov; /* lov obd to which this
- pool belong */
+ struct obd_device *pool_lobd; /* obd of the lov/lod to which
+ * this pool belongs */
};
struct lov_obd {
struct lov_tgt_desc **lov_tgts; /* sparse array */
struct ost_pool lov_packed; /* all OSTs in a packed
array */
- cfs_semaphore_t lov_lock;
+ struct mutex lov_lock;
struct obd_connect_data lov_ocd;
- struct lov_qos lov_qos; /* qos info per lov */
cfs_atomic_t lov_refcount;
__u32 lov_tgt_count; /* how many OBD's */
__u32 lov_active_tgt_count; /* how many active */
cfs_list_t lov_pool_list; /* used for sequential access */
cfs_proc_dir_entry_t *lov_pool_proc_entry;
enum lustre_sec_part lov_sp_me;
+
+ /* Cached LRU pages from upper layer */
+ void *lov_cache;
};
struct lmv_tgt_desc {
struct obd_export *ltd_exp;
int ltd_active; /* is this target up for requests */
int ltd_idx;
- cfs_semaphore_t ltd_fid_sem;
+ struct mutex ltd_fid_mutex;
};
enum placement_policy {
typedef enum placement_policy placement_policy_t;
struct lmv_obd {
- int refcount;
- struct lu_client_fld lmv_fld;
- cfs_spinlock_t lmv_lock;
+ int refcount;
+ struct lu_client_fld lmv_fld;
+ spinlock_t lmv_lock;
placement_policy_t lmv_placement;
struct lmv_desc desc;
struct obd_uuid cluuid;
int max_def_easize;
int max_cookiesize;
int server_timeout;
- cfs_semaphore_t init_sem;
+ struct mutex init_mutex;
struct lmv_tgt_desc *tgts;
int tgts_size;
};
struct niobuf_local {
- __u64 offset;
+ __u64 lnb_file_offset;
+ __u32 lnb_page_offset;
__u32 len;
__u32 flags;
cfs_page_t *page;
#define LUSTRE_CMM_NAME "cmm"
#define LUSTRE_MDD_NAME "mdd"
-#define LUSTRE_OSD_NAME "osd"
+#define LUSTRE_OSD_LDISKFS_NAME "osd-ldiskfs"
+#define LUSTRE_OSD_ZFS_NAME "osd-zfs"
#define LUSTRE_VVP_NAME "vvp"
#define LUSTRE_LMV_NAME "lmv"
#define LUSTRE_CMM_MDC_NAME "cmm-mdc"
#define LUSTRE_SLP_NAME "slp"
+#define LUSTRE_LOD_NAME "lod"
+#define LUSTRE_OSP_NAME "osp"
/* obd device type names */
/* FIXME all the references to LUSTRE_MDS_NAME should be swapped with LUSTRE_MDT_NAME */
#define LUSTRE_CACHEOBD_NAME "cobd"
#define LUSTRE_ECHO_NAME "obdecho"
#define LUSTRE_ECHO_CLIENT_NAME "echo_client"
+#define LUSTRE_QMT_NAME "qmt"
/* Constant obd names (post-rename) */
#define LUSTRE_MDS_OBDNAME "MDS"
/* Don't conflict with on-wire flags OBD_BRW_WRITE, etc */
#define N_LOCAL_TEMP_PAGE 0x10000000
+/* Test whether an obd device name denotes an OSP device attached to an
+ * OST or MDT target, i.e. a name of the form "<prefix>-osp-OSTxxxx" or
+ * "<prefix>-osp-MDTxxxx".
+ *
+ * Returns 1 on a match, 0 otherwise; a name with no '-' separator at all
+ * is reported via CERROR and treated as no match.
+ */
+static inline int is_osp_on_ost(char *name)
+{
+	char *ptr;
+
+	/* ptr points at the last '-', so ptr + 1 is the target component */
+	ptr = strrchr(name, '-');
+	if (ptr == NULL) {
+		CERROR("%s is not a obdname\n", name);
+		return 0;
+	}
+
+	if (strncmp(ptr + 1, "OST", 3) != 0 && strncmp(ptr + 1, "MDT", 3) != 0)
+		return 0;
+
+	/* match the "-osp" immediately preceding the target component;
+	 * length check first so we never step before the start of name */
+	if (ptr - name < strlen(LUSTRE_OSP_NAME) + 1)
+		return 0;
+
+	ptr -= (strlen(LUSTRE_OSP_NAME) + 1);
+	if (*ptr != '-')
+		return 0;
+
+	if (strncmp(ptr + 1, LUSTRE_OSP_NAME, strlen(LUSTRE_OSP_NAME)) != 0)
+		return 0;
+
+	return 1;
+}
+
struct obd_trans_info {
__u64 oti_transno;
__u64 oti_xid;
struct llog_cookie oti_onecookie;
struct llog_cookie *oti_logcookies;
int oti_numcookies;
+ /** synchronous write is needed */
+ long oti_sync_write:1;
/* initial thread handling transaction */
struct ptlrpc_thread * oti_thread;
__u32 oti_conn_cnt;
/** VBR: versions */
__u64 oti_pre_version;
+ /** JobID */
+ char *oti_jobid;
struct obd_uuid *oti_ost_uuid;
};
if (num_cookies == 1)
oti->oti_logcookies = &oti->oti_onecookie;
else
- OBD_ALLOC(oti->oti_logcookies,
- num_cookies * sizeof(oti->oti_onecookie));
+ OBD_ALLOC_LARGE(oti->oti_logcookies,
+ num_cookies * sizeof(oti->oti_onecookie));
oti->oti_numcookies = num_cookies;
}
if (oti->oti_logcookies == &oti->oti_onecookie)
LASSERT(oti->oti_numcookies == 1);
else
- OBD_FREE(oti->oti_logcookies,
- oti->oti_numcookies * sizeof(oti->oti_onecookie));
+ OBD_FREE_LARGE(oti->oti_logcookies,
+ oti->oti_numcookies*sizeof(oti->oti_onecookie));
oti->oti_logcookies = NULL;
oti->oti_numcookies = 0;
}
* Events signalled through obd_notify() upcall-chain.
*/
enum obd_notify_event {
+ /* target added */
+ OBD_NOTIFY_CREATE,
/* Device connect start */
OBD_NOTIFY_CONNECT,
/* Device activated */
OBD_NOTIFY_SYNC,
/* Configuration event */
OBD_NOTIFY_CONFIG,
- /* Trigger quota recovery */
- OBD_NOTIFY_QUOTA
+ /* Administratively deactivate/activate event */
+ OBD_NOTIFY_DEACTIVATE,
+ OBD_NOTIFY_ACTIVATE
};
/* bit-mask flags for config events */
};
struct target_recovery_data {
- svc_handler_t trd_recovery_handler;
- pid_t trd_processing_task;
- cfs_completion_t trd_starting;
- cfs_completion_t trd_finishing;
+ svc_handler_t trd_recovery_handler;
+ pid_t trd_processing_task;
+ struct completion trd_starting;
+ struct completion trd_finishing;
};
/**
int olg_seq;
struct llog_ctxt *olg_ctxts[LLOG_MAX_CTXTS];
cfs_waitq_t olg_waitq;
- cfs_spinlock_t olg_lock;
- struct obd_export *olg_exp;
- int olg_initializing;
- cfs_semaphore_t olg_cat_processing;
+ spinlock_t olg_lock;
+ struct obd_export *olg_exp;
+ int olg_initializing;
+ struct mutex olg_cat_processing;
};
/* corresponds to one of the obd's */
-#define MAX_OBD_NAME 128
#define OBD_DEVICE_MAGIC 0XAB5CD6EF
#define OBD_DEV_BY_DEVNAME 0xffffd0de
+
struct obd_device {
struct obd_type *obd_type;
__u32 obd_magic;
struct lu_device *obd_lu_dev;
int obd_minor;
+ /* bitfield modification is protected by obd_dev_lock */
unsigned long obd_attached:1, /* finished attach */
obd_set_up:1, /* finished setup */
obd_recovering:1, /* there are recoverable clients */
obd_abort_recovery:1,/* recovery expired */
obd_version_recov:1, /* obd uses version checking */
- obd_recovery_expired:1,
obd_replayable:1, /* recovery is enabled; inform clients */
obd_no_transno:1, /* no committed-transno notification */
obd_no_recov:1, /* fail instead of retry messages */
- obd_req_replaying:1, /* replaying requests */
obd_stopping:1, /* started cleanup */
obd_starting:1, /* started setup */
obd_force:1, /* cleanup with > 0 obd refcount */
obd_no_conn:1, /* deny new connections */
obd_inactive:1, /* device active/inactive
* (for /proc/status only!!) */
+ obd_no_ir:1, /* no imperative recovery. */
obd_process_conf:1; /* device is processing mgs config */
+ /* use separate field as it is set in interrupt to don't mess with
+ * protection of other bits using _bh lock */
+ unsigned long obd_recovery_expired:1;
/* uuid-export hash body */
cfs_hash_t *obd_uuid_hash;
/* nid-export hash body */
cfs_list_t obd_unlinked_exports;
cfs_list_t obd_delayed_exports;
int obd_num_exports;
- cfs_spinlock_t obd_nid_lock;
- struct ldlm_namespace *obd_namespace;
- struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
- /* a spinlock is OK for what we do now, may need a semaphore later */
- cfs_spinlock_t obd_dev_lock;
- cfs_semaphore_t obd_dev_sem;
- __u64 obd_last_committed;
- struct fsfilt_operations *obd_fsops;
- cfs_spinlock_t obd_osfs_lock;
- struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */
- __u64 obd_osfs_age;
- struct lvfs_run_ctxt obd_lvfs_ctxt;
- struct obd_llog_group obd_olg; /* default llog group */
- struct obd_device *obd_observer;
- cfs_rw_semaphore_t obd_observer_link_sem;
+ spinlock_t obd_nid_lock;
+ struct ldlm_namespace *obd_namespace;
+ struct ptlrpc_client obd_ldlm_client; /* XXX OST/MDS only */
+ /* a spinlock is OK for what we do now, may need a semaphore later */
+ spinlock_t obd_dev_lock; /* protect OBD bitfield above */
+ struct mutex obd_dev_mutex;
+ __u64 obd_last_committed;
+ struct fsfilt_operations *obd_fsops;
+ spinlock_t obd_osfs_lock;
+ struct obd_statfs obd_osfs; /* locked by obd_osfs_lock */
+ __u64 obd_osfs_age;
+ struct lvfs_run_ctxt obd_lvfs_ctxt;
+ struct obd_llog_group obd_olg; /* default llog group */
+ struct obd_device *obd_observer;
+ struct rw_semaphore obd_observer_link_sem;
struct obd_notify_upcall obd_upcall;
struct obd_export *obd_self_export;
/* list of exports in LRU order, for ping evictor, with obd_dev_lock */
time_t obd_eviction_timer; /* for ping evictor */
int obd_max_recoverable_clients;
- int obd_connected_clients;
+ cfs_atomic_t obd_connected_clients;
int obd_stale_clients;
int obd_delayed_clients;
- cfs_spinlock_t obd_processing_task_lock; /* BH lock (timer) */
+ /* this lock protects all recovery list_heads, timer and
+ * obd_next_recovery_transno value */
+ spinlock_t obd_recovery_task_lock;
__u64 obd_next_recovery_transno;
int obd_replayed_requests;
int obd_requests_queued_for_recovery;
cfs_waitq_t obd_next_transno_waitq;
+ /* protected by obd_recovery_task_lock */
cfs_timer_t obd_recovery_timer;
time_t obd_recovery_start; /* seconds */
time_t obd_recovery_end; /* seconds, for lprocfs_status */
- time_t obd_recovery_time_hard;
+ int obd_recovery_time_hard;
int obd_recovery_timeout;
+ int obd_recovery_ir_factor;
/* new recovery stuff from CMD2 */
struct target_recovery_data obd_recovery_data;
int obd_replayed_locks;
cfs_atomic_t obd_req_replay_clients;
cfs_atomic_t obd_lock_replay_clients;
+ /* all lists are protected by obd_recovery_task_lock */
cfs_list_t obd_req_replay_queue;
cfs_list_t obd_lock_replay_queue;
cfs_list_t obd_final_req_queue;
union {
struct obd_device_target obt;
struct filter_obd filter;
- struct mds_obd mds;
struct client_obd cli;
struct ost_obd ost;
struct echo_client_obd echo_client;
struct echo_obd echo;
struct lov_obd lov;
struct lmv_obd lmv;
- struct mgs_obd mgs;
} u;
/* Fields used by LProcFS */
unsigned int obd_cntr_base;
/**
* Ldlm pool part. Save last calculated SLV and Limit.
*/
- cfs_rwlock_t obd_pool_lock;
+ rwlock_t obd_pool_lock;
int obd_pool_limit;
__u64 obd_pool_slv;
* debugging.
*/
struct lu_ref obd_reference;
+
+ int obd_conn_inprogress;
};
#define OBD_LLOG_FL_SENDNOW 0x0001
+#define OBD_LLOG_FL_EXIT 0x0002
enum obd_cleanup_stage {
/* Special case hack for MDS LOVs */
#define KEY_NEXT_ID "next_id"
#define KEY_READ_ONLY "read-only"
#define KEY_REGISTER_TARGET "register_target"
-#define KEY_REVIMP_UPD "revimp_update"
#define KEY_SET_FS "set_fs"
+#define KEY_TGT_COUNT "tgt_count"
/* KEY_SET_INFO in lustre_idl.h */
#define KEY_SPTLRPC_CONF "sptlrpc_conf"
#define KEY_CONNECT_FLAG "connect_flags"
+#define KEY_SYNC_LOCK_CANCEL "sync_lock_cancel"
+#define KEY_CACHE_SET "cache_set"
+#define KEY_CACHE_LRU_SHRINK "cache_lru_shrink"
struct lu_context;
+/* /!\ must be coherent with include/linux/namei.h on patched kernel */
+#define IT_OPEN (1 << 0)
+#define IT_CREAT (1 << 1)
+#define IT_READDIR (1 << 2)
+#define IT_GETATTR (1 << 3)
+#define IT_LOOKUP (1 << 4)
+#define IT_UNLINK (1 << 5)
+#define IT_TRUNC (1 << 6)
+#define IT_GETXATTR (1 << 7)
+#define IT_EXEC (1 << 8)
+#define IT_PIN (1 << 9)
+#define IT_LAYOUT (1 << 10)
+#define IT_QUOTA_DQACQ (1 << 11)
+#define IT_QUOTA_CONN (1 << 12)
+
static inline int it_to_lock_mode(struct lookup_intent *it)
{
/* CREAT needs to be tested before open (both could be set) */
if (it->it_op & IT_CREAT)
return LCK_CW;
- else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP))
+ else if (it->it_op & (IT_READDIR | IT_GETATTR | IT_OPEN | IT_LOOKUP |
+ IT_LAYOUT))
return LCK_CR;
LASSERTF(0, "Invalid it_op: %d\n", it->it_op);
struct lu_fid op_fid4; /* to the operation locks. */
mdsno_t op_mds; /* what mds server open will go to */
struct lustre_handle op_handle;
- __u64 op_mod_time;
+ obd_time op_mod_time;
const char *op_name;
int op_namelen;
__u32 op_mode;
/* iattr fields and blocks. */
struct iattr op_attr;
#ifdef __KERNEL__
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
- unsigned int op_attr_flags;
-#endif
+ unsigned int op_attr_flags;
#endif
__u64 op_valid;
loff_t op_attr_blocks;
/* Operation type */
__u32 op_opc;
+
+ /* Used by readdir */
+ __u32 op_npages;
+ __u64 op_offset;
};
struct md_enqueue_info;
struct md_enqueue_info *minfo,
int rc);
+/* seq client type */
+enum lu_cli_type {
+ LUSTRE_SEQ_METADATA = 1,
+ LUSTRE_SEQ_DATA
+};
+
struct md_enqueue_info {
struct md_op_data mi_data;
struct lookup_intent mi_it;
struct lustre_handle mi_lockh;
- struct dentry *mi_dentry;
struct inode *mi_dir;
md_enqueue_cb_t mi_cb;
+ __u64 mi_cbdata;
unsigned int mi_generation;
- void *mi_cbdata;
};
struct obd_ops {
cfs_module_t *o_owner;
int (*o_iocontrol)(unsigned int cmd, struct obd_export *exp, int len,
void *karg, void *uarg);
- int (*o_get_info)(struct obd_export *, __u32 keylen, void *key,
- __u32 *vallen, void *val, struct lov_stripe_md *lsm);
- int (*o_set_info_async)(struct obd_export *, __u32 keylen, void *key,
+ int (*o_get_info)(const struct lu_env *env, struct obd_export *,
+ __u32 keylen, void *key, __u32 *vallen, void *val,
+ struct lov_stripe_md *lsm);
+ int (*o_set_info_async)(const struct lu_env *, struct obd_export *,
+ __u32 keylen, void *key,
__u32 vallen, void *val,
struct ptlrpc_request_set *set);
int (*o_attach)(struct obd_device *dev, obd_count len, void *data);
int (*o_disconnect)(struct obd_export *exp);
/* Initialize/finalize fids infrastructure. */
- int (*o_fid_init)(struct obd_export *exp);
+ int (*o_fid_init)(struct obd_export *exp, enum lu_cli_type type);
int (*o_fid_fini)(struct obd_export *exp);
/* Allocate new fid according to passed @hint. */
* Object with @fid is getting deleted, we may want to do something
* about this.
*/
- int (*o_fid_delete)(struct obd_export *exp, const struct lu_fid *fid);
-
- int (*o_statfs)(struct obd_device *obd, struct obd_statfs *osfs,
- __u64 max_age, __u32 flags);
- int (*o_statfs_async)(struct obd_device *obd, struct obd_info *oinfo,
+ int (*o_statfs)(const struct lu_env *, struct obd_export *exp,
+ struct obd_statfs *osfs, __u64 max_age, __u32 flags);
+ int (*o_statfs_async)(struct obd_export *exp, struct obd_info *oinfo,
__u64 max_age, struct ptlrpc_request_set *set);
int (*o_packmd)(struct obd_export *exp, struct lov_mds_md **disk_tgt,
struct lov_stripe_md *mem_src);
obd_id *ids);
/* FIXME: add fid capability support for create & destroy! */
int (*o_precreate)(struct obd_export *exp);
- int (*o_create)(struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md **ea, struct obd_trans_info *oti);
+ int (*o_create)(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *oa, struct lov_stripe_md **ea,
+ struct obd_trans_info *oti);
int (*o_create_async)(struct obd_export *exp, struct obd_info *oinfo,
struct lov_stripe_md **ea,
struct obd_trans_info *oti);
- int (*o_destroy)(struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md *ea, struct obd_trans_info *oti,
- struct obd_export *md_exp, void *capa);
- int (*o_setattr)(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti);
+ int (*o_destroy)(const struct lu_env *env, struct obd_export *exp,
+ struct obdo *oa, struct lov_stripe_md *ea,
+ struct obd_trans_info *oti, struct obd_export *md_exp,
+ void *capa);
+ int (*o_setattr)(const struct lu_env *, struct obd_export *exp,
+ struct obd_info *oinfo, struct obd_trans_info *oti);
int (*o_setattr_async)(struct obd_export *exp, struct obd_info *oinfo,
struct obd_trans_info *oti,
struct ptlrpc_request_set *rqset);
- int (*o_getattr)(struct obd_export *exp, struct obd_info *oinfo);
+ int (*o_getattr)(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo);
int (*o_getattr_async)(struct obd_export *exp, struct obd_info *oinfo,
struct ptlrpc_request_set *set);
int (*o_brw)(int rw, struct obd_export *exp, struct obd_info *oinfo,
struct ost_lvb *lvb, int kms_only);
int (*o_adjust_kms)(struct obd_export *exp, struct lov_stripe_md *lsm,
obd_off size, int shrink);
- int (*o_punch)(struct obd_export *exp, struct obd_info *oinfo,
- struct obd_trans_info *oti,
+ int (*o_punch)(const struct lu_env *, struct obd_export *exp,
+ struct obd_info *oinfo, struct obd_trans_info *oti,
struct ptlrpc_request_set *rqset);
- int (*o_sync)(struct obd_export *exp, struct obdo *oa,
- struct lov_stripe_md *ea, obd_size start, obd_size end,
- void *capa);
+ int (*o_sync)(const struct lu_env *env, struct obd_export *exp,
+ struct obd_info *oinfo, obd_size start, obd_size end,
+ struct ptlrpc_request_set *set);
int (*o_migrate)(struct lustre_handle *conn, struct lov_stripe_md *dst,
struct lov_stripe_md *src, obd_size start,
obd_size end, struct obd_trans_info *oti);
int (*o_iterate)(struct lustre_handle *conn,
int (*)(obd_id, obd_seq, void *),
obd_id *startid, obd_seq seq, void *data);
- int (*o_preprw)(int cmd, struct obd_export *exp, struct obdo *oa,
- int objcount, struct obd_ioobj *obj,
- struct niobuf_remote *remote, int *nr_pages,
- struct niobuf_local *local,
- struct obd_trans_info *oti,
- struct lustre_capa *capa);
- int (*o_commitrw)(int cmd, struct obd_export *exp, struct obdo *oa,
+ int (*o_preprw)(const struct lu_env *env, int cmd,
+ struct obd_export *exp, struct obdo *oa, int objcount,
+ struct obd_ioobj *obj, struct niobuf_remote *remote,
+ int *nr_pages, struct niobuf_local *local,
+ struct obd_trans_info *oti, struct lustre_capa *capa);
+ int (*o_commitrw)(const struct lu_env *env, int cmd,
+ struct obd_export *exp, struct obdo *oa,
int objcount, struct obd_ioobj *obj,
struct niobuf_remote *remote, int pages,
struct niobuf_local *local,
int (*o_notify)(struct obd_device *obd, struct obd_device *watched,
enum obd_notify_event ev, void *data);
- int (*o_health_check)(struct obd_device *);
+ int (*o_health_check)(const struct lu_env *env, struct obd_device *);
struct obd_uuid *(*o_get_uuid) (struct obd_export *exp);
/* quota methods */
struct obd_quotactl *);
int (*o_quotactl)(struct obd_device *, struct obd_export *,
struct obd_quotactl *);
- int (*o_quota_adjust_qunit)(struct obd_export *exp,
- struct quota_adjust_qunit *oqaq,
- struct lustre_quota_ctxt *qctxt);
-
- int (*o_ping)(struct obd_export *exp);
+ int (*o_ping)(const struct lu_env *, struct obd_export *exp);
/* pools methods */
int (*o_pool_new)(struct obd_device *obd, char *poolname);
int (*m_enqueue)(struct obd_export *, struct ldlm_enqueue_info *,
struct lookup_intent *, struct md_op_data *,
struct lustre_handle *, void *, int,
- struct ptlrpc_request **, int);
+ struct ptlrpc_request **, __u64);
int (*m_getattr)(struct obd_export *, struct md_op_data *,
struct ptlrpc_request **);
int (*m_getattr_name)(struct obd_export *, struct md_op_data *,
int (*m_intent_lock)(struct obd_export *, struct md_op_data *,
void *, int, struct lookup_intent *, int,
struct ptlrpc_request **,
- ldlm_blocking_callback, int);
+ ldlm_blocking_callback, __u64);
int (*m_link)(struct obd_export *, struct md_op_data *,
struct ptlrpc_request **);
int (*m_rename)(struct obd_export *, struct md_op_data *,
struct md_open_data **mod);
int (*m_sync)(struct obd_export *, const struct lu_fid *,
struct obd_capa *, struct ptlrpc_request **);
- int (*m_readpage)(struct obd_export *, const struct lu_fid *,
- struct obd_capa *, __u64, struct page *,
- struct ptlrpc_request **);
+ int (*m_readpage)(struct obd_export *, struct md_op_data *,
+ struct page **, struct ptlrpc_request **);
int (*m_unlink)(struct obd_export *, struct md_op_data *,
struct ptlrpc_request **);
struct ptlrpc_request *);
int (*m_clear_open_replay_data)(struct obd_export *,
struct obd_client_handle *);
- int (*m_set_lock_data)(struct obd_export *, __u64 *, void *, __u32 *);
+ int (*m_set_lock_data)(struct obd_export *, __u64 *, void *, __u64 *);
- ldlm_mode_t (*m_lock_match)(struct obd_export *, int,
+ ldlm_mode_t (*m_lock_match)(struct obd_export *, __u64,
const struct lu_fid *, ldlm_type_t,
ldlm_policy_data_t *, ldlm_mode_t,
struct lustre_handle *);
struct ldlm_enqueue_info *);
int (*m_revalidate_lock)(struct obd_export *, struct lookup_intent *,
- struct lu_fid *, __u32 *);
+ struct lu_fid *, __u64 *bits);
/*
* NOTE: If adding ops, add another LPROCFS_MD_OP_INIT() line to
void (*lsm_stripe_by_offset)(struct lov_stripe_md *, int *, obd_off *,
obd_off *);
int (*lsm_lmm_verify) (struct lov_mds_md *lmm, int lmm_bytes,
- int *stripe_count);
+ __u16 *stripe_count);
int (*lsm_unpackmd) (struct lov_obd *lov, struct lov_stripe_md *lsm,
struct lov_mds_md *lmm);
};
#define OBD_CALC_STRIPE_START 1
#define OBD_CALC_STRIPE_END 2
-static inline void obd_transno_commit_cb(struct obd_device *obd, __u64 transno,
- struct obd_export *exp, int error)
-{
- if (error) {
- CERROR("%s: transno "LPU64" commit error: %d\n",
- obd->obd_name, transno, error);
- return;
- }
- if (exp && transno > exp->exp_last_committed) {
- CDEBUG(D_HA, "%s: transno "LPU64" committed\n",
- obd->obd_name, transno);
- exp->exp_last_committed = transno;
- ptlrpc_commit_replies(exp);
- } else {
- CDEBUG(D_INFO, "%s: transno "LPU64" committed\n",
- obd->obd_name, transno);
- }
- if (transno > obd->obd_last_committed)
- obd->obd_last_committed = transno;
-}
-
-static inline void init_obd_quota_ops(quota_interface_t *interface,
- struct obd_ops *obd_ops)
-{
- if (!interface)
- return;
-
- LASSERT(obd_ops);
- obd_ops->o_quotacheck = QUOTA_OP(interface, check);
- obd_ops->o_quotactl = QUOTA_OP(interface, ctl);
- obd_ops->o_quota_adjust_qunit = QUOTA_OP(interface, adjust_qunit);
-}
-
static inline struct lustre_capa *oinfo_capa(struct obd_info *oinfo)
{
return oinfo->oi_capa;
} \
})
-extern void obdo_from_inode(struct obdo *dst, struct inode *src,
- struct lu_fid *parent, obd_flag valid);
+void obdo_from_inode(struct obdo *dst, struct inode *src, obd_flag valid);
+void obdo_set_parent_fid(struct obdo *dst, const struct lu_fid *parent);
+
+/* return 1 if the client should resend the request */
+static inline int client_should_resend(int resend, struct client_obd *cli)
+{
+ return cfs_atomic_read(&cli->cl_resends) ?
+ cfs_atomic_read(&cli->cl_resends) > resend : 1;
+}
+
+/**
+ * Return the device name for this device.
+ *
+ * XXX: lu_device is declared before obd_device, but lu_device contains a
+ * pointer back to obd_device, so this helper function is defined here
+ * instead of in lu_object.h.
+ */
+static inline const char *lu_dev_name(const struct lu_device *lu_dev)
+{
+ return lu_dev->ld_obd->obd_name;
+}
#endif /* __OBD_H */