*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lustre/osp/osp_internal.h
*
struct lu_env ou_env;
};
-struct osp_rpc_lock {
- /** Lock protecting in-flight RPC concurrency. */
- struct mutex rpcl_mutex;
- /** Used for MDS/RPC load testing purposes. */
- unsigned int rpcl_fakes;
-};
-
struct osp_device {
struct dt_device opd_dt_dev;
	/* corresponding OST index */
u64 opd_last_id;
struct lu_fid opd_gap_start_fid;
int opd_gap_count;
- /* connection to OST */
- struct osp_rpc_lock opd_rpc_lock;
struct obd_device *opd_obd;
struct obd_export *opd_exp;
struct obd_connect_data *opd_connect_data;
- int opd_connects;
+
/* connection status. */
unsigned int opd_new_connection:1,
opd_got_disconnected:1,
struct llog_gen opd_sync_generation;
/* number of changes to sync, used to wake up sync thread */
atomic_t opd_sync_changes;
+ /* limit of changes to sync */
+ int opd_sync_max_changes;
/* processing of changes from previous mount is done? */
int opd_sync_prev_done;
/* found records */
*/
int opd_reserved_mb_high;
int opd_reserved_mb_low;
+ bool opd_cleanup_orphans_done;
+ bool opd_force_creation;
};
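/*
 * Illustrative sketch, not part of this patch: the new opd_sync_max_changes
 * field is assumed to cap how many not-yet-synced changes may be queued for
 * the sync thread (tracked in opd_sync_changes).  The helper name below is
 * hypothetical and only shows the intended comparison.
 */
static inline bool osp_sync_below_limit_example(struct osp_device *osp)
{
	/* allow queueing another change only while under the configured cap */
	return atomic_read(&osp->opd_sync_changes) < osp->opd_sync_max_changes;
}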
#define opd_pre_used_fid opd_pre->osp_pre_used_fid
* The left part is for value, binary mode. */
struct osp_xattr_entry {
struct list_head oxe_list;
- atomic_t oxe_ref;
void *oxe_value;
- size_t oxe_buflen;
- size_t oxe_namelen;
- size_t oxe_vallen;
- unsigned int oxe_exist:1,
- oxe_ready:1;
- char oxe_buf[0];
+ atomic_t oxe_ref;
+ unsigned int oxe_buflen;
+ unsigned int oxe_vallen;
+ unsigned short oxe_namelen;
+ unsigned short oxe_exist:1,
+ oxe_ready:1,
+ oxe_largebuf:1;
+ char oxe_name[0];
};
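/*
 * Illustrative sketch, not part of this patch: one plausible allocation of
 * the reorganized osp_xattr_entry, assuming the attribute name is stored
 * inline in oxe_name[] with the value buffer following it in the same
 * allocation (the separately-allocated case flagged by oxe_largebuf is not
 * shown).  The helper name is hypothetical.
 */
static inline struct osp_xattr_entry *
osp_oxe_alloc_example(const char *name, size_t namelen, size_t buflen)
{
	struct osp_xattr_entry *oxe;

	/* entry header, NUL-terminated name, then inline value buffer */
	OBD_ALLOC(oxe, sizeof(*oxe) + namelen + 1 + buflen);
	if (oxe == NULL)
		return NULL;

	INIT_LIST_HEAD(&oxe->oxe_list);
	atomic_set(&oxe->oxe_ref, 1);
	memcpy(oxe->oxe_name, name, namelen);
	oxe->oxe_namelen = namelen;
	oxe->oxe_buflen = buflen;
	/* value is assumed to live right after the name in the same chunk */
	oxe->oxe_value = oxe->oxe_name + namelen + 1;

	return oxe;
}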
/* this is a top object */
struct dt_object opo_obj;
unsigned int opo_reserved:1,
opo_non_exist:1,
- opo_stale:1;
+ opo_stale:1,
+ opo_destroyed:1;
/* read/write lock for md osp object */
struct rw_semaphore opo_sem;
struct rw_semaphore opo_invalidate_sem;
};
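/*
 * Illustrative sketch, not part of this patch: the new opo_destroyed bit is
 * assumed to mark an object already destroyed locally so callers can fail
 * fast instead of issuing an RPC that would only return -ENOENT.  The helper
 * name is hypothetical.
 */
static inline bool osp_object_destroyed_example(const struct osp_object *obj)
{
	return obj->opo_destroyed != 0;
}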
-extern struct lu_object_operations osp_lu_obj_ops;
-extern const struct dt_device_operations osp_dt_ops;
-extern struct dt_object_operations osp_md_obj_ops;
-extern struct dt_body_operations osp_md_body_ops;
+extern const struct lu_object_operations osp_lu_obj_ops;
+extern const struct dt_object_operations osp_md_obj_ops;
+extern const struct dt_body_operations osp_md_body_ops;
struct osp_thread_info {
struct lu_buf osi_lb;
return osp->opd_dt_dev.dd_lu_dev.ld_site->ld_seq_site;
}
-/**
- * Serializes in-flight MDT-modifying RPC requests to preserve idempotency.
- *
- * This mutex is used to implement execute-once semantics on the MDT.
- * The MDT stores the last transaction ID and result for every client in
- * its last_rcvd file. If the client doesn't get a reply, it can safely
- * resend the request and the MDT will reconstruct the reply being aware
- * that the request has already been executed. Without this lock,
- * execution status of concurrent in-flight requests would be
- * overwritten.
- *
- * This implementation limits the extent to which we can keep a full pipeline
- * of in-flight requests from a single client. This limitation can be
- * overcome by allowing multiple slots per client in the last_rcvd file,
- * see LU-6864.
- */
-#define OSP_FAKE_RPCL_IT ((void *)0x2c0012bfUL)
-
-static inline void osp_init_rpc_lock(struct osp_device *osp)
-{
- struct osp_rpc_lock *lck = &osp->opd_rpc_lock;
-
- mutex_init(&lck->rpcl_mutex);
- lck->rpcl_fakes = 0;
-}
-
-static inline void osp_get_rpc_lock(struct osp_device *osp)
-{
- struct osp_rpc_lock *lck = &osp->opd_rpc_lock;
-
- /* This would normally block until the existing request finishes.
- * If fail_loc is set it will block until the regular request is
- * done, then increment rpcl_fakes. Once that is non-zero it
- * will only be cleared when all fake requests are finished.
- * Only when all fake requests are finished can normal requests
- * be sent, to ensure they are recoverable again.
- */
- again:
- mutex_lock(&lck->rpcl_mutex);
-
- if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_MDC_RPCS_SEM) ||
- CFS_FAIL_CHECK_QUIET(OBD_FAIL_OSP_RPCS_SEM)) {
- lck->rpcl_fakes++;
- mutex_unlock(&lck->rpcl_mutex);
-
- return;
- }
-
- /* This will only happen when the CFS_FAIL_CHECK() was just turned
- * off but there are still requests in progress. Wait until they
- * finish. It doesn't need to be efficient in this extremely rare
- * case, just have low overhead in the common case when it isn't true.
- */
- if (unlikely(lck->rpcl_fakes)) {
- mutex_unlock(&lck->rpcl_mutex);
- schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
-
- goto again;
- }
-}
-
-static inline void osp_put_rpc_lock(struct osp_device *osp)
-{
- struct osp_rpc_lock *lck = &osp->opd_rpc_lock;
-
- if (lck->rpcl_fakes) { /* OBD_FAIL_OSP_RPCS_SEM */
- mutex_lock(&lck->rpcl_mutex);
-
- if (lck->rpcl_fakes) /* check again under lock */
- lck->rpcl_fakes--;
- }
-
- mutex_unlock(&lck->rpcl_mutex);
-}
-
static inline int osp_fid_diff(const struct lu_fid *fid1,
const struct lu_fid *fid2)
{