X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fosp%2Fosp_internal.h;h=57dd8f4fa186b7af5c10b496969cc4d7aa1f6dc7;hp=7246cd2b7e15b66e47e2007c03ce0f766b0219d1;hb=63e17799a369e2ff0b140fd41dc5d7d8656d2bf0;hpb=46f1fc6c1ba493cd81e47917c413951a0f096d5e diff --git a/lustre/osp/osp_internal.h b/lustre/osp/osp_internal.h index 7246cd2..57dd8f4 100644 --- a/lustre/osp/osp_internal.h +++ b/lustre/osp/osp_internal.h @@ -27,7 +27,6 @@ */ /* * This file is part of Lustre, http://www.lustre.org/ - * Lustre is a trademark of Sun Microsystems, Inc. * * lustre/osp/osp_internal.h * @@ -148,13 +147,6 @@ struct osp_updates { struct lu_env ou_env; }; -struct osp_rpc_lock { - /** Lock protecting in-flight RPC concurrency. */ - struct mutex rpcl_mutex; - /** Used for MDS/RPC load testing purposes. */ - unsigned int rpcl_fakes; -}; - struct osp_device { struct dt_device opd_dt_dev; /* corresponded OST index */ @@ -177,12 +169,10 @@ struct osp_device { u64 opd_last_id; struct lu_fid opd_gap_start_fid; int opd_gap_count; - /* connection to OST */ - struct osp_rpc_lock opd_rpc_lock; struct obd_device *opd_obd; struct obd_export *opd_exp; struct obd_connect_data *opd_connect_data; - int opd_connects; + /* connection status. */ unsigned int opd_new_connection:1, opd_got_disconnected:1, @@ -214,6 +204,8 @@ struct osp_device { struct llog_gen opd_sync_generation; /* number of changes to sync, used to wake up sync thread */ atomic_t opd_sync_changes; + /* limit of changes to sync */ + int opd_sync_max_changes; /* processing of changes from previous mount is done? */ int opd_sync_prev_done; /* found records */ @@ -276,6 +268,8 @@ struct osp_device { */ int opd_reserved_mb_high; int opd_reserved_mb_low; + bool opd_cleanup_orphans_done; + bool opd_force_creation; }; #define opd_pre_used_fid opd_pre->osp_pre_used_fid @@ -295,14 +289,15 @@ extern struct kmem_cache *osp_object_kmem; * The left part is for value, binary mode. */ struct osp_xattr_entry { struct list_head oxe_list; - atomic_t oxe_ref; void *oxe_value; - size_t oxe_buflen; - size_t oxe_namelen; - size_t oxe_vallen; - unsigned int oxe_exist:1, - oxe_ready:1; - char oxe_buf[0]; + atomic_t oxe_ref; + unsigned int oxe_buflen; + unsigned int oxe_vallen; + unsigned short oxe_namelen; + unsigned short oxe_exist:1, + oxe_ready:1, + oxe_largebuf:1; + char oxe_name[0]; }; /* this is a top object */ @@ -311,7 +306,8 @@ struct osp_object { struct dt_object opo_obj; unsigned int opo_reserved:1, opo_non_exist:1, - opo_stale:1; + opo_stale:1, + opo_destroyed:1; /* read/write lock for md osp object */ struct rw_semaphore opo_sem; @@ -326,10 +322,9 @@ struct osp_object { struct rw_semaphore opo_invalidate_sem; }; -extern struct lu_object_operations osp_lu_obj_ops; -extern const struct dt_device_operations osp_dt_ops; -extern struct dt_object_operations osp_md_obj_ops; -extern struct dt_body_operations osp_md_body_ops; +extern const struct lu_object_operations osp_lu_obj_ops; +extern const struct dt_object_operations osp_md_obj_ops; +extern const struct dt_body_operations osp_md_body_ops; struct osp_thread_info { struct lu_buf osi_lb; @@ -507,81 +502,6 @@ static inline struct seq_server_site *osp_seq_site(struct osp_device *osp) return osp->opd_dt_dev.dd_lu_dev.ld_site->ld_seq_site; } -/** - * Serializes in-flight MDT-modifying RPC requests to preserve idempotency. - * - * This mutex is used to implement execute-once semantics on the MDT. - * The MDT stores the last transaction ID and result for every client in - * its last_rcvd file. 
- * If the client doesn't get a reply, it can safely
- * resend the request and the MDT will reconstruct the reply being aware
- * that the request has already been executed. Without this lock,
- * execution status of concurrent in-flight requests would be
- * overwritten.
- *
- * This implementation limits the extent to which we can keep a full pipeline
- * of in-flight requests from a single client. This limitation can be
- * overcome by allowing multiple slots per client in the last_rcvd file,
- * see LU-6864.
- */
-#define OSP_FAKE_RPCL_IT ((void *)0x2c0012bfUL)
-
-static inline void osp_init_rpc_lock(struct osp_device *osp)
-{
-	struct osp_rpc_lock *lck = &osp->opd_rpc_lock;
-
-	mutex_init(&lck->rpcl_mutex);
-	lck->rpcl_fakes = 0;
-}
-
-static inline void osp_get_rpc_lock(struct osp_device *osp)
-{
-	struct osp_rpc_lock *lck = &osp->opd_rpc_lock;
-
-	/* This would normally block until the existing request finishes.
-	 * If fail_loc is set it will block until the regular request is
-	 * done, then increment rpcl_fakes.  Once that is non-zero it
-	 * will only be cleared when all fake requests are finished.
-	 * Only when all fake requests are finished can normal requests
-	 * be sent, to ensure they are recoverable again.
-	 */
- again:
-	mutex_lock(&lck->rpcl_mutex);
-
-	if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_MDC_RPCS_SEM) ||
-	    CFS_FAIL_CHECK_QUIET(OBD_FAIL_OSP_RPCS_SEM)) {
-		lck->rpcl_fakes++;
-		mutex_unlock(&lck->rpcl_mutex);
-
-		return;
-	}
-
-	/* This will only happen when the CFS_FAIL_CHECK() was just turned
-	 * off but there are still requests in progress.  Wait until they
-	 * finish.  It doesn't need to be efficient in this extremely rare
-	 * case, just have low overhead in the common case when it isn't true.
-	 */
-	if (unlikely(lck->rpcl_fakes)) {
-		mutex_unlock(&lck->rpcl_mutex);
-		schedule_timeout_uninterruptible(cfs_time_seconds(1) / 4);
-
-		goto again;
-	}
-}
-
-static inline void osp_put_rpc_lock(struct osp_device *osp)
-{
-	struct osp_rpc_lock *lck = &osp->opd_rpc_lock;
-
-	if (lck->rpcl_fakes) { /* OBD_FAIL_OSP_RPCS_SEM */
-		mutex_lock(&lck->rpcl_mutex);
-
-		if (lck->rpcl_fakes) /* check again under lock */
-			lck->rpcl_fakes--;
-	}
-
-	mutex_unlock(&lck->rpcl_mutex);
-}
-
 static inline int osp_fid_diff(const struct lu_fid *fid1,
 			       const struct lu_fid *fid2)
 {
@@ -818,6 +738,7 @@ int osp_declare_xattr_del(const struct lu_env *env, struct dt_object *dt,
 int osp_xattr_del(const struct lu_env *env, struct dt_object *dt,
 		  const char *name, struct thandle *th);
 int osp_invalidate(const struct lu_env *env, struct dt_object *dt);
+bool osp_check_stale(struct dt_object *dt);
 void osp_obj_invalidate_cache(struct osp_object *obj);
 
 int osp_trans_stop(const struct lu_env *env, struct dt_device *dt,
@@ -849,7 +770,8 @@ extern const struct dt_index_operations osp_md_index_ops;
 
 /* osp_precreate.c */
 int osp_init_precreate(struct osp_device *d);
-int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d);
+int osp_precreate_reserve(const struct lu_env *env,
			  struct osp_device *d, bool can_block);
 __u64 osp_precreate_get_id(struct osp_device *d);
 int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
			   struct lu_fid *fid);
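
For context on the reworked struct osp_xattr_entry in the hunk above: the trailing "char oxe_name[0]" member is the usual C flexible-array idiom, where the entry header, the NUL-terminated attribute name and (when it is small enough) the value share a single allocation, while a flag like oxe_largebuf presumably marks values that had to spill into a separately allocated buffer referenced by oxe_value. The sketch below is a minimal, self-contained illustration of that idiom using hypothetical names (xattr_entry, xattr_entry_new, xattr_entry_free); it is an assumption-laden simplification for readers, not the actual Lustre code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for struct osp_xattr_entry (hypothetical names):
 * one allocation holds the header, the NUL-terminated name and, when it
 * fits, the value itself; "largebuf" records that the value had to be
 * placed in a separately allocated buffer instead. */
struct xattr_entry {
	void		*value;		/* points into buf area or a large buffer */
	unsigned int	 buflen;	/* bytes of inline value space after the name */
	unsigned int	 vallen;	/* bytes of value currently stored */
	unsigned short	 namelen;	/* strlen(name) */
	unsigned short	 largebuf:1;	/* value lives outside the inline buffer */
	char		 name[0];	/* name, then optional inline value */
};

static struct xattr_entry *xattr_entry_new(const char *name,
					   const void *val, size_t vallen,
					   size_t inline_room)
{
	size_t namelen = strlen(name);
	struct xattr_entry *e;

	e = calloc(1, sizeof(*e) + namelen + 1 + inline_room);
	if (e == NULL)
		return NULL;

	e->namelen = namelen;
	e->buflen = inline_room;
	memcpy(e->name, name, namelen + 1);

	if (vallen <= inline_room) {
		e->value = e->name + namelen + 1;	/* inline value */
	} else {
		e->value = malloc(vallen);		/* spill to a large buffer */
		if (e->value == NULL) {
			free(e);
			return NULL;
		}
		e->largebuf = 1;
	}
	memcpy(e->value, val, vallen);
	e->vallen = vallen;
	return e;
}

static void xattr_entry_free(struct xattr_entry *e)
{
	if (e->largebuf)
		free(e->value);
	free(e);
}

int main(void)
{
	const char v[] = "lov-layout-bytes";
	struct xattr_entry *e = xattr_entry_new("trusted.lov", v, sizeof(v), 64);

	if (e != NULL) {
		printf("%s: %u byte(s), inline=%d\n",
		       e->name, e->vallen, !e->largebuf);
		xattr_entry_free(e);
	}
	return 0;
}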