* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#include <lustre_disk.h>
#include <lustre_lfsck.h>
+/* Each one represents a distribute transaction replay
+ * operation, and updates on each MDTs are linked to
+ * dtr_sub_list */
+struct distribute_txn_replay_req {
+ /* update record */
+ struct llog_update_record *dtrq_lur;
+ /* size of the dtrq_lur buffer (presumably in bytes --
+ * NOTE(review): confirm at the allocation site) */
+ int dtrq_lur_size;
+
+ /* linked to the distribute transaction replay
+ * list (tdtd_replay_list) */
+ struct list_head dtrq_list;
+
+ /* all of sub updates are linked here */
+ struct list_head dtrq_sub_list;
+ /* protects dtrq_sub_list */
+ spinlock_t dtrq_sub_list_lock;
+};
+
+/* Each one represents a sub replay item under a distribute
+ * transaction. A distribute transaction will be operated in
+ * two or more MDTs, and updates on each MDT will be represented
+ * by this structure */
+struct distribute_txn_replay_req_sub {
+ /* index of the MDT whose updates this sub item describes */
+ __u32 dtrqs_mdt_index;
+
+ /* All of cookies for the update will be linked here,
+ * protected by dtrqs_cookie_list_lock */
+ spinlock_t dtrqs_cookie_list_lock;
+ struct list_head dtrqs_cookie_list;
+ /* linked to dtrq_sub_list of the owning
+ * distribute_txn_replay_req */
+ struct list_head dtrqs_list;
+};
+
+struct target_distribute_txn_data;
+/* Handler invoked to replay one distribute transaction during
+ * recovery; installed as tdtd_replay_handler (see
+ * target_distribute_txn_data below) */
+typedef int (*distribute_txn_replay_handler_t)(struct lu_env *env,
+ struct target_distribute_txn_data *tdtd,
+ struct distribute_txn_replay_req *dtrq);
+struct target_distribute_txn_data {
+ /* Distribution ID is used to identify updates log on different
+ * MDTs for one operation */
+ /* protects tdtd_batchid */
+ spinlock_t tdtd_batchid_lock;
+ __u64 tdtd_batchid;
+ struct lu_target *tdtd_lut;
+ /* object holding the batchid on disk (NOTE(review): inferred
+ * from the name -- confirm in update_trans.c) */
+ struct dt_object *tdtd_batchid_obj;
+ struct dt_device *tdtd_dt;
+
+ /* Committed batchid for distribute transaction */
+ __u64 tdtd_committed_batchid;
+
+ /* List for distribute transaction */
+ struct list_head tdtd_list;
+
+ /* Threads to manage distribute transaction */
+ wait_queue_head_t tdtd_commit_thread_waitq;
+ atomic_t tdtd_refcount;
+
+ /* recovery update */
+ distribute_txn_replay_handler_t tdtd_replay_handler;
+ struct list_head tdtd_replay_list;
+ /* protects tdtd_replay_list */
+ spinlock_t tdtd_replay_list_lock;
+ /* last replay update transno */
+ __u64 tdtd_last_update_transno;
+ /* presumably set once the replay list has been populated and
+ * replay may start -- NOTE(review): confirm in update_recovery.c */
+ __u32 tdtd_replay_ready:1;
+
+};
+
struct lu_target {
struct obd_device *lut_obd;
struct dt_device *lut_bottom;
+ /* distribute transaction recovery data; NULL-ness/ownership
+ * not visible here -- see distribute_txn_init/fini */
+ struct target_distribute_txn_data *lut_tdtd;
+ /* thread driving distribute transaction commit (presumably
+ * waits on tdtd_commit_thread_waitq -- confirm) */
+ struct ptlrpc_thread lut_tdtd_commit_thread;
+
/* supported opcodes and handlers for this target */
struct tgt_opc_slice *lut_slice;
__u32 lut_reply_fail_id;
struct sptlrpc_rule_set lut_sptlrpc_rset;
spinlock_t lut_flags_lock;
int lut_sec_level;
- unsigned int lut_mds_capa:1,
- lut_oss_capa:1,
- lut_syncjournal:1,
+ unsigned int lut_syncjournal:1,
lut_sync_lock_cancel:2,
/* e.g. OST node */
lut_no_reconstruct:1;
spinlock_t lut_client_bitmap_lock;
/** Bitmap of known clients */
unsigned long *lut_client_bitmap;
+ /* Number of clients supporting multiple modify RPCs
+ * recorded in the bitmap */
+ atomic_t lut_num_clients;
+ /* Client generation to identify client slot reuse */
+ atomic_t lut_client_generation;
+ /** reply_data file */
+ struct dt_object *lut_reply_data;
+ /** Bitmap of used slots in the reply data file
+ * (array of up to LUT_REPLY_SLOTS_MAX_CHUNKS chunks) */
+ unsigned long **lut_reply_bitmap;
+};
+
+/* number of slots in reply bitmap: each chunk covers 1<<20
+ * slots, so 16 chunks track up to 16M reply slots in total */
+#define LUT_REPLY_SLOTS_PER_CHUNK (1<<20)
+#define LUT_REPLY_SLOTS_MAX_CHUNKS 16
+
+/**
+ * Target reply data
+ *
+ * In-memory copy of one slot of the on-disk reply_data file,
+ * kept per export so resent requests can be answered from it.
+ */
+struct tg_reply_data {
+ /** chain of reply data anchored in tg_export_data */
+ struct list_head trd_list;
+ /** copy of on-disk reply data */
+ struct lsd_reply_data trd_reply;
+ /** versions for Version Based Recovery */
+ __u64 trd_pre_versions[4];
+ /** slot index in reply_data file */
+ int trd_index;
+ /** tag the client used */
+ __u16 trd_tag;
extern struct lu_context_key tgt_session_key;
return !!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY);
}
+/* Returns true iff the export negotiated OBD_CONNECT_MULTIMODRPCS,
+ * i.e. the client supports multiple modify RPCs (see
+ * lut_num_clients in struct lu_target) */
+static inline bool tgt_is_multimodrpcs_client(struct obd_export *exp)
+{
+ return exp_connect_flags(exp) & OBD_CONNECT_MULTIMODRPCS;
+}
+
+
/* target/tgt_handler.c */
int tgt_request_handle(struct ptlrpc_request *req);
char *tgt_name(struct lu_target *tgt);
int tgt_hpreq_handler(struct ptlrpc_request *req);
void tgt_register_lfsck_in_notify(int (*notify)(const struct lu_env *,
struct dt_device *,
- struct lfsck_request *));
+ struct lfsck_request *,
+ struct thandle *));
void tgt_register_lfsck_query(int (*query)(const struct lu_env *,
struct dt_device *,
struct lfsck_request *));
+/* presumably: decide whether a resent request can be answered from
+ * saved reply data (trd) -- NOTE(review): confirm in tgt_handler.c */
+bool req_can_reconstruct(struct ptlrpc_request *req, struct tg_reply_data *trd);
-extern struct tgt_handler tgt_sec_ctx_handlers[];
extern struct tgt_handler tgt_lfsck_handlers[];
extern struct tgt_handler tgt_obd_handlers[];
extern struct tgt_handler tgt_dlm_handlers[];
int sync);
int tgt_truncate_last_rcvd(const struct lu_env *env, struct lu_target *tg,
loff_t off);
+/* set up lut_reply_data / lut_reply_bitmap for the target */
+int tgt_reply_data_init(const struct lu_env *env, struct lu_target *tgt);
+/* look up a saved reply for req; fills *trd on success (inferred
+ * from the name -- confirm at call sites) */
+bool tgt_lookup_reply(struct ptlrpc_request *req, struct tg_reply_data *trd);
+
+/* target/update_trans.c */
+int distribute_txn_init(const struct lu_env *env,
+ struct lu_target *lut,
+ struct target_distribute_txn_data *tdtd,
+ __u32 index);
+void distribute_txn_fini(const struct lu_env *env,
+ struct target_distribute_txn_data *tdtd);
+
+/* target/update_recovery.c */
+/* add one update record (with its llog cookie and, per the __u32
+ * parameter, presumably the MDT index) to the tdtd replay list */
+int insert_update_records_to_replay_list(struct target_distribute_txn_data *,
+ struct llog_update_record *,
+ struct llog_cookie *, __u32);
+void dtrq_list_dump(struct target_distribute_txn_data *tdtd,
+ unsigned int mask);
+void dtrq_list_destroy(struct target_distribute_txn_data *tdtd);
+int distribute_txn_replay_handle(struct lu_env *env,
+ struct target_distribute_txn_data *tdtd,
+ struct distribute_txn_replay_req *dtrq);
+__u64 distribute_txn_get_next_transno(struct target_distribute_txn_data *tdtd);
+struct distribute_txn_replay_req *
+distribute_txn_get_next_req(struct target_distribute_txn_data *tdtd);
+void dtrq_destroy(struct distribute_txn_replay_req *dtrq);
+struct distribute_txn_replay_req_sub *
+dtrq_sub_lookup(struct distribute_txn_replay_req *dtrq, __u32 mdt_index);
enum {
ESERIOUS = 0x0001000