struct list_head rq_exp_list; /* server-side per-export list */
struct ptlrpc_hpreq_ops *rq_ops; /* server-side hp handlers */
__u64 rq_history_seq; /* history sequence # */
+ /* index into the service's srv_at_array at which this request is linked */
+ time_t rq_at_index;
int rq_status;
spinlock_t rq_lock;
/* client-side flags are serialized by rq_lock */
* after server commits corresponding transaction. This is
* used for operations that require sequence of multiple
* requests to be replayed. The only example currently is file
- * open/close/dw/setattr. When last request in such a sequence
- * is committed, ->rq_replay is cleared on all requests in the
+ * open/close. When last request in such a sequence is
+ * committed, ->rq_replay is cleared on all requests in the
* sequence.
*/
rq_replay:1,
- /* this is the last request in the sequence. */
- rq_sequence:1,
rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
rq_early:1, rq_must_unlink:1,
/* server-side flags */
rq_packed_final:1, /* packed final reply */
rq_sent_final:1, /* stop sending early replies */
- rq_hp:1; /* high priority RPC */
+ rq_hp:1, /* high priority RPC */
+ rq_at_linked:1; /* linked into service's srv_at_array */
enum rq_phase rq_phase; /* one of RQ_PHASE_* */
enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
__u64 rq_transno;
__u64 rq_xid;
struct list_head rq_replay_list;
- struct list_head rq_mod_list;
struct ptlrpc_cli_ctx *rq_cli_ctx; /* client's half ctx */
struct ptlrpc_svc_ctx *rq_svc_ctx; /* server's half ctx */
struct req_capsule rq_pill;
};
-static inline void ptlrpc_close_replay_seq(struct ptlrpc_request *req)
-{
- spin_lock(&req->rq_lock);
- req->rq_replay = 0;
- req->rq_sequence = 1;
- spin_unlock(&req->rq_lock);
-}
-
static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
{
LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
/* AT stuff */
struct adaptive_timeout srv_at_estimate;/* estimated rpc service time */
spinlock_t srv_at_lock;
- struct list_head srv_at_list; /* reqs waiting for replies */
+ struct ptlrpc_at_array srv_at_array; /* reqs waiting for replies */
cfs_timer_t srv_at_timer; /* early reply timer */
int srv_n_queued_reqs; /* # reqs in either of the queues below */
int ptlrpc_unregister_reply(struct ptlrpc_request *req, int async);
void ptlrpc_restart_req(struct ptlrpc_request *req);
void ptlrpc_abort_inflight(struct obd_import *imp);
+void ptlrpc_cleanup_imp(struct obd_import *imp);
void ptlrpc_abort_set(struct ptlrpc_request_set *set);
struct ptlrpc_request_set *ptlrpc_prep_set(void);
/* ptlrpc/service.c */
void ptlrpc_save_lock (struct ptlrpc_request *req,
struct lustre_handle *lock, int mode, int no_ack);
-void ptlrpc_commit_replies (struct obd_device *obd);
+void ptlrpc_commit_replies(struct obd_export *exp);
void ptlrpc_dispatch_difficult_reply (struct ptlrpc_reply_state *rs);
void ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs);
struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
__u32 lustre_msg_get_opc(struct lustre_msg *msg);
__u64 lustre_msg_get_last_xid(struct lustre_msg *msg);
__u64 lustre_msg_get_last_committed(struct lustre_msg *msg);
+__u64 *lustre_msg_get_versions(struct lustre_msg *msg);
__u64 lustre_msg_get_transno(struct lustre_msg *msg);
__u64 lustre_msg_get_slv(struct lustre_msg *msg);
__u32 lustre_msg_get_limit(struct lustre_msg *msg);
void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc);
void lustre_msg_set_last_xid(struct lustre_msg *msg, __u64 last_xid);
void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed);
+void lustre_msg_set_versions(struct lustre_msg *msg, __u64 *versions);
void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno);
void lustre_msg_set_status(struct lustre_msg *msg, __u32 status);
void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt);
{
if (req->rq_phase == new_phase)
return;
-
+
if (new_phase == RQ_PHASE_UNREGISTERING) {
req->rq_next_phase = req->rq_phase;
if (req->rq_import)
atomic_inc(&req->rq_import->imp_unregistering);
}
-
+
if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
if (req->rq_import)
atomic_dec(&req->rq_import->imp_unregistering);
}
- DEBUG_REQ(D_RPCTRACE, req, "move req \"%s\" -> \"%s\"",
+ DEBUG_REQ(D_RPCTRACE, req, "move req \"%s\" -> \"%s\"",
ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
req->rq_phase = new_phase;
int priority);
int client_import_del_conn(struct obd_import *imp, struct obd_uuid *uuid);
int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid);
+void client_destroy_import(struct obd_import *imp);
/* ptlrpc/pinger.c */
+enum timeout_event {
+ TIMEOUT_GRANT = 1
+};
+struct timeout_item;
+typedef int (*timeout_cb_t)(struct timeout_item *, void *);
int ptlrpc_pinger_add_import(struct obd_import *imp);
int ptlrpc_pinger_del_import(struct obd_import *imp);
+int ptlrpc_add_timeout_client(int time, enum timeout_event event,
+ timeout_cb_t cb, void *data,
+ struct list_head *obd_list);
+int ptlrpc_del_timeout_client(struct list_head *obd_list,
+ enum timeout_event event);
struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
int ptlrpc_obd_ping(struct obd_device *obd);
cfs_time_t ptlrpc_suspend_wakeup_time(void);
void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force);
void ptlrpcd_wake(struct ptlrpc_request *req);
void ptlrpcd_add_req(struct ptlrpc_request *req, enum ptlrpcd_scope scope);
+void ptlrpcd_add_rqset(struct ptlrpc_request_set *set);
int ptlrpcd_addref(void);
void ptlrpcd_decref(void);