#endif
/* updates to following flag serialised by srv_request_lock */
unsigned long rs_difficult:1; /* ACK/commit stuff */
+ unsigned long rs_no_ack:1; /* no ACK, even for
+ difficult requests */
unsigned long rs_scheduled:1; /* being handled? */
unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
unsigned long rs_handled:1; /* been handled yet? */
RQ_PHASE_COMPLETE = 0xebc0de04,
};
+/** Type of request interpreter call-back; \a arg is caller state and \a rc the request's result code (NB: "interpterer" misspelling is the established public symbol name) */
+typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
+ struct ptlrpc_request *req,
+ void *arg, int rc);
+
struct ptlrpc_request_pool {
spinlock_t prp_lock;
struct list_head prp_req_list; /* list of ptlrpc_request structs */
int srv_watchdog_factor; /* soft watchdog timeout mutiplier */
unsigned srv_cpu_affinity:1; /* bind threads to CPUs */
unsigned srv_at_check:1; /* check early replies */
+ unsigned srv_is_stopping:1; /* under unregister_service */
cfs_time_t srv_at_checktime; /* debug */
__u32 srv_req_portal;
* Thread name used in cfs_daemonize()
*/
char pc_name[16];
+ /**
+ * Environment for request interpreters to run in.
+ */
+ struct lu_env pc_env;
#ifndef __KERNEL__
/**
* Async rpcs flag to make sure that ptlrpcd_check() is called only
int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
set_interpreter_func fn, void *data);
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
-int ptlrpc_check_set(struct ptlrpc_request_set *set);
+int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set);
int ptlrpc_set_wait(struct ptlrpc_request_set *);
int ptlrpc_expired_set(void *data);
void ptlrpc_interrupted_set(void *data);
/* ptlrpc/service.c */
void ptlrpc_save_lock (struct ptlrpc_request *req,
- struct lustre_handle *lock, int mode);
+ struct lustre_handle *lock, int mode, int no_ack);
void ptlrpc_commit_replies (struct obd_device *obd);
void ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs);
struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,