-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
# endif
#endif /* __KERNEL__ */
+#define PTLRPC_NTHRS_MIN 2
+
/**
* The following constants determine how memory is used to buffer incoming
* service requests.
#define LDLM_MAXREPSIZE (1024)
/** Absolute limits */
-#define MDT_MIN_THREADS 2UL
#ifndef MDT_MAX_THREADS
+#define MDT_MIN_THREADS PTLRPC_NTHRS_MIN
#define MDT_MAX_THREADS 512UL
#endif
#define MDS_NBUFS (64 * cfs_num_online_cpus())
* least big enough for that.
*/
void *pointer_arg[11];
- __u64 space[6];
+ __u64 space[7];
};
struct ptlrpc_request_set;
typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int);
+typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);
/**
* Definition of request set structure.
* returned.
*/
struct ptlrpc_request_set {
- cfs_atomic_t set_refcount;
- /** number of in queue requests */
- cfs_atomic_t set_new_count;
- /** number of uncompleted requests */
- cfs_atomic_t set_remaining;
- /** wait queue to wait on for request events */
- cfs_waitq_t set_waitq;
- cfs_waitq_t *set_wakeup_ptr;
- /** List of requests in the set */
- cfs_list_t set_requests;
- /**
- * List of completion callbacks to be called when the set is completed
- * This is only used if \a set_interpret is NULL.
- * Links struct ptlrpc_set_cbdata.
- */
- cfs_list_t set_cblist;
- /** Completion callback, if only one. */
- set_interpreter_func set_interpret;
- /** opaq argument passed to completion \a set_interpret callback. */
- void *set_arg;
- /**
- * Lock for \a set_new_requests manipulations
- * locked so that any old caller can communicate requests to
- * the set holder who can then fold them into the lock-free set
- */
- cfs_spinlock_t set_new_req_lock;
- /** List of new yet unsent requests. Only used with ptlrpcd now. */
- cfs_list_t set_new_requests;
+ cfs_atomic_t set_refcount;
+ /** number of in queue requests */
+ cfs_atomic_t set_new_count;
+ /** number of uncompleted requests */
+ cfs_atomic_t set_remaining;
+ /** wait queue to wait on for request events */
+ cfs_waitq_t set_waitq;
+ cfs_waitq_t *set_wakeup_ptr;
+ /** List of requests in the set */
+ cfs_list_t set_requests;
+ /**
+ * List of completion callbacks to be called when the set is completed
+ * This is only used if \a set_interpret is NULL.
+ * Links struct ptlrpc_set_cbdata.
+ */
+ cfs_list_t set_cblist;
+ /** Completion callback, if only one. */
+ set_interpreter_func set_interpret;
+ /** opaq argument passed to completion \a set_interpret callback. */
+ void *set_arg;
+ /**
+ * Lock for \a set_new_requests manipulations
+ * locked so that any old caller can communicate requests to
+ * the set holder who can then fold them into the lock-free set
+ */
+ cfs_spinlock_t set_new_req_lock;
+ /** List of new yet unsent requests. Only used with ptlrpcd now. */
+ cfs_list_t set_new_requests;
+
+ /** rq_status of requests that have been freed already */
+ int set_rc;
+ /** Additional fields used by the flow control extension */
+ /** Maximum number of RPCs in flight */
+ int set_max_inflight;
+ /** Callback function used to generate RPCs */
+ set_producer_func set_producer;
+ /** opaq argument passed to the producer callback */
+ void *set_producer_arg;
};
/**
};
struct ptlrpc_bulk_desc;
+struct ptlrpc_service_part;
/**
* ptlrpc callback & work item stuff
/** xid */
__u64 rs_xid;
struct obd_export *rs_export;
- struct ptlrpc_service *rs_service;
+ struct ptlrpc_service_part *rs_svcpt;
/** Lnet metadata handle for the reply */
lnet_handle_md_t rs_md_h;
cfs_atomic_t rs_refcount;
rq_reply_truncate:1,
rq_committed:1,
/* whether the "rq_set" is a valid one */
- rq_invalid_rqset:1;
+ rq_invalid_rqset:1,
+ rq_generation_set:1;
enum rq_phase rq_phase; /* one of RQ_PHASE_* */
enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
SVC_SIGNAL = 1 << 5,
};
+#define PTLRPC_THR_NAME_LEN 32
/**
* Definition of server service thread structure
*/
/**
* the svc this thread belonged to b=18582
*/
- struct ptlrpc_service *t_svc;
- cfs_waitq_t t_ctl_waitq;
- struct lu_env *t_env;
+ struct ptlrpc_service_part *t_svcpt;
+ cfs_waitq_t t_ctl_waitq;
+ struct lu_env *t_env;
+ char t_name[PTLRPC_THR_NAME_LEN];
};
+static inline int thread_is_init(struct ptlrpc_thread *thread)
+{
+ return thread->t_flags == 0;
+}
+
static inline int thread_is_stopped(struct ptlrpc_thread *thread)
{
return !!(thread->t_flags & SVC_STOPPED);
/** History of requests for this buffer */
cfs_list_t rqbd_reqs;
/** Back pointer to service for which this buffer is registered */
- struct ptlrpc_service *rqbd_service;
+ struct ptlrpc_service_part *rqbd_svcpt;
/** LNet descriptor */
lnet_handle_md_t rqbd_md_h;
int rqbd_refcount;
struct ptlrpc_request rqbd_req;
};
-typedef int (*svc_thr_init_t)(struct ptlrpc_thread *thread);
-typedef void (*svc_thr_done_t)(struct ptlrpc_thread *thread);
typedef int (*svc_handler_t)(struct ptlrpc_request *req);
-typedef int (*svc_hpreq_handler_t)(struct ptlrpc_request *);
-typedef void (*svc_req_printfn_t)(void *, struct ptlrpc_request *);
+
+struct ptlrpc_service_ops {
+ /**
+ * if non-NULL called during thread creation (ptlrpc_start_thread())
+ * to initialize service specific per-thread state.
+ */
+ int (*so_thr_init)(struct ptlrpc_thread *thr);
+ /**
+ * if non-NULL called during thread shutdown (ptlrpc_main()) to
+ * destruct state created by ->so_thr_init().
+ */
+ void (*so_thr_done)(struct ptlrpc_thread *thr);
+ /**
+ * Handler function for incoming requests for this service
+ */
+ int (*so_req_handler)(struct ptlrpc_request *req);
+ /**
+ * function to determine priority of the request, it's called
+ * on every new request
+ */
+ int (*so_hpreq_handler)(struct ptlrpc_request *);
+ /**
+ * service-specific print fn
+ */
+ void (*so_req_printer)(void *, struct ptlrpc_request *);
+};
#ifndef __cfs_cacheline_aligned
/* NB: put it here for reducing patche dependence */
* The service is listening on a particular portal (like tcp port)
* and perform actions for a specific server like IO service for OST
* or general metadata service for MDS.
- *
- * ptlrpc service has four locks:
- * \a srv_lock
- * serialize operations on rqbd and requests waiting for preprocess
- * \a srv_rq_lock
- * serialize operations active requests sent to this portal
- * \a srv_at_lock
- * serialize adaptive timeout stuff
- * \a srv_rs_lock
- * serialize operations on RS list (reply states)
- *
- * We don't have any use-case to take two or more locks at the same time
- * for now, so there is no lock order issue.
*/
struct ptlrpc_service {
+ /** serialize /proc operations */
+ cfs_spinlock_t srv_lock;
/** most often accessed fields */
/** chain thru all services */
cfs_list_t srv_list;
+ /** service operations table */
+ struct ptlrpc_service_ops srv_ops;
/** only statically allocated strings here; we don't clean them */
char *srv_name;
/** only statically allocated strings here; we don't clean them */
int srv_threads_min;
/** thread upper limit */
int srv_threads_max;
- /** always increasing number */
- unsigned srv_threads_next_id;
- /** # of starting threads */
- int srv_threads_starting;
- /** # running threads */
- int srv_threads_running;
-
- /** service operations, move to ptlrpc_svc_ops_t in the future */
- /** @{ */
- /**
- * if non-NULL called during thread creation (ptlrpc_start_thread())
- * to initialize service specific per-thread state.
- */
- svc_thr_init_t srv_init;
- /**
- * if non-NULL called during thread shutdown (ptlrpc_main()) to
- * destruct state created by ->srv_init().
- */
- svc_thr_done_t srv_done;
- /** Handler function for incoming requests for this service */
- svc_handler_t srv_handler;
- /** hp request handler */
- svc_hpreq_handler_t srv_hpreq_handler;
- /** service-specific print fn */
- svc_req_printfn_t srv_req_printfn;
- /** @} */
-
/** Root of /proc dir tree for this service */
cfs_proc_dir_entry_t *srv_procroot;
/** Pointer to statistic data for this service */
/** under unregister_service */
unsigned srv_is_stopping:1;
- /**
- * serialize the following fields, used for protecting
- * rqbd list and incoming requests waiting for preprocess
- */
- cfs_spinlock_t srv_lock __cfs_cacheline_aligned;
- /** incoming reqs */
- cfs_list_t srv_req_in_queue;
- /** total # req buffer descs allocated */
- int srv_nbufs;
- /** # posted request buffers */
- int srv_nrqbd_receiving;
- /** timeout before re-posting reqs, in tick */
- cfs_duration_t srv_rqbd_timeout;
- /** request buffers to be reposted */
- cfs_list_t srv_idle_rqbds;
- /** req buffers receiving */
- cfs_list_t srv_active_rqbds;
- /** request buffer history */
- cfs_list_t srv_history_rqbds;
- /** # request buffers in history */
- int srv_n_history_rqbds;
- /** max # request buffers in history */
- int srv_max_history_rqbds;
- /** request history */
- cfs_list_t srv_request_history;
- /** next request sequence # */
- __u64 srv_request_seq;
- /** highest seq culled from history */
- __u64 srv_request_max_cull_seq;
- /**
- * all threads sleep on this. This wait-queue is signalled when new
- * incoming request arrives and when difficult reply has to be handled.
- */
- cfs_waitq_t srv_waitq;
-
- /**
- * serialize the following fields, used for processing requests
- * sent to this portal
- */
- cfs_spinlock_t srv_rq_lock __cfs_cacheline_aligned;
- /** # reqs in either of the queues below */
- /** reqs waiting for service */
- cfs_list_t srv_request_queue;
- /** high priority queue */
- cfs_list_t srv_request_hpq;
- /** # incoming reqs */
- int srv_n_queued_reqs;
- /** # reqs being served */
- int srv_n_active_reqs;
- /** # HPreqs being served */
- int srv_n_active_hpreq;
- /** # hp requests handled */
- int srv_hpreq_count;
-
- /** AT stuff */
- /** @{ */
- /**
- * serialize the following fields, used for changes on
- * adaptive timeout
- */
- cfs_spinlock_t srv_at_lock __cfs_cacheline_aligned;
- /** estimated rpc service time */
- struct adaptive_timeout srv_at_estimate;
- /** reqs waiting for replies */
- struct ptlrpc_at_array srv_at_array;
- /** early reply timer */
- cfs_timer_t srv_at_timer;
- /** check early replies */
- unsigned srv_at_check;
- /** debug */
- cfs_time_t srv_at_checktime;
- /** @} */
+ /**
+ * max # request buffers in history, it needs to be convert into
+ * per-partition value when we have multiple partitions
+ */
+ int srv_max_history_rqbds;
+ /**
+ * partition data for ptlrpc service, only one instance so far,
+ * instance per CPT will come soon
+ */
+ struct ptlrpc_service_part *srv_part;
+};
- /**
- * serialize the following fields, used for processing
- * replies for this portal
- */
- cfs_spinlock_t srv_rs_lock __cfs_cacheline_aligned;
- /** all the active replies */
- cfs_list_t srv_active_replies;
+/**
+ * Definition of PortalRPC service partition data.
+ * Although a service only has one instance of it right now, we
+ * will have multiple instances very soon (instance per CPT).
+ *
+ * it has four locks:
+ * \a scp_lock
+ * serialize operations on rqbd and requests waiting for preprocess
+ * \a scp_req_lock
+ * serialize operations active requests sent to this portal
+ * \a scp_at_lock
+ * serialize adaptive timeout stuff
+ * \a scp_rep_lock
+ * serialize operations on RS list (reply states)
+ *
+ * We don't have any use-case to take two or more locks at the same time
+ * for now, so there is no lock order issue.
+ */
+struct ptlrpc_service_part {
+ /** back reference to owner */
+ struct ptlrpc_service *scp_service __cfs_cacheline_aligned;
+ /** CPT id, reserved */
+ int scp_cpt;
+ /** always increasing number */
+ int scp_thr_nextid;
+ /** # of starting threads */
+ int scp_nthrs_starting;
+ /** # of stopping threads, reserved for shrinking threads */
+ int scp_nthrs_stopping;
+ /** # running threads */
+ int scp_nthrs_running;
+ /** service threads list */
+ cfs_list_t scp_threads;
+
+ /**
+ * serialize the following fields, used for protecting
+ * rqbd list and incoming requests waiting for preprocess,
+ * threads starting & stopping are also protected by this lock.
+ */
+ cfs_spinlock_t scp_lock __cfs_cacheline_aligned;
+ /** total # req buffer descs allocated */
+ int scp_nrqbds_total;
+ /** # posted request buffers for receiving */
+ int scp_nrqbds_posted;
+ /** # incoming reqs */
+ int scp_nreqs_incoming;
+ /** request buffers to be reposted */
+ cfs_list_t scp_rqbd_idle;
+ /** req buffers receiving */
+ cfs_list_t scp_rqbd_posted;
+ /** incoming reqs */
+ cfs_list_t scp_req_incoming;
+ /** timeout before re-posting reqs, in tick */
+ cfs_duration_t scp_rqbd_timeout;
+ /**
+ * all threads sleep on this. This wait-queue is signalled when new
+ * incoming request arrives and when difficult reply has to be handled.
+ */
+ cfs_waitq_t scp_waitq;
+
+ /** request history */
+ cfs_list_t scp_hist_reqs;
+ /** request buffer history */
+ cfs_list_t scp_hist_rqbds;
+ /** # request buffers in history */
+ int scp_hist_nrqbds;
+ /** sequence number for request */
+ __u64 scp_hist_seq;
+ /** highest seq culled from history */
+ __u64 scp_hist_seq_culled;
+
+ /**
+ * serialize the following fields, used for processing requests
+ * sent to this portal
+ */
+ cfs_spinlock_t scp_req_lock __cfs_cacheline_aligned;
+ /** # reqs in either of the queues below */
+ /** reqs waiting for service */
+ cfs_list_t scp_req_pending;
+ /** high priority queue */
+ cfs_list_t scp_hreq_pending;
+ /** # reqs being served */
+ int scp_nreqs_active;
+ /** # HPreqs being served */
+ int scp_nhreqs_active;
+ /** # hp requests handled */
+ int scp_hreq_count;
+
+ /** AT stuff */
+ /** @{ */
+ /**
+ * serialize the following fields, used for changes on
+ * adaptive timeout
+ */
+ cfs_spinlock_t scp_at_lock __cfs_cacheline_aligned;
+ /** estimated rpc service time */
+ struct adaptive_timeout scp_at_estimate;
+ /** reqs waiting for replies */
+ struct ptlrpc_at_array scp_at_array;
+ /** early reply timer */
+ cfs_timer_t scp_at_timer;
+ /** debug */
+ cfs_time_t scp_at_checktime;
+ /** check early replies */
+ unsigned scp_at_check;
+ /** @} */
+
+ /**
+ * serialize the following fields, used for processing
+ * replies for this portal
+ */
+ cfs_spinlock_t scp_rep_lock __cfs_cacheline_aligned;
+ /** all the active replies */
+ cfs_list_t scp_rep_active;
#ifndef __KERNEL__
- /** replies waiting for service */
- cfs_list_t srv_reply_queue;
+ /** replies waiting for service */
+ cfs_list_t scp_rep_queue;
#endif
- /** List of free reply_states */
- cfs_list_t srv_free_rs_list;
- /** waitq to run, when adding stuff to srv_free_rs_list */
- cfs_waitq_t srv_free_rs_waitq;
- /** # 'difficult' replies */
- cfs_atomic_t srv_n_difficult_replies;
- //struct ptlrpc_srv_ni srv_interfaces[0];
+ /** List of free reply_states */
+ cfs_list_t scp_rep_idle;
+ /** waitq to run, when adding stuff to scp_rep_idle */
+ cfs_waitq_t scp_rep_waitq;
+ /** # 'difficult' replies */
+ cfs_atomic_t scp_nreps_difficult;
};
/**
* underlying buffer
* @{
*/
-extern void request_out_callback (lnet_event_t *ev);
+extern void request_out_callback(lnet_event_t *ev);
extern void reply_in_callback(lnet_event_t *ev);
-extern void client_bulk_callback (lnet_event_t *ev);
+extern void client_bulk_callback(lnet_event_t *ev);
extern void request_in_callback(lnet_event_t *ev);
extern void reply_out_callback(lnet_event_t *ev);
-extern void server_bulk_callback (lnet_event_t *ev);
+#ifdef HAVE_SERVER_SUPPORT
+extern void server_bulk_callback(lnet_event_t *ev);
+#endif
/** @} */
/* ptlrpc/connection.c */
* Actual interfacing with LNet to put/get/register/unregister stuff
* @{
*/
+#ifdef HAVE_SERVER_SUPPORT
+struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
+ int npages, int type, int portal);
int ptlrpc_start_bulk_transfer(struct ptlrpc_bulk_desc *desc);
void ptlrpc_abort_bulk(struct ptlrpc_bulk_desc *desc);
-int ptlrpc_register_bulk(struct ptlrpc_request *req);
-int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
static inline int ptlrpc_server_bulk_active(struct ptlrpc_bulk_desc *desc)
{
cfs_spin_unlock(&desc->bd_lock);
return rc;
}
+#endif
+
+int ptlrpc_register_bulk(struct ptlrpc_request *req);
+int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async);
static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req)
{
void ptlrpc_resend_req(struct ptlrpc_request *request);
int ptlrpc_at_get_net_latency(struct ptlrpc_request *req);
int ptl_send_rpc(struct ptlrpc_request *request, int noreply);
-int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd);
+int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
/** @} */
/* ptlrpc/client.c */
void ptlrpc_abort_set(struct ptlrpc_request_set *set);
struct ptlrpc_request_set *ptlrpc_prep_set(void);
+struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func,
+ void *arg);
int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
set_interpreter_func fn, void *data);
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
void ptlrpc_req_finished(struct ptlrpc_request *request);
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request);
struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req);
-struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp (struct ptlrpc_request *req,
- int npages, int type, int portal);
-struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_exp(struct ptlrpc_request *req,
+struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req,
int npages, int type, int portal);
void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk);
void ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
int ptlrpcd_queue_work(void *handler);
/** @} */
+struct ptlrpc_service_buf_conf {
+ /* nbufs is how many buffers to post */
+ unsigned int bc_nbufs;
+ /* buffer size to post */
+ unsigned int bc_buf_size;
+ /* portal to listen for requests on */
+ unsigned int bc_req_portal;
+ /* portal of where to send replies to */
+ unsigned int bc_rep_portal;
+ /* maximum request size to be accepted for this service */
+ unsigned int bc_req_max_size;
+ /* maximum reply size this service can ever send */
+ unsigned int bc_rep_max_size;
+};
+
+struct ptlrpc_service_thr_conf {
+ /* threadname should be 8 characters or less - 6 will be added on */
+ char *tc_thr_name;
+ /* min number of service threads to start */
+ unsigned int tc_nthrs_min;
+ /* max number of service threads to start */
+ unsigned int tc_nthrs_max;
+ /* user specified threads number, it will be validated against
+ * other members of this structure. */
+ unsigned int tc_nthrs_user;
+ /* set NUMA node affinity for service threads */
+ unsigned int tc_cpu_affinity;
+ /* Tags for lu_context associated with service thread */
+ __u32 tc_ctx_tags;
+};
struct ptlrpc_service_conf {
- int psc_nbufs;
- int psc_bufsize;
- int psc_max_req_size;
- int psc_max_reply_size;
- int psc_req_portal;
- int psc_rep_portal;
- int psc_watchdog_factor;
- int psc_min_threads;
- int psc_max_threads;
- __u32 psc_ctx_tags;
+ /* service name */
+ char *psc_name;
+ /* soft watchdog timeout multiplier to print stuck service traces */
+ unsigned int psc_watchdog_factor;
+ /* buffer information */
+ struct ptlrpc_service_buf_conf psc_buf;
+ /* thread information */
+ struct ptlrpc_service_thr_conf psc_thr;
+ /* function table */
+ struct ptlrpc_service_ops psc_ops;
};
/* ptlrpc/service.c */
*
* @{
*/
-void ptlrpc_save_lock (struct ptlrpc_request *req,
- struct lustre_handle *lock, int mode, int no_ack);
+void ptlrpc_save_lock(struct ptlrpc_request *req,
+ struct lustre_handle *lock, int mode, int no_ack);
void ptlrpc_commit_replies(struct obd_export *exp);
-void ptlrpc_dispatch_difficult_reply (struct ptlrpc_reply_state *rs);
-void ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs);
-struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c,
- svc_handler_t h, char *name,
- struct proc_dir_entry *proc_entry,
- svc_req_printfn_t prntfn,
- char *threadname);
-
-struct ptlrpc_service *ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size,
- int max_reply_size,
- int req_portal, int rep_portal,
- int watchdog_factor,
- svc_handler_t, char *name,
- cfs_proc_dir_entry_t *proc_entry,
- svc_req_printfn_t,
- int min_threads, int max_threads,
- char *threadname, __u32 ctx_tags,
- svc_hpreq_handler_t);
+void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs);
+void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs);
+struct ptlrpc_service *ptlrpc_register_service(
+ struct ptlrpc_service_conf *conf,
+ struct proc_dir_entry *proc_entry);
void ptlrpc_stop_all_threads(struct ptlrpc_service *svc);
int ptlrpc_start_threads(struct ptlrpc_service *svc);
-int ptlrpc_start_thread(struct ptlrpc_service *svc);
int ptlrpc_unregister_service(struct ptlrpc_service *service);
-int liblustre_check_services (void *arg);
+int liblustre_check_services(void *arg);
void ptlrpc_daemonize(char *name);
int ptlrpc_service_health_check(struct ptlrpc_service *);
void ptlrpc_hpreq_reorder(struct ptlrpc_request *req);
# define ptlrpc_hr_fini() do {} while(0)
#endif
-struct ptlrpc_svc_data {
- char *name;
- struct ptlrpc_service *svc;
- struct ptlrpc_thread *thread;
-};
/** @} */
/* ptlrpc/import.c */
int lustre_msg_buflen(struct lustre_msg *m, int n);
void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len);
int lustre_msg_bufcount(struct lustre_msg *m);
-char *lustre_msg_string (struct lustre_msg *m, int n, int max_len);
+char *lustre_msg_string(struct lustre_msg *m, int n, int max_len);
__u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
__u32 lustre_msg_get_flags(struct lustre_msg *msg);
__u32 lustre_msg_get_magic(struct lustre_msg *msg);
__u32 lustre_msg_get_timeout(struct lustre_msg *msg);
__u32 lustre_msg_get_service_time(struct lustre_msg *msg);
+char *lustre_msg_get_jobid(struct lustre_msg *msg);
__u32 lustre_msg_get_cksum(struct lustre_msg *msg);
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(2, 9, 0, 0)
__u32 lustre_msg_calc_cksum(struct lustre_msg *msg, int compat18);
void ptlrpc_request_set_replen(struct ptlrpc_request *req);
void lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout);
void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time);
+void lustre_msg_set_jobid(struct lustre_msg *msg, char *jobid);
void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum);
static inline void
return req->rq_no_resend;
}
+static inline int
+ptlrpc_server_get_timeout(struct ptlrpc_service_part *svcpt)
+{
+ int at = AT_OFF ? 0 : at_get(&svcpt->scp_at_estimate);
+
+ return svcpt->scp_service->srv_watchdog_factor *
+ max_t(int, at, obd_timeout);
+}
+
+static inline struct ptlrpc_service *
+ptlrpc_req2svc(struct ptlrpc_request *req)
+{
+ LASSERT(req->rq_rqbd != NULL);
+ return req->rq_rqbd->rqbd_svcpt->scp_service;
+}
+
/* ldlm/ldlm_lib.c */
/**
* Target client logic
void client_destroy_import(struct obd_import *imp);
/** @} */
+#ifdef HAVE_SERVER_SUPPORT
int server_disconnect_export(struct obd_export *exp);
+#endif
/* ptlrpc/pinger.c */
/**