/**
* A FIEMAP request can be 4K+ in size for now
*/
-#define OST_MAXREQSIZE (5 * 1024)
+#define OST_MAXREQSIZE (16 * 1024)
#define OST_IO_MAXREQSIZE max_t(int, OST_MAXREQSIZE, \
(((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1))
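For reference, the (((v - 1) | (1024 - 1)) + 1) idiom above rounds v up to the next 1KB boundary. A worked sketch of the arithmetic (17000 is an arbitrary example value, not the real _OST_MAXREQSIZE_SUM):

/*
 * Round-up sketch:  ((v - 1) | 1023) + 1
 *   v = 17000: (16999 | 1023) + 1 = 17407 + 1 = 17408 = 17 * 1024
 *   v = 16384: (16383 | 1023) + 1 = 16383 + 1 = 16384 (already aligned)
 * so OST_IO_MAXREQSIZE is _OST_MAXREQSIZE_SUM rounded up to a 1KB multiple.
 */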
* Structure that defines a single portal connection.
*/
struct ptlrpc_connection {
- /** linkage for connections hash table */
- cfs_hlist_node_t c_hash;
- /** Our own lnet nid for this connection */
- lnet_nid_t c_self;
- /** Remote side nid for this connection */
- lnet_process_id_t c_peer;
- /** UUID of the other side */
- struct obd_uuid c_remote_uuid;
- /** reference counter for this connection */
- cfs_atomic_t c_refcount;
+ /** linkage for connections hash table */
+ struct hlist_node c_hash;
+ /** Our own lnet nid for this connection */
+ lnet_nid_t c_self;
+ /** Remote side nid for this connection */
+ lnet_process_id_t c_peer;
+ /** UUID of the other side */
+ struct obd_uuid c_remote_uuid;
+ /** reference counter for this connection */
+ atomic_t c_refcount;
};
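For reference, the cfs_atomic_t/cfs_hlist_node_t removals above (and throughout this patch) map one-to-one onto the stock kernel API. A minimal sketch of how c_refcount would be manipulated after the conversion; conn_get()/conn_put() are illustrative names, not part of the patch:

#include <linux/atomic.h>

/* Illustrative only: take and drop a connection reference. */
static inline void conn_get(struct ptlrpc_connection *conn)
{
	atomic_inc(&conn->c_refcount);		/* was cfs_atomic_inc() */
}

static inline int conn_put(struct ptlrpc_connection *conn)
{
	/* true when the last reference is dropped;
	 * was cfs_atomic_dec_and_test() */
	return atomic_dec_and_test(&conn->c_refcount);
}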
/** Client definition for PortalRPC */
* returned.
*/
struct ptlrpc_request_set {
- cfs_atomic_t set_refcount;
+ atomic_t set_refcount;
/** number of in queue requests */
- cfs_atomic_t set_new_count;
+ atomic_t set_new_count;
/** number of uncompleted requests */
- cfs_atomic_t set_remaining;
+ atomic_t set_remaining;
/** wait queue to wait on for request events */
- wait_queue_head_t set_waitq;
- wait_queue_head_t *set_wakeup_ptr;
+ wait_queue_head_t set_waitq;
+ wait_queue_head_t *set_wakeup_ptr;
/** List of requests in the set */
- cfs_list_t set_requests;
+ struct list_head set_requests;
/**
* List of completion callbacks to be called when the set is completed
* This is only used if \a set_interpret is NULL.
* Links struct ptlrpc_set_cbdata.
*/
- cfs_list_t set_cblist;
+ struct list_head set_cblist;
/** Completion callback, if only one. */
- set_interpreter_func set_interpret;
+ set_interpreter_func set_interpret;
/** opaque argument passed to completion \a set_interpret callback. */
- void *set_arg;
+ void *set_arg;
/**
* Lock for \a set_new_requests manipulations
* locked so that any old caller can communicate requests to
*/
spinlock_t set_new_req_lock;
/** List of new yet unsent requests. Only used with ptlrpcd now. */
- cfs_list_t set_new_requests;
+ struct list_head set_new_requests;
/** rq_status of requests that have been freed already */
- int set_rc;
+ int set_rc;
/** Additional fields used by the flow control extension */
/** Maximum number of RPCs in flight */
- int set_max_inflight;
+ int set_max_inflight;
/** Callback function used to generate RPCs */
- set_producer_func set_producer;
+ set_producer_func set_producer;
/** opaque argument passed to the producer callback */
- void *set_producer_arg;
+ void *set_producer_arg;
};
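The last three fields implement the flow-control extension: the set core invokes set_producer whenever fewer than set_max_inflight RPCs are in flight. A hedged sketch of such a producer; my_build_next_rpc() and the -ENOENT stop convention are assumptions for illustration:

/* Hypothetical producer callback: build one request, add it to the
 * set, and return 0; report a negative errno when nothing is left. */
static int my_producer(struct ptlrpc_request_set *set, void *arg)
{
	struct ptlrpc_request *req;

	req = my_build_next_rpc(arg);	/* arg is set->set_producer_arg */
	if (req == NULL)
		return -ENOENT;		/* nothing more to produce */
	ptlrpc_set_add_req(set, req);
	return 0;
}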
/**
* Description of a single ptlrpc_set callback
*/
struct ptlrpc_set_cbdata {
- /** List linkage item */
- cfs_list_t psc_item;
- /** Pointer to interpreting function */
- set_interpreter_func psc_interpret;
- /** Opaq argument to pass to the callback */
- void *psc_data;
+ /** List linkage item */
+ struct list_head psc_item;
+ /** Pointer to interpreting function */
+ set_interpreter_func psc_interpret;
+ /** Opaque argument to pass to the callback */
+ void *psc_data;
};
struct ptlrpc_bulk_desc;
* added to the state for replay/failover consistency guarantees.
*/
struct ptlrpc_reply_state {
- /** Callback description */
- struct ptlrpc_cb_id rs_cb_id;
- /** Linkage for list of all reply states in a system */
- cfs_list_t rs_list;
- /** Linkage for list of all reply states on same export */
- cfs_list_t rs_exp_list;
- /** Linkage for list of all reply states for same obd */
- cfs_list_t rs_obd_list;
+ /** Callback description */
+ struct ptlrpc_cb_id rs_cb_id;
+ /** Linkage for list of all reply states in a system */
+ struct list_head rs_list;
+ /** Linkage for list of all reply states on same export */
+ struct list_head rs_exp_list;
+ /** Linkage for list of all reply states for same obd */
+ struct list_head rs_obd_list;
#if RS_DEBUG
- cfs_list_t rs_debug_list;
+ struct list_head rs_debug_list;
#endif
- /** A spinlock to protect the reply state flags */
+ /** A spinlock to protect the reply state flags */
spinlock_t rs_lock;
- /** Reply state flags */
+ /** Reply state flags */
unsigned long rs_difficult:1; /* ACK/commit stuff */
unsigned long rs_no_ack:1; /* no ACK, even for
difficult requests */
__u64 rs_transno;
/** xid */
__u64 rs_xid;
- struct obd_export *rs_export;
+ struct obd_export *rs_export;
struct ptlrpc_service_part *rs_svcpt;
- /** Lnet metadata handle for the reply */
- lnet_handle_md_t rs_md_h;
- cfs_atomic_t rs_refcount;
-
- /** Context for the sevice thread */
- struct ptlrpc_svc_ctx *rs_svc_ctx;
- /** Reply buffer (actually sent to the client), encoded if needed */
- struct lustre_msg *rs_repbuf; /* wrapper */
+ /** Lnet metadata handle for the reply */
+ lnet_handle_md_t rs_md_h;
+ atomic_t rs_refcount;
+
+ /** Context for the service thread */
+ struct ptlrpc_svc_ctx *rs_svc_ctx;
+ /** Reply buffer (actually sent to the client), encoded if needed */
+ struct lustre_msg *rs_repbuf; /* wrapper */
/** Size of the reply buffer */
int rs_repbuf_len; /* wrapper buf length */
/** Size of the reply message */
*/
struct ptlrpc_request_pool {
/** Locks the list */
- spinlock_t prp_lock;
- /** list of ptlrpc_request structs */
- cfs_list_t prp_req_list;
- /** Maximum message size that would fit into a rquest from this pool */
- int prp_rq_size;
- /** Function to allocate more requests for this pool */
- void (*prp_populate)(struct ptlrpc_request_pool *, int);
+ spinlock_t prp_lock;
+ /** list of ptlrpc_request structs */
+ struct list_head prp_req_list;
+ /** Maximum message size that would fit into a request from this pool */
+ int prp_rq_size;
+ /** Function to allocate more requests for this pool */
+ void (*prp_populate)(struct ptlrpc_request_pool *, int);
};
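prp_populate is how a pool refills itself when requests run out. A minimal sketch of such a callback, assuming a hypothetical my_alloc_request() that returns a request with a prp_rq_size-sized buffer:

/* Hypothetical populate callback: park num_rq fresh requests on the pool. */
static void my_pool_populate(struct ptlrpc_request_pool *pool, int num_rq)
{
	int i;

	for (i = 0; i < num_rq; i++) {
		struct ptlrpc_request *req;

		req = my_alloc_request(pool->prp_rq_size);	/* hypothetical */
		if (req == NULL)
			return;
		spin_lock(&pool->prp_lock);
		list_add_tail(&req->rq_list, &pool->prp_req_list);
		spin_unlock(&pool->prp_lock);
	}
}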
struct lu_context;
* initialize their resources here; this operation is optional.
*
* \param[in,out] policy The policy being started
+ * \param[in,out] arg A generic char buffer
*
* \see nrs_policy_start_locked()
*/
- int (*op_policy_start) (struct ptlrpc_nrs_policy *policy);
+ int (*op_policy_start) (struct ptlrpc_nrs_policy *policy,
+ char *arg);
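The new arg parameter lets a policy receive a start-time argument string via lprocfs (the TBF policy pulled in below is the intended consumer). A sketch of a start op under that assumption; every my_* name is illustrative:

/* Illustrative op_policy_start implementation taking an argument. */
static int my_policy_start(struct ptlrpc_nrs_policy *policy, char *arg)
{
	struct my_policy_data *data;

	OBD_ALLOC_PTR(data);
	if (data == NULL)
		return -ENOMEM;
	if (arg != NULL && my_parse_arg(data, arg) != 0) {	/* hypothetical */
		OBD_FREE_PTR(data);
		return -EINVAL;
	}
	policy->pol_private = data;
	return 0;
}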
/**
* Called when deactivating a policy via lprocfs; policies deallocate
* their resources here; this operation is optional.
* \a nrq
* \param[in,out] nrq The request
*
- * \pre spin_is_locked(&svcpt->scp_req_lock)
+ * \pre assert_spin_locked(&svcpt->scp_req_lock)
*
* \see ptlrpc_nrs_req_stop_nolock()
*/
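The \pre change above is not cosmetic: spin_is_locked() always returns 0 on uniprocessor builds (UP spinlocks compile away), so an assertion built on it would fail spuriously there, while the replacement works on SMP and UP alike:

/* Precondition check usable in both configurations. */
assert_spin_locked(&svcpt->scp_req_lock);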
/**
* List of registered policies
*/
- cfs_list_t nrs_policy_list;
+ struct list_head nrs_policy_list;
/**
* List of policies with queued requests. Policies that have any
* outstanding requests are queued here, and this list is queried
* point transition away from the
* ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
*/
- cfs_list_t nrs_policy_queued;
+ struct list_head nrs_policy_queued;
/**
* Service partition for this NRS head
*/
* unregistration
*/
unsigned nrs_stopping:1;
+ /**
+ * NRS policy is throttling requests
+ */
+ unsigned nrs_throttling:1;
};
#define NRS_POL_NAME_MAX 16
/**
* Link into nrs_core::nrs_policies
*/
- cfs_list_t pd_list;
+ struct list_head pd_list;
/**
* NRS operations for this policy
*/
/**
* # of references on this descriptor
*/
- cfs_atomic_t pd_refs;
+ atomic_t pd_refs;
};
/**
* Linkage into the NRS head's list of policies,
* ptlrpc_nrs:nrs_policy_list
*/
- cfs_list_t pol_list;
+ struct list_head pol_list;
/**
* Linkage into the NRS head's list of policies with enqueued
* requests ptlrpc_nrs:nrs_policy_queued
*/
- cfs_list_t pol_list_queued;
+ struct list_head pol_list_queued;
/**
* Current state of this policy
*/
/**
* List of queued requests.
*/
- cfs_list_t fh_list;
+ struct list_head fh_list;
/**
* For debugging purposes.
*/
};
struct nrs_fifo_req {
- cfs_list_t fr_list;
+ struct list_head fr_list;
__u64 fr_sequence;
};
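With cfs_list_t gone, the FIFO policy queues these entries through the stock list API. A minimal enqueue/dequeue sketch, assuming the enclosing head is struct nrs_fifo_head with the fh_list shown above plus an fh_sequence counter:

/* Enqueue: stamp the request and append it in arrival order. */
static void fifo_enqueue(struct nrs_fifo_head *head, struct nrs_fifo_req *nrq)
{
	nrq->fr_sequence = head->fh_sequence++;	/* fh_sequence assumed */
	list_add_tail(&nrq->fr_list, &head->fh_list);
}

/* Dequeue: pop the oldest queued request, if any. */
static struct nrs_fifo_req *fifo_dequeue(struct nrs_fifo_head *head)
{
	struct nrs_fifo_req *nrq;

	if (list_empty(&head->fh_list))
		return NULL;
	nrq = list_first_entry(&head->fh_list, struct nrs_fifo_req, fr_list);
	list_del_init(&nrq->fr_list);
	return nrq;
}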
*/
struct nrs_crrn_client {
struct ptlrpc_nrs_resource cc_res;
- cfs_hlist_node_t cc_hnode;
+ struct hlist_node cc_hnode;
lnet_nid_t cc_nid;
/**
* The round number against which this client is currently scheduling
* the current round number.
*/
__u64 cc_sequence;
- cfs_atomic_t cc_ref;
+ atomic_t cc_ref;
/**
* Round Robin quantum; the maximum number of RPCs the client is allowed
* to schedule in a single batch of each round.
*/
struct nrs_orr_object {
struct ptlrpc_nrs_resource oo_res;
- cfs_hlist_node_t oo_hnode;
+ struct hlist_node oo_hnode;
/**
* The round number against which requests are being scheduled for this
* object or OST
/** @} ORR/TRR */
+#include <lustre_nrs_tbf.h>
+
/**
* NRS request
*
struct nrs_crrn_req crr;
/** ORR and TRR share the same request definition */
struct nrs_orr_req orr;
+ /**
+ * TBF request definition
+ */
+ struct nrs_tbf_req tbf;
} nr_u;
/**
* Externally-registering policies may want to use this to allocate
*/
struct ptlrpc_request {
/* Request type: one of PTL_RPC_MSG_* */
- int rq_type;
+ int rq_type;
/** Result of request processing */
- int rq_status;
- /**
- * Linkage item through which this request is included into
- * sending/delayed lists on client and into rqbd list on server
- */
- cfs_list_t rq_list;
- /**
- * Server side list of incoming unserved requests sorted by arrival
- * time. Traversed from time to time to notice about to expire
- * requests and sent back "early replies" to clients to let them
- * know server is alive and well, just very busy to service their
- * requests in time
- */
- cfs_list_t rq_timed_list;
- /** server-side history, used for debuging purposes. */
- cfs_list_t rq_history_list;
- /** server-side per-export list */
- cfs_list_t rq_exp_list;
- /** server-side hp handlers */
- struct ptlrpc_hpreq_ops *rq_ops;
+ int rq_status;
+ /**
+ * Linkage item through which this request is included into
+ * sending/delayed lists on client and into rqbd list on server
+ */
+ struct list_head rq_list;
+ /**
+ * Server side list of incoming unserved requests sorted by arrival
+ * time. Traversed from time to time to notice about-to-expire
+ * requests and send back "early replies" to clients to let them
+ * know the server is alive and well, just too busy to service their
+ * requests in time
+ */
+ struct list_head rq_timed_list;
+ /** server-side history, used for debugging purposes. */
+ struct list_head rq_history_list;
+ /** server-side per-export list */
+ struct list_head rq_exp_list;
+ /** server-side hp handlers */
+ struct ptlrpc_hpreq_ops *rq_ops;
/** initial thread servicing this request */
- struct ptlrpc_thread *rq_svc_thread;
+ struct ptlrpc_thread *rq_svc_thread;
/** history sequence # */
- __u64 rq_history_seq;
+ __u64 rq_history_seq;
/** \addtogroup nrs
* @{
*/
rq_replay:1,
rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
- rq_early:1, rq_must_unlink:1,
+ rq_early:1,
+ rq_req_unlink:1, rq_reply_unlink:1,
rq_memalloc:1, /* req originated from "kswapd" */
/* server-side flags */
rq_packed_final:1, /* packed final reply */
rq_no_retry_einprogress:1,
/* allow the req to be sent if the import is in recovery
* status */
- rq_allow_replay:1;
+ rq_allow_replay:1,
+ /* bulk request, sent to server, but uncommitted */
+ rq_unstable:1;
unsigned int rq_nr_resend;
- enum rq_phase rq_phase; /* one of RQ_PHASE_* */
- enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
- cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
- server-side refcounf for multiple replies */
+ enum rq_phase rq_phase; /* one of RQ_PHASE_* */
+ enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
+ atomic_t rq_refcount;/* client-side refcount for SENT race,
+ server-side refcount for multiple replies */
/** Portal to which this request would be sent */
short rq_request_portal; /* XXX FIXME bug 249 */
* there.
* Also see \a rq_replay comment above.
*/
- cfs_list_t rq_replay_list;
+ struct list_head rq_replay_list;
- /**
- * security and encryption data
- * @{ */
- struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
- struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
- cfs_list_t rq_ctx_chain; /**< link to waited ctx */
+ /**
+ * security and encryption data
+ * @{ */
+ struct ptlrpc_cli_ctx *rq_cli_ctx; /**< client's half ctx */
+ struct ptlrpc_svc_ctx *rq_svc_ctx; /**< server's half ctx */
+ struct list_head rq_ctx_chain; /**< link to waited ctx */
- struct sptlrpc_flavor rq_flvr; /**< for client & server */
- enum lustre_sec_part rq_sp_from;
+ struct sptlrpc_flavor rq_flvr; /**< for client & server */
+ enum lustre_sec_part rq_sp_from;
/* client/server security flags */
unsigned int
/** Per-request waitq introduced by bug 21938 for recovery waiting */
wait_queue_head_t rq_set_waitq;
/** Link item for request set lists */
- cfs_list_t rq_set_chain;
+ struct list_head rq_set_chain;
/** Link back to the request set */
struct ptlrpc_request_set *rq_set;
/** Async completion handler, called when reply is received */
struct ptlrpc_request_pool *rq_pool;
struct lu_context rq_session;
- struct lu_context rq_recov_session;
/** request format description */
struct req_capsule rq_pill;
* Structure that defines a single page of a bulk transfer
*/
struct ptlrpc_bulk_page {
- /** Linkage to list of pages in a bulk */
- cfs_list_t bp_link;
- /**
- * Number of bytes in a page to transfer starting from \a bp_pageoffset
- */
- int bp_buflen;
- /** offset within a page */
- int bp_pageoffset;
- /** The page itself */
- struct page *bp_page;
+ /** Linkage to list of pages in a bulk */
+ struct list_head bp_link;
+ /**
+ * Number of bytes in a page to transfer starting from \a bp_pageoffset
+ */
+ int bp_buflen;
+ /** offset within a page */
+ int bp_pageoffset;
+ /** The page itself */
+ struct page *bp_page;
};
#define BULK_GET_SOURCE 0
/**
* List of active threads in svc->srv_threads
*/
- cfs_list_t t_link;
+ struct list_head t_link;
/**
* thread-private data (preallocated memory)
*/
* More than one request can fit into the buffer.
*/
struct ptlrpc_request_buffer_desc {
- /** Link item for rqbds on a service */
- cfs_list_t rqbd_list;
- /** History of requests for this buffer */
- cfs_list_t rqbd_reqs;
- /** Back pointer to service for which this buffer is registered */
- struct ptlrpc_service_part *rqbd_svcpt;
- /** LNet descriptor */
- lnet_handle_md_t rqbd_md_h;
- int rqbd_refcount;
- /** The buffer itself */
- char *rqbd_buffer;
- struct ptlrpc_cb_id rqbd_cbid;
- /**
- * This "embedded" request structure is only used for the
- * last request to fit into the buffer
- */
- struct ptlrpc_request rqbd_req;
+ /** Link item for rqbds on a service */
+ struct list_head rqbd_list;
+ /** History of requests for this buffer */
+ struct list_head rqbd_reqs;
+ /** Back pointer to service for which this buffer is registered */
+ struct ptlrpc_service_part *rqbd_svcpt;
+ /** LNet descriptor */
+ lnet_handle_md_t rqbd_md_h;
+ int rqbd_refcount;
+ /** The buffer itself */
+ char *rqbd_buffer;
+ struct ptlrpc_cb_id rqbd_cbid;
+ /**
+ * This "embedded" request structure is only used for the
+ * last request to fit into the buffer
+ */
+ struct ptlrpc_request rqbd_req;
};
typedef int (*svc_handler_t)(struct ptlrpc_request *req);
struct ptlrpc_service {
/** serialize /proc operations */
spinlock_t srv_lock;
- /** most often accessed fields */
- /** chain thru all services */
- cfs_list_t srv_list;
+ /** most often accessed fields */
+ /** chain thru all services */
+ struct list_head srv_list;
/** service operations table */
struct ptlrpc_service_ops srv_ops;
/** only statically allocated strings here; we don't clean them */
char *srv_name;
/** only statically allocated strings here; we don't clean them */
char *srv_thread_name;
/** service thread list */
- cfs_list_t srv_threads;
+ struct list_head srv_threads;
/** threads # should be created for each partition on initializing */
int srv_nthrs_cpt_init;
/** limit of threads number for each partition */
/** # running threads */
int scp_nthrs_running;
/** service threads list */
- cfs_list_t scp_threads;
+ struct list_head scp_threads;
/**
* serialize the following fields, used for protecting
/** # incoming reqs */
int scp_nreqs_incoming;
/** request buffers to be reposted */
- cfs_list_t scp_rqbd_idle;
+ struct list_head scp_rqbd_idle;
/** req buffers receiving */
- cfs_list_t scp_rqbd_posted;
+ struct list_head scp_rqbd_posted;
/** incoming reqs */
- cfs_list_t scp_req_incoming;
+ struct list_head scp_req_incoming;
/** timeout before re-posting reqs, in ticks */
cfs_duration_t scp_rqbd_timeout;
/**
wait_queue_head_t scp_waitq;
/** request history */
- cfs_list_t scp_hist_reqs;
+ struct list_head scp_hist_reqs;
/** request buffer history */
- cfs_list_t scp_hist_rqbds;
+ struct list_head scp_hist_rqbds;
/** # request buffers in history */
int scp_hist_nrqbds;
/** sequence number for request */
/** reqs waiting for replies */
struct ptlrpc_at_array scp_at_array;
/** early reply timer */
- cfs_timer_t scp_at_timer;
+ struct timer_list scp_at_timer;
/** debug */
cfs_time_t scp_at_checktime;
/** check early replies */
*/
spinlock_t scp_rep_lock __cfs_cacheline_aligned;
/** all the active replies */
- cfs_list_t scp_rep_active;
+ struct list_head scp_rep_active;
#ifndef __KERNEL__
/** replies waiting for service */
- cfs_list_t scp_rep_queue;
+ struct list_head scp_rep_queue;
#endif
/** List of free reply_states */
- cfs_list_t scp_rep_idle;
+ struct list_head scp_rep_idle;
/** waitq to wake up when adding stuff to \a scp_rep_idle */
wait_queue_head_t scp_rep_waitq;
/** # 'difficult' replies */
- cfs_atomic_t scp_nreps_difficult;
+ atomic_t scp_nreps_difficult;
};
#define ptlrpc_service_for_each_part(part, i, svc) \
* request queues, request management, etc.
* @{
*/
+void ptlrpc_request_committed(struct ptlrpc_request *req, int force);
+
void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
struct ptlrpc_client *);
void ptlrpc_cleanup_client(struct obd_import *imp);
static inline void
ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
{
- if (req->rq_phase == new_phase)
- return;
+ if (req->rq_phase == new_phase)
+ return;
- if (new_phase == RQ_PHASE_UNREGISTERING) {
- req->rq_next_phase = req->rq_phase;
- if (req->rq_import)
- cfs_atomic_inc(&req->rq_import->imp_unregistering);
- }
+ if (new_phase == RQ_PHASE_UNREGISTERING) {
+ req->rq_next_phase = req->rq_phase;
+ if (req->rq_import)
+ atomic_inc(&req->rq_import->imp_unregistering);
+ }
- if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
- if (req->rq_import)
- cfs_atomic_dec(&req->rq_import->imp_unregistering);
- }
+ if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+ if (req->rq_import)
+ atomic_dec(&req->rq_import->imp_unregistering);
+ }
- DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
- ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
+ DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
+ ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
- req->rq_phase = new_phase;
+ req->rq_phase = new_phase;
}
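A typical use of the helper above, sketched: parking a request in RQ_PHASE_UNREGISTERING (which saves the current phase in rq_next_phase and bumps imp_unregistering) and resuming it once its buffers are unlinked:

ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING);	/* defer */
/* ... wait for LNet unlink to complete ... */
ptlrpc_rqphase_move(req, req->rq_next_phase);		/* resume */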
/**
spin_unlock(&req->rq_lock);
return 1;
}
- rc = req->rq_receiving_reply || req->rq_must_unlink;
+ rc = req->rq_receiving_reply ||
+      req->rq_req_unlink || req->rq_reply_unlink;
spin_unlock(&req->rq_lock);
return rc;
}
static inline void
ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
{
- LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
- cfs_atomic_inc(&rs->rs_refcount);
+ LASSERT(atomic_read(&rs->rs_refcount) > 0);
+ atomic_inc(&rs->rs_refcount);
}
static inline void
ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
{
- LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
- if (cfs_atomic_dec_and_test(&rs->rs_refcount))
- lustre_free_reply_state(rs);
+ LASSERT(atomic_read(&rs->rs_refcount) > 0);
+ if (atomic_dec_and_test(&rs->rs_refcount))
+ lustre_free_reply_state(rs);
}
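Sketched usage of the pair above: each context that can still touch the reply state holds one reference, and whichever ptlrpc_rs_decref() call drops rs_refcount to zero frees the state:

ptlrpc_rs_addref(rs);	/* +1 while the reply is visible to LNet */
/* ... reply sent; network/commit callbacks run ... */
ptlrpc_rs_decref(rs);	/* last caller ends in lustre_free_reply_state() */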
/* Should only be called once per req */
int ptlrpc_pinger_add_import(struct obd_import *imp);
int ptlrpc_pinger_del_import(struct obd_import *imp);
int ptlrpc_add_timeout_client(int time, enum timeout_event event,
- timeout_cb_t cb, void *data,
- cfs_list_t *obd_list);
-int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
+ timeout_cb_t cb, void *data,
+ struct list_head *obd_list);
+int ptlrpc_del_timeout_client(struct list_head *obd_list,
enum timeout_event event);
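A hedged usage sketch of the (now list_head based) timeout-client API; the event value, callback, interval, and data here are placeholders, not values from this patch:

LIST_HEAD(my_timeout_list);	/* per-obd list, now a struct list_head */

/* Register my_cb/my_data under a placeholder event with a 25s timeout. */
rc = ptlrpc_add_timeout_client(25, MY_TIMEOUT_EVENT,
			       my_cb, my_data, &my_timeout_list);
/* ... */
rc = ptlrpc_del_timeout_client(&my_timeout_list, MY_TIMEOUT_EVENT);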
struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
int ptlrpc_obd_ping(struct obd_device *obd);