diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h
index a644c06..70600b9 100644
--- a/lustre/include/lustre_net.h
+++ b/lustre/include/lustre_net.h
@@ -27,7 +27,7 @@
  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2010, 2012, Intel Corporation.
+ * Copyright (c) 2010, 2013, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -108,13 +108,13 @@
  */
 #define PTLRPC_MAX_BRW_BITS	(LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
 #define PTLRPC_MAX_BRW_SIZE	(1 << PTLRPC_MAX_BRW_BITS)
-#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define PTLRPC_MAX_BRW_PAGES	(PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
 
 #define ONE_MB_BRW_SIZE		(1 << LNET_MTU_BITS)
 #define MD_MAX_BRW_SIZE		(1 << LNET_MTU_BITS)
-#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define MD_MAX_BRW_PAGES	(MD_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
 #define DT_MAX_BRW_SIZE		PTLRPC_MAX_BRW_SIZE
-#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> CFS_PAGE_SHIFT)
+#define DT_MAX_BRW_PAGES	(DT_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)
 #define OFD_MAX_BRW_SIZE	(1 << LNET_MTU_BITS)
 
 /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! */
@@ -122,8 +122,8 @@
 # if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0)
 # error "PTLRPC_MAX_BRW_PAGES isn't a power of two"
 # endif
-# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE))
-# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * CFS_PAGE_SIZE"
+# if (PTLRPC_MAX_BRW_SIZE != (PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE))
+# error "PTLRPC_MAX_BRW_SIZE isn't PTLRPC_MAX_BRW_PAGES * PAGE_CACHE_SIZE"
 # endif
 # if (PTLRPC_MAX_BRW_SIZE > LNET_MTU * PTLRPC_BULK_OPS_COUNT)
 # error "PTLRPC_MAX_BRW_SIZE too big"
@@ -265,7 +265,7 @@
 #define LDLM_THR_FACTOR		8
 #define LDLM_NTHRS_INIT		PTLRPC_NTHRS_INIT
 #define LDLM_NTHRS_BASE		24
-#define LDLM_NTHRS_MAX		(cfs_num_online_cpus() == 1 ? 64 : 128)
+#define LDLM_NTHRS_MAX		(num_online_cpus() == 1 ? 64 : 128)
 
 #define LDLM_BL_THREADS	LDLM_NTHRS_AUTO_INIT
 #define LDLM_CLIENT_NBUFS 1
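
As a sense check on the macro arithmetic above, the following standalone sketch reproduces it with typical values; the bit widths chosen (1 MiB LNET MTU, 2 bulk-ops bits, 4 KiB pages) are assumptions for illustration, not values defined in this header.

	/* Standalone sketch of the BRW size arithmetic; the constants here
	 * are illustrative assumptions, not taken from lustre_net.h. */
	#include <stdio.h>

	#define LNET_MTU_BITS         20  /* 1 MiB LNET MTU (assumed) */
	#define PTLRPC_BULK_OPS_BITS   2  /* up to 4 bulk ops per RPC (assumed) */
	#define PAGE_CACHE_SHIFT      12  /* 4 KiB pages (assumed) */

	#define PTLRPC_MAX_BRW_BITS  (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS)
	#define PTLRPC_MAX_BRW_SIZE  (1 << PTLRPC_MAX_BRW_BITS)
	#define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_CACHE_SHIFT)

	int main(void)
	{
		/* 1 MiB << 2 = 4 MiB per bulk RPC, i.e. 1024 pages of 4 KiB */
		printf("max brw: %u bytes, %u pages\n",
		       PTLRPC_MAX_BRW_SIZE, PTLRPC_MAX_BRW_PAGES);
		return 0;
	}
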
@@ -380,8 +380,8 @@
  * include linkea (4K maxim), together with other updates, we set it to 9K:
  * lustre_msg + ptlrpc_body + UPDATE_BUF_SIZE (8K)
  */
-#define MDS_OUT_MAXREQSIZE	(9 * 1024)
-#define MDS_OUT_MAXREPSIZE	MDS_MAXREPSIZE
+#define OUT_MAXREQSIZE		(9 * 1024)
+#define OUT_MAXREPSIZE		MDS_MAXREPSIZE
 
 /** MDS_BUFSIZE = max_reqsize (w/o LOV EA) + max sptlrpc payload size */
 #define MDS_BUFSIZE		max(MDS_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
@@ -407,11 +407,11 @@
 				 160 * 1024)
 
 /**
- * MDS_OUT_BUFSIZE = max_out_reqsize + max sptlrpc payload (~1K) which is
+ * OUT_BUFSIZE = max_out_reqsize + max sptlrpc payload (~1K) which is
  * about 10K, for the same reason as MDS_REG_BUFSIZE, we also give some
  * extra bytes to each request buffer to improve buffer utilization rate.
  */
-#define MDS_OUT_BUFSIZE		max(MDS_OUT_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
+#define OUT_BUFSIZE		max(OUT_MAXREQSIZE + SPTLRPC_MAX_PAYLOAD, \
				    24 * 1024)
 
 /** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */
@@ -465,7 +465,7 @@
  */
 /* depress threads factor for VM with small memory size */
 #define OSS_THR_FACTOR		min_t(int, 8, \
-				      CFS_NUM_CACHEPAGES >> (28 - CFS_PAGE_SHIFT))
+				      NUM_CACHEPAGES >> (28 - PAGE_CACHE_SHIFT))
 #define OSS_NTHRS_INIT		(PTLRPC_NTHRS_INIT + 1)
 #define OSS_NTHRS_BASE		64
 #define OSS_NTHRS_MAX		512
@@ -494,7 +494,7 @@
 /**
  * FIEMAP request can be 4K+ for now
  */
-#define OST_MAXREQSIZE		(5 * 1024)
+#define OST_MAXREQSIZE		(16 * 1024)
 #define OST_IO_MAXREQSIZE	max_t(int, OST_MAXREQSIZE, \
 				(((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1))
 /** FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc */
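
The OST_IO_MAXREQSIZE definition above relies on the round-up idiom ((x - 1) | (align - 1)) + 1, which rounds x up to the next multiple of a power-of-two alignment. A minimal standalone sketch:

	/* The round-up idiom used for OST_IO_MAXREQSIZE: valid for any
	 * power-of-two 'align'. Values below are illustrative only. */
	#include <assert.h>

	static unsigned int round_up_pow2(unsigned int x, unsigned int align)
	{
		/* saturate the low bits, then step to the next multiple */
		return ((x - 1) | (align - 1)) + 1;
	}

	int main(void)
	{
		assert(round_up_pow2(5 * 1024, 1024) == 5 * 1024); /* aligned */
		assert(round_up_pow2(5 * 1024 + 1, 1024) == 6 * 1024);
		return 0;
	}
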
@@ -517,16 +517,16 @@
  * Structure to single define portal connection.
  */
 struct ptlrpc_connection {
-	/** linkage for connections hash table */
-	cfs_hlist_node_t        c_hash;
-	/** Our own lnet nid for this connection */
-	lnet_nid_t              c_self;
-	/** Remote side nid for this connection */
-	lnet_process_id_t       c_peer;
-	/** UUID of the other side */
-	struct obd_uuid         c_remote_uuid;
-	/** reference counter for this connection */
-	cfs_atomic_t            c_refcount;
+	/** linkage for connections hash table */
+	struct hlist_node	c_hash;
+	/** Our own lnet nid for this connection */
+	lnet_nid_t		c_self;
+	/** Remote side nid for this connection */
+	lnet_process_id_t	c_peer;
+	/** UUID of the other side */
+	struct obd_uuid		c_remote_uuid;
+	/** reference counter for this connection */
+	atomic_t		c_refcount;
 };
 
 /** Client definition for PortalRPC */
@@ -574,26 +574,26 @@ typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *);
  * returned.
  */
 struct ptlrpc_request_set {
-	cfs_atomic_t          set_refcount;
+	atomic_t		set_refcount;
 	/** number of in queue requests */
-	cfs_atomic_t          set_new_count;
+	atomic_t		set_new_count;
 	/** number of uncompleted requests */
-	cfs_atomic_t          set_remaining;
+	atomic_t		set_remaining;
 	/** wait queue to wait on for request events */
-	cfs_waitq_t           set_waitq;
-	cfs_waitq_t          *set_wakeup_ptr;
+	wait_queue_head_t	set_waitq;
+	wait_queue_head_t      *set_wakeup_ptr;
 	/** List of requests in the set */
-	cfs_list_t            set_requests;
+	struct list_head	set_requests;
 	/**
 	 * List of completion callbacks to be called when the set is completed
 	 * This is only used if \a set_interpret is NULL.
 	 * Links struct ptlrpc_set_cbdata.
 	 */
-	cfs_list_t            set_cblist;
+	struct list_head	set_cblist;
 	/** Completion callback, if only one. */
-	set_interpreter_func  set_interpret;
+	set_interpreter_func	set_interpret;
 	/** opaq argument passed to completion \a set_interpret callback. */
-	void                 *set_arg;
+	void			*set_arg;
 	/**
 	 * Lock for \a set_new_requests manipulations
 	 * locked so that any old caller can communicate requests to
 	 *
@@ -601,29 +601,29 @@ struct ptlrpc_request_set {
 	 */
	spinlock_t		set_new_req_lock;
 	/** List of new yet unsent requests. Only used with ptlrpcd now. */
-	cfs_list_t            set_new_requests;
+	struct list_head	set_new_requests;
 
 	/** rq_status of requests that have been freed already */
-	int                   set_rc;
+	int			set_rc;
 	/** Additional fields used by the flow control extension */
 	/** Maximum number of RPCs in flight */
-	int                   set_max_inflight;
+	int			set_max_inflight;
 	/** Callback function used to generate RPCs */
-	set_producer_func     set_producer;
+	set_producer_func	set_producer;
 	/** opaq argument passed to the producer callback */
-	void                 *set_producer_arg;
+	void			*set_producer_arg;
 };
 
 /**
  * Description of a single ptrlrpc_set callback
  */
 struct ptlrpc_set_cbdata {
-	/** List linkage item */
-	cfs_list_t           psc_item;
-	/** Pointer to interpreting function */
-	set_interpreter_func psc_interpret;
-	/** Opaq argument to pass to the callback */
-	void                *psc_data;
+	/** List linkage item */
+	struct list_head	psc_item;
+	/** Pointer to interpreting function */
+	set_interpreter_func	psc_interpret;
+	/** Opaque argument to pass to the callback */
+	void			*psc_data;
 };
 
 struct ptlrpc_bulk_desc;
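
The set_producer/set_max_inflight pair above implements client-side flow control: the set keeps asking the producer for new requests while fewer than set_max_inflight are in flight. A hedged sketch of a producer callback; my_build_one_rpc() is an invented helper, and the return convention (0 for one request added, negative when there is nothing left to produce) is an assumption for illustration:

	/* Hedged sketch of a set_producer_func implementation; only
	 * ptlrpc_set_add_req() is a real ptlrpc entry point here. */
	static int my_rpc_producer(struct ptlrpc_request_set *set, void *arg)
	{
		struct ptlrpc_request *req;

		/* my_build_one_rpc() is an invented helper returning the
		 * next prepared request for this caller, or NULL when done */
		req = my_build_one_rpc(arg);
		if (req == NULL)
			return -ENOENT;	/* assumed "nothing more" signal */

		ptlrpc_set_add_req(set, req);
		return 0;
	}
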
@@ -650,20 +650,20 @@ struct ptlrpc_cb_id {
  * added to the state for replay/failover consistency guarantees.
  */
 struct ptlrpc_reply_state {
-	/** Callback description */
-	struct ptlrpc_cb_id   rs_cb_id;
-	/** Linkage for list of all reply states in a system */
-	cfs_list_t            rs_list;
-	/** Linkage for list of all reply states on same export */
-	cfs_list_t            rs_exp_list;
-	/** Linkage for list of all reply states for same obd */
-	cfs_list_t            rs_obd_list;
+	/** Callback description */
+	struct ptlrpc_cb_id	rs_cb_id;
+	/** Linkage for list of all reply states in a system */
+	struct list_head	rs_list;
+	/** Linkage for list of all reply states on same export */
+	struct list_head	rs_exp_list;
+	/** Linkage for list of all reply states for same obd */
+	struct list_head	rs_obd_list;
 #if RS_DEBUG
-	cfs_list_t            rs_debug_list;
+	struct list_head	rs_debug_list;
 #endif
-	/** A spinlock to protect the reply state flags */
+	/** A spinlock to protect the reply state flags */
	spinlock_t		rs_lock;
-	/** Reply state flags */
+	/** Reply state flags */
 	unsigned long          rs_difficult:1; /* ACK/commit stuff */
 	unsigned long          rs_no_ack:1;    /* no ACK, even for
                                                  difficult requests */
@@ -683,16 +683,16 @@ struct ptlrpc_reply_state {
 	__u64                  rs_transno;
 	/** xid */
 	__u64                  rs_xid;
-	struct obd_export     *rs_export;
+	struct obd_export	*rs_export;
 	struct ptlrpc_service_part *rs_svcpt;
-	/** Lnet metadata handle for the reply */
-	lnet_handle_md_t       rs_md_h;
-	cfs_atomic_t           rs_refcount;
-
-	/** Context for the sevice thread */
-	struct ptlrpc_svc_ctx *rs_svc_ctx;
-	/** Reply buffer (actually sent to the client), encoded if needed */
-	struct lustre_msg     *rs_repbuf;       /* wrapper */
+	/** Lnet metadata handle for the reply */
+	lnet_handle_md_t	rs_md_h;
+	atomic_t		rs_refcount;
+
+	/** Context for the service thread */
+	struct ptlrpc_svc_ctx	*rs_svc_ctx;
+	/** Reply buffer (actually sent to the client), encoded if needed */
+	struct lustre_msg	*rs_repbuf;	/* wrapper */
 	/** Size of the reply buffer */
 	int                    rs_repbuf_len;   /* wrapper buf length */
 	/** Size of the reply message */
@@ -738,13 +738,13 @@ typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env,
  */
 struct ptlrpc_request_pool {
 	/** Locks the list */
-	spinlock_t prp_lock;
-	/** list of ptlrpc_request structs */
-	cfs_list_t prp_req_list;
-	/** Maximum message size that would fit into a rquest from this pool */
-	int prp_rq_size;
-	/** Function to allocate more requests for this pool */
-	void (*prp_populate)(struct ptlrpc_request_pool *, int);
+	spinlock_t		prp_lock;
+	/** list of ptlrpc_request structs */
+	struct list_head	prp_req_list;
+	/** Maximum message size that would fit into a request from this pool */
+	int			prp_rq_size;
+	/** Function to allocate more requests for this pool */
+	void (*prp_populate)(struct ptlrpc_request_pool *, int);
 };
 
 struct lu_context;
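
prp_populate lets a request pool refill itself when it runs dry. A hedged sketch of a callback matching the signature above; it assumes ptlrpc_add_rqs_to_pool() is available as the allocation helper:

	/* Hedged sketch of a pool-populate callback. It is assumed that
	 * ptlrpc_add_rqs_to_pool() allocates num_rq requests of
	 * pool->prp_rq_size bytes and parks them on pool->prp_req_list
	 * under pool->prp_lock for later pool-based request allocation. */
	static void my_pool_populate(struct ptlrpc_request_pool *pool,
				     int num_rq)
	{
		ptlrpc_add_rqs_to_pool(pool, num_rq);
	}
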
@@ -822,10 +822,12 @@ struct ptlrpc_nrs_pol_ops {
 	 * initialize their resources here; this operation is optional.
 	 *
 	 * \param[in,out] policy The policy being started
+	 * \param[in,out] arg    A generic char buffer
 	 *
 	 * \see nrs_policy_start_locked()
 	 */
-	int	(*op_policy_start) (struct ptlrpc_nrs_policy *policy);
+	int	(*op_policy_start) (struct ptlrpc_nrs_policy *policy,
+				    char *arg);
 	/**
 	 * Called when deactivating a policy via lprocfs; policies deallocate
 	 * their resources here; this operation is optional
@@ -961,7 +963,7 @@ struct ptlrpc_nrs_pol_ops {
 	 *			\a nrq
 	 * \param[in,out] nrq	The request
 	 *
-	 * \pre spin_is_locked(&svcpt->scp_req_lock)
+	 * \pre assert_spin_locked(&svcpt->scp_req_lock)
 	 *
 	 * \see ptlrpc_nrs_req_stop_nolock()
 	 */
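
The new char *arg parameter allows an argument string to reach op_policy_start when a policy is activated via lprocfs. A hedged sketch of a start handler; my_pol_data and the "rate=" keyword are invented for illustration:

	/* Hedged sketch of an op_policy_start implementation using the new
	 * char *arg parameter; my_pol_data and the "rate=" syntax are
	 * invented, while OBD_ALLOC_PTR and pol_private are real. */
	static int my_policy_start(struct ptlrpc_nrs_policy *policy, char *arg)
	{
		struct my_pol_data *data;

		OBD_ALLOC_PTR(data);
		if (data == NULL)
			return -ENOMEM;

		/* interpret the optional lprocfs argument string, e.g.
		 * "rate=100"; a NULL/empty arg selects defaults */
		if (arg != NULL && strncmp(arg, "rate=", 5) == 0)
			data->mpd_rate = simple_strtoul(arg + 5, NULL, 0);

		policy->pol_private = data;
		return 0;
	}
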
@@ -1056,7 +1058,7 @@ struct ptlrpc_nrs {
 	/**
 	 * List of registered policies
 	 */
-	cfs_list_t		nrs_policy_list;
+	struct list_head	nrs_policy_list;
 	/**
 	 * List of policies with queued requests. Policies that have any
 	 * outstanding requests are queued here, and this list is queried
@@ -1065,7 +1067,7 @@ struct ptlrpc_nrs {
 	 * point transition away from the
 	 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STARTED state are drained.
 	 */
-	cfs_list_t		nrs_policy_queued;
+	struct list_head	nrs_policy_queued;
 	/**
 	 * Service partition for this NRS head
 	 */
@@ -1103,6 +1105,10 @@ struct ptlrpc_nrs {
 	 * unregistration
 	 */
 	unsigned		nrs_stopping:1;
+	/**
+	 * NRS policy is throttling requests
+	 */
+	unsigned		nrs_throttling:1;
 };
 
 #define NRS_POL_NAME_MAX 16
@@ -1146,7 +1152,7 @@ struct ptlrpc_nrs_pol_conf {
 	 * different module to the one the NRS framework is held within
 	 * (currently ptlrpc), should set this field to THIS_MODULE.
 	 */
-	cfs_module_t		       *nc_owner;
+	struct module		       *nc_owner;
 	/**
 	 * Policy registration flags; a bitmast of \e nrs_policy_flags
 	 */
@@ -1167,7 +1173,7 @@ struct ptlrpc_nrs_pol_desc {
 	/**
 	 * Link into nrs_core::nrs_policies
 	 */
-	cfs_list_t			pd_list;
+	struct list_head		pd_list;
 	/**
 	 * NRS operations for this policy
 	 */
@@ -1221,7 +1227,7 @@ struct ptlrpc_nrs_pol_desc {
 	 * then unregistration and lprocfs operations will be properly
 	 * serialized.
 	 */
-	cfs_module_t		       *pd_owner;
+	struct module		       *pd_owner;
 	/**
 	 * Bitmask of \e nrs_policy_flags
 	 */
@@ -1229,7 +1235,7 @@ struct ptlrpc_nrs_pol_desc {
 	/**
 	 * # of references on this descriptor
 	 */
-	cfs_atomic_t			pd_refs;
+	atomic_t			pd_refs;
 };
 
 /**
@@ -1303,12 +1309,12 @@ struct ptlrpc_nrs_policy {
 	 * Linkage into the NRS head's list of policies,
 	 * ptlrpc_nrs:nrs_policy_list
 	 */
-	cfs_list_t			pol_list;
+	struct list_head		pol_list;
 	/**
 	 * Linkage into the NRS head's list of policies with enqueued
 	 * requests ptlrpc_nrs:nrs_policy_queued
 	 */
-	cfs_list_t			pol_list_queued;
+	struct list_head		pol_list_queued;
 	/**
 	 * Current state of this policy
 	 */
@@ -1410,7 +1416,7 @@ struct nrs_fifo_head {
 	/**
 	 * List of queued requests.
 	 */
-	cfs_list_t			fh_list;
+	struct list_head		fh_list;
 	/**
 	 * For debugging purposes.
 	 */
@@ -1418,7 +1424,7 @@ struct nrs_fifo_head {
 };
 
 struct nrs_fifo_req {
-	cfs_list_t		fr_list;
+	struct list_head	fr_list;
 	__u64			fr_sequence;
 };
 
@@ -1460,7 +1466,7 @@ struct nrs_crrn_net {
  */
 struct nrs_crrn_client {
 	struct ptlrpc_nrs_resource cc_res;
-	cfs_hlist_node_t	cc_hnode;
+	struct hlist_node	cc_hnode;
 	lnet_nid_t		cc_nid;
 	/**
 	 * The round number against which this client is currently scheduling
@@ -1472,7 +1478,7 @@ struct nrs_crrn_client {
 	 * the current round number.
 	 */
 	__u64			cc_sequence;
-	cfs_atomic_t		cc_ref;
+	atomic_t		cc_ref;
 	/**
 	 * Round Robin quantum; the maximum number of RPCs the client is allowed
 	 * to schedule in a single batch of each round.
@@ -1567,7 +1573,7 @@ struct nrs_orr_key {
 * id number, so this _should_ be more than enough for the maximum number of
 * CPTs on any system. If it does happen that this statement is incorrect,
 * nrs_orr_genobjname() will inevitably yield a non-unique name and cause
- * cfs_mem_cache_create() to complain (on Linux), so the erroneous situation
+ * kmem_cache_create() to complain (on Linux), so the erroneous situation
 * will hopefully not go unnoticed.
 */
 #define NRS_ORR_OBJ_NAME_MAX	(sizeof("nrs_orr_reg_") + 3)
@@ -1579,7 +1585,7 @@ struct nrs_orr_data {
 	struct ptlrpc_nrs_resource	od_res;
 	cfs_binheap_t			*od_binheap;
 	cfs_hash_t			*od_obj_hash;
-	cfs_mem_cache_t			*od_cache;
+	struct kmem_cache		*od_cache;
 	/**
 	 * Used when a new scheduling round commences, in order to synchronize
 	 * all object or OST batches with the new round number.
@@ -1620,7 +1626,7 @@ struct nrs_orr_data {
  */
 struct nrs_orr_object {
 	struct ptlrpc_nrs_resource	oo_res;
-	cfs_hlist_node_t		oo_hnode;
+	struct hlist_node		oo_hnode;
 	/**
 	 * The round number against which requests are being scheduled for this
 	 * object or OST
@@ -1636,7 +1642,7 @@ struct nrs_orr_object {
 	 * scheduling RPCs
 	 */
 	struct nrs_orr_key		oo_key;
-	cfs_atomic_t			oo_ref;
+	long				oo_ref;
 	/**
 	 * Round Robin quantum; the maximum number of RPCs that are allowed to
 	 * be scheduled for the object or OST in a single batch of each round.
@@ -1694,6 +1700,8 @@ struct nrs_orr_req {
 
 /** @} ORR/TRR */
 
+#include <lustre_nrs_tbf.h>
+
 /**
  * NRS request
  *
@@ -1735,6 +1743,10 @@ struct ptlrpc_nrs_request {
 		struct nrs_crrn_req	crr;
 		/** ORR and TRR share the same request definition */
 		struct nrs_orr_req	orr;
+		/**
+		 * TBF request definition
+		 */
+		struct nrs_tbf_req	tbf;
 	} nr_u;
 	/**
 	 * Externally-registering policies may want to use this to allocate
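
With nc_owner now a plain struct module pointer, an externally built policy would register itself roughly as follows; the policy name, my_pol_ops, and the assumption that ptlrpc_nrs_policy_register() is the registration entry point are illustrative:

	/* Hedged sketch of external NRS policy registration; "my_tbf_like"
	 * and my_pol_ops are invented names. */
	static struct ptlrpc_nrs_pol_conf my_pol_conf = {
		.nc_name	= "my_tbf_like",
		.nc_ops		= &my_pol_ops,
		.nc_owner	= THIS_MODULE,	/* policy lives outside ptlrpc */
	};

	static int __init my_pol_init(void)
	{
		return ptlrpc_nrs_policy_register(&my_pol_conf);
	}
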
@@ -1778,34 +1790,34 @@ struct ptlrpc_hpreq_ops {
  */
 struct ptlrpc_request {
	/* Request type: one of PTL_RPC_MSG_* */
-	int rq_type;
+	int			 rq_type;
 	/** Result of request processing */
-	int rq_status;
-	/**
-	 * Linkage item through which this request is included into
-	 * sending/delayed lists on client and into rqbd list on server
-	 */
-	cfs_list_t rq_list;
-	/**
-	 * Server side list of incoming unserved requests sorted by arrival
-	 * time. Traversed from time to time to notice about to expire
-	 * requests and sent back "early replies" to clients to let them
-	 * know server is alive and well, just very busy to service their
-	 * requests in time
-	 */
-	cfs_list_t rq_timed_list;
-	/** server-side history, used for debuging purposes. */
-	cfs_list_t rq_history_list;
-	/** server-side per-export list */
-	cfs_list_t rq_exp_list;
-	/** server-side hp handlers */
-	struct ptlrpc_hpreq_ops *rq_ops;
+	int			 rq_status;
+	/**
+	 * Linkage item through which this request is included into
+	 * sending/delayed lists on client and into rqbd list on server
+	 */
+	struct list_head	 rq_list;
+	/**
+	 * Server side list of incoming unserved requests sorted by arrival
+	 * time. Traversed from time to time to notice about to expire
+	 * requests and sent back "early replies" to clients to let them
+	 * know server is alive and well, just very busy to service their
+	 * requests in time
+	 */
+	struct list_head	 rq_timed_list;
+	/** server-side history, used for debugging purposes. */
+	struct list_head	 rq_history_list;
+	/** server-side per-export list */
+	struct list_head	 rq_exp_list;
+	/** server-side hp handlers */
+	struct ptlrpc_hpreq_ops	*rq_ops;
 	/** initial thread servicing this request */
-	struct ptlrpc_thread *rq_svc_thread;
+	struct ptlrpc_thread	*rq_svc_thread;
 	/** history sequence # */
-	__u64 rq_history_seq;
+	__u64			 rq_history_seq;
 	/** \addtogroup nrs
 	 * @{
 	 */
@@ -1833,7 +1845,8 @@ struct ptlrpc_request {
 		rq_replay:1, rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
 		rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1,
-		rq_early:1, rq_must_unlink:1,
+		rq_early:1,
+		rq_req_unlink:1, rq_reply_unlink:1,
 		rq_memalloc:1, /* req originated from "kswapd" */
 		/* server-side flags */
 		rq_packed_final:1,  /* packed final reply */
@@ -1854,10 +1867,10 @@ struct ptlrpc_request {
 
 	unsigned int rq_nr_resend;
 
-	enum rq_phase rq_phase; /* one of RQ_PHASE_* */
-	enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
-	cfs_atomic_t rq_refcount;/* client-side refcount for SENT race,
-				    server-side refcounf for multiple replies */
+	enum rq_phase rq_phase; /* one of RQ_PHASE_* */
+	enum rq_phase rq_next_phase; /* one of RQ_PHASE_* to be used next */
+	atomic_t rq_refcount;/* client-side refcount for SENT race,
+				server-side refcount for multiple replies */
 	/** Portal to which this request would be sent */
 	short rq_request_portal;	/* XXX FIXME bug 249 */
@@ -1887,17 +1900,17 @@ struct ptlrpc_request {
 	 * there.
 	 * Also see \a rq_replay comment above.
 	 */
-	cfs_list_t rq_replay_list;
+	struct list_head	 rq_replay_list;
 
-	/**
-	 * security and encryption data
-	 * @{ */
-	struct ptlrpc_cli_ctx   *rq_cli_ctx;     /**< client's half ctx */
-	struct ptlrpc_svc_ctx   *rq_svc_ctx;     /**< server's half ctx */
-	cfs_list_t               rq_ctx_chain;   /**< link to waited ctx */
+	/**
+	 * security and encryption data
+	 * @{ */
+	struct ptlrpc_cli_ctx	*rq_cli_ctx;	/**< client's half ctx */
+	struct ptlrpc_svc_ctx	*rq_svc_ctx;	/**< server's half ctx */
+	struct list_head	 rq_ctx_chain;	/**< link to waited ctx */
 
-	struct sptlrpc_flavor    rq_flvr;        /**< for client & server */
-	enum lustre_sec_part     rq_sp_from;
+	struct sptlrpc_flavor	 rq_flvr;	/**< for client & server */
+	enum lustre_sec_part	 rq_sp_from;
 
 	/* client/server security flags */
 	unsigned int
@@ -1968,10 +1981,10 @@ struct ptlrpc_request {
 	/** incoming request buffer */
 	struct ptlrpc_request_buffer_desc *rq_rqbd;
 
-	/** client-only incoming reply */
-	lnet_handle_md_t rq_reply_md_h;
-	cfs_waitq_t rq_reply_waitq;
-	struct ptlrpc_cb_id rq_reply_cbid;
+	/** client-only incoming reply */
+	lnet_handle_md_t	   rq_reply_md_h;
+	wait_queue_head_t	   rq_reply_waitq;
+	struct ptlrpc_cb_id	   rq_reply_cbid;
 
 	/** our LNet NID */
 	lnet_nid_t rq_self;
@@ -2019,9 +2032,9 @@ struct ptlrpc_request {
 
 	/** Multi-rpc bits */
 	/** Per-request waitq introduced by bug 21938 for recovery waiting */
-	cfs_waitq_t rq_set_waitq;
+	wait_queue_head_t	   rq_set_waitq;
 	/** Link item for request set lists */
-	cfs_list_t rq_set_chain;
+	struct list_head	   rq_set_chain;
 	/** Link back to the request set */
 	struct ptlrpc_request_set *rq_set;
 	/** Async completion handler, called when reply is received */
@@ -2033,7 +2046,6 @@ struct ptlrpc_request {
 	struct ptlrpc_request_pool *rq_pool;
 
 	struct lu_context	   rq_session;
-	struct lu_context	   rq_recov_session;
 
 	/** request format description */
 	struct req_capsule	   rq_pill;
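
The cfs_waitq_t fields being converted throughout these structures become native Linux wait queues. A standalone sketch of the kernel idiom they now follow; this is illustrative, not code from this header:

	/* The standard Linux wait-queue pattern that replaces the libcfs
	 * cfs_waitq_* wrappers in the structures above. */
	#include <linux/wait.h>
	#include <linux/sched.h>

	static wait_queue_head_t my_waitq;
	static int my_condition;

	static void my_init(void)
	{
		init_waitqueue_head(&my_waitq);	/* was cfs_waitq_init() */
	}

	static void my_producer_side(void)
	{
		my_condition = 1;
		wake_up(&my_waitq);		/* was cfs_waitq_signal() */
	}

	static int my_consumer_side(void)
	{
		/* sleeps until my_condition becomes true */
		return wait_event_interruptible(my_waitq, my_condition);
	}
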
@@ -2232,16 +2244,16 @@ do {                                                    \
  * Structure that defines a single page of a bulk transfer
  */
 struct ptlrpc_bulk_page {
-	/** Linkage to list of pages in a bulk */
-	cfs_list_t       bp_link;
-	/**
-	 * Number of bytes in a page to transfer starting from \a bp_pageoffset
-	 */
-	int              bp_buflen;
-	/** offset within a page */
-	int              bp_pageoffset;
-	/** The page itself */
-	struct page     *bp_page;
+	/** Linkage to list of pages in a bulk */
+	struct list_head bp_link;
+	/**
+	 * Number of bytes in a page to transfer starting from \a bp_pageoffset
+	 */
+	int		 bp_buflen;
+	/** offset within a page */
+	int		 bp_pageoffset;
+	/** The page itself */
+	struct page	*bp_page;
 };
 
 #define BULK_GET_SOURCE   0
@@ -2278,7 +2290,7 @@ struct ptlrpc_bulk_desc {
 	struct obd_import *bd_import;
 	/** Back pointer to the request */
 	struct ptlrpc_request *bd_req;
-	cfs_waitq_t            bd_waitq;        /* server side only WQ */
+	wait_queue_head_t      bd_waitq;        /* server side only WQ */
 	int                    bd_iov_count;    /* # entries in bd_iov */
 	int                    bd_max_iov;      /* allocated size of bd_iov */
 	int                    bd_nob;          /* # bytes covered */
@@ -2322,7 +2334,7 @@ struct ptlrpc_thread {
 	/**
 	 * List of active threads in svc->srv_threads
 	 */
-	cfs_list_t t_link;
+	struct list_head t_link;
 	/**
 	 * thread-private data (preallocated memory)
 	 */
@@ -2344,7 +2356,7 @@ struct ptlrpc_thread {
 	 * the svc this thread belonged to b=18582
 	 */
 	struct ptlrpc_service_part	*t_svcpt;
-	cfs_waitq_t t_ctl_waitq;
+	wait_queue_head_t		t_ctl_waitq;
 	struct lu_env			*t_env;
 	char				t_name[PTLRPC_THR_NAME_LEN];
 };
@@ -2417,23 +2429,23 @@ static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
  * More than one request can fit into the buffer.
  */
 struct ptlrpc_request_buffer_desc {
-	/** Link item for rqbds on a service */
-	cfs_list_t             rqbd_list;
-	/** History of requests for this buffer */
-	cfs_list_t             rqbd_reqs;
-	/** Back pointer to service for which this buffer is registered */
-	struct ptlrpc_service_part *rqbd_svcpt;
-	/** LNet descriptor */
-	lnet_handle_md_t       rqbd_md_h;
-	int                    rqbd_refcount;
-	/** The buffer itself */
-	char                  *rqbd_buffer;
-	struct ptlrpc_cb_id    rqbd_cbid;
-	/**
-	 * This "embedded" request structure is only used for the
-	 * last request to fit into the buffer
-	 */
-	struct ptlrpc_request  rqbd_req;
+	/** Link item for rqbds on a service */
+	struct list_head		rqbd_list;
+	/** History of requests for this buffer */
+	struct list_head		rqbd_reqs;
+	/** Back pointer to service for which this buffer is registered */
+	struct ptlrpc_service_part	*rqbd_svcpt;
+	/** LNet descriptor */
+	lnet_handle_md_t		rqbd_md_h;
+	int				rqbd_refcount;
+	/** The buffer itself */
+	char				*rqbd_buffer;
+	struct ptlrpc_cb_id		rqbd_cbid;
+	/**
+	 * This "embedded" request structure is only used for the
+	 * last request to fit into the buffer
	 */
+	struct ptlrpc_request		rqbd_req;
 };
 
 typedef int (*svc_handler_t)(struct ptlrpc_request *req);
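
A hedged sketch of client-side bulk setup using ptlrpc_prep_bulk_page_pin(), whose declaration appears later in this header; the ptlrpc_prep_bulk_imp() call shape and the use of full-page slices are assumptions for illustration:

	/* Hedged sketch: attach caller-supplied pages to a bulk descriptor.
	 * Real callers pass partial first/last pages as needed. */
	static int my_fill_bulk(struct ptlrpc_request *req,
				struct page **pages, int npages)
	{
		struct ptlrpc_bulk_desc *desc;
		int i;

		desc = ptlrpc_prep_bulk_imp(req, npages, 1, BULK_GET_SOURCE,
					    OST_BULK_PORTAL);
		if (desc == NULL)
			return -ENOMEM;

		/* each page contributes a full PAGE_CACHE_SIZE slice here */
		for (i = 0; i < npages; i++)
			ptlrpc_prep_bulk_page_pin(desc, pages[i], 0,
						  PAGE_CACHE_SIZE);
		return 0;
	}
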
@@ -2484,9 +2496,9 @@ struct ptlrpc_service_ops {
 struct ptlrpc_service {
 	/** serialize /proc operations */
	spinlock_t			srv_lock;
-	/** most often accessed fields */
-	/** chain thru all services */
-	cfs_list_t			srv_list;
+	/** most often accessed fields */
+	/** chain thru all services */
+	struct list_head		srv_list;
 	/** service operations table */
 	struct ptlrpc_service_ops	srv_ops;
 	/** only statically allocated strings here; we don't clean them */
@@ -2494,7 +2506,7 @@ struct ptlrpc_service {
 	/** only statically allocated strings here; we don't clean them */
 	char				*srv_thread_name;
 	/** service thread list */
-	cfs_list_t			srv_threads;
+	struct list_head		srv_threads;
 	/** threads # should be created for each partition on initializing */
 	int				srv_nthrs_cpt_init;
 	/** limit of threads number for each partition */
@@ -2575,7 +2587,7 @@ struct ptlrpc_service_part {
 	/** # running threads */
 	int				scp_nthrs_running;
 	/** service threads list */
-	cfs_list_t			scp_threads;
+	struct list_head		scp_threads;
 
 	/**
 	 * serialize the following fields, used for protecting
@@ -2592,23 +2604,23 @@ struct ptlrpc_service_part {
 	/** # incoming reqs */
 	int				scp_nreqs_incoming;
 	/** request buffers to be reposted */
-	cfs_list_t			scp_rqbd_idle;
+	struct list_head		scp_rqbd_idle;
 	/** req buffers receiving */
-	cfs_list_t			scp_rqbd_posted;
+	struct list_head		scp_rqbd_posted;
 	/** incoming reqs */
-	cfs_list_t			scp_req_incoming;
+	struct list_head		scp_req_incoming;
 	/** timeout before re-posting reqs, in tick */
 	cfs_duration_t			scp_rqbd_timeout;
 	/**
 	 * all threads sleep on this. This wait-queue is signalled when new
 	 * incoming request arrives and when difficult reply has to be handled.
 	 */
-	cfs_waitq_t			scp_waitq;
+	wait_queue_head_t		scp_waitq;
 
 	/** request history */
-	cfs_list_t			scp_hist_reqs;
+	struct list_head		scp_hist_reqs;
 	/** request buffer history */
-	cfs_list_t			scp_hist_rqbds;
+	struct list_head		scp_hist_rqbds;
 	/** # request buffers in history */
 	int				scp_hist_nrqbds;
 	/** sequence number for request */
@@ -2647,7 +2659,7 @@ struct ptlrpc_service_part {
 	/** reqs waiting for replies */
 	struct ptlrpc_at_array		scp_at_array;
 	/** early reply timer */
-	cfs_timer_t			scp_at_timer;
+	struct timer_list		scp_at_timer;
 	/** debug */
 	cfs_time_t			scp_at_checktime;
 	/** check early replies */
@@ -2660,17 +2672,17 @@ struct ptlrpc_service_part {
 	 */
	spinlock_t			scp_rep_lock __cfs_cacheline_aligned;
 	/** all the active replies */
-	cfs_list_t			scp_rep_active;
+	struct list_head		scp_rep_active;
 #ifndef __KERNEL__
 	/** replies waiting for service */
-	cfs_list_t			scp_rep_queue;
+	struct list_head		scp_rep_queue;
 #endif
 	/** List of free reply_states */
-	cfs_list_t			scp_rep_idle;
+	struct list_head		scp_rep_idle;
 	/** waitq to run, when adding stuff to srv_free_rs_list */
-	cfs_waitq_t			scp_rep_waitq;
+	wait_queue_head_t		scp_rep_waitq;
 	/** # 'difficult' replies */
-	cfs_atomic_t			scp_nreps_difficult;
+	atomic_t			scp_nreps_difficult;
 };
 
 #define ptlrpc_service_for_each_part(part, i, svc)			\
@@ -2704,7 +2716,7 @@ struct ptlrpcd_ctl {
 	 */
 	struct ptlrpc_request_set  *pc_set;
 	/**
-	 * Thread name used in cfs_daemonize()
+	 * Thread name used in kthread_run()
 	 */
 	char			    pc_name[16];
 	/**
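
scp_at_timer above becomes a plain struct timer_list. The corresponding native setup, arm and teardown idiom is sketched below; the callback body is illustrative, since the real early-reply scan lives in ptlrpc's service code:

	/* The struct timer_list idiom that replaces cfs_timer_t. */
	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list my_at_timer;

	static void my_at_timer_cb(unsigned long data)
	{
		/* e.g. kick a service thread to scan for due early replies */
	}

	static void my_timer_usage(void)
	{
		setup_timer(&my_at_timer, my_at_timer_cb, 0 /* cb arg */);
		mod_timer(&my_at_timer, jiffies + HZ);	/* fire in ~1s */
		del_timer_sync(&my_at_timer);		/* on shutdown */
	}
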
@@ -2911,6 +2923,8 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd);
  * request queues, request management, etc.
  * @{
  */
+void ptlrpc_request_committed(struct ptlrpc_request *req, int force);
+
 void ptlrpc_init_client(int req_portal, int rep_portal, char *name,
                         struct ptlrpc_client *);
 void ptlrpc_cleanup_client(struct obd_import *imp);
@@ -2985,16 +2999,16 @@ static inline void ptlrpc_free_bulk_nopin(struct ptlrpc_bulk_desc *bulk)
 	__ptlrpc_free_bulk(bulk, 0);
 }
 void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc,
-			     cfs_page_t *page, int pageoffset, int len, int);
+			     struct page *page, int pageoffset, int len, int);
 static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc,
-					     cfs_page_t *page, int pageoffset,
+					     struct page *page, int pageoffset,
 					     int len)
 {
 	__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1);
 }
 
 static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc,
-					       cfs_page_t *page, int pageoffset,
+					       struct page *page, int pageoffset,
 					       int len)
 {
 	__ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0);
 }
@@ -3227,30 +3241,62 @@ lustre_shrink_reply(struct ptlrpc_request *req, int segment,
 	req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment,
 					   newlen, move_data);
 }
+
+#ifdef LUSTRE_TRANSLATE_ERRNOS
+
+static inline int ptlrpc_status_hton(int h)
+{
+	/*
+	 * Positive errnos must be network errnos, such as LUSTRE_EDEADLK,
+	 * ELDLM_LOCK_ABORTED, etc.
+	 */
+	if (h < 0)
+		return -lustre_errno_hton(-h);
+	else
+		return h;
+}
+
+static inline int ptlrpc_status_ntoh(int n)
+{
+	/*
+	 * See the comment in ptlrpc_status_hton().
+	 */
+	if (n < 0)
+		return -lustre_errno_ntoh(-n);
+	else
+		return n;
+}
+
+#else
+
+#define ptlrpc_status_hton(h) (h)
+#define ptlrpc_status_ntoh(n) (n)
+
+#endif
 /** @} */
 
 /** Change request phase of \a req to \a new_phase */
 static inline void
 ptlrpc_rqphase_move(struct ptlrpc_request *req, enum rq_phase new_phase)
 {
-	if (req->rq_phase == new_phase)
-		return;
+	if (req->rq_phase == new_phase)
+		return;
 
-	if (new_phase == RQ_PHASE_UNREGISTERING) {
-		req->rq_next_phase = req->rq_phase;
-		if (req->rq_import)
-			cfs_atomic_inc(&req->rq_import->imp_unregistering);
-	}
+	if (new_phase == RQ_PHASE_UNREGISTERING) {
+		req->rq_next_phase = req->rq_phase;
+		if (req->rq_import)
+			atomic_inc(&req->rq_import->imp_unregistering);
+	}
 
-	if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
-		if (req->rq_import)
-			cfs_atomic_dec(&req->rq_import->imp_unregistering);
-	}
+	if (req->rq_phase == RQ_PHASE_UNREGISTERING) {
+		if (req->rq_import)
+			atomic_dec(&req->rq_import->imp_unregistering);
+	}
 
-	DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
-		  ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
+	DEBUG_REQ(D_INFO, req, "move req \"%s\" -> \"%s\"",
+		  ptlrpc_rqphase2str(req), ptlrpc_phase2str(new_phase));
 
-	req->rq_phase = new_phase;
+	req->rq_phase = new_phase;
 }
@@ -3298,7 +3344,8 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
 		spin_unlock(&req->rq_lock);
 		return 1;
 	}
-	rc = req->rq_receiving_reply || req->rq_must_unlink;
+	rc = req->rq_receiving_reply;
+	rc = rc || req->rq_req_unlink || req->rq_reply_unlink;
 	spin_unlock(&req->rq_lock);
 	return rc;
 }
@@ -3306,25 +3353,25 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req)
 static inline void
 ptlrpc_client_wake_req(struct ptlrpc_request *req)
 {
-	if (req->rq_set == NULL)
-		cfs_waitq_signal(&req->rq_reply_waitq);
-	else
-		cfs_waitq_signal(&req->rq_set->set_waitq);
+	if (req->rq_set == NULL)
+		wake_up(&req->rq_reply_waitq);
+	else
+		wake_up(&req->rq_set->set_waitq);
 }
 
 static inline void
 ptlrpc_rs_addref(struct ptlrpc_reply_state *rs)
 {
-	LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
-	cfs_atomic_inc(&rs->rs_refcount);
+	LASSERT(atomic_read(&rs->rs_refcount) > 0);
+	atomic_inc(&rs->rs_refcount);
 }
 
 static inline void
 ptlrpc_rs_decref(struct ptlrpc_reply_state *rs)
 {
-	LASSERT(cfs_atomic_read(&rs->rs_refcount) > 0);
-	if (cfs_atomic_dec_and_test(&rs->rs_refcount))
-		lustre_free_reply_state(rs);
+	LASSERT(atomic_read(&rs->rs_refcount) > 0);
+	if (atomic_dec_and_test(&rs->rs_refcount))
+		lustre_free_reply_state(rs);
 }
 
 /* Should only be called once per req */
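
ptlrpc_status_hton() and ptlrpc_status_ntoh() exist because host errno values differ across platforms, so only canonical network values are safe on the wire. A hedged usage sketch; wire_status stands in for the status field that ptlrpc actually carries inside the ptlrpc_body:

	/* Illustration of the errno-translation pair above. */
	static __u32 my_pack_status(int host_rc)
	{
		/* e.g. -EDEADLK, whose numeric value varies by platform,
		 * travels as a canonical network value such as
		 * -LUSTRE_EDEADLK when translation is enabled */
		return ptlrpc_status_hton(host_rc);
	}

	static int my_unpack_status(__u32 wire_status)
	{
		/* inverse mapping on the receiving side */
		return ptlrpc_status_ntoh(wire_status);
	}
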
@@ -3429,13 +3476,12 @@ typedef int (*timeout_cb_t)(struct timeout_item *, void *);
 int ptlrpc_pinger_add_import(struct obd_import *imp);
 int ptlrpc_pinger_del_import(struct obd_import *imp);
 int ptlrpc_add_timeout_client(int time, enum timeout_event event,
-                              timeout_cb_t cb, void *data,
-                              cfs_list_t *obd_list);
-int ptlrpc_del_timeout_client(cfs_list_t *obd_list,
+			      timeout_cb_t cb, void *data,
+			      struct list_head *obd_list);
+int ptlrpc_del_timeout_client(struct list_head *obd_list,
 			      enum timeout_event event);
 struct ptlrpc_request * ptlrpc_prep_ping(struct obd_import *imp);
 int ptlrpc_obd_ping(struct obd_device *obd);
-cfs_time_t ptlrpc_suspend_wakeup_time(void);
 #ifdef __KERNEL__
 void ping_evictor_start(void);
 void ping_evictor_stop(void);
@@ -3443,7 +3489,6 @@ void ping_evictor_stop(void);
 #define ping_evictor_start()	do {} while (0)
 #define ping_evictor_stop()	do {} while (0)
 #endif
-int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req);
 void ptlrpc_pinger_ir_up(void);
 void ptlrpc_pinger_ir_down(void);
 /** @} */
@@ -3514,7 +3559,6 @@ int llog_origin_handle_prev_block(struct ptlrpc_request *req);
 int llog_origin_handle_next_block(struct ptlrpc_request *req);
 int llog_origin_handle_read_header(struct ptlrpc_request *req);
 int llog_origin_handle_close(struct ptlrpc_request *req);
-int llog_origin_handle_cancel(struct ptlrpc_request *req);
 
 /* ptlrpc/llog_client.c */
 extern struct llog_operations llog_client_ops;
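
A hedged usage sketch for the now list_head based pinger timeout API above; my_timeout_cb, the 30 second interval, and the choice of TIMEOUT_GRANT event are illustrative:

	/* Registering and unregistering a pinger timeout callback. */
	static struct list_head my_timeout_list;

	static int my_timeout_cb(struct timeout_item *item, void *data)
	{
		/* invoked by the pinger once no activity was seen for
		 * the registered interval */
		return 0;
	}

	static int my_register(void)
	{
		INIT_LIST_HEAD(&my_timeout_list);
		return ptlrpc_add_timeout_client(30, TIMEOUT_GRANT,
						 my_timeout_cb, NULL,
						 &my_timeout_list);
	}

	static int my_unregister(void)
	{
		return ptlrpc_del_timeout_client(&my_timeout_list,
						 TIMEOUT_GRANT);
	}
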