X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flustre_net.h;h=106a17c13efdf7fbea8e978f6eabe6d287414224;hp=99e42371dab86f2a6fe2f9f265be9f05823fbcea;hb=268edb13d769994c4841864034d72f0bd7b36e12;hpb=2104ed0f0da3651f0cb4ab0c78a1037891d7cb4f diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h index 99e4237..106a17c 100644 --- a/lustre/include/lustre_net.h +++ b/lustre/include/lustre_net.h @@ -15,11 +15,7 @@ * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see - * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf - * - * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, - * CA 95054 USA or visit www.sun.com if you need additional information or - * have any questions. + * http://www.gnu.org/licenses/gpl-2.0.html * * GPL HEADER END */ @@ -27,7 +23,7 @@ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2010, 2015, Intel Corporation. + * Copyright (c) 2010, 2017, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -54,12 +50,13 @@ * * @{ */ - +#include #include #include -#include #include -#include +#include +#include +#include #include #include #include @@ -67,7 +64,7 @@ #include #include #include -#include +#include /* MD flags we _always_ use */ #define PTLRPC_MD_OPTIONS 0 @@ -79,7 +76,7 @@ * value. The client is free to limit the actual RPC size for any bulk * transfer via cl_max_pages_per_rpc to some non-power-of-two value. * NOTE: This is limited to 16 (=64GB RPCs) by IOOBJ_MAX_BRW_BITS. */ -#define PTLRPC_BULK_OPS_BITS 4 +#define PTLRPC_BULK_OPS_BITS 6 #if PTLRPC_BULK_OPS_BITS > 16 #error "More than 65536 BRW RPCs not allowed by IOOBJ_MAX_BRW_BITS." #endif @@ -101,15 +98,16 @@ * currently supported maximum between peers at connect via ocd_brw_size. */ #define PTLRPC_MAX_BRW_BITS (LNET_MTU_BITS + PTLRPC_BULK_OPS_BITS) -#define PTLRPC_MAX_BRW_SIZE (1 << PTLRPC_MAX_BRW_BITS) +#define PTLRPC_MAX_BRW_SIZE (1U << PTLRPC_MAX_BRW_BITS) #define PTLRPC_MAX_BRW_PAGES (PTLRPC_MAX_BRW_SIZE >> PAGE_SHIFT) -#define ONE_MB_BRW_SIZE (1 << LNET_MTU_BITS) -#define MD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) +#define ONE_MB_BRW_SIZE (1U << LNET_MTU_BITS) +#define MD_MAX_BRW_SIZE (1U << LNET_MTU_BITS) #define MD_MAX_BRW_PAGES (MD_MAX_BRW_SIZE >> PAGE_SHIFT) #define DT_MAX_BRW_SIZE PTLRPC_MAX_BRW_SIZE +#define DT_DEF_BRW_SIZE (4 * ONE_MB_BRW_SIZE) #define DT_MAX_BRW_PAGES (DT_MAX_BRW_SIZE >> PAGE_SHIFT) -#define OFD_MAX_BRW_SIZE (1 << LNET_MTU_BITS) +#define OFD_MAX_BRW_SIZE (1U << LNET_MTU_BITS) /* When PAGE_SIZE is a constant, we can check our arithmetic here with cpp! 
*/ #if ((PTLRPC_MAX_BRW_PAGES & (PTLRPC_MAX_BRW_PAGES - 1)) != 0) @@ -475,34 +473,49 @@ * - single object with 16 pages is 512 bytes * - OST_IO_MAXREQSIZE must be at least 1 page of cookies plus some spillover * - Must be a multiple of 1024 - * - actual size is about 18K */ -#define _OST_MAXREQSIZE_SUM (sizeof(struct lustre_msg) + \ - sizeof(struct ptlrpc_body) + \ - sizeof(struct obdo) + \ - sizeof(struct obd_ioobj) + \ - sizeof(struct niobuf_remote) * DT_MAX_BRW_PAGES) +#define _OST_MAXREQSIZE_BASE ((unsigned long)(sizeof(struct lustre_msg) + \ + sizeof(struct ptlrpc_body) + \ + sizeof(struct obdo) + \ + sizeof(struct obd_ioobj) + \ + sizeof(struct niobuf_remote))) +#define _OST_MAXREQSIZE_SUM ((unsigned long)(_OST_MAXREQSIZE_BASE + \ + sizeof(struct niobuf_remote) * \ + (DT_MAX_BRW_PAGES - 1))) /** * FIEMAP request can be 4K+ for now */ -#define OST_MAXREQSIZE (16 * 1024) -#define OST_IO_MAXREQSIZE max_t(int, OST_MAXREQSIZE, \ - (((_OST_MAXREQSIZE_SUM - 1) | (1024 - 1)) + 1)) +#define OST_MAXREQSIZE (16UL * 1024UL) +#define OST_IO_MAXREQSIZE max(OST_MAXREQSIZE, \ + ((_OST_MAXREQSIZE_SUM - 1) | \ + (1024UL - 1)) + 1) +/* Safe estimate of free space in standard RPC, provides upper limit for # of + * bytes of i/o to pack in RPC (skipping bulk transfer). */ +#define OST_SHORT_IO_SPACE (OST_IO_MAXREQSIZE - _OST_MAXREQSIZE_BASE) + +/* Actual size used for short i/o buffer. Calculation means this: + * At least one page (for large PAGE_SIZE), or 16 KiB, but not more + * than the available space aligned to a page boundary. */ +#define OBD_MAX_SHORT_IO_BYTES min(max(PAGE_SIZE, 16UL * 1024UL), \ + OST_SHORT_IO_SPACE & PAGE_MASK) #define OST_MAXREPSIZE (9 * 1024) #define OST_IO_MAXREPSIZE OST_MAXREPSIZE #define OST_NBUFS 64 /** OST_BUFSIZE = max_reqsize + max sptlrpc payload size */ -#define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 16 * 1024) +#define OST_BUFSIZE max_t(int, OST_MAXREQSIZE + 1024, 32 * 1024) /** * OST_IO_MAXREQSIZE is 18K, giving extra 46K can increase buffer utilization * rate of request buffer, please check comment of MDS_LOV_BUFSIZE for details. */ #define OST_IO_BUFSIZE max_t(int, OST_IO_MAXREQSIZE + 1024, 64 * 1024) -/* Macro to hide a typecast. */ -#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args) +/* Macro to hide a typecast and BUILD_BUG. 
*/ +#define ptlrpc_req_async_args(_var, req) ({ \ + BUILD_BUG_ON(sizeof(*_var) > sizeof(req->rq_async_args)); \ + (typeof(_var))&req->rq_async_args; \ + }) struct ptlrpc_replay_async_args { int praa_old_state; @@ -518,7 +531,7 @@ struct ptlrpc_connection { /** Our own lnet nid for this connection */ lnet_nid_t c_self; /** Remote side nid for this connection */ - lnet_process_id_t c_peer; + struct lnet_process_id c_peer; /** UUID of the other side */ struct obd_uuid c_remote_uuid; /** reference counter for this connection */ @@ -532,7 +545,7 @@ struct ptlrpc_client { /** What portal do we expect replies on */ __u32 cli_reply_portal; /** Name of the client */ - char *cli_name; + const char *cli_name; }; /** state flags of requests */ @@ -555,7 +568,6 @@ union ptlrpc_async_args { }; struct ptlrpc_request_set; -typedef int (*set_interpreter_func)(struct ptlrpc_request_set *, void *, int); typedef int (*set_producer_func)(struct ptlrpc_request_set *, void *); /** @@ -577,20 +589,9 @@ struct ptlrpc_request_set { atomic_t set_remaining; /** wait queue to wait on for request events */ wait_queue_head_t set_waitq; - wait_queue_head_t *set_wakeup_ptr; /** List of requests in the set */ struct list_head set_requests; /** - * List of completion callbacks to be called when the set is completed - * This is only used if \a set_interpret is NULL. - * Links struct ptlrpc_set_cbdata. - */ - struct list_head set_cblist; - /** Completion callback, if only one. */ - set_interpreter_func set_interpret; - /** opaq argument passed to completion \a set_interpret callback. */ - void *set_arg; - /** * Lock for \a set_new_requests manipulations * locked so that any old caller can communicate requests to * the set holder who can then fold them into the lock-free set @@ -611,18 +612,6 @@ struct ptlrpc_request_set { unsigned int set_allow_intr:1; }; -/** - * Description of a single ptrlrpc_set callback - */ -struct ptlrpc_set_cbdata { - /** List linkage item */ - struct list_head psc_item; - /** Pointer to interpreting function */ - set_interpreter_func psc_interpret; - /** Opaq argument to pass to the callback */ - void *psc_data; -}; - struct ptlrpc_bulk_desc; struct ptlrpc_service_part; struct ptlrpc_service; @@ -631,8 +620,8 @@ struct ptlrpc_service; * ptlrpc callback & work item stuff */ struct ptlrpc_cb_id { - void (*cbid_fn)(lnet_event_t *ev); /* specific callback fn */ - void *cbid_arg; /* additional arg */ + void (*cbid_fn)(struct lnet_event *ev); /* specific callback fn */ + void *cbid_arg; /* additional arg */ }; /** Maximum number of locks to fit into reply state */ @@ -672,6 +661,8 @@ struct ptlrpc_reply_state { unsigned long rs_committed:1;/* the transaction was committed and the rs was dispatched by ptlrpc_commit_replies */ + unsigned long rs_convert_lock:1; /* need to convert saved + * locks to COS mode */ atomic_t rs_refcount; /* number of users */ /** Number of locks awaiting client ACK */ int rs_nlocks; @@ -687,7 +678,7 @@ struct ptlrpc_reply_state { struct obd_export *rs_export; struct ptlrpc_service_part *rs_svcpt; /** Lnet metadata handle for the reply */ - lnet_handle_md_t rs_md_h; + struct lnet_handle_md rs_md_h; /** Context for the sevice thread */ struct ptlrpc_svc_ctx *rs_svc_ctx; @@ -785,19 +776,19 @@ struct ptlrpc_cli_req { /** For bulk requests on client only: bulk descriptor */ struct ptlrpc_bulk_desc *cr_bulk; /** optional time limit for send attempts */ - cfs_duration_t cr_delay_limit; + time64_t cr_delay_limit; /** time request was first queued */ - cfs_time_t cr_queued_time; - /** 
request sent timeval */ - struct timeval cr_sent_tv; + time64_t cr_queued_time; + /** request sent in nanoseconds */ + ktime_t cr_sent_ns; /** time for request really sent out */ - time_t cr_sent_out; + time64_t cr_sent_out; /** when req reply unlink must finish. */ - time_t cr_reply_deadline; + time64_t cr_reply_deadline; /** when req bulk unlink must finish. */ - time_t cr_bulk_deadline; + time64_t cr_bulk_deadline; /** when req unlink must finish. */ - time_t cr_req_deadline; + time64_t cr_req_deadline; /** Portal to which this request would be sent */ short cr_req_ptl; /** Portal where to wait for reply and where reply would be sent */ @@ -819,11 +810,11 @@ struct ptlrpc_cli_req { /** Link back to the request set */ struct ptlrpc_request_set *cr_set; /** outgoing request MD handle */ - lnet_handle_md_t cr_req_md_h; + struct lnet_handle_md cr_req_md_h; /** request-out callback parameter */ struct ptlrpc_cb_id cr_req_cbid; /** incoming reply MD handle */ - lnet_handle_md_t cr_reply_md_h; + struct lnet_handle_md cr_reply_md_h; wait_queue_head_t cr_reply_waitq; /** reply callback parameter */ struct ptlrpc_cb_id cr_reply_cbid; @@ -852,7 +843,7 @@ struct ptlrpc_cli_req { #define rq_bulk rq_cli.cr_bulk #define rq_delay_limit rq_cli.cr_delay_limit #define rq_queued_time rq_cli.cr_queued_time -#define rq_sent_tv rq_cli.cr_sent_tv +#define rq_sent_ns rq_cli.cr_sent_ns #define rq_real_sent rq_cli.cr_sent_out #define rq_reply_deadline rq_cli.cr_reply_deadline #define rq_bulk_deadline rq_cli.cr_bulk_deadline @@ -914,7 +905,7 @@ struct ptlrpc_srv_req { struct ptlrpc_nrs_request sr_nrq; /** @} nrs */ /** request arrival time */ - struct timeval sr_arrival_time; + struct timespec64 sr_arrival_time; /** server's half ctx */ struct ptlrpc_svc_ctx *sr_svc_ctx; /** (server side), pointed directly into req buffer */ @@ -968,6 +959,7 @@ struct ptlrpc_request { * rq_list */ spinlock_t rq_lock; + spinlock_t rq_early_free_lock; /** client-side flags are serialized by rq_lock @{ */ unsigned int rq_intr:1, rq_replied:1, rq_err:1, rq_timedout:1, rq_resend:1, rq_restart:1, @@ -999,6 +991,7 @@ struct ptlrpc_request { rq_allow_replay:1, /* bulk request, sent to server, but uncommitted */ rq_unstable:1, + rq_early_free_repbuf:1, /* free reply buffer in advance */ rq_allow_intr:1; /** @} */ @@ -1058,6 +1051,13 @@ struct ptlrpc_request { /** description of flavors for client & server */ struct sptlrpc_flavor rq_flvr; + /** + * SELinux policy info at the time of the request + * sepol string format is: + * ::: + */ + char rq_sepol[LUSTRE_NODEMAP_SEPOL_LENGTH + 1]; + /* client/server security flags */ unsigned int rq_ctx_init:1, /* context initiation */ @@ -1108,18 +1108,21 @@ struct ptlrpc_request { /** our LNet NID */ lnet_nid_t rq_self; /** Peer description (the other side) */ - lnet_process_id_t rq_peer; + struct lnet_process_id rq_peer; + /** Descriptor for the NID from which the peer sent the request. */ + struct lnet_process_id rq_source; /** * service time estimate (secs) * If the request is not served by this time, it is marked as timed out. + * Do not change to time64_t since this is transmitted over the wire. */ - int rq_timeout; + time_t rq_timeout; /** * when request/reply sent (secs), or time when request should be sent */ - time_t rq_sent; + time64_t rq_sent; /** when request must finish. 
*/ - time_t rq_deadline; + time64_t rq_deadline; /** request format description */ struct req_capsule rq_pill; }; @@ -1131,13 +1134,14 @@ struct ptlrpc_request { static inline int ptlrpc_req_interpret(const struct lu_env *env, struct ptlrpc_request *req, int rc) { - if (req->rq_interpret_reply != NULL) { - req->rq_status = req->rq_interpret_reply(env, req, - &req->rq_async_args, - rc); - return req->rq_status; - } - return rc; + if (req->rq_interpret_reply != NULL) { + req->rq_status = req->rq_interpret_reply(env, req, + &req->rq_async_args, + rc); + return req->rq_status; + } + + return rc; } /** \addtogroup nrs @@ -1170,37 +1174,37 @@ static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req) /** @} nrs */ /** - * Returns 1 if request buffer at offset \a index was already swabbed + * Returns true if request buffer at offset \a index was already swabbed */ -static inline int lustre_req_swabbed(struct ptlrpc_request *req, size_t index) +static inline bool lustre_req_swabbed(struct ptlrpc_request *req, size_t index) { - LASSERT(index < sizeof(req->rq_req_swab_mask) * 8); - return req->rq_req_swab_mask & (1 << index); + LASSERT(index < sizeof(req->rq_req_swab_mask) * 8); + return req->rq_req_swab_mask & (1 << index); } /** - * Returns 1 if request reply buffer at offset \a index was already swabbed + * Returns true if request reply buffer at offset \a index was already swabbed */ -static inline int lustre_rep_swabbed(struct ptlrpc_request *req, size_t index) +static inline bool lustre_rep_swabbed(struct ptlrpc_request *req, size_t index) { - LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8); - return req->rq_rep_swab_mask & (1 << index); + LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8); + return req->rq_rep_swab_mask & (1 << index); } /** - * Returns 1 if request needs to be swabbed into local cpu byteorder + * Returns true if request needs to be swabbed into local cpu byteorder */ -static inline int ptlrpc_req_need_swab(struct ptlrpc_request *req) +static inline bool ptlrpc_req_need_swab(struct ptlrpc_request *req) { - return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF); + return lustre_req_swabbed(req, MSG_PTLRPC_HEADER_OFF); } /** - * Returns 1 if request reply needs to be swabbed into local cpu byteorder + * Returns true if request reply needs to be swabbed into local cpu byteorder */ -static inline int ptlrpc_rep_need_swab(struct ptlrpc_request *req) +static inline bool ptlrpc_rep_need_swab(struct ptlrpc_request *req) { - return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF); + return lustre_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF); } /** @@ -1275,12 +1279,15 @@ ptlrpc_rqphase2str(struct ptlrpc_request *req) FLAG(req->rq_err, "E"), FLAG(req->rq_net_err, "e"), \ FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \ FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \ - FLAG(req->rq_no_resend, "N"), \ + FLAG(req->rq_no_resend, "N"), FLAG(req->rq_no_reply, "n"), \ FLAG(req->rq_waiting, "W"), \ FLAG(req->rq_wait_ctx, "C"), FLAG(req->rq_hp, "H"), \ - FLAG(req->rq_committed, "M") + FLAG(req->rq_committed, "M"), \ + FLAG(req->rq_req_unlinked, "Q"), \ + FLAG(req->rq_reply_unlinked, "U"), \ + FLAG(req->rq_receiving_reply, "r") -#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s%s" +#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s" void _debug_req(struct ptlrpc_request *req, struct libcfs_debug_msg_data *data, const char *fmt, ...) 
@@ -1453,7 +1460,7 @@ struct ptlrpc_bulk_desc { struct obd_import *bd_import; /** Back pointer to the request */ struct ptlrpc_request *bd_req; - struct ptlrpc_bulk_frag_ops *bd_frag_ops; + const struct ptlrpc_bulk_frag_ops *bd_frag_ops; wait_queue_head_t bd_waitq; /* server side only WQ */ int bd_iov_count; /* # entries in bd_iov */ int bd_max_iov; /* allocated size of bd_iov */ @@ -1467,7 +1474,7 @@ struct ptlrpc_bulk_desc { int bd_md_count; /* # valid entries in bd_mds */ int bd_md_max_brw; /* max entries in bd_mds */ /** array of associated MDs */ - lnet_handle_md_t bd_mds[PTLRPC_BULK_OPS_COUNT]; + struct lnet_handle_md bd_mds[PTLRPC_BULK_OPS_COUNT]; union { struct { @@ -1496,12 +1503,12 @@ struct ptlrpc_bulk_desc { #define BD_GET_ENC_KVEC(desc, i) ((desc)->bd_u.bd_kvec.bd_enc_kvec[i]) enum { - SVC_STOPPED = 1 << 0, - SVC_STOPPING = 1 << 1, - SVC_STARTING = 1 << 2, - SVC_RUNNING = 1 << 3, - SVC_EVENT = 1 << 4, - SVC_SIGNAL = 1 << 5, + SVC_INIT = 0, + SVC_STOPPED = 1 << 0, + SVC_STOPPING = 1 << 1, + SVC_STARTING = 1 << 2, + SVC_RUNNING = 1 << 3, + SVC_EVENT = 1 << 4, }; #define PTLRPC_THR_NAME_LEN 32 @@ -1523,13 +1530,15 @@ struct ptlrpc_thread { */ unsigned int t_id; /** - * service thread pid + * service thread */ + struct task_struct *t_task; pid_t t_pid; + ktime_t t_touched; /** * put watchdog in the structure per thread b=14840 */ - struct lc_watchdog *t_watchdog; + struct delayed_work t_watchdog; /** * the svc this thread belonged to b=18582 */ @@ -1569,11 +1578,6 @@ static inline int thread_is_event(struct ptlrpc_thread *thread) return !!(thread->t_flags & SVC_EVENT); } -static inline int thread_is_signal(struct ptlrpc_thread *thread) -{ - return !!(thread->t_flags & SVC_SIGNAL); -} - static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags) { thread->t_flags &= ~flags; @@ -1614,7 +1618,7 @@ struct ptlrpc_request_buffer_desc { /** Back pointer to service for which this buffer is registered */ struct ptlrpc_service_part *rqbd_svcpt; /** LNet descriptor */ - lnet_handle_md_t rqbd_md_h; + struct lnet_handle_md rqbd_md_h; int rqbd_refcount; /** The buffer itself */ char *rqbd_buffer; @@ -1689,8 +1693,8 @@ struct ptlrpc_service { int srv_nthrs_cpt_init; /** limit of threads number for each partition */ int srv_nthrs_cpt_limit; - /** Root of /proc dir tree for this service */ - struct proc_dir_entry *srv_procroot; + /** Root of debugfs dir tree for this service */ + struct dentry *srv_debugfs_entry; /** Pointer to statistic data for this service */ struct lprocfs_stats *srv_stats; /** # hp per lp reqs to handle */ @@ -1716,17 +1720,25 @@ struct ptlrpc_service { int srv_watchdog_factor; /** under unregister_service */ unsigned srv_is_stopping:1; + /** Whether or not to restrict service threads to CPUs in this CPT */ + unsigned srv_cpt_bind:1; + /** max # request buffers */ + int srv_nrqbds_max; /** max # request buffers in history per partition */ int srv_hist_nrqbds_cpt_max; - /** number of CPTs this service bound on */ + /** number of CPTs this service associated with */ int srv_ncpts; - /** CPTs array this service bound on */ + /** CPTs array this service associated with */ __u32 *srv_cpts; /** 2^srv_cptab_bits >= cfs_cpt_numbert(srv_cptable) */ int srv_cpt_bits; /** CPT table this service is running over */ struct cfs_cpt_table *srv_cptable; + + /* sysfs object */ + struct kobject srv_kobj; + struct completion srv_kobj_unregister; /** * partition data for ptlrpc service */ @@ -1773,6 +1785,8 @@ struct ptlrpc_service_part { * threads starting & stopping are 
also protected by this lock. */ spinlock_t scp_lock __cfs_cacheline_aligned; + /** userland serialization */ + struct mutex scp_mutex; /** total # req buffer descs allocated */ int scp_nrqbds_total; /** # posted request buffers for receiving */ @@ -1787,8 +1801,8 @@ struct ptlrpc_service_part { struct list_head scp_rqbd_posted; /** incoming reqs */ struct list_head scp_req_incoming; - /** timeout before re-posting reqs, in tick */ - cfs_duration_t scp_rqbd_timeout; + /** timeout before re-posting reqs, in jiffies */ + long scp_rqbd_timeout; /** * all threads sleep on this. This wait-queue is signalled when new * incoming request arrives and when difficult reply has to be handled. @@ -1839,7 +1853,7 @@ struct ptlrpc_service_part { /** early reply timer */ struct timer_list scp_at_timer; /** debug */ - cfs_time_t scp_at_checktime; + ktime_t scp_at_checktime; /** check early replies */ unsigned scp_at_check; /** @} */ @@ -1894,10 +1908,6 @@ struct ptlrpcd_ctl { */ char pc_name[16]; /** - * Environment for request interpreters to run in. - */ - struct lu_env pc_env; - /** * CPT the thread is bound on. */ int pc_cpt; @@ -1987,26 +1997,26 @@ static inline bool nrs_policy_compat_one(const struct ptlrpc_service *svc, /** @} nrs */ /* ptlrpc/events.c */ -extern lnet_handle_eq_t ptlrpc_eq_h; +extern struct lnet_handle_eq ptlrpc_eq_h; extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, - lnet_process_id_t *peer, lnet_nid_t *self); + struct lnet_process_id *peer, lnet_nid_t *self); /** * These callbacks are invoked by LNet when something happened to * underlying buffer * @{ */ -extern void request_out_callback(lnet_event_t *ev); -extern void reply_in_callback(lnet_event_t *ev); -extern void client_bulk_callback(lnet_event_t *ev); -extern void request_in_callback(lnet_event_t *ev); -extern void reply_out_callback(lnet_event_t *ev); +extern void request_out_callback(struct lnet_event *ev); +extern void reply_in_callback(struct lnet_event *ev); +extern void client_bulk_callback(struct lnet_event *ev); +extern void request_in_callback(struct lnet_event *ev); +extern void reply_out_callback(struct lnet_event *ev); #ifdef HAVE_SERVER_SUPPORT -extern void server_bulk_callback(lnet_event_t *ev); +extern void server_bulk_callback(struct lnet_event *ev); #endif /** @} */ /* ptlrpc/connection.c */ -struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer, +struct ptlrpc_connection *ptlrpc_connection_get(struct lnet_process_id peer, lnet_nid_t self, struct obd_uuid *uuid); int ptlrpc_connection_put(struct ptlrpc_connection *c); @@ -2015,6 +2025,30 @@ int ptlrpc_connection_init(void); void ptlrpc_connection_fini(void); extern lnet_pid_t ptl_get_pid(void); +/* + * Check if the peer connection is on the local node. We need to use GFP_NOFS + * for requests from a local client to avoid recursing into the filesystem + * as we might end up waiting on a page sent in the request we're serving. + * + * Use __GFP_HIGHMEM so that the pages can use all of the available memory + * on 32-bit machines. Use more aggressive GFP_HIGHUSER flags from non-local + * clients to be able to generate more memory pressure on the OSS and allow + * inactive pages to be reclaimed, since it doesn't have any other processes + * or allocations that generate memory reclaim pressure. + * + * See b=17576 (bdf50dc9) and b=19529 (3dcf18d3) for details. 
+ */ +static inline bool ptlrpc_connection_is_local(struct ptlrpc_connection *conn) +{ + if (!conn) + return false; + + if (conn->c_peer.nid == conn->c_self) + return true; + + RETURN(LNetIsPeerLocal(conn->c_peer.nid)); +} + /* ptlrpc/niobuf.c */ /** * Actual interfacing with LNet to put/get/register/unregister stuff @@ -2054,12 +2088,13 @@ static inline int ptlrpc_client_bulk_active(struct ptlrpc_request *req) LASSERT(req != NULL); desc = req->rq_bulk; - if (req->rq_bulk_deadline > cfs_time_current_sec()) - return 1; - if (!desc) return 0; + if (req->rq_bulk_deadline > ktime_get_real_seconds()) + return 1; + + spin_lock(&desc->bd_lock); rc = desc->bd_md_count; spin_unlock(&desc->bd_lock); @@ -2085,10 +2120,11 @@ int ptlrpc_register_rqbd(struct ptlrpc_request_buffer_desc *rqbd); */ void ptlrpc_request_committed(struct ptlrpc_request *req, int force); -void ptlrpc_init_client(int req_portal, int rep_portal, char *name, +void ptlrpc_init_client(int req_portal, int rep_portal, const char *name, struct ptlrpc_client *); void ptlrpc_cleanup_client(struct obd_import *imp); -struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid); +struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid, + lnet_nid_t nid4refnet); int ptlrpc_queue_wait(struct ptlrpc_request *req); int ptlrpc_replay_req(struct ptlrpc_request *req); @@ -2100,10 +2136,8 @@ void ptlrpc_abort_set(struct ptlrpc_request_set *set); struct ptlrpc_request_set *ptlrpc_prep_set(void); struct ptlrpc_request_set *ptlrpc_prep_fcset(int max, set_producer_func func, void *arg); -int ptlrpc_set_add_cb(struct ptlrpc_request_set *set, - set_interpreter_func fn, void *data); int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set); -int ptlrpc_set_wait(struct ptlrpc_request_set *); +int ptlrpc_set_wait(const struct lu_env *env, struct ptlrpc_request_set *); void ptlrpc_mark_interrupted(struct ptlrpc_request *req); void ptlrpc_set_destroy(struct ptlrpc_request_set *); void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *); @@ -2145,30 +2179,9 @@ int ptlrpc_prep_bulk_frag(struct ptlrpc_bulk_desc *desc, void __ptlrpc_prep_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page, int pageoffset, int len, int pin); -static inline void ptlrpc_prep_bulk_page_pin(struct ptlrpc_bulk_desc *desc, - struct page *page, int pageoffset, - int len) -{ - __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 1); -} - -static inline void ptlrpc_prep_bulk_page_nopin(struct ptlrpc_bulk_desc *desc, - struct page *page, int pageoffset, - int len) -{ - __ptlrpc_prep_bulk_page(desc, page, pageoffset, len, 0); -} void ptlrpc_free_bulk(struct ptlrpc_bulk_desc *bulk); -static inline void ptlrpc_release_bulk_page_pin(struct ptlrpc_bulk_desc *desc) -{ - int i; - - for (i = 0; i < desc->bd_iov_count ; i++) - page_cache_release(BD_GET_KIOV(desc, i).kiov_page); -} - static inline void ptlrpc_release_bulk_noop(struct ptlrpc_bulk_desc *desc) { } @@ -2220,8 +2233,8 @@ struct ptlrpc_service_thr_conf { /* user specified threads number, it will be validated due to * other members of this structure. 
*/ unsigned int tc_nthrs_user; - /* set NUMA node affinity for service threads */ - unsigned int tc_cpu_affinity; + /* bind service threads to only CPUs in their associated CPT */ + unsigned int tc_cpu_bind; /* Tags for lu_context associated with service thread */ __u32 tc_ctx_tags; }; @@ -2230,6 +2243,8 @@ struct ptlrpc_service_cpt_conf { struct cfs_cpt_table *cc_cptable; /* string pattern to describe CPTs for a service */ char *cc_pattern; + /* whether or not to have per-CPT service partitions */ + bool cc_affinity; }; struct ptlrpc_service_conf { @@ -2254,26 +2269,26 @@ struct ptlrpc_service_conf { * * @{ */ -void ptlrpc_save_lock(struct ptlrpc_request *req, - struct lustre_handle *lock, int mode, int no_ack); +void ptlrpc_save_lock(struct ptlrpc_request *req, struct lustre_handle *lock, + int mode, bool no_ack, bool convert_lock); void ptlrpc_commit_replies(struct obd_export *exp); void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs); void ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs); int ptlrpc_hpreq_handler(struct ptlrpc_request *req); struct ptlrpc_service *ptlrpc_register_service( struct ptlrpc_service_conf *conf, - struct proc_dir_entry *proc_entry); + struct kset *parent, + struct dentry *debugfs_entry); void ptlrpc_stop_all_threads(struct ptlrpc_service *svc); int ptlrpc_start_threads(struct ptlrpc_service *svc); int ptlrpc_unregister_service(struct ptlrpc_service *service); -int liblustre_check_services(void *arg); -void ptlrpc_daemonize(char *name); int ptlrpc_service_health_check(struct ptlrpc_service *); void ptlrpc_server_drop_request(struct ptlrpc_request *req); void ptlrpc_request_change_export(struct ptlrpc_request *req, struct obd_export *export); -void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay); +void ptlrpc_update_export_timer(struct obd_export *exp, + time64_t extra_delay); int ptlrpc_hr_init(void); void ptlrpc_hr_fini(void); @@ -2288,6 +2303,7 @@ void ptlrpc_hr_fini(void); int ptlrpc_connect_import(struct obd_import *imp); int ptlrpc_init_import(struct obd_import *imp); int ptlrpc_disconnect_import(struct obd_import *imp, int noclose); +int ptlrpc_disconnect_and_idle_import(struct obd_import *imp); int ptlrpc_import_recovery_state_machine(struct obd_import *imp); void deuuidify(char *uuid, const char *prefix, char **uuid_start, int *uuid_len); @@ -2301,8 +2317,14 @@ int ptlrpc_reconnect_import(struct obd_import *imp); * * @{ */ -int ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout, - __u32 index); +#define PTLRPC_MAX_BUFCOUNT \ + (sizeof(((struct ptlrpc_request *)0)->rq_req_swab_mask) * 8) +#define MD_MAX_BUFLEN (MDS_REG_MAXREQSIZE > OUT_MAXREQSIZE ? \ + MDS_REG_MAXREQSIZE : OUT_MAXREQSIZE) +#define PTLRPC_MAX_BUFLEN (OST_IO_MAXREQSIZE > MD_MAX_BUFLEN ? 
\ + OST_IO_MAXREQSIZE : MD_MAX_BUFLEN) +bool ptlrpc_buf_need_swab(struct ptlrpc_request *req, const int inout, + __u32 index); void ptlrpc_buf_set_swabbed(struct ptlrpc_request *req, const int inout, __u32 index); int ptlrpc_unpack_rep_msg(struct ptlrpc_request *req, int len); @@ -2345,7 +2367,7 @@ __u32 lustre_msg_get_op_flags(struct lustre_msg *msg); void lustre_msg_add_op_flags(struct lustre_msg *msg, __u32 flags); struct lustre_handle *lustre_msg_get_handle(struct lustre_msg *msg); __u32 lustre_msg_get_type(struct lustre_msg *msg); -__u32 lustre_msg_get_version(struct lustre_msg *msg); +enum lustre_msg_version lustre_msg_get_version(struct lustre_msg *msg); void lustre_msg_add_version(struct lustre_msg *msg, __u32 version); __u32 lustre_msg_get_opc(struct lustre_msg *msg); __u64 lustre_msg_get_last_xid(struct lustre_msg *msg); @@ -2473,7 +2495,7 @@ ptlrpc_client_early(struct ptlrpc_request *req) static inline int ptlrpc_client_replied(struct ptlrpc_request *req) { - if (req->rq_reply_deadline > cfs_time_current_sec()) + if (req->rq_reply_deadline > ktime_get_real_seconds()) return 0; return req->rq_replied; } @@ -2482,7 +2504,7 @@ ptlrpc_client_replied(struct ptlrpc_request *req) static inline int ptlrpc_client_recv(struct ptlrpc_request *req) { - if (req->rq_reply_deadline > cfs_time_current_sec()) + if (req->rq_reply_deadline > ktime_get_real_seconds()) return 1; return req->rq_receiving_reply; } @@ -2493,11 +2515,11 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req) int rc; spin_lock(&req->rq_lock); - if (req->rq_reply_deadline > cfs_time_current_sec()) { + if (req->rq_reply_deadline > ktime_get_real_seconds()) { spin_unlock(&req->rq_lock); return 1; } - if (req->rq_req_deadline > cfs_time_current_sec()) { + if (req->rq_req_deadline > ktime_get_real_seconds()) { spin_unlock(&req->rq_lock); return 1; } @@ -2511,6 +2533,7 @@ ptlrpc_client_recv_or_unlink(struct ptlrpc_request *req) static inline void ptlrpc_client_wake_req(struct ptlrpc_request *req) { + smp_mb(); if (req->rq_set == NULL) wake_up(&req->rq_reply_waitq); else @@ -2562,11 +2585,8 @@ static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req) static inline int ptlrpc_send_limit_expired(struct ptlrpc_request *req) { if (req->rq_delay_limit != 0 && - cfs_time_before(cfs_time_add(req->rq_queued_time, - cfs_time_seconds(req->rq_delay_limit)), - cfs_time_current())) { + req->rq_queued_time + req->rq_delay_limit < ktime_get_seconds()) return 1; - } return 0; } @@ -2633,7 +2653,7 @@ struct timeout_item; typedef int (*timeout_cb_t)(struct timeout_item *, void *); int ptlrpc_pinger_add_import(struct obd_import *imp); int ptlrpc_pinger_del_import(struct obd_import *imp); -int ptlrpc_add_timeout_client(int time, enum timeout_event event, +int ptlrpc_add_timeout_client(time64_t time, enum timeout_event event, timeout_cb_t cb, void *data, struct list_head *obd_list); int ptlrpc_del_timeout_client(struct list_head *obd_list, @@ -2662,6 +2682,7 @@ void ptlrpcd_decref(void); * @{ */ const char* ll_opcode2str(__u32 opcode); +const int ll_str2opcode(const char *ops); #ifdef CONFIG_PROC_FS void ptlrpc_lprocfs_register_obd(struct obd_device *obd); void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd); @@ -2675,11 +2696,9 @@ static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {} /* ptlrpc/llog_server.c */ int llog_origin_handle_open(struct ptlrpc_request *req); -int llog_origin_handle_destroy(struct ptlrpc_request *req); int llog_origin_handle_prev_block(struct ptlrpc_request *req); int 
llog_origin_handle_next_block(struct ptlrpc_request *req); int llog_origin_handle_read_header(struct ptlrpc_request *req); -int llog_origin_handle_close(struct ptlrpc_request *req); /* ptlrpc/llog_client.c */ extern struct llog_operations llog_client_ops;
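
For reference, the bulk-size arithmetic behind the PTLRPC_BULK_OPS_BITS change above (4 -> 6) works out as follows, assuming the usual LNET_MTU_BITS value of 20 (1 MiB LNet MTU) and a 4 KiB PAGE_SIZE; this is a worked example, not part of the patch:

/*
 * PTLRPC_BULK_OPS_COUNT = 1U << 6          =    64 MDs per BRW RPC  (was 16)
 * PTLRPC_MAX_BRW_BITS   = 20 + 6           =    26
 * PTLRPC_MAX_BRW_SIZE   = 1U << 26         =    64 MiB per BRW RPC  (was 16 MiB)
 * PTLRPC_MAX_BRW_PAGES  = (1U << 26) >> 12 = 16384 pages
 * DT_DEF_BRW_SIZE       = 4 * (1U << 20)   =     4 MiB default BRW size
 */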
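
The ptlrpc_req_async_args() rework above replaces the bare typecast with a two-argument macro that adds a compile-time size check against rq_async_args. A minimal usage sketch; the argument structure and function below are illustrative and not taken from this patch:

struct my_async_args {
	struct obd_export	*aa_exp;
	int			 aa_flags;
};

static void my_req_pack_args(struct ptlrpc_request *req,
			     struct obd_export *exp)
{
	struct my_async_args *aa;

	/* BUILD_BUG_ON() inside the macro fails the build if the argument
	 * structure ever outgrows req->rq_async_args. */
	aa = ptlrpc_req_async_args(aa, req);
	aa->aa_exp = exp;
	aa->aa_flags = 0;
}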
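
The new ptlrpc_connection_is_local() helper and its comment describe why allocations made on behalf of a local peer should use GFP_NOFS (plus __GFP_HIGHMEM), while remote peers can use the more aggressive GFP_HIGHUSER. A sketch of how a caller might apply that policy; the wrapper name is an assumption and does not exist in the patch:

#include <linux/gfp.h>

/* Hypothetical helper: select allocation flags for server-side buffers
 * according to the locality policy described above. */
static inline gfp_t ptlrpc_gfp_mask_for_conn(struct ptlrpc_connection *conn)
{
	/* Local peer: avoid recursing into the filesystem during reclaim,
	 * but still allow highmem pages on 32-bit machines. */
	if (ptlrpc_connection_is_local(conn))
		return GFP_NOFS | __GFP_HIGHMEM;

	/* Remote peer: allow full memory-reclaim pressure on the server. */
	return GFP_HIGHUSER;
}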