X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Finclude%2Flustre_net.h;h=363a0b8b4e4e748d078f6249f8428997372577bc;hp=dd11208d9d20b9a21260e19c563fb9e3808c6a53;hb=62499626b81f83f9e2ceceaa11d9b33861581cb6;hpb=113303973ec9f8484eb2355a1a6ef3c4c7fd6a56 diff --git a/lustre/include/lustre_net.h b/lustre/include/lustre_net.h index dd11208..363a0b8 100644 --- a/lustre/include/lustre_net.h +++ b/lustre/include/lustre_net.h @@ -1,23 +1,37 @@ /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- * vim:expandtab:shiftwidth=8:tabstop=8: * - * Copyright (C) 2002, 2003 Cluster File Systems, Inc. + * GPL HEADER START * - * This file is part of Lustre, http://www.lustre.org. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * - * Lustre is free software; you can redistribute it and/or - * modify it under the terms of version 2 of the GNU General Public - * License as published by the Free Software Foundation. + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 only, + * as published by the Free Software Foundation. * - * Lustre is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License version 2 for more details (a copy is included + * in the LICENSE file that accompanied this code). * - * You should have received a copy of the GNU General Public License - * along with Lustre; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + * You should have received a copy of the GNU General Public License + * version 2 along with this program; If not, see + * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * + * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, + * CA 95054 USA or visit www.sun.com if you need additional information or + * have any questions. + * + * GPL HEADER END + */ +/* + * Copyright 2008 Sun Microsystems, Inc. All rights reserved + * Use is subject to license terms. + */ +/* + * This file is part of Lustre, http://www.lustre.org/ + * Lustre is a trademark of Sun Microsystems, Inc. */ #ifndef _LUSTRE_NET_H @@ -33,13 +47,16 @@ #error Unsupported operating system. #endif -#include +#include // #include #include #include #include +#include #include #include +#include +#include /* MD flags we _always_ use */ #define PTLRPC_MD_OPTIONS 0 @@ -85,23 +102,29 @@ * considered full when less than ?_MAXREQSIZE is left in them. 
*/ -#define LDLM_THREADS_AUTO_MIN min((int)(smp_num_cpus * smp_num_cpus * 2), 8) +#define LDLM_THREADS_AUTO_MIN \ + min((int)(num_online_cpus() * num_online_cpus() * 2), 8) #define LDLM_THREADS_AUTO_MAX (LDLM_THREADS_AUTO_MIN * 16) #define LDLM_BL_THREADS LDLM_THREADS_AUTO_MIN -#define LDLM_NBUFS (64 * smp_num_cpus) +#define LDLM_NBUFS (64 * num_online_cpus()) #define LDLM_BUFSIZE (8 * 1024) #define LDLM_MAXREQSIZE (5 * 1024) #define LDLM_MAXREPSIZE (1024) +#define MDT_MIN_THREADS 2UL +#define MDT_MAX_THREADS 512UL +#define MDT_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \ + num_physpages >> (25 - CFS_PAGE_SHIFT)), 2UL) +#define FLD_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \ + num_physpages >> (25 - CFS_PAGE_SHIFT)), 2UL) +#define SEQ_NUM_THREADS max(min_t(unsigned long, MDT_MAX_THREADS, \ + num_physpages >> (25 - CFS_PAGE_SHIFT)), 2UL) + /* Absolute limits */ -#define MDS_THREADS_MIN 2UL -#define MDS_THREADS_MAX 512UL -/* Dynamic thread limits */ -#define MDS_THREADS_AUTO_MIN max(MDS_THREADS_MIN, min(32UL, \ - smp_num_cpus * num_physpages >> (27 - CFS_PAGE_SHIFT))) -#define MDS_THREADS_AUTO_MAX min(MDS_THREADS_MAX, MDS_THREADS_AUTO_MIN * 4) +#define MDS_THREADS_MIN 2 +#define MDS_THREADS_MAX 512 #define MDS_THREADS_MIN_READPAGE 2 -#define MDS_NBUFS (64 * smp_num_cpus) +#define MDS_NBUFS (64 * num_online_cpus()) #define MDS_BUFSIZE (8 * 1024) /* Assume file name length = FNAME_MAX = 256 (true for ext3). * path name length = PATH_MAX = 4096 @@ -123,21 +146,30 @@ #define MDS_MAXREQSIZE (5 * 1024) #define MDS_MAXREPSIZE max(9 * 1024, 280 + LOV_MAX_STRIPE_COUNT * 56) -#define MGS_THREADS_AUTO_MAX 128UL -#define MGS_THREADS_AUTO_MIN MDS_THREADS_AUTO_MIN -#define MGS_NBUFS (64 * smp_num_cpus) +/* FLD_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + md_fld */ +#define FLD_MAXREQSIZE (160) + +/* FLD_MAXREPSIZE == lustre_msg + ptlrpc_body + md_fld */ +#define FLD_MAXREPSIZE (152) + +/* SEQ_MAXREQSIZE == lustre_msg + __u32 padding + ptlrpc_body + opc + lu_range + + * __u32 padding */ +#define SEQ_MAXREQSIZE (160) + +/* SEQ_MAXREPSIZE == lustre_msg + ptlrpc_body + lu_range */ +#define SEQ_MAXREPSIZE (152) + +#define MGS_THREADS_AUTO_MIN 2 +#define MGS_THREADS_AUTO_MAX 32 +#define MGS_NBUFS (64 * num_online_cpus()) #define MGS_BUFSIZE (8 * 1024) -#define MGS_MAXREQSIZE (8 * 1024) +#define MGS_MAXREQSIZE (7 * 1024) #define MGS_MAXREPSIZE (9 * 1024) /* Absolute limits */ -#define OSS_THREADS_MIN 2UL -#define OSS_THREADS_MAX 512UL -/* Dynamic thread limits */ -#define OSS_THREADS_AUTO_MIN max(OSS_THREADS_MIN, \ - smp_num_cpus * num_physpages >> (27 - CFS_PAGE_SHIFT)) -#define OSS_THREADS_AUTO_MAX min(OSS_THREADS_MAX, OSS_THREADS_AUTO_MIN * 4) -#define OST_NBUFS (64 * smp_num_cpus) +#define OSS_THREADS_MIN 2 +#define OSS_THREADS_MAX 512 +#define OST_NBUFS (64 * num_online_cpus()) #define OST_BUFSIZE (8 * 1024) /* OST_MAXREQSIZE ~= 4768 bytes = * lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote @@ -148,8 +180,11 @@ #define OST_MAXREQSIZE (5 * 1024) #define OST_MAXREPSIZE (9 * 1024) +/* Macro to hide a typecast. 
*/ +#define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args) + struct ptlrpc_connection { - struct list_head c_link; + struct hlist_node c_hash; lnet_nid_t c_self; lnet_process_id_t c_peer; struct obd_uuid c_remote_uuid; @@ -157,9 +192,9 @@ struct ptlrpc_connection { }; struct ptlrpc_client { - __u32 cli_request_portal; - __u32 cli_reply_portal; - char *cli_name; + __u32 cli_request_portal; + __u32 cli_reply_portal; + char *cli_name; }; /* state flags of requests */ @@ -187,6 +222,7 @@ struct ptlrpc_request_set { cfs_waitq_t set_waitq; cfs_waitq_t *set_wakeup_ptr; struct list_head set_requests; + struct list_head set_cblist; /* list of completion callbacks */ set_interpreter_func set_interpret; /* completion callback */ void *set_arg; /* completion context */ /* locked so that any old caller can communicate requests to @@ -195,6 +231,12 @@ struct ptlrpc_request_set { struct list_head set_new_requests; }; +struct ptlrpc_set_cbdata { + struct list_head psc_item; + set_interpreter_func psc_interpret; + void *psc_data; +}; + struct ptlrpc_bulk_desc; /* @@ -205,7 +247,7 @@ struct ptlrpc_cb_id { void *cbid_arg; /* additional arg */ }; -#define RS_MAX_LOCKS 4 +#define RS_MAX_LOCKS 8 #define RS_DEBUG 1 struct ptlrpc_reply_state { @@ -217,12 +259,14 @@ struct ptlrpc_reply_state { struct list_head rs_debug_list; #endif /* updates to following flag serialised by srv_request_lock */ - unsigned int rs_difficult:1; /* ACK/commit stuff */ - unsigned int rs_scheduled:1; /* being handled? */ - unsigned int rs_scheduled_ever:1;/* any schedule attempts? */ - unsigned int rs_handled:1; /* been handled yet? */ - unsigned int rs_on_net:1; /* reply_out_callback pending? */ - unsigned int rs_prealloc:1; /* rs from prealloc list */ + unsigned long rs_difficult:1; /* ACK/commit stuff */ + unsigned long rs_no_ack:1; /* no ACK, even for + difficult requests */ + unsigned long rs_scheduled:1; /* being handled? */ + unsigned long rs_scheduled_ever:1;/* any schedule attempts? */ + unsigned long rs_handled:1; /* been handled yet? */ + unsigned long rs_on_net:1; /* reply_out_callback pending? */ + unsigned long rs_prealloc:1; /* rs from prealloc list */ int rs_size; __u64 rs_transno; @@ -232,12 +276,16 @@ struct ptlrpc_reply_state { lnet_handle_md_t rs_md_h; atomic_t rs_refcount; + struct ptlrpc_svc_ctx *rs_svc_ctx; + struct lustre_msg *rs_repbuf; /* wrapper */ + int rs_repbuf_len; /* wrapper buf length */ + int rs_repdata_len; /* wrapper msg length */ + struct lustre_msg *rs_msg; /* reply message */ + /* locks awaiting client reply ACK */ int rs_nlocks; struct lustre_handle rs_locks[RS_MAX_LOCKS]; ldlm_mode_t rs_modes[RS_MAX_LOCKS]; - /* last member: variable sized reply message */ - struct lustre_msg *rs_msg; }; struct ptlrpc_thread; @@ -250,6 +298,11 @@ enum rq_phase { RQ_PHASE_COMPLETE = 0xebc0de04, }; +/** Type of request interpreter call-back */ +typedef int (*ptlrpc_interpterer_t)(const struct lu_env *env, + struct ptlrpc_request *req, + void *arg, int rc); + struct ptlrpc_request_pool { spinlock_t prp_lock; struct list_head prp_req_list; /* list of ptlrpc_request structs */ @@ -257,30 +310,45 @@ struct ptlrpc_request_pool { void (*prp_populate)(struct ptlrpc_request_pool *, int); }; +struct lu_context; +struct lu_env; + +/** + * Represents remote procedure call. 
+ */ struct ptlrpc_request { int rq_type; /* one of PTL_RPC_MSG_* */ struct list_head rq_list; + struct list_head rq_timed_list; /* server-side early replies */ struct list_head rq_history_list; /* server-side history */ __u64 rq_history_seq; /* history sequence # */ int rq_status; spinlock_t rq_lock; - /* client-side flags */ - unsigned int rq_intr:1, rq_replied:1, rq_err:1, + /* client-side flags are serialized by rq_lock */ + unsigned long rq_intr:1, rq_replied:1, rq_err:1, rq_timedout:1, rq_resend:1, rq_restart:1, /* * when ->rq_replay is set, request is kept by the client even * after server commits corresponding transaction. This is * used for operations that require sequence of multiple * requests to be replayed. The only example currently is file - * open/close. When last request in such a sequence is - * committed, ->rq_replay is cleared on all requests in the + * open/close/dw/setattr. When last request in such a sequence + * is committed, ->rq_replay is cleared on all requests in the * sequence. */ rq_replay:1, + /* this is the last request in the sequence. */ + rq_sequence:1, rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1, - rq_no_delay:1, rq_net_err:1; + rq_no_delay:1, rq_net_err:1, rq_wait_ctx:1, + rq_early:1, rq_must_unlink:1, + /* server-side flags */ + rq_packed_final:1, /* packed final reply */ + rq_sent_final:1; /* stop sending early replies */ + enum rq_phase rq_phase; /* one of RQ_PHASE_* */ - atomic_t rq_refcount; /* client-side refcount for SENT race */ + atomic_t rq_refcount; /* client-side refcount for SENT race, + server-side refcounf for multiple replies */ struct ptlrpc_thread *rq_svc_thread; /* initial thread servicing req */ @@ -292,12 +360,56 @@ struct ptlrpc_request { int rq_reqlen; struct lustre_msg *rq_reqmsg; - int rq_timeout; /* time to wait for reply (seconds) */ int rq_replen; struct lustre_msg *rq_repmsg; __u64 rq_transno; __u64 rq_xid; struct list_head rq_replay_list; + struct list_head rq_mod_list; + + struct ptlrpc_cli_ctx *rq_cli_ctx; /* client's half ctx */ + struct ptlrpc_svc_ctx *rq_svc_ctx; /* server's half ctx */ + struct list_head rq_ctx_chain; /* link to waited ctx */ + + struct sptlrpc_flavor rq_flvr; /* client & server */ + enum lustre_sec_part rq_sp_from; + + unsigned long /* client/server security flags */ + rq_ctx_init:1, /* context initiation */ + rq_ctx_fini:1, /* context destroy */ + rq_bulk_read:1, /* request bulk read */ + rq_bulk_write:1, /* request bulk write */ + /* server authentication flags */ + rq_auth_gss:1, /* authenticated by gss */ + rq_auth_remote:1, /* authed as remote user */ + rq_auth_usr_root:1, /* authed as root */ + rq_auth_usr_mdt:1, /* authed as mdt */ + /* security tfm flags */ + rq_pack_udesc:1, + rq_pack_bulk:1, + /* doesn't expect reply FIXME */ + rq_no_reply:1; + + uid_t rq_auth_uid; /* authed uid */ + uid_t rq_auth_mapped_uid; /* authed uid mapped to */ + + /* (server side), pointed directly into req buffer */ + struct ptlrpc_user_desc *rq_user_desc; + + /* early replies go to offset 0, regular replies go after that */ + unsigned int rq_reply_off; + + /* various buffer pointers */ + struct lustre_msg *rq_reqbuf; /* req wrapper */ + int rq_reqbuf_len; /* req wrapper buf len */ + int rq_reqdata_len; /* req wrapper msg len */ + char *rq_repbuf; /* rep buffer */ + int rq_repbuf_len; /* rep buffer len */ + struct lustre_msg *rq_repdata; /* rep wrapper msg */ + int rq_repdata_len; /* rep wrapper msg len */ + struct lustre_msg *rq_clrbuf; /* only in priv mode */ + int rq_clrbuf_len; /* only in priv mode */ + 
int rq_clrdata_len; /* only in priv mode */ __u32 rq_req_swab_mask; __u32 rq_rep_swab_mask; @@ -305,6 +417,8 @@ struct ptlrpc_request { int rq_import_generation; enum lustre_imp_state rq_send_state; + int rq_early_count; /* how many early replies (for stats) */ + /* client+server request */ lnet_handle_md_t rq_req_md_h; struct ptlrpc_cb_id rq_req_cbid; @@ -331,19 +445,39 @@ struct ptlrpc_request { void (*rq_commit_cb)(struct ptlrpc_request *); void *rq_cb_data; - struct ptlrpc_bulk_desc *rq_bulk; /* client side bulk */ - time_t rq_sent; /* when request sent, seconds */ + struct ptlrpc_bulk_desc *rq_bulk;/* client side bulk */ + + /* client outgoing req */ + time_t rq_sent; /* when request/reply sent (secs), or + * time when request should be sent */ + + volatile time_t rq_deadline; /* when request must finish. volatile + so that servers' early reply updates to the deadline aren't + kept in per-cpu cache */ + int rq_timeout; /* service time estimate (secs) */ /* Multi-rpc bits */ struct list_head rq_set_chain; struct ptlrpc_request_set *rq_set; - void *rq_interpret_reply; /* Async completion handler */ + /** Async completion handler */ + ptlrpc_interpterer_t rq_interpret_reply; union ptlrpc_async_args rq_async_args; /* Async completion context */ - void *rq_ptlrpcd_data; struct ptlrpc_request_pool *rq_pool; /* Pool if request from preallocated list */ + struct lu_context rq_session; + + /* request format */ + struct req_capsule rq_pill; }; +static inline void ptlrpc_close_replay_seq(struct ptlrpc_request *req) +{ + spin_lock(&req->rq_lock); + req->rq_replay = 0; + req->rq_sequence = 1; + spin_unlock(&req->rq_lock); +} + static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index) { LASSERT(index < sizeof(req->rq_req_swab_mask) * 8); @@ -359,7 +493,7 @@ static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index) } static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index) -{ +{ LASSERT(index < sizeof(req->rq_req_swab_mask) * 8); return req->rq_req_swab_mask & (1 << index); } @@ -370,26 +504,8 @@ static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index) return req->rq_rep_swab_mask & (1 << index); } -#define SWAB_PARANOIA 1 - -#if SWAB_PARANOIA -/* unpacking: assert idx not unpacked already */ -#define LASSERT_REQSWAB(rq, idx) lustre_set_req_swabbed(rq, idx) -#define LASSERT_REPSWAB(rq, idx) lustre_set_rep_swabbed(rq, idx) - -/* just looking: assert idx already unpacked */ -#define LASSERT_REQSWABBED(rq, idx) LASSERT(lustre_req_swabbed(rq, idx)) -#define LASSERT_REPSWABBED(rq, idx) LASSERT(lustre_rep_swabbed(rq, idx)) - -#else -#define LASSERT_REQSWAB(rq, idx) -#define LASSERT_REPSWAB(rq, idx) -#define LASSERT_REQSWABBED(rq, idx) -#define LASSERT_REPSWABBED(rq, idx) -#endif - static inline const char * -ptlrpc_rqphase2str(struct ptlrpc_request *req) +ptlrpc_rqphase2str(const struct ptlrpc_request *req) { switch (req->rq_phase) { case RQ_PHASE_NEW: @@ -417,43 +533,45 @@ ptlrpc_rqphase2str(struct ptlrpc_request *req) FLAG(req->rq_timedout, "X") /* eXpired */, FLAG(req->rq_resend, "S"), \ FLAG(req->rq_restart, "T"), FLAG(req->rq_replay, "P"), \ FLAG(req->rq_no_resend, "N"), \ - FLAG(req->rq_waiting, "W") + FLAG(req->rq_waiting, "W"), \ + FLAG(req->rq_wait_ctx, "C") -#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s" +#define REQ_FLAGS_FMT "%s:%s%s%s%s%s%s%s%s%s%s" void _debug_req(struct ptlrpc_request *req, __u32 mask, - struct libcfs_debug_msg_data *data, const char *fmt, ...); - -#define debug_req(cdls, level, req, file, 
func, line, fmt, a...) \ -do { \ - CHECK_STACK(); \ - \ - if (((level) & D_CANTMASK) != 0 || \ - ((libcfs_debug & (level)) != 0 && \ - (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) { \ - static struct libcfs_debug_msg_data _req_dbg_data = \ - DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM, file, func, line); \ - _debug_req((req), (level), &_req_dbg_data, fmt, ##a); \ - } \ + struct libcfs_debug_msg_data *data, const char *fmt, ...) + __attribute__ ((format (printf, 4, 5))); + +#define debug_req(cdls, level, req, file, func, line, fmt, a...) \ +do { \ + CHECK_STACK(); \ + \ + if (((level) & D_CANTMASK) != 0 || \ + ((libcfs_debug & (level)) != 0 && \ + (libcfs_subsystem_debug & DEBUG_SUBSYSTEM) != 0)) { \ + static struct libcfs_debug_msg_data _req_dbg_data = \ + DEBUG_MSG_DATA_INIT(cdls, DEBUG_SUBSYSTEM, file, func, line); \ + _debug_req((req), (level), &_req_dbg_data, fmt, ##a); \ + } \ } while(0) /* for most callers (level is a constant) this is resolved at compile time */ -#define DEBUG_REQ(level, req, fmt, args...) \ -do { \ - if ((level) & (D_ERROR | D_WARNING)) { \ - static cfs_debug_limit_state_t cdls; \ - debug_req(&cdls, level, req, __FILE__, __func__, __LINE__, \ - "@@@ "fmt, ## args); \ - } else \ - debug_req(NULL, level, req, __FILE__, __func__, __LINE__, \ - "@@@ "fmt, ## args); \ +#define DEBUG_REQ(level, req, fmt, args...) \ +do { \ + if ((level) & (D_ERROR | D_WARNING)) { \ + static cfs_debug_limit_state_t cdls; \ + debug_req(&cdls, level, req, __FILE__, __func__, __LINE__, \ + "@@@ "fmt" ", ## args); \ + } else \ + debug_req(NULL, level, req, __FILE__, __func__, __LINE__, \ + "@@@ "fmt" ", ## args); \ } while (0) struct ptlrpc_bulk_page { struct list_head bp_link; - int bp_buflen; - int bp_pageoffset; /* offset within a page */ - struct page *bp_page; + int bp_buflen; + int bp_pageoffset; /* offset within a page */ + struct page *bp_page; }; #define BULK_GET_SOURCE 0 @@ -462,10 +580,10 @@ struct ptlrpc_bulk_page { #define BULK_PUT_SOURCE 3 struct ptlrpc_bulk_desc { - unsigned int bd_success:1; /* completed successfully */ - unsigned int bd_network_rw:1; /* accessible to the network */ - unsigned int bd_type:2; /* {put,get}{source,sink} */ - unsigned int bd_registered:1; /* client side */ + unsigned long bd_success:1; /* completed successfully */ + unsigned long bd_network_rw:1; /* accessible to the network */ + unsigned long bd_type:2; /* {put,get}{source,sink} */ + unsigned long bd_registered:1; /* client side */ spinlock_t bd_lock; /* serialise with callback */ int bd_import_generation; struct obd_export *bd_export; @@ -481,24 +599,27 @@ struct ptlrpc_bulk_desc { __u64 bd_last_xid; struct ptlrpc_cb_id bd_cbid; /* network callback info */ - lnet_handle_md_t bd_md_h; /* associated MD */ + lnet_handle_md_t bd_md_h; /* associated MD */ + lnet_nid_t bd_sender; /* stash event::sender */ + cfs_page_t **bd_enc_pages; #if defined(__KERNEL__) - lnet_kiov_t bd_iov[0]; + lnet_kiov_t bd_iov[0]; #else - lnet_md_iovec_t bd_iov[0]; + lnet_md_iovec_t bd_iov[0]; #endif }; struct ptlrpc_thread { - struct list_head t_link; /* active threads for service, from svc->srv_threads */ + struct list_head t_link; /* active threads in svc->srv_threads */ void *t_data; /* thread-private data (preallocated memory) */ __u32 t_flags; unsigned int t_id; /* service thread index, from ptlrpc_start_threads */ cfs_waitq_t t_ctl_waitq; + struct lu_env *t_env; }; struct ptlrpc_request_buffer_desc { @@ -529,13 +650,23 @@ struct ptlrpc_service { int srv_n_difficult_replies; /* # 'difficult' replies */ int 
srv_n_active_reqs; /* # reqs being served */ cfs_duration_t srv_rqbd_timeout; /* timeout before re-posting reqs, in tick */ - int srv_watchdog_timeout; /* soft watchdog timeout, in ms */ + int srv_watchdog_factor; /* soft watchdog timeout mutiplier */ unsigned srv_cpu_affinity:1; /* bind threads to CPUs */ + unsigned srv_at_check:1; /* check early replies */ + unsigned srv_is_stopping:1; /* under unregister_service */ + cfs_time_t srv_at_checktime; /* debug */ __u32 srv_req_portal; __u32 srv_rep_portal; - int srv_n_queued_reqs; /* # reqs waiting to be served */ + /* AT stuff */ + struct adaptive_timeout srv_at_estimate;/* estimated rpc service time */ + spinlock_t srv_at_lock; + struct list_head srv_at_list; /* reqs waiting for replies */ + cfs_timer_t srv_at_timer; /* early reply timer */ + + int srv_n_queued_reqs; /* # reqs in either of the queues below */ + struct list_head srv_req_in_queue; /* incoming reqs */ struct list_head srv_request_queue; /* reqs waiting for service */ struct list_head srv_request_history; /* request history */ @@ -562,8 +693,8 @@ struct ptlrpc_service { struct list_head srv_threads; /* service thread list */ svc_handler_t srv_handler; - char *srv_name; /* only statically allocated strings here; we don't clean them */ - char *srv_thread_name; /* only statically allocated strings here; we don't clean them */ + char *srv_name; /* only statically allocated strings here; we don't clean them */ + char *srv_thread_name; /* only statically allocated strings here; we don't clean them */ spinlock_t srv_lock; @@ -574,7 +705,12 @@ struct ptlrpc_service { struct list_head srv_free_rs_list; /* waitq to run, when adding stuff to srv_free_rs_list */ cfs_waitq_t srv_free_rs_waitq; - + + /* + * Tags for lu_context associated with this thread, see struct + * lu_context. + */ + __u32 srv_ctx_tags; /* * if non-NULL called during thread creation (ptlrpc_start_thread()) * to initialize service specific per-thread state. @@ -589,6 +725,73 @@ struct ptlrpc_service { //struct ptlrpc_srv_ni srv_interfaces[0]; }; +struct ptlrpcd_ctl { + /** + * Ptlrpc thread control flags (LIOD_START, LIOD_STOP, LIOD_STOP_FORCE) + */ + unsigned long pc_flags; + /** + * Thread lock protecting structure fields. + */ + spinlock_t pc_lock; + /** + * Start completion. + */ + struct completion pc_starting; + /** + * Stop completion. + */ + struct completion pc_finishing; + /** + * Thread requests set. + */ + struct ptlrpc_request_set *pc_set; + /** + * Thread name used in cfs_daemonize() + */ + char pc_name[16]; + /** + * Environment for request interpreters to run in. + */ + struct lu_env pc_env; +#ifndef __KERNEL__ + /** + * Async rpcs flag to make sure that ptlrpcd_check() is called only + * once. + */ + int pc_recurred; + /** + * Currently not used. + */ + void *pc_callback; + /** + * User-space async rpcs callback. + */ + void *pc_wait_callback; + /** + * User-space check idle rpcs callback. + */ + void *pc_idle_callback; +#endif +}; + +/* Bits for pc_flags */ +enum ptlrpcd_ctl_flags { + /** + * Ptlrpc thread start flag. + */ + LIOD_START = 1 << 0, + /** + * Ptlrpc thread stop flag. + */ + LIOD_STOP = 1 << 1, + /** + * Ptlrpc thread stop force flag. This will cause also + * aborting any inflight rpcs handled by thread. 
+ */ + LIOD_STOP_FORCE = 1 << 2 +}; + /* ptlrpc/events.c */ extern lnet_handle_eq_t ptlrpc_eq_h; extern int ptlrpc_uuid_to_peer(struct obd_uuid *uuid, @@ -601,14 +804,13 @@ extern void reply_out_callback(lnet_event_t *ev); extern void server_bulk_callback (lnet_event_t *ev); /* ptlrpc/connection.c */ -void ptlrpc_dump_connections(void); -void ptlrpc_readdress_connection(struct ptlrpc_connection *, struct obd_uuid *); -struct ptlrpc_connection *ptlrpc_get_connection(lnet_process_id_t peer, - lnet_nid_t self, struct obd_uuid *uuid); -int ptlrpc_put_connection(struct ptlrpc_connection *c); +struct ptlrpc_connection *ptlrpc_connection_get(lnet_process_id_t peer, + lnet_nid_t self, + struct obd_uuid *uuid); +int ptlrpc_connection_put(struct ptlrpc_connection *c); struct ptlrpc_connection *ptlrpc_connection_addref(struct ptlrpc_connection *); -void ptlrpc_init_connection(void); -void ptlrpc_cleanup_connection(void); +int ptlrpc_connection_init(void); +void ptlrpc_connection_fini(void); extern lnet_pid_t ptl_get_pid(void); /* ptlrpc/niobuf.c */ @@ -627,10 +829,14 @@ static inline int ptlrpc_bulk_active (struct ptlrpc_bulk_desc *desc) return (rc); } -int ptlrpc_send_reply(struct ptlrpc_request *req, int); +#define PTLRPC_REPLY_MAYBE_DIFFICULT 0x01 +#define PTLRPC_REPLY_EARLY 0x02 +int ptlrpc_send_reply(struct ptlrpc_request *req, int flags); int ptlrpc_reply(struct ptlrpc_request *req); +int ptlrpc_send_error(struct ptlrpc_request *req, int difficult); int ptlrpc_error(struct ptlrpc_request *req); void ptlrpc_resend_req(struct ptlrpc_request *request); +int ptlrpc_at_get_net_latency(struct ptlrpc_request *req); int ptl_send_rpc(struct ptlrpc_request *request, int noreply); int ptlrpc_register_rqbd (struct ptlrpc_request_buffer_desc *rqbd); @@ -641,23 +847,12 @@ void ptlrpc_cleanup_client(struct obd_import *imp); struct ptlrpc_connection *ptlrpc_uuid_to_connection(struct obd_uuid *uuid); static inline int -ptlrpc_client_receiving_reply (struct ptlrpc_request *req) -{ - int rc; - - spin_lock(&req->rq_lock); - rc = req->rq_receiving_reply; - spin_unlock(&req->rq_lock); - return (rc); -} - -static inline int -ptlrpc_client_replied (struct ptlrpc_request *req) +ptlrpc_client_recv_or_unlink (struct ptlrpc_request *req) { int rc; spin_lock(&req->rq_lock); - rc = req->rq_replied; + rc = req->rq_receiving_reply || req->rq_must_unlink; spin_unlock(&req->rq_lock); return (rc); } @@ -676,31 +871,51 @@ int ptlrpc_replay_req(struct ptlrpc_request *req); void ptlrpc_unregister_reply(struct ptlrpc_request *req); void ptlrpc_restart_req(struct ptlrpc_request *req); void ptlrpc_abort_inflight(struct obd_import *imp); +void ptlrpc_abort_set(struct ptlrpc_request_set *set); struct ptlrpc_request_set *ptlrpc_prep_set(void); +int ptlrpc_set_add_cb(struct ptlrpc_request_set *set, + set_interpreter_func fn, void *data); int ptlrpc_set_next_timeout(struct ptlrpc_request_set *); -int ptlrpc_check_set(struct ptlrpc_request_set *set); +int ptlrpc_check_set(const struct lu_env *env, struct ptlrpc_request_set *set); int ptlrpc_set_wait(struct ptlrpc_request_set *); int ptlrpc_expired_set(void *data); void ptlrpc_interrupted_set(void *data); void ptlrpc_mark_interrupted(struct ptlrpc_request *req); void ptlrpc_set_destroy(struct ptlrpc_request_set *); void ptlrpc_set_add_req(struct ptlrpc_request_set *, struct ptlrpc_request *); -void ptlrpc_set_add_new_req(struct ptlrpc_request_set *, - struct ptlrpc_request *); +int ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, + struct ptlrpc_request *req); void ptlrpc_free_rq_pool(struct 
ptlrpc_request_pool *pool); void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq); -struct ptlrpc_request_pool *ptlrpc_init_rq_pool(int, int, - void (*populate_pool)(struct ptlrpc_request_pool *, int)); + +struct ptlrpc_request_pool * +ptlrpc_init_rq_pool(int, int, + void (*populate_pool)(struct ptlrpc_request_pool *, int)); + +void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req); +struct ptlrpc_request *ptlrpc_request_alloc(struct obd_import *imp, + const struct req_format *format); +struct ptlrpc_request *ptlrpc_request_alloc_pool(struct obd_import *imp, + struct ptlrpc_request_pool *, + const struct req_format *format); +void ptlrpc_request_free(struct ptlrpc_request *request); +int ptlrpc_request_pack(struct ptlrpc_request *request, + __u32 version, int opcode); +struct ptlrpc_request *ptlrpc_request_alloc_pack(struct obd_import *imp, + const struct req_format *format, + __u32 version, int opcode); +int ptlrpc_request_bufs_pack(struct ptlrpc_request *request, + __u32 version, int opcode, char **bufs, + struct ptlrpc_cli_ctx *ctx); struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version, - int opcode, int count, int *lengths, + int opcode, int count, __u32 *lengths, char **bufs); struct ptlrpc_request *ptlrpc_prep_req_pool(struct obd_import *imp, __u32 version, int opcode, - int count, int *lengths, char **bufs, + int count, __u32 *lengths, char **bufs, struct ptlrpc_request_pool *pool); -void ptlrpc_free_req(struct ptlrpc_request *request); void ptlrpc_req_finished(struct ptlrpc_request *request); void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request); struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req); @@ -717,20 +932,39 @@ __u64 ptlrpc_next_xid(void); __u64 ptlrpc_sample_next_xid(void); __u64 ptlrpc_req_xid(struct ptlrpc_request *request); +struct ptlrpc_service_conf { + int psc_nbufs; + int psc_bufsize; + int psc_max_req_size; + int psc_max_reply_size; + int psc_req_portal; + int psc_rep_portal; + int psc_watchdog_factor; + int psc_min_threads; + int psc_max_threads; + __u32 psc_ctx_tags; +}; + /* ptlrpc/service.c */ void ptlrpc_save_lock (struct ptlrpc_request *req, - struct lustre_handle *lock, int mode); + struct lustre_handle *lock, int mode, int no_ack); void ptlrpc_commit_replies (struct obd_device *obd); void ptlrpc_schedule_difficult_reply (struct ptlrpc_reply_state *rs); +struct ptlrpc_service *ptlrpc_init_svc_conf(struct ptlrpc_service_conf *c, + svc_handler_t h, char *name, + struct proc_dir_entry *proc_entry, + svcreq_printfn_t prntfn, + char *threadname); + struct ptlrpc_service *ptlrpc_init_svc(int nbufs, int bufsize, int max_req_size, int max_reply_size, int req_portal, int rep_portal, - int watchdog_timeout, /* in ms */ + int watchdog_factor, svc_handler_t, char *name, cfs_proc_dir_entry_t *proc_entry, - svcreq_printfn_t, + svcreq_printfn_t, int min_threads, int max_threads, - char *threadname); + char *threadname, __u32 ctx_tags); void ptlrpc_stop_all_threads(struct ptlrpc_service *svc); int ptlrpc_start_threads(struct obd_device *dev, struct ptlrpc_service *svc); @@ -755,17 +989,30 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose); int ptlrpc_import_recovery_state_machine(struct obd_import *imp); /* ptlrpc/pack_generic.c */ +int ptlrpc_reconnect_import(struct obd_import *imp); int lustre_msg_swabbed(struct lustre_msg *msg); int lustre_msg_check_version(struct lustre_msg *msg, __u32 version); +void lustre_init_msg_v2(struct lustre_msg_v2 *msg, int count, __u32 
*lens, + char **bufs); int lustre_pack_request(struct ptlrpc_request *, __u32 magic, int count, - int *lens, char **bufs); -int lustre_pack_reply(struct ptlrpc_request *, int count, int *lens, + __u32 *lens, char **bufs); +int lustre_pack_reply(struct ptlrpc_request *, int count, __u32 *lens, char **bufs); -void lustre_shrink_reply(struct ptlrpc_request *req, int segment, - unsigned int newlen, int move_data); +int lustre_pack_reply_v2(struct ptlrpc_request *req, int count, + __u32 *lens, char **bufs, int flags); +#define LPRFL_EARLY_REPLY 1 +int lustre_pack_reply_flags(struct ptlrpc_request *, int count, __u32 *lens, + char **bufs, int flags); +int lustre_shrink_msg(struct lustre_msg *msg, int segment, + unsigned int newlen, int move_data); void lustre_free_reply_state(struct ptlrpc_reply_state *rs); -int lustre_msg_size(__u32 magic, int count, int *lengths); +int lustre_msg_hdr_size(__u32 magic, int count); +int lustre_msg_size(__u32 magic, int count, __u32 *lengths); +int lustre_msg_size_v2(int count, __u32 *lengths); +int lustre_packed_msg_size(struct lustre_msg *msg); +int lustre_msg_early_size(void); int lustre_unpack_msg(struct lustre_msg *m, int len); +void *lustre_msg_buf_v2(struct lustre_msg_v2 *m, int n, int min_size); void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen); int lustre_msg_buflen(struct lustre_msg *m, int n); void lustre_msg_set_buflen(struct lustre_msg *m, int n, int len); @@ -776,6 +1023,8 @@ void *lustre_swab_reqbuf(struct ptlrpc_request *req, int n, int minlen, void *swabber); void *lustre_swab_repbuf(struct ptlrpc_request *req, int n, int minlen, void *swabber); +__u32 lustre_msghdr_get_flags(struct lustre_msg *msg); +void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags); __u32 lustre_msg_get_flags(struct lustre_msg *msg); void lustre_msg_add_flags(struct lustre_msg *msg, int flags); void lustre_msg_set_flags(struct lustre_msg *msg, int flags); @@ -791,9 +1040,18 @@ __u32 lustre_msg_get_opc(struct lustre_msg *msg); __u64 lustre_msg_get_last_xid(struct lustre_msg *msg); __u64 lustre_msg_get_last_committed(struct lustre_msg *msg); __u64 lustre_msg_get_transno(struct lustre_msg *msg); -__u32 lustre_msg_get_status(struct lustre_msg *msg); +__u64 lustre_msg_get_slv(struct lustre_msg *msg); +__u32 lustre_msg_get_limit(struct lustre_msg *msg); +void lustre_msg_set_slv(struct lustre_msg *msg, __u64 slv); +void lustre_msg_set_limit(struct lustre_msg *msg, __u64 limit); +int lustre_msg_get_status(struct lustre_msg *msg); __u32 lustre_msg_get_conn_cnt(struct lustre_msg *msg); +int lustre_msg_is_v1(struct lustre_msg *msg); __u32 lustre_msg_get_magic(struct lustre_msg *msg); +__u32 lustre_msg_get_timeout(struct lustre_msg *msg); +__u32 lustre_msg_get_service_time(struct lustre_msg *msg); +__u32 lustre_msg_get_cksum(struct lustre_msg *msg); +__u32 lustre_msg_calc_cksum(struct lustre_msg *msg); void lustre_msg_set_handle(struct lustre_msg *msg,struct lustre_handle *handle); void lustre_msg_set_type(struct lustre_msg *msg, __u32 type); void lustre_msg_set_opc(struct lustre_msg *msg, __u32 opc); @@ -802,6 +1060,21 @@ void lustre_msg_set_last_committed(struct lustre_msg *msg,__u64 last_committed); void lustre_msg_set_transno(struct lustre_msg *msg, __u64 transno); void lustre_msg_set_status(struct lustre_msg *msg, __u32 status); void lustre_msg_set_conn_cnt(struct lustre_msg *msg, __u32 conn_cnt); +void ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, __u32 *sizes); +void ptlrpc_request_set_replen(struct ptlrpc_request *req); +void 
lustre_msg_set_timeout(struct lustre_msg *msg, __u32 timeout); +void lustre_msg_set_service_time(struct lustre_msg *msg, __u32 service_time); +void lustre_msg_set_cksum(struct lustre_msg *msg, __u32 cksum); + +static inline void +lustre_shrink_reply(struct ptlrpc_request *req, int segment, + unsigned int newlen, int move_data) +{ + LASSERT(req->rq_reply_state); + LASSERT(req->rq_repmsg); + req->rq_replen = lustre_shrink_msg(req->rq_repmsg, segment, + newlen, move_data); +} static inline void ptlrpc_rs_addref(struct ptlrpc_reply_state *rs) @@ -818,6 +1091,16 @@ ptlrpc_rs_decref(struct ptlrpc_reply_state *rs) lustre_free_reply_state(rs); } +/* Should only be called once per req */ +static inline void ptlrpc_req_drop_rs(struct ptlrpc_request *req) +{ + if (req->rq_reply_state == NULL) + return; /* shouldn't occur */ + ptlrpc_rs_decref(req->rq_reply_state); + req->rq_reply_state = NULL; + req->rq_repmsg = NULL; +} + static inline __u32 lustre_request_magic(struct ptlrpc_request *req) { return lustre_msg_get_magic(req->rq_reqmsg); @@ -826,9 +1109,6 @@ static inline __u32 lustre_request_magic(struct ptlrpc_request *req) static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req) { switch (req->rq_reqmsg->lm_magic) { - case LUSTRE_MSG_MAGIC_V1: - CERROR("function not supported for lustre_msg V1!\n"); - return -ENOTSUPP; case LUSTRE_MSG_MAGIC_V2: return req->rq_reqmsg->lm_repsize; default: @@ -838,19 +1118,13 @@ static inline int ptlrpc_req_get_repsize(struct ptlrpc_request *req) } } -static inline void -ptlrpc_req_set_repsize(struct ptlrpc_request *req, int count, int *lens) -{ - req->rq_replen = lustre_msg_size(req->rq_reqmsg->lm_magic, count, lens); - if (req->rq_reqmsg->lm_magic == LUSTRE_MSG_MAGIC_V2) - req->rq_reqmsg->lm_repsize = req->rq_replen; -} - /* ldlm/ldlm_lib.c */ -int client_obd_setup(struct obd_device *obddev, obd_count len, void *buf); -int client_obd_cleanup(struct obd_device * obddev); -int client_connect_import(struct lustre_handle *conn, struct obd_device *obd, - struct obd_uuid *cluuid, struct obd_connect_data *); +int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg); +int client_obd_cleanup(struct obd_device *obddev); +int client_connect_import(const struct lu_env *env, + struct lustre_handle *conn, struct obd_device *obd, + struct obd_uuid *cluuid, struct obd_connect_data *, + void *localdata); int client_disconnect_export(struct obd_export *exp); int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid, int priority); @@ -860,6 +1134,7 @@ int import_set_conn_priority(struct obd_import *imp, struct obd_uuid *uuid); /* ptlrpc/pinger.c */ int ptlrpc_pinger_add_import(struct obd_import *imp); int ptlrpc_pinger_del_import(struct obd_import *imp); +cfs_time_t ptlrpc_suspend_wakeup_time(void); #ifdef __KERNEL__ void ping_evictor_start(void); void ping_evictor_stop(void); @@ -867,23 +1142,26 @@ void ping_evictor_stop(void); #define ping_evictor_start() do {} while (0) #define ping_evictor_stop() do {} while (0) #endif +int ptlrpc_check_and_wait_suspend(struct ptlrpc_request *req); /* ptlrpc/ptlrpcd.c */ +int ptlrpcd_start(char *name, struct ptlrpcd_ctl *pc); +void ptlrpcd_stop(struct ptlrpcd_ctl *pc, int force); void ptlrpcd_wake(struct ptlrpc_request *req); void ptlrpcd_add_req(struct ptlrpc_request *req); int ptlrpcd_addref(void); void ptlrpcd_decref(void); /* ptlrpc/lproc_ptlrpc.c */ +const char* ll_opcode2str(__u32 opcode); #ifdef LPROCFS void ptlrpc_lprocfs_register_obd(struct obd_device *obd); void 
ptlrpc_lprocfs_unregister_obd(struct obd_device *obd); -void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int opc, int bytes); +void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes); #else static inline void ptlrpc_lprocfs_register_obd(struct obd_device *obd) {} static inline void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) {} -static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int opc, - int bytes) {} +static inline void ptlrpc_lprocfs_brw(struct ptlrpc_request *req, int bytes) {} #endif /* ptlrpc/llog_server.c */
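
Editorial note (not part of the patch): the hunks above introduce a newer client-side request path — ptlrpc_request_alloc_pack(), ptlrpc_request_set_replen() and the lu_env-aware ptlrpc_interpterer_t callback — alongside the older ptlrpc_prep_req() interface. The following is a minimal sketch of how a caller might use that path, assuming the usual surrounding Lustre headers; RQF_OBD_PING, OBD_PING and LUSTRE_OBD_VERSION come from other headers (req_layout / lustre_idl) and are used here purely for illustration.

#include <lustre_net.h>

/* Matches the ptlrpc_interpterer_t typedef added above; invoked from
 * ptlrpc_check_set() once the reply arrives or the RPC fails. */
static int sketch_ping_interpret(const struct lu_env *env,
                                 struct ptlrpc_request *req,
                                 void *arg, int rc)
{
        CDEBUG(D_RPCTRACE, "ping completed: rc = %d\n", rc);
        return rc;
}

static int sketch_send_ping(struct obd_import *imp)
{
        struct ptlrpc_request *req;

        /* Allocate from the import and pack the request buffers according
         * to the req_format in one step (the newer alternative to
         * ptlrpc_prep_req()). */
        req = ptlrpc_request_alloc_pack(imp, &RQF_OBD_PING,
                                        LUSTRE_OBD_VERSION, OBD_PING);
        if (req == NULL)
                return -ENOMEM;

        /* Size the reply buffer from the request capsule instead of
         * passing explicit lengths to ptlrpc_req_set_repsize(). */
        ptlrpc_request_set_replen(req);

        req->rq_interpret_reply = sketch_ping_interpret;

        /* Hand the RPC to a ptlrpcd thread; completion is reported
         * through the interpreter above. */
        ptlrpcd_add_req(req);
        return 0;
}

Per-request state for the interpreter would normally be carved out of rq_async_args via the ptlrpc_req_async_args() macro introduced in this patch.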