* considered full when less than ?_MAXREQSIZE is left in them.
*/
-#define LDLM_THREADS_AUTO_MIN min((int)(smp_num_cpus * smp_num_cpus * 2), 8)
+#define LDLM_THREADS_AUTO_MIN \
+ min((int)(num_online_cpus() * num_online_cpus() * 2), 8)
#define LDLM_THREADS_AUTO_MAX (LDLM_THREADS_AUTO_MIN * 16)
#define LDLM_BL_THREADS LDLM_THREADS_AUTO_MIN
-#define LDLM_NBUFS (64 * smp_num_cpus)
+#define LDLM_NBUFS (64 * num_online_cpus())
#define LDLM_BUFSIZE (8 * 1024)
#define LDLM_MAXREQSIZE (5 * 1024)
#define LDLM_MAXREPSIZE (1024)
#define MDS_THREADS_MIN 2
#define MDS_THREADS_MAX 512
#define MDS_THREADS_MIN_READPAGE 2
-#define MDS_NBUFS (64 * smp_num_cpus)
+#define MDS_NBUFS (64 * num_online_cpus())
#define MDS_BUFSIZE (8 * 1024)
/* Assume file name length = FNAME_MAX = 256 (true for ext3).
* path name length = PATH_MAX = 4096
#define MGS_THREADS_AUTO_MIN 2
#define MGS_THREADS_AUTO_MAX 32
-#define MGS_NBUFS (64 * smp_num_cpus)
+#define MGS_NBUFS (64 * num_online_cpus())
#define MGS_BUFSIZE (8 * 1024)
#define MGS_MAXREQSIZE (8 * 1024)
#define MGS_MAXREPSIZE (9 * 1024)
/* Absolute limits */
#define OSS_THREADS_MIN 2
#define OSS_THREADS_MAX 512
-#define OST_NBUFS (64 * smp_num_cpus)
+#define OST_NBUFS (64 * num_online_cpus())
#define OST_BUFSIZE (8 * 1024)
/* OST_MAXREQSIZE ~= 4768 bytes =
* lustre_msg + obdo + 16 * obd_ioobj + 256 * niobuf_remote
cfs_waitq_t set_waitq;
cfs_waitq_t *set_wakeup_ptr;
struct list_head set_requests;
+ struct list_head set_cblist; /* list of completion callbacks */
set_interpreter_func set_interpret; /* completion callback */
void *set_arg; /* completion context */
- void *set_countp; /* pointer to NOB counter in case
- * of directIO (bug11737) */
/* locked so that any old caller can communicate requests to
* the set holder who can then fold them into the lock-free set */
spinlock_t set_new_req_lock;
struct list_head set_new_requests;
};
+struct ptlrpc_set_cbdata {
+ struct list_head psc_item;       /* linkage into a request set's set_cblist */
+ set_interpreter_func psc_interpret; /* completion callback (see ptlrpc_set_add_cb) */
+ void *psc_data;                  /* opaque context handed to psc_interpret */
+};
+
struct ptlrpc_bulk_desc;
/*
struct list_head rs_debug_list;
#endif
/* updates to following flag serialised by srv_request_lock */
- unsigned int rs_difficult:1; /* ACK/commit stuff */
- unsigned int rs_scheduled:1; /* being handled? */
- unsigned int rs_scheduled_ever:1;/* any schedule attempts? */
- unsigned int rs_handled:1; /* been handled yet? */
- unsigned int rs_on_net:1; /* reply_out_callback pending? */
- unsigned int rs_prealloc:1; /* rs from prealloc list */
+ unsigned long rs_difficult:1; /* ACK/commit stuff */
+ unsigned long rs_scheduled:1; /* being handled? */
+ unsigned long rs_scheduled_ever:1;/* any schedule attempts? */
+ unsigned long rs_handled:1; /* been handled yet? */
+ unsigned long rs_on_net:1; /* reply_out_callback pending? */
+ unsigned long rs_prealloc:1; /* rs from prealloc list */
int rs_size;
__u64 rs_transno;
int rq_status;
spinlock_t rq_lock;
/* client-side flags are serialized by rq_lock */
- unsigned int rq_intr:1, rq_replied:1, rq_err:1,
+ unsigned long rq_intr:1, rq_replied:1, rq_err:1,
rq_timedout:1, rq_resend:1, rq_restart:1,
/*
* when ->rq_replay is set, request is kept by the client even
rq_no_resend:1, rq_waiting:1, rq_receiving_reply:1,
rq_no_delay:1, rq_net_err:1, rq_early:1, rq_must_unlink:1,
/* server-side flags */
- rq_packed_final:1; /* packed final reply */
+ rq_packed_final:1, /* packed final reply */
+ rq_sent_final:1; /* stop sending early replies */
enum rq_phase rq_phase; /* one of RQ_PHASE_* */
atomic_t rq_refcount; /* client-side refcount for SENT race,
server-side refcount for multiple replies */
void *rq_cb_data;
struct ptlrpc_bulk_desc *rq_bulk; /* client side bulk */
-
/* client outgoing req */
- time_t rq_sent; /* when request/reply sent (secs) */
+ time_t rq_sent; /* when request sent, seconds,
+ * or time when request should
+ * be sent */
volatile time_t rq_deadline; /* when request must finish. volatile
so that servers' early reply updates to the deadline aren't
kept in per-cpu cache */
struct ptlrpc_request_set *rq_set;
void *rq_interpret_reply; /* Async completion handler */
union ptlrpc_async_args rq_async_args; /* Async completion context */
- void *rq_ptlrpcd_data;
struct ptlrpc_request_pool *rq_pool; /* Pool if request from
preallocated list */
};
return req->rq_rep_swab_mask & (1 << index);
}
-#define SWAB_PARANOIA 1
-
-#if SWAB_PARANOIA
-/* unpacking: assert idx not unpacked already */
-#define LASSERT_REQSWAB(rq, idx) lustre_set_req_swabbed(rq, idx)
-#define LASSERT_REPSWAB(rq, idx) lustre_set_rep_swabbed(rq, idx)
-
-/* just looking: assert idx already unpacked */
-#define LASSERT_REQSWABBED(rq, idx) LASSERT(lustre_req_swabbed(rq, idx))
-#define LASSERT_REPSWABBED(rq, idx) LASSERT(lustre_rep_swabbed(rq, idx))
-
-#else
-#define LASSERT_REQSWAB(rq, idx)
-#define LASSERT_REPSWAB(rq, idx)
-#define LASSERT_REQSWABBED(rq, idx)
-#define LASSERT_REPSWABBED(rq, idx)
-#endif
-
static inline const char *
ptlrpc_rqphase2str(struct ptlrpc_request *req)
{
#define BULK_PUT_SOURCE 3
struct ptlrpc_bulk_desc {
- unsigned int bd_success:1; /* completed successfully */
- unsigned int bd_network_rw:1; /* accessible to the network */
- unsigned int bd_type:2; /* {put,get}{source,sink} */
- unsigned int bd_registered:1; /* client side */
+ unsigned long bd_success:1; /* completed successfully */
+ unsigned long bd_network_rw:1; /* accessible to the network */
+ unsigned long bd_type:2; /* {put,get}{source,sink} */
+ unsigned long bd_registered:1; /* client side */
spinlock_t bd_lock; /* serialise with callback */
int bd_import_generation;
struct obd_export *bd_export;
__u32 srv_rep_portal;
/* AT stuff */
- struct adaptive_timeout srv_at_estimate;/* estimated service time */
+ struct adaptive_timeout srv_at_estimate;/* estimated rpc service time */
spinlock_t srv_at_lock;
struct list_head srv_at_list; /* reqs waiting for replies */
cfs_timer_t srv_at_timer; /* early reply timer */
void ptlrpc_abort_inflight(struct obd_import *imp);
struct ptlrpc_request_set *ptlrpc_prep_set(void);
+int ptlrpc_set_add_cb(struct ptlrpc_request_set *set,
+ set_interpreter_func fn, void *data);
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *);
int ptlrpc_check_set(struct ptlrpc_request_set *set);
int ptlrpc_set_wait(struct ptlrpc_request_set *);
void ptlrpc_add_rqs_to_pool(struct ptlrpc_request_pool *pool, int num_rq);
struct ptlrpc_request_pool *ptlrpc_init_rq_pool(int, int,
void (*populate_pool)(struct ptlrpc_request_pool *, int));
+void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req);
struct ptlrpc_request *ptlrpc_prep_req(struct obd_import *imp, __u32 version,
int opcode, int count, int *lengths,
char **bufs);
int ptlrpc_disconnect_import(struct obd_import *imp, int noclose);
int ptlrpc_import_recovery_state_machine(struct obd_import *imp);
void ptlrpc_import_setasync(struct obd_import *imp, int count);
+int ptlrpc_reconnect_import(struct obd_import *imp);
/* ptlrpc/pack_generic.c */
int lustre_msg_swabbed(struct lustre_msg *msg);
unsigned int newlen, int move_data);
void lustre_free_reply_state(struct ptlrpc_reply_state *rs);
int lustre_msg_size(__u32 magic, int count, int *lengths);
+int lustre_packed_msg_size(struct lustre_msg *msg);
int lustre_msg_early_size(void);
int lustre_unpack_msg(struct lustre_msg *m, int len);
void *lustre_msg_buf(struct lustre_msg *m, int n, int minlen);
void *swabber);
void *lustre_swab_repbuf(struct ptlrpc_request *req, int n, int minlen,
void *swabber);
+__u32 lustre_msghdr_get_flags(struct lustre_msg *msg);
+void lustre_msghdr_set_flags(struct lustre_msg *msg, __u32 flags);
__u32 lustre_msg_get_flags(struct lustre_msg *msg);
void lustre_msg_add_flags(struct lustre_msg *msg, int flags);
void lustre_msg_set_flags(struct lustre_msg *msg, int flags);
int client_obd_setup(struct obd_device *obddev, obd_count len, void *buf);
int client_obd_cleanup(struct obd_device * obddev);
int client_connect_import(struct lustre_handle *conn, struct obd_device *obd,
- struct obd_uuid *cluuid, struct obd_connect_data *);
+ struct obd_uuid *cluuid, struct obd_connect_data *,
+ void *localdata);
int client_disconnect_export(struct obd_export *exp);
int client_import_add_conn(struct obd_import *imp, struct obd_uuid *uuid,
int priority);