/* RDS-over-TCP transport: private declarations shared by tcp.c,
 * tcp_connect.c, tcp_listen.c, tcp_recv.c, tcp_send.c and tcp_stats.c.
 */
# ifndef _RDS_TCP_H
# define _RDS_TCP_H
# define RDS_TCP_PORT 16385
/* One incoming RDS message on a TCP connection: the generic RDS
 * incoming descriptor plus the skbs carrying its data.
 */
struct rds_tcp_incoming {
	struct rds_incoming	ti_inc;
	struct sk_buff_head	ti_skb_list;
};
struct rds_tcp_connection {
struct list_head t_tcp_node ;
2016-07-01 02:11:12 +03:00
struct rds_conn_path * t_cpath ;
/* t_conn_path_lock synchronizes the connection establishment between
2016-05-02 21:24:52 +03:00
* rds_tcp_accept_one and rds_tcp_conn_connect
*/
2016-07-01 02:11:12 +03:00
struct mutex t_conn_path_lock ;
2009-08-21 16:28:31 +04:00
struct socket * t_sock ;
void * t_orig_write_space ;
void * t_orig_data_ready ;
void * t_orig_state_change ;
struct rds_tcp_incoming * t_tinc ;
size_t t_tinc_hdr_rem ;
size_t t_tinc_data_rem ;
/* XXX error report? */
struct work_struct t_conn_w ;
struct work_struct t_send_w ;
struct work_struct t_down_w ;
struct work_struct t_recv_w ;
/* for info exporting only */
struct list_head t_list_item ;
u32 t_last_sent_nxt ;
u32 t_last_expected_una ;
u32 t_last_seen_una ;
} ;
/* Per-CPU transport counters, exported through
 * rds_tcp_stats_info_copy().
 */
struct rds_tcp_statistics {
	uint64_t	s_tcp_data_ready_calls;
	uint64_t	s_tcp_write_space_calls;
	uint64_t	s_tcp_sndbuf_full;
	uint64_t	s_tcp_connect_raced;
	uint64_t	s_tcp_listen_closed_stale;
};
/* tcp.c */
void rds_tcp_tune(struct socket *sock);
void rds_tcp_nonagle(struct socket *sock);
/* Install/reset/restore the sk callbacks saved in struct
 * rds_tcp_connection (t_orig_*).
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp);
void rds_tcp_reset_callbacks(struct socket *sock, struct rds_conn_path *cp);
void rds_tcp_restore_callbacks(struct socket *sock,
			       struct rds_tcp_connection *tc);
/* TCP send-sequence accessors for tc's socket; rds_tcp_map_seq()
 * widens a 32-bit TCP sequence number into the 64-bit RDS sequence
 * space (see tcp.c for the exact mapping).
 */
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc);
u32 rds_tcp_snd_una(struct rds_tcp_connection *tc);
u64 rds_tcp_map_seq(struct rds_tcp_connection *tc, u32 seq);
extern struct rds_transport rds_tcp_transport;
void rds_tcp_accept_work(struct sock *sk);
/* tcp_connect.c */
int rds_tcp_conn_connect(struct rds_connection *conn);
void rds_tcp_conn_path_shutdown(struct rds_conn_path *cp);
void rds_tcp_state_change(struct sock *sk);
/* tcp_listen.c */
struct socket *rds_tcp_listen_init(struct net *net);
void rds_tcp_listen_stop(struct socket *sock);
void rds_tcp_listen_data_ready(struct sock *sk);
int rds_tcp_accept_one(struct socket *sock);
int rds_tcp_keepalive(struct socket *sock);
/* tcp_recv.c */
int rds_tcp_recv_init(void);
void rds_tcp_recv_exit(void);
void rds_tcp_data_ready(struct sock *sk);
int rds_tcp_recv_path(struct rds_conn_path *cp);
void rds_tcp_inc_free(struct rds_incoming *inc);
int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iov_iter *to);
/* tcp_send.c */
void rds_tcp_xmit_path_prepare(struct rds_conn_path *cp);
void rds_tcp_xmit_path_complete(struct rds_conn_path *cp);
int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
		 unsigned int hdr_off, unsigned int sg, unsigned int off);
void rds_tcp_write_space(struct sock *sk);
/* tcp_stats.c */
DECLARE_PER_CPU(struct rds_tcp_statistics, rds_tcp_stats);
#define rds_tcp_stats_inc(member) rds_stats_inc_which(rds_tcp_stats, member)
unsigned int rds_tcp_stats_info_copy(struct rds_info_iterator *iter,
				     unsigned int avail);
# endif