@@ -1260,187 +1260,6 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 };
 #endif
 
-static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
-                               struct request_sock *req,
-                               struct tcp_fastopen_cookie *foc,
-                               struct tcp_fastopen_cookie *valid_foc)
-{
-        bool skip_cookie = false;
-        struct fastopen_queue *fastopenq;
-
-        if (likely(!fastopen_cookie_present(foc))) {
-                /* See include/net/tcp.h for the meaning of these knobs */
-                if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
-                    ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
-                     (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
-                        skip_cookie = true; /* no cookie to validate */
-                else
-                        return false;
-        }
-        fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
-        /* A FO option is present; bump the counter. */
-        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
-
-        /* Make sure the listener has enabled fastopen, and we don't
-         * exceed the max # of pending TFO requests allowed before trying
-         * to validate the cookie, in order to avoid burning CPU cycles
-         * unnecessarily.
-         *
-         * XXX (TFO) - The implication of checking the max_qlen before
-         * processing a cookie request is that clients can't differentiate
-         * between qlen overflow causing Fast Open to be disabled
-         * temporarily vs a server not supporting Fast Open at all.
-         */
-        if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
-            fastopenq == NULL || fastopenq->max_qlen == 0)
-                return false;
-
-        if (fastopenq->qlen >= fastopenq->max_qlen) {
-                struct request_sock *req1;
-
-                spin_lock(&fastopenq->lock);
-                req1 = fastopenq->rskq_rst_head;
-                if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
-                        spin_unlock(&fastopenq->lock);
-                        NET_INC_STATS_BH(sock_net(sk),
-                                         LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
-                        /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL */
-                        foc->len = -1;
-                        return false;
-                }
-                fastopenq->rskq_rst_head = req1->dl_next;
-                fastopenq->qlen--;
-                spin_unlock(&fastopenq->lock);
-                reqsk_free(req1);
-        }
-        if (skip_cookie) {
-                tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-                return true;
-        }
-
-        if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
-                if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
-                        tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
-                                                ip_hdr(skb)->daddr, valid_foc);
-                        if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
-                            memcmp(&foc->val[0], &valid_foc->val[0],
-                                   TCP_FASTOPEN_COOKIE_SIZE) != 0)
-                                return false;
-                        valid_foc->len = -1;
-                }
-                /* Acknowledge the data received from the peer. */
-                tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-                return true;
-        } else if (foc->len == 0) { /* Client requesting a cookie */
-                tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
-                                        ip_hdr(skb)->daddr, valid_foc);
-                NET_INC_STATS_BH(sock_net(sk),
-                                 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
-        } else {
-                /* Client sent a cookie with the wrong size. Treat it
-                 * the same as an invalid cookie and return a valid one.
-                 */
-                tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr,
-                                        ip_hdr(skb)->daddr, valid_foc);
-        }
-        return false;
-}
-
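For reference, the TFO_* bits tested by tcp_fastopen_check() come from include/net/tcp.h of this kernel generation. Below is a minimal, self-contained sketch (not part of the patch) of how those server-side sysctl bits gate SYN-data acceptance; tfo_server_accepts_syn_data() is a hypothetical helper that only mirrors the branching at the top of the function above.

/* Editorial sketch, not part of the patch. Flag values match
 * include/net/tcp.h from this era; the helper below is hypothetical
 * and only mirrors the gating logic in tcp_fastopen_check() above.
 */
#include <stdbool.h>
#include <stdio.h>

#define TFO_CLIENT_ENABLE           0x1    /* client may send data in SYN */
#define TFO_SERVER_ENABLE           0x2    /* listeners may accept TFO */
#define TFO_SERVER_COOKIE_NOT_CHKED 0x100  /* accept SYN data, skip cookie check */
#define TFO_SERVER_COOKIE_NOT_REQD  0x200  /* accept SYN data w/o a cookie */
#define TFO_SERVER_ALWAYS           0x1000 /* testing: always accept SYN data */

static bool tfo_server_accepts_syn_data(int sysctl_tcp_fastopen,
                                        bool cookie_present, bool has_payload)
{
        if (!cookie_present)
                /* same test as the skip_cookie branch above */
                return (sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
                       ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
                        has_payload);
        /* a cookie is present: the listener must have TFO enabled */
        return (sysctl_tcp_fastopen & TFO_SERVER_ENABLE) != 0;
}

int main(void)
{
        /* e.g. sysctl net.ipv4.tcp_fastopen = 3 enables client + server */
        printf("%d\n", tfo_server_accepts_syn_data(0x3, true, true));  /* 1 */
        printf("%d\n", tfo_server_accepts_syn_data(0x3, false, true)); /* 0 */
        return 0;
}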
-static int tcp_v4_conn_req_fastopen(struct sock *sk,
-                                    struct sk_buff *skb,
-                                    struct sk_buff *skb_synack,
-                                    struct request_sock *req)
-{
-        struct tcp_sock *tp = tcp_sk(sk);
-        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
-        const struct inet_request_sock *ireq = inet_rsk(req);
-        struct sock *child;
-        int err;
-
-        req->num_retrans = 0;
-        req->num_timeout = 0;
-        req->sk = NULL;
-
-        child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
-        if (child == NULL) {
-                NET_INC_STATS_BH(sock_net(sk),
-                                 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
-                kfree_skb(skb_synack);
-                return -1;
-        }
-        err = ip_build_and_send_pkt(skb_synack, sk, ireq->ir_loc_addr,
-                                    ireq->ir_rmt_addr, ireq->opt);
-        err = net_xmit_eval(err);
-        if (!err)
-                tcp_rsk(req)->snt_synack = tcp_time_stamp;
-        /* XXX (TFO) - is it ok to ignore error and continue? */
-
-        spin_lock(&queue->fastopenq->lock);
-        queue->fastopenq->qlen++;
-        spin_unlock(&queue->fastopenq->lock);
-
-        /* Initialize the child socket. Have to fix some values to take
-         * into account the child is a Fast Open socket and is created
-         * only out of the bits carried in the SYN packet.
-         */
-        tp = tcp_sk(child);
-
-        tp->fastopen_rsk = req;
-        /* Do a hold on the listener sk so that if the listener is being
-         * closed, the child that has been accepted can live on and still
-         * access listen_lock.
-         */
-        sock_hold(sk);
-        tcp_rsk(req)->listener = sk;
-
-        /* RFC1323: The window in SYN & SYN/ACK segments is never
-         * scaled. So correct it appropriately.
-         */
-        tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
-
-        /* Activate the retrans timer so that SYNACK can be retransmitted.
-         * The request socket is not added to the SYN table of the parent
-         * because it's been added to the accept queue directly.
-         */
-        inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
-                                  TCP_TIMEOUT_INIT, TCP_RTO_MAX);
-
-        /* Add the child socket directly into the accept queue */
-        inet_csk_reqsk_queue_add(sk, req, child);
-
-        /* Now finish processing the fastopen child socket. */
-        inet_csk(child)->icsk_af_ops->rebuild_header(child);
-        tcp_init_congestion_control(child);
-        tcp_mtup_init(child);
-        tcp_init_metrics(child);
-        tcp_init_buffer_space(child);
-
-        /* Queue the data carried in the SYN packet. We need to first
-         * bump skb's refcnt because the caller will attempt to free it.
-         *
-         * XXX (TFO) - we honor a zero-payload TFO request for now.
-         * (Any reason not to?)
-         */
-        if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
-                /* Don't queue the skb if there is no payload in SYN.
-                 * XXX (TFO) - How about SYN+FIN?
-                 */
-                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-        } else {
-                skb = skb_get(skb);
-                skb_dst_drop(skb);
-                __skb_pull(skb, tcp_hdr(skb)->doff * 4);
-                skb_set_owner_r(skb, child);
-                __skb_queue_tail(&child->sk_receive_queue, skb);
-                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-                tp->syn_data_acked = 1;
-        }
-        sk->sk_data_ready(sk);
-        bh_unlock_sock(child);
-        sock_put(child);
-        WARN_ON(req->sk == NULL);
-        return 0;
-}
-
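The server path removed here is driven by a client that carries data in the SYN. A minimal userspace sketch (not part of the patch; the destination address and payload are placeholders) using MSG_FASTOPEN, which Linux has exposed since 3.7:

/* Editorial sketch, not part of the patch: a client sending data in
 * the SYN via MSG_FASTOPEN, which exercises the passive-open code
 * above on the server. Address and payload are placeholders.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>

#ifndef MSG_FASTOPEN
#define MSG_FASTOPEN 0x20000000
#endif

int main(void)
{
        struct sockaddr_in addr;
        const char req[] = "GET / HTTP/1.0\r\n\r\n";
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_port = htons(80);
        inet_pton(AF_INET, "192.0.2.1", &addr.sin_addr); /* placeholder */

        /* No explicit connect(): with a cached cookie the payload rides
         * in the SYN; otherwise the kernel falls back to a regular
         * three-way handshake plus a cookie request.
         */
        if (sendto(fd, req, sizeof(req) - 1, MSG_FASTOPEN,
                   (struct sockaddr *)&addr, sizeof(addr)) < 0)
                perror("sendto");
        close(fd);
        return 0;
}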
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
         struct tcp_options_received tmp_opt;
@@ -1599,8 +1418,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
                 if (fastopen_cookie_present(&foc) && foc.len != 0)
                         NET_INC_STATS_BH(sock_net(sk),
                                          LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
-        } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req))
-                goto drop_and_free;
+        } else if (tcp_fastopen_create_child(sk, skb, skb_synack, req))
+                goto drop_and_release;
 
         return 0;
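For completeness: the fastopenq->max_qlen bound enforced by the removed tcp_fastopen_check() is configured by the listening application through the TCP_FASTOPEN socket option. A minimal server-side sketch (not part of the patch; the port and queue length are placeholders):

/* Editorial sketch, not part of the patch: where fastopenq->max_qlen
 * comes from. The TCP_FASTOPEN socket option (Linux 3.7+) sets the
 * maximum number of pending Fast Open requests on a listener.
 */
#include <stdio.h>
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN
#define TCP_FASTOPEN 23
#endif

int main(void)
{
        struct sockaddr_in addr;
        int qlen = 16; /* placeholder queue length -> max_qlen */
        int fd = socket(AF_INET, SOCK_STREAM, 0);

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(8080); /* placeholder port */

        if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
                perror("bind");
        /* Enable Fast Open on the listener */
        if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen)) < 0)
                perror("setsockopt(TCP_FASTOPEN)");
        if (listen(fd, 128) < 0)
                perror("listen");
        /* accept() then proceeds as usual; SYN-data children arrive
         * already established via the Fast Open path.
         */
        return 0;
}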