@@ -825,7 +825,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
			      struct request_sock *req,
			      struct request_values *rvp,
-			      u16 queue_mapping)
+			      u16 queue_mapping,
+			      bool nocache)
 {
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
@@ -833,7 +834,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
	struct sk_buff *skb;

	/* First, grab a route. */
-	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
+	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req, nocache)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, rvp);
@@ -855,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
			    struct request_values *rvp)
 {
	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-	return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
+	return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
 }

 /*
@@ -1388,7 +1389,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
	 */
	if (tmp_opt.saw_tstamp &&
	    tcp_death_row.sysctl_tw_recycle &&
-	    (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
+	    (dst = inet_csk_route_req(sk, &fl4, req, want_cookie)) != NULL &&
	    fl4.daddr == saddr &&
	    (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) {
		inet_peer_refcheck(peer);
@@ -1424,7 +1425,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)

	if (tcp_v4_send_synack(sk, dst, req,
			       (struct request_values *)&tmp_ext,
-			       skb_get_queue_mapping(skb)) ||
+			       skb_get_queue_mapping(skb),
+			       want_cookie) ||
	    want_cookie)
		goto drop_and_free;

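For context, the callee side of this change (not part of the hunks above) adds the same boolean to inet_csk_route_req() in net/ipv4/inet_connection_sock.c. Below is a minimal sketch of how the flag is plausibly consumed there; the abbreviated body and the FLOWI_FLAG_RT_NOCACHE flag name are assumptions about this series, not a quote of the patch:

struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4,
				     const struct request_sock *req,
				     bool nocache)
{
	int flags = inet_sk_flowi_flags(sk);

	/* Assumed handling: mark the flow so the route produced by the
	 * lookup is not inserted into the routing cache.
	 */
	if (nocache)
		flags |= FLOWI_FLAG_RT_NOCACHE;

	/* ... unchanged flowi4_init_output() / ip_route_output_flow()
	 * lookup, now seeded with the updated flags ...
	 */
}

The hunks above thread want_cookie into that parameter: a SYN answered via syncookie is followed by drop_and_free, so no request sock survives and caching the output route would only pollute the cache, while tcp_v4_rtx_synack() passes false because a retransmitted SYNACK belongs to a real, still-pending request.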