@@ -1669,6 +1669,47 @@ linearize_chk_done:
 	return linearize;
 }
 
+/**
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Memory barrier before checking head and tail */
+	smp_mb();
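+	/* The barrier orders the queue stop above against the re-read of
+	 * the free-descriptor count below: either the Tx cleanup path,
+	 * which reclaims descriptors and restarts stopped queues, sees
+	 * the stopped state, or this function sees the room it made.
+	 */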
+
+	/* Check again in a case another CPU has just made room available. */
+	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size:    the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+#ifdef I40E_FCOE
+int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#else
+static int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#endif
+{
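+	/* Fast path: enough descriptors are already free, so skip the
+	 * stop-queue/barrier slow path in __i40evf_maybe_stop_tx().
+	 */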
+	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring: ring to send buffer on
@@ -1806,8 +1847,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	tx_ring->next_to_use = i;
 
+	i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 	/* notify HW of packet */
-	writel(i, tx_ring->tail);
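+	/* skb->xmit_more is set when the stack has more packets queued
+	 * for this ring, so the MMIO doorbell (the tail write) can be
+	 * deferred and batched; still ring it if the queue was stopped,
+	 * since no later transmit would do so.
+	 */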
+	if (!skb->xmit_more ||
+	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						   tx_ring->queue_index)))
+		writel(i, tx_ring->tail);
 
 	return;
@@ -1828,43 +1873,6 @@ dma_error:
 	tx_ring->next_to_use = i;
 }
 
-/**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	/* Memory barrier before checking head and tail */
-	smp_mb();
-
-	/* Check again in a case another CPU has just made room available. */
-	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-		return -EBUSY;
-
-	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	++tx_ring->tx_stats.restart_queue;
-	return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size:    the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-		return 0;
-	return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
 /**
  * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb: send buffer
@@ -1890,7 +1898,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 	count += TXD_USE_COUNT(skb_headlen(skb));
 
-	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+	if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 		tx_ring->tx_stats.tx_busy++;
 		return 0;
 	}
@@ -1966,8 +1974,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
 		    td_cmd, td_offset);
 
-	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
 	return NETDEV_TX_OK;
 
 out_drop: