@@ -197,14 +197,20 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
 	return avail;
 }
 
-static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
+/**
+ * stmmac_rx_dirty - Get RX queue dirty
+ * @priv: driver private structure
+ * @queue: RX queue index
+ */
+static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
 {
+	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	u32 dirty;
 
-	if (priv->dirty_rx <= priv->cur_rx)
-		dirty = priv->cur_rx - priv->dirty_rx;
+	if (rx_q->dirty_rx <= rx_q->cur_rx)
+		dirty = rx_q->cur_rx - rx_q->dirty_rx;
 	else
-		dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
+		dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
 
 	return dirty;
 }
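A note on the arithmetic above: cur_rx is the next entry the DMA engine will fill, dirty_rx is the oldest entry still waiting to be refilled, and the else branch covers the case where cur_rx has already wrapped past the end of the ring. A minimal user-space sketch of the same computation; the ring length and the sample indices are assumptions for the demo, not values taken from this patch:

    #include <stdio.h>

    #define DMA_RX_SIZE 512 /* assumed ring length for the demo */

    /* Same shape as stmmac_rx_dirty(): entries between dirty_rx and
     * cur_rx, accounting for wraparound past the ring end.
     */
    static unsigned int rx_dirty(unsigned int dirty_rx, unsigned int cur_rx)
    {
        if (dirty_rx <= cur_rx)
            return cur_rx - dirty_rx;
        return DMA_RX_SIZE - dirty_rx + cur_rx;
    }

    int main(void)
    {
        printf("%u\n", rx_dirty(10, 42));  /* no wrap: 32 entries */
        printf("%u\n", rx_dirty(500, 6));  /* wrapped: 512 - 500 + 6 = 18 */
        return 0;
    }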
@@ -891,15 +897,24 @@ static int stmmac_init_phy(struct net_device *dev)
 static void stmmac_display_rx_rings(struct stmmac_priv *priv)
 {
+	u32 rx_cnt = priv->plat->rx_queues_to_use;
 	void *head_rx;
+	u32 queue;
 
-	if (priv->extend_desc)
-		head_rx = (void *)priv->dma_erx;
-	else
-		head_rx = (void *)priv->dma_rx;
+	/* Display RX rings */
+	for (queue = 0; queue < rx_cnt; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 
-	/* Display RX ring */
-	priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+		pr_info("\tRX Queue %u rings\n", queue);
+
+		if (priv->extend_desc)
+			head_rx = (void *)rx_q->dma_erx;
+		else
+			head_rx = (void *)rx_q->dma_rx;
+
+		/* Display RX ring */
+		priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+	}
 }
 
 static void stmmac_display_tx_rings(struct stmmac_priv *priv)
@@ -943,21 +958,23 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
 /**
  * stmmac_clear_rx_descriptors - clear RX descriptors
  * @priv: driver private structure
+ * @queue: RX queue index
  * Description: this function is called to clear the RX descriptors
  * in case of both basic and extended descriptors are used.
  */
-static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv)
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
 {
+	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	int i;
 
 	/* Clear the RX descriptors */
 	for (i = 0; i < DMA_RX_SIZE; i++)
 		if (priv->extend_desc)
-			priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
+			priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
 		else
-			priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
+			priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
						     priv->use_riwt, priv->mode,
						     (i == DMA_RX_SIZE - 1));
 }
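The last argument of init_rx_desc(), (i == DMA_RX_SIZE - 1), flags the final descriptor so the engine knows where the ring wraps. A toy model of that loop; the demo_desc layout and init_desc() are invented for illustration, since the real init_rx_desc() callback is core-specific:

    #include <stdbool.h>
    #include <stdio.h>

    #define DMA_RX_SIZE 8 /* small ring for the demo */

    struct demo_desc {
        bool own;      /* entry handed to the (imaginary) DMA engine */
        bool end_ring; /* last entry: engine wraps back to index 0 */
    };

    static void init_desc(struct demo_desc *d, bool last)
    {
        d->own = true;
        d->end_ring = last; /* only the final entry gets the wrap mark */
    }

    int main(void)
    {
        struct demo_desc ring[DMA_RX_SIZE];
        int i;

        for (i = 0; i < DMA_RX_SIZE; i++)
            init_desc(&ring[i], i == DMA_RX_SIZE - 1);

        printf("last entry end_ring=%d\n", ring[DMA_RX_SIZE - 1].end_ring);
        return 0;
    }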
@@ -992,8 +1009,12 @@ static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv)
  */
 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
 {
+	u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
+	u32 queue;
+
 	/* Clear the RX descriptors */
-	stmmac_clear_rx_descriptors(priv);
+	for (queue = 0; queue < rx_queue_cnt; queue++)
+		stmmac_clear_rx_descriptors(priv, queue);
 
 	/* Clear the TX descriptors */
 	stmmac_clear_tx_descriptors(priv);
@@ -1004,13 +1025,15 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
  * @priv: driver private structure
  * @p: descriptor pointer
  * @i: descriptor index
- * @flags: gfp flag.
+ * @flags: gfp flag
+ * @queue: RX queue index
  * Description: this function is called to allocate a receive buffer, perform
  * the DMA mapping and init the descriptor.
  */
 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
-				  int i, gfp_t flags)
+				  int i, gfp_t flags, u32 queue)
 {
+	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 	struct sk_buff *skb;
 
 	skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
@@ -1019,20 +1042,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 			   "%s: Rx init fails; skb is NULL\n", __func__);
 		return -ENOMEM;
 	}
-	priv->rx_skbuff[i] = skb;
-	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
+	rx_q->rx_skbuff[i] = skb;
+	rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
 						priv->dma_buf_sz,
 						DMA_FROM_DEVICE);
-	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
+	if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
 		netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
 		dev_kfree_skb_any(skb);
 		return -EINVAL;
 	}
 
 	if (priv->synopsys_id >= DWMAC_CORE_4_00)
-		p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
+		p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
 	else
-		p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
+		p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
 
 	if ((priv->hw->mode->init_desc3) &&
 	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
@@ -1044,16 +1067,19 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
 /**
  * stmmac_free_rx_buffer - free RX dma buffers
  * @priv: private structure
+ * @queue: RX queue index
  * @i: buffer index.
  */
-static void stmmac_free_rx_buffer(struct stmmac_priv *priv, int i)
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 {
-	if (priv->rx_skbuff[i]) {
-		dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
+	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+	if (rx_q->rx_skbuff[i]) {
+		dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
 				 priv->dma_buf_sz, DMA_FROM_DEVICE);
-		dev_kfree_skb_any(priv->rx_skbuff[i]);
+		dev_kfree_skb_any(rx_q->rx_skbuff[i]);
 	}
-	priv->rx_skbuff[i] = NULL;
+	rx_q->rx_skbuff[i] = NULL;
 }
 
 /**
@@ -1094,10 +1120,12 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, int i)
  */
 static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 {
-	int i;
 	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 rx_count = priv->plat->rx_queues_to_use;
 	unsigned int bfsize = 0;
 	int ret = -ENOMEM;
+	u32 queue;
+	int i;
 
 	if (priv->hw->mode->set_16kib_bfsize)
 		bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
@@ -1107,45 +1135,69 @@ static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
 
 	priv->dma_buf_sz = bfsize;
 
-	netif_dbg(priv, probe, priv->dev,
-		  "(%s) dma_rx_phy=0x%08x\n", __func__, (u32)priv->dma_rx_phy);
-
+	/* RX INITIALIZATION */
 	netif_dbg(priv, probe, priv->dev,
 		  "SKB addresses:\nskb\t\tskb data\tdma data\n");
 
-	for (i = 0; i < DMA_RX_SIZE; i++) {
-		struct dma_desc *p;
-		if (priv->extend_desc)
-			p = &((priv->dma_erx + i)->basic);
-		else
-			p = priv->dma_rx + i;
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 
-		ret = stmmac_init_rx_buffers(priv, p, i, flags);
-		if (ret)
-			goto err_init_rx_buffers;
+		netif_dbg(priv, probe, priv->dev,
+			  "(%s) dma_rx_phy=0x%08x\n", __func__,
+			  (u32)rx_q->dma_rx_phy);
 
-		netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
-			  priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
-			  (unsigned int)priv->rx_skbuff_dma[i]);
-	}
-	priv->cur_rx = 0;
-	priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
-	buf_sz = bfsize;
-
-	/* Setup the chained descriptor addresses */
-	if (priv->mode == STMMAC_CHAIN_MODE) {
-		if (priv->extend_desc)
-			priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
-					     DMA_RX_SIZE, 1);
-		else
-			priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
-					     DMA_RX_SIZE, 0);
-	}
+		for (i = 0; i < DMA_RX_SIZE; i++) {
+			struct dma_desc *p;
+
+			if (priv->extend_desc)
+				p = &((rx_q->dma_erx + i)->basic);
+			else
+				p = rx_q->dma_rx + i;
+
+			ret = stmmac_init_rx_buffers(priv, p, i, flags,
+						     queue);
+			if (ret)
+				goto err_init_rx_buffers;
+
+			netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
+				  rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
+				  (unsigned int)rx_q->rx_skbuff_dma[i]);
+		}
+
+		rx_q->cur_rx = 0;
+		rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
+
+		stmmac_clear_rx_descriptors(priv, queue);
+
+		/* Setup the chained descriptor addresses */
+		if (priv->mode == STMMAC_CHAIN_MODE) {
+			if (priv->extend_desc)
+				priv->hw->mode->init(rx_q->dma_erx,
+						     rx_q->dma_rx_phy,
+						     DMA_RX_SIZE, 1);
+			else
+				priv->hw->mode->init(rx_q->dma_rx,
+						     rx_q->dma_rx_phy,
+						     DMA_RX_SIZE, 0);
+		}
+	}
+
+	buf_sz = bfsize;
 
 	return 0;
+
 err_init_rx_buffers:
-	while (--i >= 0)
-		stmmac_free_rx_buffer(priv, i);
+	while (queue >= 0) {
+		while (--i >= 0)
+			stmmac_free_rx_buffer(priv, queue, i);
+
+		if (queue == 0)
+			break;
+
+		i = DMA_RX_SIZE;
+		queue--;
+	}
+
 	return ret;
 }
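The reworked error path has to unwind two loops: it frees the 0..i-1 buffers of the queue that failed, then every buffer of each fully initialised queue before it. Since queue is a u32, the patch's "while (queue >= 0)" condition is always true; the "queue == 0" break is the real exit. A user-space sketch of the same walk-back, with free_buf() standing in for stmmac_free_rx_buffer() and the sizes chosen for the demo:

    #include <stdio.h>

    #define DMA_RX_SIZE 8 /* assumed per-queue ring length for the demo */

    static void free_buf(unsigned int queue, int i)
    {
        printf("free queue %u entry %d\n", queue, i);
    }

    static void unwind(unsigned int queue, int i)
    {
        /* for (;;) plus the explicit break mirrors the patch's
         * tautological "while (queue >= 0)" on an unsigned index.
         */
        for (;;) {
            while (--i >= 0)
                free_buf(queue, i); /* partial queue first, then full ones */
            if (queue == 0)
                break;
            i = DMA_RX_SIZE;
            queue--;
        }
    }

    int main(void)
    {
        /* as if stmmac_init_rx_buffers() failed at queue 2, entry 5 */
        unwind(2, 5);
        return 0;
    }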
@@ -1234,13 +1286,14 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 /**
  * dma_free_rx_skbufs - free RX dma buffers
  * @priv: private structure
+ * @queue: RX queue index
  */
-static void dma_free_rx_skbufs(struct stmmac_priv *priv)
+static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
 {
 	int i;
 
 	for (i = 0; i < DMA_RX_SIZE; i++)
-		stmmac_free_rx_buffer(priv, i);
+		stmmac_free_rx_buffer(priv, queue, i);
 }
 
 /**
@@ -1255,6 +1308,37 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
 		stmmac_free_tx_buffer(priv, i);
 }
 
+/**
+ * free_dma_rx_desc_resources - free RX dma desc resources
+ * @priv: private structure
+ */
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+
+	/* Free RX queue resources */
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		/* Release the DMA RX socket buffers */
+		dma_free_rx_skbufs(priv, queue);
+
+		/* Free DMA regions of consistent memory previously allocated */
+		if (!priv->extend_desc)
+			dma_free_coherent(priv->device,
+					  DMA_RX_SIZE * sizeof(struct dma_desc),
+					  rx_q->dma_rx, rx_q->dma_rx_phy);
+		else
+			dma_free_coherent(priv->device, DMA_RX_SIZE *
+					  sizeof(struct dma_extended_desc),
+					  rx_q->dma_erx, rx_q->dma_rx_phy);
+
+		kfree(rx_q->rx_skbuff_dma);
+		kfree(rx_q->rx_skbuff);
+	}
+}
+
 /**
  * alloc_dma_rx_desc_resources - alloc RX resources.
  * @priv: private structure
@@ -1265,42 +1349,56 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
  */
 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
 {
+	u32 rx_count = priv->plat->rx_queues_to_use;
 	int ret = -ENOMEM;
+	u32 queue;
 
-	priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
-					    GFP_KERNEL);
-	if (!priv->rx_skbuff_dma)
-		return -ENOMEM;
+	/* RX queues buffers and DMA */
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
 
-	priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
-					GFP_KERNEL);
-	if (!priv->rx_skbuff)
-		goto err_rx_skbuff;
+		rx_q->queue_index = queue;
+		rx_q->priv_data = priv;
 
-	if (priv->extend_desc) {
-		priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
-						    sizeof(struct
-							   dma_extended_desc),
-						    &priv->dma_rx_phy,
-						    GFP_KERNEL);
-		if (!priv->dma_erx)
-			goto err_dma;
+		rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
+						    sizeof(dma_addr_t),
+						    GFP_KERNEL);
+		if (!rx_q->rx_skbuff_dma)
+			return -ENOMEM;
 
-	} else {
-		priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
-						   sizeof(struct dma_desc),
-						   &priv->dma_rx_phy,
-						   GFP_KERNEL);
-		if (!priv->dma_rx)
-			goto err_dma;
+		rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
+						sizeof(struct sk_buff *),
+						GFP_KERNEL);
+		if (!rx_q->rx_skbuff)
+			goto err_dma;
+
+		if (priv->extend_desc) {
+			rx_q->dma_erx = dma_zalloc_coherent(priv->device,
+							    DMA_RX_SIZE *
+							    sizeof(struct
+							    dma_extended_desc),
+							    &rx_q->dma_rx_phy,
+							    GFP_KERNEL);
+			if (!rx_q->dma_erx)
+				goto err_dma;
+		} else {
+			rx_q->dma_rx = dma_zalloc_coherent(priv->device,
+							   DMA_RX_SIZE *
+							   sizeof(struct
+							   dma_desc),
+							   &rx_q->dma_rx_phy,
+							   GFP_KERNEL);
+			if (!rx_q->dma_rx)
+				goto err_dma;
+		}
 	}
 
 	return 0;
 
 err_dma:
-	kfree(priv->rx_skbuff);
-err_rx_skbuff:
-	kfree(priv->rx_skbuff_dma);
+	free_dma_rx_desc_resources(priv);
+
 	return ret;
 }
@@ -1333,23 +1431,15 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
 						    dma_extended_desc),
 						    &priv->dma_tx_phy,
 						    GFP_KERNEL);
-		if (!priv->dma_etx) {
-			dma_free_coherent(priv->device, DMA_RX_SIZE *
-					  sizeof(struct dma_extended_desc),
-					  priv->dma_erx, priv->dma_rx_phy);
+		if (!priv->dma_etx)
 			goto err_dma;
-		}
 	} else {
 		priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
 						   sizeof(struct dma_desc),
 						   &priv->dma_tx_phy,
 						   GFP_KERNEL);
-		if (!priv->dma_tx) {
-			dma_free_coherent(priv->device, DMA_RX_SIZE *
-					  sizeof(struct dma_desc),
-					  priv->dma_rx, priv->dma_rx_phy);
+		if (!priv->dma_tx)
 			goto err_dma;
-		}
 	}
 
 	return 0;
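With free_dma_rx_desc_resources() in place, the TX allocator no longer has to free the RX coherent region at each failure site; both branches can share the single err_dma label. A user-space sketch of that shared-label idiom; the names are hypothetical, and the demo leans on free(NULL) being a no-op to keep one cleanup routine safe however far allocation got:

    #include <stdlib.h>

    struct demo_res {
        void *rx_buf;
        void *tx_buf;
    };

    /* One NULL-tolerant cleanup shared by every error site */
    static void demo_free(struct demo_res *r)
    {
        free(r->rx_buf); /* free(NULL) is a no-op */
        free(r->tx_buf);
        r->rx_buf = NULL;
        r->tx_buf = NULL;
    }

    static int demo_alloc(struct demo_res *r)
    {
        r->rx_buf = malloc(4096);
        if (!r->rx_buf)
            goto err;
        r->tx_buf = malloc(4096);
        if (!r->tx_buf)
            goto err;
        return 0;
    err:
        demo_free(r); /* rolls back whatever succeeded */
        return -1;
    }

    int main(void)
    {
        struct demo_res r = { NULL, NULL };
        return demo_alloc(&r);
    }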
@@ -1371,6 +1461,7 @@ err_tx_skbuff:
  */
 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
 {
+	/* RX Allocation */
 	int ret = alloc_dma_rx_desc_resources(priv);
 
 	if (ret)
@@ -1381,29 +1472,6 @@ static int alloc_dma_desc_resources(struct stmmac_priv *priv)
 	return ret;
 }
 
-/**
- * free_dma_rx_desc_resources - free RX dma desc resources
- * @priv: private structure
- */
-static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
-{
-	/* Release the DMA RX socket buffers */
-	dma_free_rx_skbufs(priv);
-
-	/* Free DMA regions of consistent memory previously allocated */
-	if (!priv->extend_desc)
-		dma_free_coherent(priv->device,
-				  DMA_RX_SIZE * sizeof(struct dma_desc),
-				  priv->dma_rx, priv->dma_rx_phy);
-	else
-		dma_free_coherent(priv->device, DMA_RX_SIZE *
-				  sizeof(struct dma_extended_desc),
-				  priv->dma_erx, priv->dma_rx_phy);
-
-	kfree(priv->rx_skbuff_dma);
-	kfree(priv->rx_skbuff);
-}
-
 /**
  * free_dma_tx_desc_resources - free TX dma desc resources
  * @priv: private structure
@@ -1914,6 +1982,7 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 {
 	u32 rx_channels_count = priv->plat->rx_queues_to_use;
 	u32 tx_channels_count = priv->plat->tx_queues_to_use;
+	struct stmmac_rx_queue *rx_q;
 	u32 dummy_dma_rx_phy = 0;
 	u32 dummy_dma_tx_phy = 0;
 	u32 chan = 0;
@@ -1941,14 +2010,16 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 		/* DMA RX Channel Configuration */
 		for (chan = 0; chan < rx_channels_count; chan++) {
+			rx_q = &priv->rx_queue[chan];
+
 			priv->hw->dma->init_rx_chan(priv->ioaddr,
 						    priv->plat->dma_cfg,
-						    priv->dma_rx_phy, chan);
+						    rx_q->dma_rx_phy, chan);
 
-			priv->rx_tail_addr = priv->dma_rx_phy +
+			rx_q->rx_tail_addr = rx_q->dma_rx_phy +
 				    (DMA_RX_SIZE * sizeof(struct dma_desc));
 			priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
-						       priv->rx_tail_addr,
+						       rx_q->rx_tail_addr,
 						       chan);
 		}
@@ -1969,8 +2040,9 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
 					       chan);
 		}
 	} else {
+		rx_q = &priv->rx_queue[chan];
 		priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
-				    priv->dma_tx_phy, priv->dma_rx_phy, atds);
+				    priv->dma_tx_phy, rx_q->dma_rx_phy, atds);
 	}
 
 	if (priv->plat->axi && priv->hw->dma->axi)
@@ -2942,9 +3014,9 @@ static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
 	}
 }
 
-static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
+static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
 {
-	if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
+	if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
 		return 0;
 
 	return 1;
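This predicate drives the copybreak decision in stmmac_rx(): after an skb allocation failure, the refill path parks rx_zeroc_thresh at STMMAC_RX_THRESH, so the RX path copies frames out of the DMA buffers instead of handing them up zero-copy until the counter drains. A sketch of that decision; the threshold and copybreak values are assumptions for the demo:

    #include <stdbool.h>
    #include <stdio.h>

    #define STMMAC_RX_THRESH 64 /* assumed value for the demo */

    struct demo_rxq {
        unsigned int rx_zeroc_thresh;
    };

    /* Same shape as stmmac_rx_threshold_count(): non-zero while the
     * queue is still recovering from allocation failures.
     */
    static int threshold_count(struct demo_rxq *q)
    {
        return q->rx_zeroc_thresh >= STMMAC_RX_THRESH;
    }

    /* Copy small frames, or any frame while recovering; otherwise the
     * preallocated skb goes up zero-copy.
     */
    static bool use_copy_path(struct demo_rxq *q, int frame_len, int copybreak)
    {
        return frame_len < copybreak || threshold_count(q);
    }

    int main(void)
    {
        struct demo_rxq q = { .rx_zeroc_thresh = STMMAC_RX_THRESH };

        printf("%d\n", use_copy_path(&q, 1500, 256)); /* 1: recovering */
        q.rx_zeroc_thresh = 0;
        printf("%d\n", use_copy_path(&q, 1500, 256)); /* 0: zero-copy */
        return 0;
    }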
@@ -2953,30 +3025,33 @@ static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
 /**
  * stmmac_rx_refill - refill used skb preallocated buffers
  * @priv: driver private structure
+ * @queue: RX queue index
  * Description: this is to reallocate the skb for the reception process
  * that is based on zero-copy.
  */
-static inline void stmmac_rx_refill(struct stmmac_priv *priv)
+static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
 {
+	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	int dirty = stmmac_rx_dirty(priv, queue);
+	unsigned int entry = rx_q->dirty_rx;
+
 	int bfsize = priv->dma_buf_sz;
-	unsigned int entry = priv->dirty_rx;
-	int dirty = stmmac_rx_dirty(priv);
 
 	while (dirty-- > 0) {
 		struct dma_desc *p;
 
 		if (priv->extend_desc)
-			p = (struct dma_desc *)(priv->dma_erx + entry);
+			p = (struct dma_desc *)(rx_q->dma_erx + entry);
 		else
-			p = priv->dma_rx + entry;
+			p = rx_q->dma_rx + entry;
 
-		if (likely(priv->rx_skbuff[entry] == NULL)) {
+		if (likely(!rx_q->rx_skbuff[entry])) {
 			struct sk_buff *skb;
 
 			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
 			if (unlikely(!skb)) {
 				/* so for a while no zero-copy! */
-				priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
+				rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
 				if (unlikely(net_ratelimit()))
 					dev_err(priv->device,
 						"fail to alloc skb entry %d\n",
@@ -2984,28 +3059,28 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 				break;
 			}
 
-			priv->rx_skbuff[entry] = skb;
-			priv->rx_skbuff_dma[entry] =
+			rx_q->rx_skbuff[entry] = skb;
+			rx_q->rx_skbuff_dma[entry] =
 			    dma_map_single(priv->device, skb->data, bfsize,
 					   DMA_FROM_DEVICE);
 			if (dma_mapping_error(priv->device,
-					      priv->rx_skbuff_dma[entry])) {
+					      rx_q->rx_skbuff_dma[entry])) {
 				netdev_err(priv->dev, "Rx DMA map failed\n");
 				dev_kfree_skb(skb);
 				break;
 			}
 
 			if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
-				p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
+				p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
 				p->des1 = 0;
 			} else {
-				p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
+				p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
 			}
 			if (priv->hw->mode->refill_desc3)
-				priv->hw->mode->refill_desc3(priv, p);
+				priv->hw->mode->refill_desc3(rx_q, p);
 
-			if (priv->rx_zeroc_thresh > 0)
-				priv->rx_zeroc_thresh--;
+			if (rx_q->rx_zeroc_thresh > 0)
+				rx_q->rx_zeroc_thresh--;
 
 			netif_dbg(priv, rx_status, priv->dev,
 				  "refill entry #%d\n", entry);
@@ -3021,31 +3096,33 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
 		entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
 	}
-	priv->dirty_rx = entry;
+	rx_q->dirty_rx = entry;
 }
 
 /**
  * stmmac_rx - manage the receive process
  * @priv: driver private structure
- * @limit: napi bugget.
+ * @limit: napi budget
+ * @queue: RX queue index.
  * Description: this is the function called by the napi poll method.
  * It gets all the frames inside the ring.
  */
-static int stmmac_rx(struct stmmac_priv *priv, int limit)
+static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
 {
-	unsigned int entry = priv->cur_rx;
+	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+	unsigned int entry = rx_q->cur_rx;
+	int coe = priv->hw->rx_csum;
 	unsigned int next_entry;
 	unsigned int count = 0;
-	int coe = priv->hw->rx_csum;
 
 	if (netif_msg_rx_status(priv)) {
 		void *rx_head;
 
 		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
 		if (priv->extend_desc)
-			rx_head = (void *)priv->dma_erx;
+			rx_head = (void *)rx_q->dma_erx;
 		else
-			rx_head = (void *)priv->dma_rx;
+			rx_head = (void *)rx_q->dma_rx;
 
 		priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
 	}
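The refill and receive loops both advance their ring indices through STMMAC_GET_ENTRY(). Assuming the mainline definition, that is a mask rather than a modulo, which only works because DMA_RX_SIZE is a power of two; a small sketch:

    #include <stdio.h>

    #define DMA_RX_SIZE 512 /* power of two, as in the mainline driver */

    /* Approximation of STMMAC_GET_ENTRY(): advance an index and wrap
     * with a mask instead of a modulo.
     */
    #define GET_ENTRY(x, size) (((x) + 1) & ((size) - 1))

    int main(void)
    {
        unsigned int entry = DMA_RX_SIZE - 1;

        entry = GET_ENTRY(entry, DMA_RX_SIZE);
        printf("%u\n", entry); /* 0: wrapped from the last entry */
        return 0;
    }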
@@ -3055,9 +3132,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 		struct dma_desc *np;
 
 		if (priv->extend_desc)
-			p = (struct dma_desc *)(priv->dma_erx + entry);
+			p = (struct dma_desc *)(rx_q->dma_erx + entry);
 		else
-			p = priv->dma_rx + entry;
+			p = rx_q->dma_rx + entry;
 
 		/* read the status of the incoming frame */
 		status = priv->hw->desc->rx_status(&priv->dev->stats,
count + + ;
count + + ;
priv - > cur_rx = STMMAC_GET_ENTRY ( priv - > cur_rx , DMA_RX_SIZE ) ;
rx_q - > cur_rx = STMMAC_GET_ENTRY ( rx_q - > cur_rx , DMA_RX_SIZE ) ;
next_entry = priv - > cur_rx ;
next_entry = rx_q - > cur_rx ;
if ( priv - > extend_desc )
if ( priv - > extend_desc )
np = ( struct dma_desc * ) ( priv - > dma_erx + next_entry ) ;
np = ( struct dma_desc * ) ( rx_q - > dma_erx + next_entry ) ;
else
else
np = priv - > dma_rx + next_entry ;
np = rx_q - > dma_rx + next_entry ;
prefetch ( np ) ;
prefetch ( np ) ;
if ( ( priv - > extend_desc ) & & ( priv - > hw - > desc - > rx_extended_status ) )
if ( ( priv - > extend_desc ) & & ( priv - > hw - > desc - > rx_extended_status ) )
priv - > hw - > desc - > rx_extended_status ( & priv - > dev - > stats ,
priv - > hw - > desc - > rx_extended_status ( & priv - > dev - > stats ,
& priv - > xstats ,
& priv - > xstats ,
priv - > dma_erx +
rx_q - > dma_erx +
entry ) ;
entry ) ;
if ( unlikely ( status = = discard_frame ) ) {
if ( unlikely ( status = = discard_frame ) ) {
priv - > dev - > stats . rx_errors + + ;
priv - > dev - > stats . rx_errors + + ;
@ -3091,9 +3168,9 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
* them in stmmac_rx_refill ( ) function so that
* them in stmmac_rx_refill ( ) function so that
* device can reuse it .
* device can reuse it .
*/
*/
priv - > rx_skbuff [ entry ] = NULL ;
rx_q - > rx_skbuff [ entry ] = NULL ;
dma_unmap_single ( priv - > device ,
dma_unmap_single ( priv - > device ,
priv - > rx_skbuff_dma [ entry ] ,
rx_q - > rx_skbuff_dma [ entry ] ,
priv - > dma_buf_sz ,
priv - > dma_buf_sz ,
DMA_FROM_DEVICE ) ;
DMA_FROM_DEVICE ) ;
}
}
@@ -3141,7 +3218,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 			 */
 			if (unlikely(!priv->plat->has_gmac4 &&
 				     ((frame_len < priv->rx_copybreak) ||
-				     stmmac_rx_threshold_count(priv)))) {
+				     stmmac_rx_threshold_count(rx_q)))) {
 				skb = netdev_alloc_skb_ip_align(priv->dev,
 								frame_len);
 				if (unlikely(!skb)) {
@@ -3153,21 +3230,21 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 				}
 
 				dma_sync_single_for_cpu(priv->device,
-							priv->rx_skbuff_dma
+							rx_q->rx_skbuff_dma
 							[entry], frame_len,
 							DMA_FROM_DEVICE);
 				skb_copy_to_linear_data(skb,
-							priv->
+							rx_q->
 							rx_skbuff[entry]->data,
 							frame_len);
 
 				skb_put(skb, frame_len);
 				dma_sync_single_for_device(priv->device,
-							   priv->rx_skbuff_dma
+							   rx_q->rx_skbuff_dma
 							   [entry], frame_len,
 							   DMA_FROM_DEVICE);
 			} else {
-				skb = priv->rx_skbuff[entry];
+				skb = rx_q->rx_skbuff[entry];
 				if (unlikely(!skb)) {
 					netdev_err(priv->dev,
 						   "%s: Inconsistent Rx chain\n",
break ;
break ;
}
}
prefetch ( skb - > data - NET_IP_ALIGN ) ;
prefetch ( skb - > data - NET_IP_ALIGN ) ;
priv - > rx_skbuff [ entry ] = NULL ;
rx_q - > rx_skbuff [ entry ] = NULL ;
priv - > rx_zeroc_thresh + + ;
rx_q - > rx_zeroc_thresh + + ;
skb_put ( skb , frame_len ) ;
skb_put ( skb , frame_len ) ;
dma_unmap_single ( priv - > device ,
dma_unmap_single ( priv - > device ,
priv - > rx_skbuff_dma [ entry ] ,
rx_q - > rx_skbuff_dma [ entry ] ,
priv - > dma_buf_sz ,
priv - > dma_buf_sz ,
DMA_FROM_DEVICE ) ;
DMA_FROM_DEVICE ) ;
}
}
@@ -3211,7 +3288,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 		entry = next_entry;
 	}
 
-	stmmac_rx_refill(priv);
+	stmmac_rx_refill(priv, queue);
 
 	priv->xstats.rx_pkt_n += count;
@@ -3229,13 +3306,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
 static int stmmac_poll(struct napi_struct *napi, int budget)
 {
 	struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
-	int work_done = 0;
 	u32 chan = STMMAC_CHAN0;
+	int work_done = 0;
+	u32 queue = chan;
 
 	priv->xstats.napi_poll++;
 	stmmac_tx_clean(priv);
 
-	work_done = stmmac_rx(priv, budget);
+	work_done = stmmac_rx(priv, budget, queue);
 	if (work_done < budget) {
 		napi_complete_done(napi, work_done);
 		stmmac_enable_dma_irq(priv, chan);
@@ -3396,6 +3474,9 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 		if (priv->synopsys_id >= DWMAC_CORE_4_00) {
 			for (queue = 0; queue < queues_count; queue++) {
+				struct stmmac_rx_queue *rx_q =
+				&priv->rx_queue[queue];
+
 				status |=
 				priv->hw->mac->host_mtl_irq_status(priv->hw,
 								   queue);
@@ -3403,7 +3484,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 				if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
 				    priv->hw->dma->set_rx_tail_ptr)
 					priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
-								priv->rx_tail_addr,
+								rx_q->rx_tail_addr,
 								queue);
 			}
 		}
@@ -3503,15 +3584,29 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
 {
 	struct net_device *dev = seq->private;
 	struct stmmac_priv *priv = netdev_priv(dev);
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		seq_printf(seq, "RX Queue %d:\n", queue);
+
+		if (priv->extend_desc) {
+			seq_printf(seq, "Extended descriptor ring:\n");
+			sysfs_display_ring((void *)rx_q->dma_erx,
+					   DMA_RX_SIZE, 1, seq);
+		} else {
+			seq_printf(seq, "Descriptor ring:\n");
+			sysfs_display_ring((void *)rx_q->dma_rx,
+					   DMA_RX_SIZE, 0, seq);
+		}
+	}
 
 	if (priv->extend_desc) {
-		seq_printf(seq, "Extended RX descriptor ring:\n");
-		sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
 		seq_printf(seq, "Extended TX descriptor ring:\n");
 		sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
 	} else {
-		seq_printf(seq, "RX descriptor ring:\n");
-		sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
 		seq_printf(seq, "TX descriptor ring:\n");
 		sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
 	}
@@ -4025,6 +4120,26 @@ int stmmac_suspend(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(stmmac_suspend);
 
+/**
+ * stmmac_reset_queues_param - reset queue parameters
+ * @priv: driver private structure
+ */
+static void stmmac_reset_queues_param(struct stmmac_priv *priv)
+{
+	u32 rx_cnt = priv->plat->rx_queues_to_use;
+	u32 queue;
+
+	for (queue = 0; queue < rx_cnt; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		rx_q->cur_rx = 0;
+		rx_q->dirty_rx = 0;
+	}
+
+	priv->dirty_tx = 0;
+	priv->cur_tx = 0;
+}
+
 /**
  * stmmac_resume - resume callback
  * @dev: device pointer
@@ -4065,10 +4180,8 @@ int stmmac_resume(struct device *dev)
 	spin_lock_irqsave(&priv->lock, flags);
 
-	priv->cur_rx = 0;
-	priv->dirty_rx = 0;
-	priv->dirty_tx = 0;
-	priv->cur_tx = 0;
+	stmmac_reset_queues_param(priv);
+
 	/* reset private mss value to force mss context settings at
 	 * next tso xmit (only used for gmac4).
 	 */