@@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
 bool stmmac_eee_init(struct stmmac_priv *priv)
 {
 	char *phy_bus_name = priv->plat->phy_bus_name;
+	unsigned long flags;
 	bool ret = false;
 
 	/* Using PCS we cannot dial with the phy registers at this stage
@@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 			 * changed).
			 * In that case the driver disable own timers.
			 */
+			spin_lock_irqsave(&priv->lock, flags);
 			if (priv->eee_active) {
 				pr_debug("stmmac: disable EEE\n");
 				del_timer_sync(&priv->eee_ctrl_timer);
@@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 							     tx_lpi_timer);
 			}
 			priv->eee_active = 0;
+			spin_unlock_irqrestore(&priv->lock, flags);
 			goto out;
 		}
 		/* Activate the EEE and start timers */
+		spin_lock_irqsave(&priv->lock, flags);
 		if (!priv->eee_active) {
 			priv->eee_active = 1;
 			init_timer(&priv->eee_ctrl_timer);
@@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 		/* Set HW EEE according to the speed */
 		priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
 
-		pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
-
 		ret = true;
+		spin_unlock_irqrestore(&priv->lock, flags);
+
+		pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
 	}
 out:
 	return ret;
@@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev)
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 
+	spin_unlock_irqrestore(&priv->lock, flags);
+
 	/* At this stage, it could be needed to setup the EEE or adjust some
 	 * MAC related HW registers.
 	 */
 	priv->eee_enabled = stmmac_eee_init(priv);
-
-	spin_unlock_irqrestore(&priv->lock, flags);
 }
 
 /**
@@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
 }
 
 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
-				  int i)
+				  int i, gfp_t flags)
 {
 	struct sk_buff *skb;
 
 	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
-				 GFP_KERNEL);
+				 flags);
 	if (!skb) {
 		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
 		return -ENOMEM;
@@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
  * and allocates the socket buffers. It suppors the chained and ring
  * modes.
  */
-static int init_dma_desc_rings(struct net_device *dev)
+static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
 {
 	int i;
 	struct stmmac_priv *priv = netdev_priv(dev);
@@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev)
 		else
 			p = priv->dma_rx + i;
 
-		ret = stmmac_init_rx_buffers(priv, p, i);
+		ret = stmmac_init_rx_buffers(priv, p, i, flags);
 		if (ret)
 			goto err_init_rx_buffers;
 
@@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev)
 	struct stmmac_priv *priv = netdev_priv(dev);
 	int ret;
 
-	ret = init_dma_desc_rings(dev);
-	if (ret < 0) {
-		pr_err("%s: DMA descriptors initialization failed\n", __func__);
-		return ret;
-	}
 	/* DMA initialization and SW reset */
 	ret = stmmac_init_dma_engine(priv);
 	if (ret < 0) {
@@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev)
 	}
 	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
-	priv->eee_enabled = stmmac_eee_init(priv);
-
-	stmmac_init_tx_coalesce(priv);
-
 	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
 		priv->rx_riwt = MAX_DMA_RIWT;
 		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
@@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev)
 		goto dma_desc_error;
 	}
 
+	ret = init_dma_desc_rings(dev, GFP_KERNEL);
+	if (ret < 0) {
+		pr_err("%s: DMA descriptors initialization failed\n", __func__);
+		goto init_error;
+	}
+
 	ret = stmmac_hw_setup(dev);
 	if (ret < 0) {
 		pr_err("%s: Hw setup failed\n", __func__);
 		goto init_error;
 	}
 
+	stmmac_init_tx_coalesce(priv);
+
 	if (priv->phydev)
 		phy_start(priv->phydev);
 
@@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	unsigned int nopaged_len = skb_headlen(skb);
 	unsigned int enh_desc = priv->plat->enh_desc;
 
+	spin_lock(&priv->tx_lock);
+
 	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
+		spin_unlock(&priv->tx_lock);
 		if (!netif_queue_stopped(dev)) {
 			netif_stop_queue(dev);
 			/* This is a hard error, log it. */
@@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock(&priv->tx_lock);
-
 	if (priv->tx_path_in_lpi_mode)
 		stmmac_disable_eee_mode(priv);
 
@@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 
 dma_map_err:
+	spin_unlock(&priv->tx_lock);
 	dev_err(priv->device, "Tx dma map failed\n");
 	dev_kfree_skb(skb);
 	priv->dev->stats.tx_dropped++;
@@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 
-	spin_lock(&priv->lock);
 	priv->hw->mac->set_filter(priv->hw, dev);
-	spin_unlock(&priv->lock);
 }
 
 /**
@@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev)
 		stmmac_set_mac(priv->ioaddr, false);
 		pinctrl_pm_select_sleep_state(priv->device);
 		/* Disable clock in case of PWM is off */
-		clk_disable_unprepare(priv->stmmac_clk);
+		clk_disable(priv->stmmac_clk);
 	}
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev)
 	} else {
 		pinctrl_pm_select_default_state(priv->device);
 		/* enable the clk prevously disabled */
-		clk_prepare_enable(priv->stmmac_clk);
+		clk_enable(priv->stmmac_clk);
 		/* reset the phy so that it's ready */
 		if (priv->mii)
 			stmmac_mdio_reset(priv->mii);
@@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev)
 
 	netif_device_attach(ndev);
 
+	init_dma_desc_rings(ndev, GFP_ATOMIC);
 	stmmac_hw_setup(ndev);
+	stmmac_init_tx_coalesce(priv);
 
 	napi_enable(&priv->napi);
 