commit fb2c6e9d4d

Removed because they are upstream:
  generic/backport-5.15/704-15-v5.19-net-mtk_eth_soc-move-MAC_MCR-setting-to-mac_finish.patch
    https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-5.15.y&id=c5c0760adc260d55265c086b9efb350ea6dda38b
  generic/pending-5.15/735-net-mediatek-mtk_eth_soc-release-MAC_MCR_FORCE_LINK-.patch
    https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-5.15.y&id=448cc8b5f743985f6d1d98aa4efb386fef4c3bf2
  generic/pending-5.15/736-net-ethernet-mtk_eth_soc-fix-PPE-hanging-issue.patch
    https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git/commit/?h=linux-5.15.y&id=9fcadd125044007351905d40c405fadc2d3bb6d6

Add new configuration symbols for tegra target.

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
From: Felix Fietkau <nbd@nbd.name>
Date: Thu, 27 Oct 2022 19:53:57 +0200
Subject: [PATCH] net: ethernet: mtk_eth_soc: increase tx ring side for
 QDMA devices

In order to use the hardware traffic shaper feature, a larger tx ring is
needed, especially for the scratch ring, which the hardware shaper uses to
reorder packets.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---

--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -900,7 +900,7 @@ static int mtk_init_fq_dma(struct mtk_et
 {
 	const struct mtk_soc_data *soc = eth->soc;
 	dma_addr_t phy_ring_tail;
-	int cnt = MTK_DMA_SIZE;
+	int cnt = MTK_QDMA_RING_SIZE;
 	dma_addr_t dma_addr;
 	int i;
 
@@ -2154,19 +2154,25 @@ static int mtk_tx_alloc(struct mtk_eth *
 	struct mtk_tx_ring *ring = &eth->tx_ring;
 	int i, sz = soc->txrx.txd_size;
 	struct mtk_tx_dma_v2 *txd;
+	int ring_size;
 
-	ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf),
+	if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+		ring_size = MTK_QDMA_RING_SIZE;
+	else
+		ring_size = MTK_DMA_SIZE;
+
+	ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
 			    GFP_KERNEL);
 	if (!ring->buf)
 		goto no_tx_mem;
 
-	ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+	ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
 				       &ring->phys, GFP_KERNEL);
 	if (!ring->dma)
 		goto no_tx_mem;
 
-	for (i = 0; i < MTK_DMA_SIZE; i++) {
-		int next = (i + 1) % MTK_DMA_SIZE;
+	for (i = 0; i < ring_size; i++) {
+		int next = (i + 1) % ring_size;
 		u32 next_ptr = ring->phys + next * sz;
 
 		txd = ring->dma + i * sz;
@@ -2186,22 +2192,22 @@ static int mtk_tx_alloc(struct mtk_eth *
 	 * descriptors in ring->dma_pdma.
 	 */
 	if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
-		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz,
+		ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
 						    &ring->phys_pdma, GFP_KERNEL);
 		if (!ring->dma_pdma)
 			goto no_tx_mem;
 
-		for (i = 0; i < MTK_DMA_SIZE; i++) {
+		for (i = 0; i < ring_size; i++) {
 			ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
 			ring->dma_pdma[i].txd4 = 0;
 		}
 	}
 
-	ring->dma_size = MTK_DMA_SIZE;
-	atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
+	ring->dma_size = ring_size;
+	atomic_set(&ring->free_count, ring_size - 2);
 	ring->next_free = ring->dma;
 	ring->last_free = (void *)txd;
-	ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz));
+	ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
 	ring->thresh = MAX_SKB_FRAGS;
 
 	/* make sure that all changes to the dma ring are flushed before we
@@ -2213,14 +2219,14 @@ static int mtk_tx_alloc(struct mtk_eth *
 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
 		mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
 		mtk_w32(eth,
-			ring->phys + ((MTK_DMA_SIZE - 1) * sz),
+			ring->phys + ((ring_size - 1) * sz),
 			soc->reg_map->qdma.crx_ptr);
 		mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
 		mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES,
 			soc->reg_map->qdma.qtx_cfg);
 	} else {
 		mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
-		mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0);
+		mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
 		mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
 		mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
 	}
@@ -2238,7 +2244,7 @@ static void mtk_tx_clean(struct mtk_eth
 	int i;
 
 	if (ring->buf) {
-		for (i = 0; i < MTK_DMA_SIZE; i++)
+		for (i = 0; i < ring->dma_size; i++)
 			mtk_tx_unmap(eth, &ring->buf[i], false);
 		kfree(ring->buf);
 		ring->buf = NULL;
@@ -2246,14 +2252,14 @@ static void mtk_tx_clean(struct mtk_eth
 
 	if (ring->dma) {
 		dma_free_coherent(eth->dma_dev,
-				  MTK_DMA_SIZE * soc->txrx.txd_size,
+				  ring->dma_size * soc->txrx.txd_size,
 				  ring->dma, ring->phys);
 		ring->dma = NULL;
 	}
 
 	if (ring->dma_pdma) {
 		dma_free_coherent(eth->dma_dev,
-				  MTK_DMA_SIZE * soc->txrx.txd_size,
+				  ring->dma_size * soc->txrx.txd_size,
 				  ring->dma_pdma, ring->phys_pdma);
 		ring->dma_pdma = NULL;
 	}
@@ -2776,7 +2782,7 @@ static void mtk_dma_free(struct mtk_eth
 			netdev_reset_queue(eth->netdev[i]);
 	if (eth->scratch_ring) {
 		dma_free_coherent(eth->dma_dev,
-				  MTK_DMA_SIZE * soc->txrx.txd_size,
+				  MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
 				  eth->scratch_ring, eth->phy_scratch_ring);
 		eth->scratch_ring = NULL;
 		eth->phy_scratch_ring = 0;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -27,6 +27,7 @@
 #define MTK_MAX_RX_LENGTH_2K	2048
 #define MTK_TX_DMA_BUF_LEN	0x3fff
 #define MTK_TX_DMA_BUF_LEN_V2	0xffff
+#define MTK_QDMA_RING_SIZE	2048
 #define MTK_DMA_SIZE		512
 #define MTK_MAC_COUNT		2
 #define MTK_RX_ETH_HLEN		(VLAN_ETH_HLEN + ETH_FCS_LEN)
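
For readers skimming the diff, a minimal standalone sketch of the sizing rule this patch introduces. It is not part of the patch: the macro values mirror mtk_eth_soc.h after the change, while the helper mtk_tx_ring_size() is hypothetical and only stands in for the inline MTK_HAS_CAPS() check in mtk_tx_alloc().

/*
 * Illustrative sketch only -- not part of the patch.
 * QDMA-capable SoCs get a 2048-entry tx ring so the hardware shaper's
 * scratch ring can reorder packets; PDMA-only SoCs keep the old 512.
 */
#include <stdbool.h>

#define MTK_QDMA_RING_SIZE	2048	/* new, larger ring for QDMA */
#define MTK_DMA_SIZE		512	/* legacy ring size */

static int mtk_tx_ring_size(bool has_qdma)
{
	/* mirrors: MTK_HAS_CAPS(soc->caps, MTK_QDMA) ? large : legacy */
	return has_qdma ? MTK_QDMA_RING_SIZE : MTK_DMA_SIZE;
}

Note that the scratch ring handled in mtk_init_fq_dma() and mtk_dma_free() is sized with MTK_QDMA_RING_SIZE unconditionally, as it is only used with QDMA hardware, while mtk_tx_clean() now frees ring->dma_size entries so the same cleanup path covers both ring sizes.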