WIP: EN7523 Ethernet support #4

Draft
Sirherobrine23 wants to merge 1 commit from airoha_en7523_eth into airoha_en7523
3 changed files with 155 additions and 58 deletions

View File

@@ -267,6 +267,12 @@ static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
/* modify hthd */
if (device_is_compatible(eth->dev, "airoha,en7523-eth")) {
airoha_fe_wr(eth, REG_FE_PSE_BUF_SET, 0x000002B4);
airoha_fe_wr(eth, REG_PSE_SHARE_USED_THD, 0x01E001F4);
goto done;
}
tmp = airoha_fe_rr(eth, PSE_FQ_CFG);
fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp);
tmp = fq_limit - all_rsv - 0x20;
@@ -283,6 +289,7 @@ static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth,
PSE_SHARE_USED_LTHD_MASK,
FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp));
done:
return 0;
}
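
Note: the EN7523 branch above bypasses the PSE_FQ_CFG-derived threshold computation and writes fixed magic values. As a sanity check on 0x01E001F4, here is a minimal userspace sketch with a local FIELD_GET stand-in; the 16-bit low/high field split is an assumption for illustration, not taken from the driver headers:

#include <stdio.h>
#include <stdint.h>

/* Userspace stand-in for the kernel's FIELD_GET(): dividing by the mask's
 * lowest set bit shifts the field down to bit 0. */
#define FIELD_GET(mask, reg) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))

int main(void)
{
	const uint32_t thd = 0x01E001F4;        /* REG_PSE_SHARE_USED_THD value */
	const uint32_t lthd_mask = 0x0000FFFFu; /* assumed low-threshold field */
	const uint32_t hthd_mask = 0xFFFF0000u; /* assumed high-threshold field */

	/* prints LTHD = 0x01f4, HTHD = 0x01e0 */
	printf("LTHD = 0x%04x\n", (unsigned int)FIELD_GET(lthd_mask, thd));
	printf("HTHD = 0x%04x\n", (unsigned int)FIELD_GET(hthd_mask, thd));
	return 0;
}
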
@@ -310,7 +317,10 @@ static void airoha_fe_pse_ports_init(struct airoha_eth *eth)
all_rsv += PSE_RSV_PAGES *
pse_port_num_queues[FE_PSE_PORT_PPE2];
}
airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
if (!device_is_compatible(eth->dev, "airoha,en7523-eth"))
airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
/* CMD1 */
for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
@@ -474,20 +484,31 @@ static int airoha_fe_init(struct airoha_eth *eth)
FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
airoha_fe_rmw(eth, REG_CDM_FWD_CFG(2), CDM_VIP_QSEL_MASK,
FIELD_PREP(CDM_VIP_QSEL_MASK, 0x4));
/* set GDM4 source interface offset to 8 */
airoha_fe_rmw(eth, REG_GDM_SRC_PORT_SET(4),
GDM_SPORT_OFF2_MASK |
GDM_SPORT_OFF1_MASK |
GDM_SPORT_OFF0_MASK,
FIELD_PREP(GDM_SPORT_OFF2_MASK, 8) |
FIELD_PREP(GDM_SPORT_OFF1_MASK, 8) |
FIELD_PREP(GDM_SPORT_OFF0_MASK, 8));
if (!device_is_compatible(eth->dev, "airoha,en7523-eth"))
airoha_fe_rmw(eth, REG_GDM_SRC_PORT_SET(4),
GDM_SPORT_OFF2_MASK |
GDM_SPORT_OFF1_MASK |
GDM_SPORT_OFF0_MASK,
FIELD_PREP(GDM_SPORT_OFF2_MASK, 8) |
FIELD_PREP(GDM_SPORT_OFF1_MASK, 8) |
FIELD_PREP(GDM_SPORT_OFF0_MASK, 8));
/* set PSE Page as 128B */
airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG,
FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK,
FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) |
FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK,
device_is_compatible(eth->dev, "airoha,en7523-eth") ? 3 : 2) |
FE_DMA_GLO_PG_SZ_MASK);
if (device_is_compatible(eth->dev, "airoha,en7523-eth")) {
/* map the GDMP SRAM into the FE */
airoha_wr(eth->gdmp_regs, 0x74, 3);
/* set PSE buffer to 0x500 pages: 0x400 (PSE itself) + 0x100 (GDMP buffer) */
airoha_fe_wr(eth, PSE_FQ_CFG, 0x500);
}
airoha_fe_wr(eth, REG_FE_RST_GLO_CFG,
FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK |
FE_RST_GDM4_MBI_ARB_MASK);
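
With the 128-byte PSE page size configured above, and assuming PSE_FQ_CFG counts pages, the 0x500-page limit works out to 0x500 * 128 B = 160 KiB of buffering: 0x400 pages (128 KiB) from the PSE itself plus 0x100 pages (32 KiB) contributed by the GDMP SRAM mapped just before.
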
@@ -512,16 +533,21 @@ static int airoha_fe_init(struct airoha_eth *eth)
/* init fragment and assemble Force Port */
/* NPU Core-3, NPU Bridge Channel-3 */
if (!device_is_compatible(eth->dev, "airoha,en7523-eth"))
airoha_fe_rmw(eth, REG_IP_FRAG_FP,
IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK,
FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) |
FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3));
/* QDMA LAN, RX Ring-22 */
if (!device_is_compatible(eth->dev, "airoha,en7523-eth"))
airoha_fe_rmw(eth, REG_IP_FRAG_FP,
IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK,
FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) |
FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22));
/* set rx queue for lan->wifi traffic to Q1 */
/* CDMA1_CRSN_QSEL */
airoha_fe_set(eth, REG_GDM_FWD_CFG(3),
GDM_PAD_EN_MASK | GDM_STRIP_CRC_MASK);
airoha_fe_set(eth, REG_GDM_FWD_CFG(4),
@@ -530,7 +556,15 @@ static int airoha_fe_init(struct airoha_eth *eth)
airoha_fe_crsn_qsel_init(eth);
airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK);
airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
if (!device_is_compatible(eth->dev, "airoha,en7523-eth"))
airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK);
else
airoha_fe_set(eth, REG_FE_CPORT_CFG,
	      FE_CPORT_PORT_XFC_MASK | FE_CPORT_DIS_FE2GSW_CRC);
if (device_is_compatible(eth->dev, "airoha,en7523-eth"))
airoha_fe_rmw(eth, REG_FE_CPORT_CFG, FE_CPORT_FE2SW_IPG,
FIELD_PREP(FE_CPORT_FE2SW_IPG, 2));
/* default aging mode for mbi unlock issue */
airoha_fe_rmw(eth, REG_GDM_CHN_RLS(2),
@@ -541,6 +575,10 @@ static int airoha_fe_init(struct airoha_eth *eth)
/* disable IFC by default */
airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK);
/* enable sp_tag generation */
if (device_is_compatible(eth->dev, "airoha,en7523-eth"))
airoha_fe_set(eth, GDM1_BASE_STAG_EN,
	      CPORT_TX_STAG_EN | CPORT_RX_STAG_EN | GDM1_RX_LAN_SPORT);
/* enable 1:N vlan action, init vlan table */
airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK);
@@ -597,6 +635,7 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
dev_dbg(eth->dev, "airoha_qdma_get_gdm_port() sport = %d\n", sport);
switch (sport) {
case 0x18:
port = 3; /* GDM4 */
@@ -604,7 +643,7 @@ static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
case 0x16:
port = 2; /* GDM3 */
break;
case 0x10 ... 0x14:
case 0x8 ... 0x14:
port = 0; /* GDM1 */
break;
case 0x2 ... 0x4:
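
The widened range means source ports 0x8-0xf now resolve to GDM1 as well. A minimal userspace sketch of the resulting mapping, mirroring only the cases visible in the hunk above:

#include <stdio.h>

/* Mirror of the updated airoha_qdma_get_gdm_port() source-port mapping. */
static int sport_to_gdm(unsigned int sport)
{
	if (sport == 0x18)
		return 3;	/* GDM4 */
	if (sport == 0x16)
		return 2;	/* GDM3 */
	if (sport >= 0x8 && sport <= 0x14)
		return 0;	/* GDM1, widened from 0x10 ... 0x14 */
	/* the 0x2 ... 0x4 handling is cut off in the quoted hunk */
	return -1;
}

int main(void)
{
	/* sport 0x8 resolves to GDM1 only with the widened range */
	printf("sport 0x8 -> GDM port %d\n", sport_to_gdm(0x8));
	return 0;
}
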
@@ -650,12 +689,15 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
goto free_frag;
p = airoha_qdma_get_gdm_port(eth, desc);
if (p < 0 || !eth->ports[p])
if (p < 0 || !eth->ports[p]) {
dev_err(eth->dev, "airoha_qdma_rx_process() free frag\n");
goto free_frag;
}
port = eth->ports[p];
if (!q->skb) { /* first buffer */
q->skb = napi_build_skb(e->buf, q->buf_size);
dev_dbg(eth->dev, "airoha_qdma_rx_process() skb = %d\n", !q->skb);
if (!q->skb)
goto free_frag;
@@ -665,12 +707,15 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
q->skb->protocol = eth_type_trans(q->skb, port->dev);
q->skb->ip_summed = CHECKSUM_UNNECESSARY;
skb_record_rx_queue(q->skb, qid);
print_hex_dump_bytes("rxh: ", DUMP_PREFIX_ADDRESS, q->skb->head, 16);
print_hex_dump_bytes("rxd: ", DUMP_PREFIX_ADDRESS, q->skb->data, 128);
} else { /* scattered frame */
struct skb_shared_info *shinfo = skb_shinfo(q->skb);
int nr_frags = shinfo->nr_frags;
if (nr_frags >= ARRAY_SIZE(shinfo->frags))
goto free_frag;
print_hex_dump_bytes("rx_frag: ", DUMP_PREFIX_ADDRESS, q->skb->data, 128);
skb_add_rx_frag(q->skb, nr_frags, page,
e->buf - page_address(page), len,
@@ -688,6 +733,8 @@ static int airoha_qdma_rx_process(struct airoha_queue *q, int budget)
*/
u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
le32_to_cpu(desc->msg0));
dev_dbg(eth->dev, "sp_tag = %d\n", sptag);
print_hex_dump_bytes("dscp: ", DUMP_PREFIX_ADDRESS, desc, 32);
if (sptag < ARRAY_SIZE(port->dsa_meta) &&
port->dsa_meta[sptag])
@@ -858,6 +905,8 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
id = irq_q - &qdma->q_tx_irq[0];
eth = qdma->eth;
dev_dbg(eth->dev, "airoha_qdma_tx_napi_poll\n");
status = airoha_qdma_rr(qdma, REG_IRQ_STATUS(id));
head = FIELD_GET(IRQ_HEAD_IDX_MASK, status);
head = head % irq_q->size;
@@ -913,6 +962,8 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
WRITE_ONCE(desc->msg1, 0);
q->queued--;
dev_err(eth->dev, "q->tail = %d q->head = %d q->queued = %d\n", q->tail, q->head, q->queued);
/* completion ring can report out-of-order indexes if hw QoS
* is enabled and packets with different priority are queued
* to same DMA ring. Take into account possible out-of-order
@@ -984,8 +1035,8 @@ static int airoha_qdma_init_tx_queue(struct airoha_queue *q,
}
/* xmit ring drop default setting */
airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
// airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(qid),
// TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
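/* note: TX drop blocking is reinstated per ring (ring 0 only on EN7523)
 * in airoha_qdma_hw_init() below */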
airoha_qdma_wr(qdma, REG_TX_RING_BASE(qid), dma_addr);
airoha_qdma_rmw(qdma, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK,
@@ -1132,9 +1183,6 @@ static int airoha_qdma_init_hfwd_queues(struct airoha_qdma *qdma)
static void airoha_qdma_init_qos(struct airoha_qdma *qdma)
{
if (airoha_is_7523(qdma->eth))
return;
airoha_qdma_clear(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK);
airoha_qdma_set(qdma, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK);
@@ -1187,9 +1235,6 @@ static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
{
int i;
if (airoha_is_7523(qdma->eth))
return;
for (i = 0; i < AIROHA_NUM_QOS_CHANNELS; i++) {
/* Tx-cpu transferred count */
airoha_qdma_wr(qdma, REG_CNTR_VAL(i << 1), 0);
@@ -1210,7 +1255,6 @@ static void airoha_qdma_init_qos_stats(struct airoha_qdma *qdma)
static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
{
int i;
bool not_en7523 = !airoha_is_7523(qdma->eth);
for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
/* clear pending irqs */
@@ -1220,20 +1264,27 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
if (not_en7523) {
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
}
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
}
/* setup tx irqs */
airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
if (not_en7523)
airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
TX_COHERENT_HIGH_INT_MASK);
if (device_is_compatible(qdma->eth->dev, "airoha,en7523-eth")) {
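/* raw EN7523 interrupt-enable writes; per the register header below,
 * 0x30/0x34 are the EN7523 QDMA_CSR_INT_ENABLE5/6 offsets (the
 * 0x38-0x6C writes are left as magic values) */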
airoha_qdma_wr(qdma, 0x30, 0x7C000000);
airoha_qdma_wr(qdma, 0x34, 0x7C007C00);
airoha_qdma_wr(qdma, 0x38, 0x00200000);
airoha_qdma_wr(qdma, 0x3C, 0x00200020);
airoha_qdma_wr(qdma, 0x40, 0x00000030);
//airoha_qdma_wr(qdma, 0x40, 0x00000030);
airoha_qdma_wr(qdma, 0x6C, 0x00000000);
}
/* setup irq binding */
for (i = 0; i < AIROHA_NUM_TX_RING(qdma->eth->soc); i++) {
if (!qdma->q_tx[i].ndesc)
@@ -1245,6 +1296,15 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
else
airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
TX_RING_IRQ_BLOCKING_CFG_MASK);
if (device_is_compatible(qdma->eth->dev, "airoha,en7523-eth")) {
if (i == 0)
airoha_qdma_set(qdma, REG_TX_RING_BLOCKING(i),
TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
else
airoha_qdma_clear(qdma, REG_TX_RING_BLOCKING(i),
TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK);
}
}
airoha_qdma_wr(qdma, REG_QDMA_GLOBAL_CFG,
@@ -1281,19 +1341,14 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
struct airoha_qdma *qdma = irq_bank->qdma;
u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
int i, num_regs;
int i;
num_regs = airoha_is_7523(qdma->eth) ? 2 : ARRAY_SIZE(intr);
for (i = 0; i < num_regs; i++) {
for (i = 0; i < ARRAY_SIZE(intr); i++) {
intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
intr[i] &= irq_bank->irqmask[i];
airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
}
/* Zero out intr values for regs that weren't read */
for (; i < ARRAY_SIZE(intr); i++)
intr[i] = 0;
if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
return IRQ_NONE;
@@ -1675,12 +1730,14 @@ static int airoha_dev_open(struct net_device *dev)
return err;
/* It seems GDM3 and GDM4 need SPORT enabled to work correctly */
if (netdev_uses_dsa(dev) || port->id > 2)
airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
else
airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
if (!device_is_compatible(qdma->eth->dev, "airoha,en7523-eth")) {
if (netdev_uses_dsa(dev) || port->id > 2)
airoha_fe_set(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
else
airoha_fe_clear(qdma->eth, REG_GDM_INGRESS_CFG(port->id),
GDM_STAG_EN_MASK);
}
airoha_fe_rmw(qdma->eth, REG_GDM_LEN_CFG(port->id),
GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK,
@@ -1990,6 +2047,8 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
u16 index;
u8 fport;
dev_dbg(NULL, "airoha_dev_xmit\n");
qid = skb_get_queue_mapping(skb) % AIROHA_NUM_TX_RING(qdma->eth->soc);
tag = airoha_get_dsa_tag(skb, dev);
@@ -2018,6 +2077,7 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
}
fport = airoha_get_fe_port(port);
dev_dbg(NULL, "fport = %d, tag = %x, qid = %d\n", fport, tag, qid);
msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) |
FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f);
@@ -2038,6 +2098,9 @@ static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb,
}
len = skb_headlen(skb);
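/* assumption: the extra 4 bytes account for the special tag that the
 * sp_tag generation enabled in airoha_fe_init() adds on EN7523 */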
if (device_is_compatible(qdma->eth->dev, "airoha,en7523-eth"))
len += 4;
data = skb->data;
index = q->head;
@@ -2844,8 +2907,6 @@ static int airoha_dev_tc_setup(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
struct airoha_gdm_port *port = netdev_priv(dev);
if (airoha_is_7523(port->qdma->eth))
return -EOPNOTSUPP;
switch (type) {
case TC_SETUP_QDISC_ETS:
@@ -3159,6 +3220,13 @@ static int airoha_probe(struct platform_device *pdev)
return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs),
"failed to iomap fe regs\n");
if (device_is_compatible(&pdev->dev, "airoha,en7523-eth")) {
eth->gdmp_regs = devm_platform_ioremap_resource_byname(pdev, "gdmp");
if (IS_ERR(eth->gdmp_regs))
return dev_err_probe(eth->dev, PTR_ERR(eth->gdmp_regs),
"failed to iomap gdmp regs\n");
}
eth->rsts[0].id = "fe";
eth->rsts[1].id = "pdma";
eth->rsts[2].id = "qdma";

View File

@@ -18,7 +18,7 @@
#define AIROHA_MAX_NUM_GDM_PORTS 4
#define AIROHA_MAX_NUM_QDMA 2
#define AIROHA_MAX_NUM_IRQ_BANKS 4
#define AIROHA_MAX_NUM_IRQ_BANKS 2
#define AIROHA_MAX_DSA_PORTS 7
#define AIROHA_MAX_NUM_RSTS 3
#define AIROHA_MAX_MTU 9216
@@ -37,7 +37,8 @@
#define TX_DSCP_NUM 1024
#define RX_DSCP_NUM(_n) \
((_n) == 2 ? 128 : \
(_n) == 1 ? 1024 : \
(_n) == 1 ? 256 : \
(_n) == 5 ? 256 : \
(_n) == 11 ? 128 : \
(_n) == 15 ? 128 : \
(_n) == 0 ? 1024 : 16)
@@ -64,6 +65,8 @@ enum {
QDMA_INT_REG_IDX2,
QDMA_INT_REG_IDX3,
QDMA_INT_REG_IDX4,
QDMA_INT_REG_IDX5,
QDMA_INT_REG_IDX6,
QDMA_INT_REG_MAX
};
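
The two new indexes (QDMA_INT_REG_IDX5/IDX6) line up with the EN7523-only QDMA_CSR_INT_ENABLE5/6 offsets (0x30/0x34) added to the REG_INT_ENABLE macro in the register header below.
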
@@ -596,6 +599,7 @@ struct airoha_eth {
unsigned long state;
void __iomem *fe_regs;
void __iomem *gdmp_regs;
struct airoha_npu __rcu *npu;

View File

@@ -138,6 +138,11 @@
#define GDM_INGRESS_FC_EN_MASK BIT(1)
#define GDM_STAG_EN_MASK BIT(0)
#define GDM1_BASE_STAG_EN (GDM1_BASE + 0x10)
#define CPORT_TX_STAG_EN BIT(2)
#define CPORT_RX_STAG_EN BIT(1)
#define GDM1_RX_LAN_SPORT BIT(0)
#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14)
#define GDM_SHORT_LEN_MASK GENMASK(13, 0)
#define GDM_LONG_LEN_MASK GENMASK(29, 16)
@@ -160,9 +165,13 @@
#define REG_GDM_RXCHN_EN(_n) (GDM_BASE(_n) + 0x28)
#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40)
#define FE_CPORT_DIS_FE2GSW_CRC BIT(31)
#define FE_CPORT_DIS_GSW2FE_CRC BIT(30)
#define FE_CPORT_PAD BIT(26)
#define FE_CPORT_PORT_XFC_MASK BIT(25)
#define FE_CPORT_QUEUE_XFC_MASK BIT(24)
#define FE_CPORT_FE2SW_IPG GENMASK(15, 8)
#define FE_CPORT_SW2FE_IPG GENMASK(7, 0)
#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0)
#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1)
@@ -436,7 +445,9 @@
((_n) == 1) ? 0x0024 : 0x0020)
#define REG_INT_ENABLE(_b, _n) \
(((_n) == 4) ? 0x0750 + ((_b) << 5) : \
(((_n) == 6) ? 0x0034 + ((_b) << 5) : \
((_n) == 5) ? 0x0030 + ((_b) << 5) : \
((_n) == 4) ? 0x0750 + ((_b) << 5) : \
((_n) == 3) ? 0x0744 + ((_b) << 5) : \
((_n) == 2) ? 0x0740 + ((_b) << 5) : \
((_n) == 1) ? 0x002c + ((_b) << 3) : \
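
For reference, a minimal userspace sketch of the offset selection the updated macro performs; only the branches visible in the hunk are mirrored, and the _n == 0 fallback lies outside the quoted lines:

#include <stdio.h>

/* Mirror of the REG_INT_ENABLE(_b, _n) branches shown above. */
static int reg_int_enable(unsigned int b, unsigned int n)
{
	switch (n) {
	case 6: return 0x0034 + (b << 5); /* EN7523 QDMA_CSR_INT_ENABLE6 */
	case 5: return 0x0030 + (b << 5); /* EN7523 QDMA_CSR_INT_ENABLE5 */
	case 4: return 0x0750 + (b << 5);
	case 3: return 0x0744 + (b << 5);
	case 2: return 0x0740 + (b << 5);
	case 1: return 0x002c + (b << 3);
	default: return -1;               /* _n == 0 branch not quoted */
	}
}

int main(void)
{
	/* bank 0: INT_ENABLE5 at 0x30 and INT_ENABLE6 at 0x34, matching the
	 * raw airoha_qdma_wr(qdma, 0x30/0x34, ...) writes in
	 * airoha_qdma_hw_init() */
	printf("0x%02x 0x%02x\n", reg_int_enable(0, 5), reg_int_enable(0, 6));
	return 0;
}
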
@@ -475,6 +486,13 @@
#define IRQ0_FULL_INT_MASK BIT(1)
#define IRQ0_INT_MASK BIT(0)
/* EN7523 QDMA_CSR_INT_ENABLE5 (offset 0x30) */
#define EN7523_RX_COHERENT_LOW_INT_MASK \
(RX14_COHERENT_INT_MASK | \
RX13_COHERENT_INT_MASK | RX12_COHERENT_INT_MASK | \
RX11_COHERENT_INT_MASK | RX10_COHERENT_INT_MASK)
#define RX_COHERENT_LOW_INT_MASK \
(RX15_COHERENT_INT_MASK | RX14_COHERENT_INT_MASK | \
RX13_COHERENT_INT_MASK | RX12_COHERENT_INT_MASK | \
@@ -496,11 +514,11 @@
TX1_COHERENT_INT_MASK | TX0_COHERENT_INT_MASK)
#define TX_DONE_INT_MASK(_n) \
((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \
((_n) ? 0 \
: IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
#define INT_TX_MASK \
(IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \
(0 | \
IRQ0_INT_MASK | IRQ0_FULL_INT_MASK)
/* QDMA_CSR_INT_ENABLE2 */
@@ -537,6 +555,13 @@
#define RX1_DONE_INT_MASK BIT(1)
#define RX0_DONE_INT_MASK BIT(0)
/* EN7523 QDMA_CSR_INT_ENABLE6 (offset 0x34) */
#define EN7523_RX_NO_CPU_DSCP_LOW_INT_MASK \
(RX14_NO_CPU_DSCP_INT_MASK | \
RX13_NO_CPU_DSCP_INT_MASK | RX12_NO_CPU_DSCP_INT_MASK | \
RX11_NO_CPU_DSCP_INT_MASK | RX10_NO_CPU_DSCP_INT_MASK)
#define RX_NO_CPU_DSCP_LOW_INT_MASK \
(RX15_NO_CPU_DSCP_INT_MASK | RX14_NO_CPU_DSCP_INT_MASK | \
RX13_NO_CPU_DSCP_INT_MASK | RX12_NO_CPU_DSCP_INT_MASK | \
@@ -890,14 +915,14 @@
/* RX MSG0 */
#define QDMA_ETH_RXMSG_SPTAG GENMASK(21, 14)
/* RX MSG1 */
#define QDMA_ETH_RXMSG_DEI_MASK BIT(31)
#define QDMA_ETH_RXMSG_IP6_MASK BIT(30)
#define QDMA_ETH_RXMSG_IP4_MASK BIT(29)
#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28)
#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27)
#define QDMA_ETH_RXMSG_L4F_MASK BIT(26)
#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21)
#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16)
#define QDMA_ETH_RXMSG_DEI_MASK BIT(30)
#define QDMA_ETH_RXMSG_IP6_MASK BIT(29)
#define QDMA_ETH_RXMSG_IP4_MASK BIT(28)
#define QDMA_ETH_RXMSG_IP4F_MASK BIT(27)
#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(26)
#define QDMA_ETH_RXMSG_L4F_MASK BIT(25)
#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(24, 20)
#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(19, 16)
#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0)
struct airoha_qdma_desc {
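
Since the RX MSG1 fields above PPE_ENTRY all shift down one bit on EN7523 (and CRSN shrinks from 5 to 4 bits), the same msg1 word decodes differently under the two layouts. A quick userspace check, with both SPORT masks copied from the old and new definitions above:

#include <stdio.h>
#include <stdint.h>

#define FIELD_GET(mask, reg) (((reg) & (mask)) / ((mask) & ~((mask) << 1)))

int main(void)
{
	/* example word with SPORT = 0x10 under the new EN7523 layout */
	const uint32_t msg1 = (uint32_t)0x10 << 20;

	const uint32_t old_sport_mask = 0x03E00000u; /* GENMASK(25, 21), EN7581 */
	const uint32_t new_sport_mask = 0x01F00000u; /* GENMASK(24, 20), EN7523 */

	/* the same word decodes as 0x08 vs 0x10 */
	printf("old layout: sport = 0x%02x\n",
	       (unsigned int)FIELD_GET(old_sport_mask, msg1));
	printf("new layout: sport = 0x%02x\n",
	       (unsigned int)FIELD_GET(new_sport_mask, msg1));
	return 0;
}
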