openwrt_mitrastar/target/linux/qualcommbe/patches-6.12/0346-net-ethernet-qualcomm-Add-miscellaneous-error-interr.patch
commit f3fc278fcb ("qualcommbe: v6.12: add PPE driver (part 2)") by Alexandru Gagniuc
Add the second part of the PPE driver. This includes the EDMA and
network device support. This part does not appear to have been
officially submitted for upstream review. The series is taken from
target/linux/qualcommbe/patches-6.6, and had to be heavily modified
in order to compile on v6.12. Changes to patches are noted in the
respective patch body.

Also add the PPE and EDMA nodes in this series.

Signed-off-by: Alexandru Gagniuc <mr.nuke.me@gmail.com>
Link: https://github.com/openwrt/openwrt/pull/18796
Signed-off-by: Robert Marko <robimarko@gmail.com>
2025-05-31 12:25:48 +02:00

From 8a924457c0b71acee96c8f78ef386e2a354a2aca Mon Sep 17 00:00:00 2001
From: Suruchi Agarwal <quic_suruchia@quicinc.com>
Date: Thu, 21 Mar 2024 16:31:04 -0700
Subject: [PATCH] net: ethernet: qualcomm: Add miscellaneous error interrupts
 and counters

Miscellaneous error interrupts and EDMA Tx/Rx and error counters are
exposed through the debugfs framework.
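
For background, these counters follow the common kernel per-CPU u64_stats
pattern: the interrupt handler bumps a per-CPU counter inside a
u64_stats_update_begin()/u64_stats_update_end() pair, and the debugfs
reader folds a consistent snapshot from every possible CPU. Below is a
minimal, self-contained sketch of that pattern; the demo_* names are
illustrative only and are not part of this patch.

/* Hedged sketch of the per-CPU counter scheme used by struct
 * edma_err_stats; demo_* identifiers are hypothetical.
 */
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct demo_stats {
	u64 events;
	struct u64_stats_sync syncp;	/* makes 64-bit reads safe on 32-bit SMP */
};

static struct demo_stats __percpu *demo_stats;

static int demo_stats_alloc(void)
{
	int cpu;

	demo_stats = alloc_percpu(struct demo_stats);
	if (!demo_stats)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(demo_stats, cpu)->syncp);

	return 0;
}

/* Writer side, e.g. an IRQ handler: touches only this CPU's copy. */
static void demo_stats_inc(void)
{
	struct demo_stats *stats = this_cpu_ptr(demo_stats);

	u64_stats_update_begin(&stats->syncp);
	stats->events++;
	u64_stats_update_end(&stats->syncp);
}

/* Reader side, e.g. a debugfs show routine: sums a tear-free snapshot. */
static u64 demo_stats_sum(void)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct demo_stats *stats = per_cpu_ptr(demo_stats, cpu);
		unsigned int start;
		u64 events;

		do {
			start = u64_stats_fetch_begin(&stats->syncp);
			events = stats->events;
		} while (u64_stats_fetch_retry(&stats->syncp, start));

		total += events;
	}

	return total;
}

With the pieces added by this patch, the aggregated counters should be
readable from userspace under /sys/kernel/debug/ppe/edma/stats/ (files
rx_ring_stats, tx_ring_stats and err_stats), assuming debugfs is mounted
at the usual location. A companion sketch of the debugfs wiring follows
the sign-off block below.
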
Change-Id: I7da8b978a7e93947b03a45269a81b401f35da31c
Co-developed-by: Pavithra R <quic_pavir@quicinc.com>
Signed-off-by: Pavithra R <quic_pavir@quicinc.com>
Signed-off-by: Suruchi Agarwal <quic_suruchia@quicinc.com>
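
The debugfs side uses the standard seq_file single_open() helpers. The
following is a hedged sketch of how such a read-only stats node is
typically wired up, reusing the hypothetical demo_stats_sum() helper from
the sketch above; it is not the patch's own code.

/* Hedged sketch of a read-only debugfs stats file; demo_* names are
 * hypothetical. DEFINE_SHOW_ATTRIBUTE(demo_stats) would generate an
 * equivalent open/fops pair with less boilerplate.
 */
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *demo_dir;

static int demo_stats_show(struct seq_file *m, void *p)
{
	/* demo_stats_sum() is the per-CPU aggregation sketched earlier */
	seq_printf(m, "events = %llu\n", demo_stats_sum());
	return 0;
}

static int demo_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_stats_show, inode->i_private);
}

static const struct file_operations demo_stats_fops = {
	.open = demo_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,	/* frees what single_open() allocated */
};

static void demo_debugfs_setup(void)
{
	/* debugfs_create_*() returns ERR_PTR() on failure, never NULL */
	demo_dir = debugfs_create_dir("demo", NULL);
	debugfs_create_file("stats", 0444, demo_dir, NULL, &demo_stats_fops);
}

Note that single_open() must be paired with single_release(), which frees
the one-off seq_operations that single_open() allocates; plain
seq_release() would leak it.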
---
drivers/net/ethernet/qualcomm/ppe/Makefile | 2 +-
drivers/net/ethernet/qualcomm/ppe/edma.c | 162 ++++++++
drivers/net/ethernet/qualcomm/ppe/edma.h | 30 ++
.../net/ethernet/qualcomm/ppe/edma_debugfs.c | 370 ++++++++++++++++++
.../net/ethernet/qualcomm/ppe/ppe_debugfs.c | 17 +
5 files changed, 580 insertions(+), 1 deletion(-)
create mode 100644 drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
--- a/drivers/net/ethernet/qualcomm/ppe/Makefile
+++ b/drivers/net/ethernet/qualcomm/ppe/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_QCOM_PPE) += qcom-ppe.o
qcom-ppe-objs := ppe.o ppe_config.o ppe_debugfs.o ppe_port.o

#EDMA
-qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_port.o edma_rx.o edma_tx.o
+qcom-ppe-objs += edma.o edma_cfg_rx.o edma_cfg_tx.o edma_debugfs.o edma_port.o edma_rx.o edma_tx.o
--- a/drivers/net/ethernet/qualcomm/ppe/edma.c
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.c
@@ -152,6 +152,42 @@ static int edma_clock_init(void)
}

/**
+ * edma_err_stats_alloc - Allocate stats memory
+ *
+ * Allocate memory for per-CPU error stats.
+ */
+int edma_err_stats_alloc(void)
+{
+ u32 i;
+
+ edma_ctx->err_stats = alloc_percpu(*edma_ctx->err_stats);
+ if (!edma_ctx->err_stats)
+ return -ENOMEM;
+
+ for_each_possible_cpu(i) {
+ struct edma_err_stats *stats;
+
+ stats = per_cpu_ptr(edma_ctx->err_stats, i);
+ u64_stats_init(&stats->syncp);
+ }
+
+ return 0;
+}
+
+/**
+ * edma_err_stats_free - Free stats memory
+ *
+ * Free memory of per-CPU error stats.
+ */
+void edma_err_stats_free(void)
+{
+ if (edma_ctx->err_stats) {
+ free_percpu(edma_ctx->err_stats);
+ edma_ctx->err_stats = NULL;
+ }
+}
+
+/**
* edma_configure_ucast_prio_map_tbl - Configure unicast priority map table.
*
* Map int_priority values to priority class and initialize
@@ -191,11 +227,113 @@ static int edma_configure_ucast_prio_map
return ret;
}
+static void edma_disable_misc_interrupt(void)
+{
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+ struct regmap *regmap = ppe_dev->regmap;
+ u32 reg;
+
+ reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_MASK_ADDR;
+ regmap_write(regmap, reg, EDMA_MASK_INT_CLEAR);
+}
+
+static void edma_enable_misc_interrupt(void)
+{
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+ struct regmap *regmap = ppe_dev->regmap;
+ u32 reg;
+
+ reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_MASK_ADDR;
+ regmap_write(regmap, reg, edma_ctx->intr_info.intr_mask_misc);
+}
+
+static irqreturn_t edma_misc_handle_irq(int irq,
+ __maybe_unused void *ctx)
+{
+ struct edma_err_stats *stats = this_cpu_ptr(edma_ctx->err_stats);
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
+ struct regmap *regmap = ppe_dev->regmap;
+ u32 misc_intr_status, data, reg;
+
+ /* Read Misc intr status */
+ reg = EDMA_BASE_OFFSET + EDMA_REG_MISC_INT_STAT_ADDR;
+ regmap_read(regmap, reg, &data);
+ misc_intr_status = data & edma_ctx->intr_info.intr_mask_misc;
+
+ pr_debug("Received misc irq %d, status: %d\n", irq, misc_intr_status);
+
+ if (FIELD_GET(EDMA_MISC_AXI_RD_ERR_MASK, misc_intr_status)) {
+ pr_err("MISC AXI read error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_axi_read_err;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_AXI_WR_ERR_MASK, misc_intr_status)) {
+ pr_err("MISC AXI write error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_axi_write_err;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_RX_DESC_FIFO_FULL_MASK, misc_intr_status)) {
+ if (net_ratelimit())
+ pr_err("MISC Rx descriptor fifo full error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_rxdesc_fifo_full;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_RX_ERR_BUF_SIZE_MASK, misc_intr_status)) {
+ if (net_ratelimit())
+ pr_err("MISC Rx buffer size error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_rx_buf_size_err;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_TX_SRAM_FULL_MASK, misc_intr_status)) {
+ if (net_ratelimit())
+ pr_err("MISC Tx SRAM full error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_tx_sram_full;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_TX_CMPL_BUF_FULL_MASK, misc_intr_status)) {
+ if (net_ratelimit())
+ pr_err("MISC Tx complete buffer full error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_txcmpl_buf_full;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_DATA_LEN_ERR_MASK, misc_intr_status)) {
+ if (net_ratelimit())
+ pr_err("MISC data length error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_tx_data_len_err;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ if (FIELD_GET(EDMA_MISC_TX_TIMEOUT_MASK, misc_intr_status)) {
+ if (net_ratelimit())
+ pr_err("MISC Tx timeout error received\n");
+ u64_stats_update_begin(&stats->syncp);
+ ++stats->edma_tx_timeout;
+ u64_stats_update_end(&stats->syncp);
+ }
+
+ return IRQ_HANDLED;
+}
+
static int edma_irq_register(void)
{
struct edma_hw_info *hw_info = edma_ctx->hw_info;
struct edma_ring_info *txcmpl = hw_info->txcmpl;
+ struct ppe_device *ppe_dev = edma_ctx->ppe_dev;
struct edma_ring_info *rx = hw_info->rx;
+ struct device *dev = ppe_dev->dev;
int ret;
u32 i;
@@ -270,8 +408,25 @@ static int edma_irq_register(void)
edma_rxdesc_irq_name[i]);
}
+ /* Request Misc IRQ */
+ ret = request_irq(edma_ctx->intr_info.intr_misc, edma_misc_handle_irq,
+ IRQF_SHARED, "edma_misc",
+ (void *)dev);
+ if (ret) {
+ pr_err("MISC IRQ:%d request failed\n",
+ edma_ctx->intr_info.intr_misc);
+ goto misc_intr_req_fail;
+ }
+
return 0;
+misc_intr_req_fail:
+ /* Free IRQ for RXDESC rings */
+ for (i = 0; i < rx->num_rings; i++) {
+ synchronize_irq(edma_ctx->intr_info.intr_rx[i]);
+ free_irq(edma_ctx->intr_info.intr_rx[i],
+ (void *)&edma_ctx->rx_rings[i]);
+ }
rx_desc_ring_intr_req_fail:
for (i = 0; i < rx->num_rings; i++)
kfree(edma_rxdesc_irq_name[i]);
@@ -503,6 +658,7 @@ static int edma_hw_configure(void)
edma_cfg_tx_disable_interrupts(i);
edma_cfg_rx_disable_interrupts();
+ edma_disable_misc_interrupt();
edma_cfg_rx_rings_disable();
@@ -614,6 +770,7 @@ void edma_destroy(struct ppe_device *ppe
edma_cfg_tx_disable_interrupts(i);
edma_cfg_rx_disable_interrupts();
+ edma_disable_misc_interrupt();
/* Free IRQ for TXCMPL rings. */
for (i = 0; i < txcmpl->num_rings; i++) {
@@ -634,6 +791,10 @@ void edma_destroy(struct ppe_device *ppe
}
kfree(edma_rxdesc_irq_name);
+ /* Free Misc IRQ */
+ synchronize_irq(edma_ctx->intr_info.intr_misc);
+ free_irq(edma_ctx->intr_info.intr_misc, (void *)(ppe_dev->dev));
+
kfree(edma_ctx->intr_info.intr_rx);
kfree(edma_ctx->intr_info.intr_txcmpl);
@@ -699,6 +860,7 @@ int edma_setup(struct ppe_device *ppe_de
}
edma_cfg_rx_enable_interrupts();
+ edma_enable_misc_interrupt();
dev_info(dev, "EDMA configuration successful\n");
--- a/drivers/net/ethernet/qualcomm/ppe/edma.h
+++ b/drivers/net/ethernet/qualcomm/ppe/edma.h
@@ -47,6 +47,30 @@ enum ppe_queue_class_type {
};
/**
+ * struct edma_err_stats - EDMA error stats
+ * @edma_axi_read_err: AXI read error
+ * @edma_axi_write_err: AXI write error
+ * @edma_rxdesc_fifo_full: Rx desc FIFO full error
+ * @edma_rx_buf_size_err: Rx buffer size too small error
+ * @edma_tx_sram_full: Tx packet SRAM buffer full error
+ * @edma_tx_data_len_err: Tx data length error
+ * @edma_tx_timeout: Tx timeout error
+ * @edma_txcmpl_buf_full: Tx completion buffer full error
+ * @syncp: Synchronization point for the 64-bit counters
+ */
+struct edma_err_stats {
+ u64 edma_axi_read_err;
+ u64 edma_axi_write_err;
+ u64 edma_rxdesc_fifo_full;
+ u64 edma_rx_buf_size_err;
+ u64 edma_tx_sram_full;
+ u64 edma_tx_data_len_err;
+ u64 edma_tx_timeout;
+ u64 edma_txcmpl_buf_full;
+ struct u64_stats_sync syncp;
+};
+
+/**
* struct edma_ring_info - EDMA ring data structure.
* @max_rings: Maximum number of rings
* @ring_start: Ring start ID
@@ -107,6 +131,7 @@ struct edma_intr_info {
* @rx_rings: Rx Desc Rings, SW is consumer
* @tx_rings: Tx Descriptor Ring, SW is producer
* @txcmpl_rings: Tx complete Ring, SW is consumer
+ * @err_stats: Per CPU error statistics
* @rx_page_mode: Page mode enabled or disabled
* @rx_buf_size: Rx buffer size for Jumbo MRU
* @tx_requeue_stop: Tx requeue stop enabled or disabled
@@ -121,6 +146,7 @@ struct edma_context {
struct edma_rxdesc_ring *rx_rings;
struct edma_txdesc_ring *tx_rings;
struct edma_txcmpl_ring *txcmpl_rings;
+ struct edma_err_stats __percpu *err_stats;
u32 rx_page_mode;
u32 rx_buf_size;
bool tx_requeue_stop;
@@ -129,8 +155,12 @@ struct edma_context {
/* Global EDMA context */
extern struct edma_context *edma_ctx;
+int edma_err_stats_alloc(void);
+void edma_err_stats_free(void);
void edma_destroy(struct ppe_device *ppe_dev);
int edma_setup(struct ppe_device *ppe_dev);
+void edma_debugfs_teardown(void);
+int edma_debugfs_setup(struct ppe_device *ppe_dev);
int ppe_edma_queue_offset_config(struct ppe_device *ppe_dev,
enum ppe_queue_class_type class,
int index, int queue_offset);
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/ppe/edma_debugfs.c
@@ -0,0 +1,370 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* EDMA debugfs routines for display of Tx/Rx counters. */
+
+#include <linux/cpumask.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/printk.h>
+
+#include "edma.h"
+
+#define EDMA_STATS_BANNER_MAX_LEN 80
+#define EDMA_RX_RING_STATS_NODE_NAME "EDMA_RX"
+#define EDMA_TX_RING_STATS_NODE_NAME "EDMA_TX"
+#define EDMA_ERR_STATS_NODE_NAME "EDMA_ERR"
+
+static struct dentry *edma_dentry;
+static struct dentry *stats_dentry;
+
+static void edma_debugfs_print_banner(struct seq_file *m, char *node)
+{
+ u32 banner_char_len, i;
+
+ for (i = 0; i < EDMA_STATS_BANNER_MAX_LEN; i++)
+ seq_puts(m, "_");
+ banner_char_len = (EDMA_STATS_BANNER_MAX_LEN - (strlen(node) + 2)) / 2;
+ seq_puts(m, "\n\n");
+
+ for (i = 0; i < banner_char_len; i++)
+ seq_puts(m, "<");
+ seq_printf(m, " %s ", node);
+
+ for (i = 0; i < banner_char_len; i++)
+ seq_puts(m, ">");
+ seq_puts(m, "\n");
+
+ for (i = 0; i < EDMA_STATS_BANNER_MAX_LEN; i++)
+ seq_puts(m, "_");
+ seq_puts(m, "\n\n");
+}
+
+static int edma_debugfs_rx_rings_stats_show(struct seq_file *m,
+ void __maybe_unused *p)
+{
+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
+ struct edma_ring_info *rxfill = hw_info->rxfill;
+ struct edma_rxfill_stats *rxfill_stats;
+ struct edma_rxdesc_stats *rxdesc_stats;
+ struct edma_ring_info *rx = hw_info->rx;
+ unsigned int start;
+ u32 i;
+
+ rxfill_stats = kcalloc(rxfill->num_rings, sizeof(*rxfill_stats), GFP_KERNEL);
+ if (!rxfill_stats)
+ return -ENOMEM;
+
+ rxdesc_stats = kcalloc(rx->num_rings, sizeof(*rxdesc_stats), GFP_KERNEL);
+ if (!rxdesc_stats) {
+ kfree(rxfill_stats);
+ return -ENOMEM;
+ }
+
+ /* Get stats for Rx fill rings. */
+ for (i = 0; i < rxfill->num_rings; i++) {
+ struct edma_rxfill_ring *rxfill_ring;
+ struct edma_rxfill_stats *stats;
+
+ rxfill_ring = &edma_ctx->rxfill_rings[i];
+ stats = &rxfill_ring->rxfill_stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ rxfill_stats[i].alloc_failed = stats->alloc_failed;
+ rxfill_stats[i].page_alloc_failed = stats->page_alloc_failed;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ }
+
+ /* Get stats for Rx Desc rings. */
+ for (i = 0; i < rx->num_rings; i++) {
+ struct edma_rxdesc_ring *rxdesc_ring;
+ struct edma_rxdesc_stats *stats;
+
+ rxdesc_ring = &edma_ctx->rx_rings[i];
+ stats = &rxdesc_ring->rxdesc_stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ rxdesc_stats[i].src_port_inval = stats->src_port_inval;
+ rxdesc_stats[i].src_port_inval_type = stats->src_port_inval_type;
+ rxdesc_stats[i].src_port_inval_netdev = stats->src_port_inval_netdev;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ }
+
+ edma_debugfs_print_banner(m, EDMA_RX_RING_STATS_NODE_NAME);
+
+ seq_puts(m, "\n#EDMA RX descriptor rings stats:\n\n");
+ for (i = 0; i < rx->num_rings; i++) {
+ seq_printf(m, "\t\tEDMA RX descriptor %d ring stats:\n", i + rx->ring_start);
+ seq_printf(m, "\t\t rxdesc[%d]:src_port_inval = %llu\n",
+ i + rx->ring_start, rxdesc_stats[i].src_port_inval);
+ seq_printf(m, "\t\t rxdesc[%d]:src_port_inval_type = %llu\n",
+ i + rx->ring_start, rxdesc_stats[i].src_port_inval_type);
+ seq_printf(m, "\t\t rxdesc[%d]:src_port_inval_netdev = %llu\n",
+ i + rx->ring_start,
+ rxdesc_stats[i].src_port_inval_netdev);
+ seq_puts(m, "\n");
+ }
+
+ seq_puts(m, "\n#EDMA RX fill rings stats:\n\n");
+ for (i = 0; i < rxfill->num_rings; i++) {
+ seq_printf(m, "\t\tEDMA RX fill %d ring stats:\n", i + rxfill->ring_start);
+ seq_printf(m, "\t\t rxfill[%d]:alloc_failed = %llu\n",
+ i + rxfill->ring_start, rxfill_stats[i].alloc_failed);
+ seq_printf(m, "\t\t rxfill[%d]:page_alloc_failed = %llu\n",
+ i + rxfill->ring_start, rxfill_stats[i].page_alloc_failed);
+ seq_puts(m, "\n");
+ }
+
+ kfree(rxfill_stats);
+ kfree(rxdesc_stats);
+ return 0;
+}
+
+static int edma_debugfs_tx_rings_stats_show(struct seq_file *m,
+ void __maybe_unused *p)
+{
+ struct edma_hw_info *hw_info = edma_ctx->hw_info;
+ struct edma_ring_info *txcmpl = hw_info->txcmpl;
+ struct edma_ring_info *tx = hw_info->tx;
+ struct edma_txcmpl_stats *txcmpl_stats;
+ struct edma_txdesc_stats *txdesc_stats;
+ unsigned int start;
+ u32 i;
+
+ txcmpl_stats = kcalloc(txcmpl->num_rings, sizeof(*txcmpl_stats), GFP_KERNEL);
+ if (!txcmpl_stats)
+ return -ENOMEM;
+
+ txdesc_stats = kcalloc(tx->num_rings, sizeof(*txdesc_stats), GFP_KERNEL);
+ if (!txdesc_stats) {
+ kfree(txcmpl_stats);
+ return -ENOMEM;
+ }
+
+ /* Get stats for Tx desc rings. */
+ for (i = 0; i < tx->num_rings; i++) {
+ struct edma_txdesc_ring *txdesc_ring;
+ struct edma_txdesc_stats *stats;
+
+ txdesc_ring = &edma_ctx->tx_rings[i];
+ stats = &txdesc_ring->txdesc_stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ txdesc_stats[i].no_desc_avail = stats->no_desc_avail;
+ txdesc_stats[i].tso_max_seg_exceed = stats->tso_max_seg_exceed;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ }
+
+ /* Get stats for Tx Complete rings. */
+ for (i = 0; i < txcmpl->num_rings; i++) {
+ struct edma_txcmpl_ring *txcmpl_ring;
+ struct edma_txcmpl_stats *stats;
+
+ txcmpl_ring = &edma_ctx->txcmpl_rings[i];
+ stats = &txcmpl_ring->txcmpl_stats;
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ txcmpl_stats[i].invalid_buffer = stats->invalid_buffer;
+ txcmpl_stats[i].errors = stats->errors;
+ txcmpl_stats[i].desc_with_more_bit = stats->desc_with_more_bit;
+ txcmpl_stats[i].no_pending_desc = stats->no_pending_desc;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+ }
+
+ edma_debugfs_print_banner(m, EDMA_TX_RING_STATS_NODE_NAME);
+
+ seq_puts(m, "\n#EDMA TX complete rings stats:\n\n");
+ for (i = 0; i < txcmpl->num_rings; i++) {
+ seq_printf(m, "\t\tEDMA TX complete %d ring stats:\n", i + txcmpl->ring_start);
+ seq_printf(m, "\t\t txcmpl[%d]:invalid_buffer = %llu\n",
+ i + txcmpl->ring_start, txcmpl_stats[i].invalid_buffer);
+ seq_printf(m, "\t\t txcmpl[%d]:errors = %llu\n",
+ i + txcmpl->ring_start, txcmpl_stats[i].errors);
+ seq_printf(m, "\t\t txcmpl[%d]:desc_with_more_bit = %llu\n",
+ i + txcmpl->ring_start, txcmpl_stats[i].desc_with_more_bit);
+ seq_printf(m, "\t\t txcmpl[%d]:no_pending_desc = %llu\n",
+ i + txcmpl->ring_start, txcmpl_stats[i].no_pending_desc);
+ seq_puts(m, "\n");
+ }
+
+ seq_puts(m, "\n#EDMA TX descriptor rings stats:\n\n");
+ for (i = 0; i < tx->num_rings; i++) {
+ seq_printf(m, "\t\tEDMA TX descriptor %d ring stats:\n", i + tx->ring_start);
+ seq_printf(m, "\t\t txdesc[%d]:no_desc_avail = %llu\n",
+ i + tx->ring_start, txdesc_stats[i].no_desc_avail);
+ seq_printf(m, "\t\t txdesc[%d]:tso_max_seg_exceed = %llu\n",
+ i + tx->ring_start, txdesc_stats[i].tso_max_seg_exceed);
+ seq_puts(m, "\n");
+ }
+
+ kfree(txcmpl_stats);
+ kfree(txdesc_stats);
+ return 0;
+}
+
+static int edma_debugfs_err_stats_show(struct seq_file *m,
+ void __maybe_unused *p)
+{
+ struct edma_err_stats *err_stats, *pcpu_err_stats;
+ unsigned int start;
+ u32 cpu;
+
+ err_stats = kzalloc(sizeof(*err_stats), GFP_KERNEL);
+ if (!err_stats)
+ return -ENOMEM;
+
+ /* Get percpu EDMA miscellaneous stats. */
+ for_each_possible_cpu(cpu) {
+ pcpu_err_stats = per_cpu_ptr(edma_ctx->err_stats, cpu);
+ do {
+ start = u64_stats_fetch_begin(&pcpu_err_stats->syncp);
+ err_stats->edma_axi_read_err +=
+ pcpu_err_stats->edma_axi_read_err;
+ err_stats->edma_axi_write_err +=
+ pcpu_err_stats->edma_axi_write_err;
+ err_stats->edma_rxdesc_fifo_full +=
+ pcpu_err_stats->edma_rxdesc_fifo_full;
+ err_stats->edma_rx_buf_size_err +=
+ pcpu_err_stats->edma_rx_buf_size_err;
+ err_stats->edma_tx_sram_full +=
+ pcpu_err_stats->edma_tx_sram_full;
+ err_stats->edma_tx_data_len_err +=
+ pcpu_err_stats->edma_tx_data_len_err;
+ err_stats->edma_tx_timeout +=
+ pcpu_err_stats->edma_tx_timeout;
+ err_stats->edma_txcmpl_buf_full +=
+ pcpu_err_stats->edma_txcmpl_buf_full;
+ } while (u64_stats_fetch_retry(&pcpu_err_stats->syncp, start));
+ }
+
+ edma_debugfs_print_banner(m, EDMA_ERR_STATS_NODE_NAME);
+
+ seq_puts(m, "\n#EDMA error stats:\n\n");
+ seq_printf(m, "\t\t axi read error = %llu\n",
+ err_stats->edma_axi_read_err);
+ seq_printf(m, "\t\t axi write error = %llu\n",
+ err_stats->edma_axi_write_err);
+ seq_printf(m, "\t\t Rx descriptor fifo full = %llu\n",
+ err_stats->edma_rxdesc_fifo_full);
+ seq_printf(m, "\t\t Rx buffer size error = %llu\n",
+ err_stats->edma_rx_buf_size_err);
+ seq_printf(m, "\t\t Tx SRAM full = %llu\n",
+ err_stats->edma_tx_sram_full);
+ seq_printf(m, "\t\t Tx data length error = %llu\n",
+ err_stats->edma_tx_data_len_err);
+ seq_printf(m, "\t\t Tx timeout = %llu\n",
+ err_stats->edma_tx_timeout);
+ seq_printf(m, "\t\t Tx completion buffer full = %llu\n",
+ err_stats->edma_txcmpl_buf_full);
+
+ kfree(err_stats);
+ return 0;
+}
+
+static int edma_debugfs_rx_rings_stats_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, edma_debugfs_rx_rings_stats_show,
+ inode->i_private);
+}
+
+static const struct file_operations edma_debugfs_rx_rings_file_ops = {
+ .open = edma_debugfs_rx_rings_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+static int edma_debugfs_tx_rings_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, edma_debugfs_tx_rings_stats_show, inode->i_private);
+}
+
+static const struct file_operations edma_debugfs_tx_rings_file_ops = {
+ .open = edma_debugfs_tx_rings_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+static int edma_debugfs_err_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, edma_debugfs_err_stats_show, inode->i_private);
+}
+
+static const struct file_operations edma_debugfs_misc_file_ops = {
+ .open = edma_debugfs_err_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+/**
+ * edma_debugfs_teardown - EDMA debugfs teardown.
+ *
+ * EDMA debugfs teardown and free stats memory.
+ */
+void edma_debugfs_teardown(void)
+{
+ /* Free EDMA miscellaneous stats memory */
+ edma_err_stats_free();
+
+ debugfs_remove_recursive(edma_dentry);
+ edma_dentry = NULL;
+ stats_dentry = NULL;
+}
+
+/**
+ * edma_debugfs_setup - EDMA debugfs setup.
+ * @ppe_dev: PPE Device
+ *
+ * EDMA debugfs setup.
+ */
+int edma_debugfs_setup(struct ppe_device *ppe_dev)
+{
+ edma_dentry = debugfs_create_dir("edma", ppe_dev->debugfs_root);
+ if (IS_ERR(edma_dentry)) {
+ pr_err("Unable to create edma directory in debugfs\n");
+ goto debugfs_dir_failed;
+ }
+
+ stats_dentry = debugfs_create_dir("stats", edma_dentry);
+ if (IS_ERR(stats_dentry)) {
+ pr_err("Unable to create stats directory in debugfs\n");
+ goto debugfs_dir_failed;
+ }
+
+ if (!debugfs_create_file("rx_ring_stats", 0444, stats_dentry,
+ NULL, &edma_debugfs_rx_rings_file_ops)) {
+ pr_err("Unable to create Rx rings statistics file entry in debugfs\n");
+ goto debugfs_dir_failed;
+ }
+
+ if (!debugfs_create_file("tx_ring_stats", 0444, stats_dentry,
+ NULL, &edma_debugfs_tx_rings_file_ops)) {
+ pr_err("Unable to create Tx rings statistics file entry in debugfs\n");
+ goto debugfs_dir_failed;
+ }
+
+ /* Allocate memory for EDMA miscellaneous stats */
+ if (edma_err_stats_alloc() < 0) {
+ pr_err("Unable to allocate miscellaneous percpu stats\n");
+ goto debugfs_dir_failed;
+ }
+
+ if (!debugfs_create_file("err_stats", 0444, stats_dentry,
+ NULL, &edma_debugfs_misc_file_ops)) {
+ pr_err("Unable to create EDMA miscellaneous statistics file entry in debugfs\n");
+ goto debugfs_dir_failed;
+ }
+
+ return 0;
+
+debugfs_dir_failed:
+ debugfs_remove_recursive(edma_dentry);
+ edma_dentry = NULL;
+ stats_dentry = NULL;
+ return -ENOMEM;
+}
--- a/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
+++ b/drivers/net/ethernet/qualcomm/ppe/ppe_debugfs.c
@@ -7,9 +7,11 @@
#include <linux/bitfield.h>
#include <linux/debugfs.h>
+#include <linux/netdevice.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
+#include "edma.h"
#include "ppe.h"
#include "ppe_config.h"
#include "ppe_debugfs.h"
@@ -678,15 +680,30 @@ static const struct file_operations ppe_
void ppe_debugfs_setup(struct ppe_device *ppe_dev)
{
+ int ret;
+
ppe_dev->debugfs_root = debugfs_create_dir("ppe", NULL);
debugfs_create_file("packet_counters", 0444,
ppe_dev->debugfs_root,
ppe_dev,
&ppe_debugfs_packet_counter_fops);
+
+ if (!ppe_dev->debugfs_root) {
+ dev_err(ppe_dev->dev, "Error in PPE debugfs setup\n");
+ return;
+ }
+
+ ret = edma_debugfs_setup(ppe_dev);
+ if (ret) {
+ dev_err(ppe_dev->dev, "Error in EDMA debugfs setup API. ret: %d\n", ret);
+ debugfs_remove_recursive(ppe_dev->debugfs_root);
+ ppe_dev->debugfs_root = NULL;
+ }
}
void ppe_debugfs_teardown(struct ppe_device *ppe_dev)
{
+ edma_debugfs_teardown();
debugfs_remove_recursive(ppe_dev->debugfs_root);
ppe_dev->debugfs_root = NULL;
}