From: Lorenzo Bianconi <lorenzo@kernel.org>
Date: Tue, 10 Jan 2023 10:31:26 +0100
Subject: [PATCH] net: ethernet: mtk_wed: get rid of queue lock for rx queue

The queue spinlock is currently held in the mtk_wed_wo_queue_rx_clean and
mtk_wed_wo_queue_refill routines for the MTK Wireless Ethernet Dispatcher
MCU rx queue. mtk_wed_wo_queue_refill() runs during initialization and in
the rx tasklet, while mtk_wed_wo_queue_rx_clean() runs in
mtk_wed_wo_hw_deinit() during the hw de-init phase, after the rx tasklet
has been disabled. Since mtk_wed_wo_queue_rx_clean and
mtk_wed_wo_queue_refill can't run concurrently, get rid of the spinlock
for the MCU rx queue.

Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Link: https://lore.kernel.org/r/36ec3b729542ea60898471d890796f745479ba32.1673342990.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
---
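The locking argument in the description can be pictured with a short,
self-contained sketch (hypothetical demo_* names, not the driver's real
symbols, with bodies omitted where the details don't matter): once the rx
tasklet has been disabled, the refill side can no longer run, so the clean
side has exclusive access to the queue and no spinlock is needed.

#include <linux/interrupt.h>

/* Hypothetical illustration only; demo_* names are not taken from the driver. */

/* Refill side: touches the rx queue from init and from the rx tasklet. */
static void demo_queue_refill(void)
{
}

static void demo_rx_tasklet_fn(struct tasklet_struct *t)
{
	demo_queue_refill();
}

DECLARE_TASKLET(demo_rx_tasklet, demo_rx_tasklet_fn);

/* Clean side: runs during hw de-init, after the tasklet is disabled. */
static void demo_hw_deinit(void)
{
	/*
	 * tasklet_disable() returns only once a running tasklet callback has
	 * finished, so from this point on demo_queue_refill() cannot execute
	 * concurrently ...
	 */
	tasklet_disable(&demo_rx_tasklet);

	/* ... and the rx queue can be cleaned here without taking a lock. */
}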
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -138,7 +138,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_w
 	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	int n_buf = 0;
 
-	spin_lock_bh(&q->lock);
 	while (q->queued < q->n_desc) {
 		struct mtk_wed_wo_queue_entry *entry;
 		dma_addr_t addr;
@@ -172,7 +171,6 @@ mtk_wed_wo_queue_refill(struct mtk_wed_w
 		q->queued++;
 		n_buf++;
 	}
-	spin_unlock_bh(&q->lock);
 
 	return n_buf;
 }
@@ -316,7 +314,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed
 {
 	struct page *page;
 
-	spin_lock_bh(&q->lock);
 	for (;;) {
 		void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
 
@@ -325,7 +322,6 @@ mtk_wed_wo_queue_rx_clean(struct mtk_wed
 
 		skb_free_frag(buf);
 	}
-	spin_unlock_bh(&q->lock);
 
 	if (!q->cache.va)
 		return;