a5c095c453
Our downstream patch "net/core: add optional threading for backlog
processing" broke with the switch to Linux 6.6. Replace it with a
backport of the now available upstream solution.

Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Link: https://github.com/openwrt/openwrt/pull/15592
Signed-off-by: Christian Marangi <ansuelsmth@gmail.com>
From 765b11f8f4e20b7433e4ba4a3e9106a0d59501ed Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Mon, 25 Mar 2024 08:40:31 +0100
Subject: [PATCH 4/4] net: Rename rps_lock to backlog_lock.

The rps_lock.*() functions use the inner lock of a sk_buff_head for
locking. This lock is used if RPS is enabled, otherwise the list is
accessed lockless and disabling interrupts is enough for the
synchronisation because it is only accessed CPU local. Not only the
list but also the NAPI state is protected.

With the addition of backlog threads, the lock is also needed because
of the cross CPU access even without RPS. The clean-up of the
defer_list is also done via backlog threads (if enabled).

It has been suggested to rename the locking function since it is no
longer just RPS.

Rename the rps_lock*() functions to backlog_lock*().

Suggested-by: Jakub Kicinski <kuba@kernel.org>
Acked-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 net/core/dev.c | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -243,8 +243,8 @@ static bool use_backlog_threads(void)
 
 #endif
 
-static inline void rps_lock_irqsave(struct softnet_data *sd,
-				    unsigned long *flags)
+static inline void backlog_lock_irq_save(struct softnet_data *sd,
+					  unsigned long *flags)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags);
@@ -252,7 +252,7 @@ static inline void rps_lock_irqsave(stru
 		local_irq_save(*flags);
 }
 
-static inline void rps_lock_irq_disable(struct softnet_data *sd)
+static inline void backlog_lock_irq_disable(struct softnet_data *sd)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_lock_irq(&sd->input_pkt_queue.lock);
@@ -260,8 +260,8 @@ static inline void rps_lock_irq_disable(
 		local_irq_disable();
 }
 
-static inline void rps_unlock_irq_restore(struct softnet_data *sd,
-					  unsigned long *flags)
+static inline void backlog_unlock_irq_restore(struct softnet_data *sd,
+					      unsigned long *flags)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags);
@@ -269,7 +269,7 @@ static inline void rps_unlock_irq_restor
 		local_irq_restore(*flags);
 }
 
-static inline void rps_unlock_irq_enable(struct softnet_data *sd)
+static inline void backlog_unlock_irq_enable(struct softnet_data *sd)
 {
 	if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads())
 		spin_unlock_irq(&sd->input_pkt_queue.lock);
@@ -4779,12 +4779,12 @@ void kick_defer_list_purge(struct softne
 	unsigned long flags;
 
 	if (use_backlog_threads()) {
-		rps_lock_irqsave(sd, &flags);
+		backlog_lock_irq_save(sd, &flags);
 
 		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
 			__napi_schedule_irqoff(&sd->backlog);
 
-		rps_unlock_irq_restore(sd, &flags);
+		backlog_unlock_irq_restore(sd, &flags);
 
 	} else if (!cmpxchg(&sd->defer_ipi_scheduled, 0, 1)) {
 		smp_call_function_single_async(cpu, &sd->defer_csd);
@@ -4846,7 +4846,7 @@ static int enqueue_to_backlog(struct sk_
 	reason = SKB_DROP_REASON_NOT_SPECIFIED;
 	sd = &per_cpu(softnet_data, cpu);
 
-	rps_lock_irqsave(sd, &flags);
+	backlog_lock_irq_save(sd, &flags);
 	if (!netif_running(skb->dev))
 		goto drop;
 	qlen = skb_queue_len(&sd->input_pkt_queue);
@@ -4855,7 +4855,7 @@ static int enqueue_to_backlog(struct sk_
 enqueue:
 			__skb_queue_tail(&sd->input_pkt_queue, skb);
 			input_queue_tail_incr_save(sd, qtail);
-			rps_unlock_irq_restore(sd, &flags);
+			backlog_unlock_irq_restore(sd, &flags);
 			return NET_RX_SUCCESS;
 		}
 
@@ -4870,7 +4870,7 @@ enqueue:
 
 drop:
 	sd->dropped++;
-	rps_unlock_irq_restore(sd, &flags);
+	backlog_unlock_irq_restore(sd, &flags);
 
 	dev_core_stats_rx_dropped_inc(skb->dev);
 	kfree_skb_reason(skb, reason);
@@ -5901,7 +5901,7 @@ static void flush_backlog(struct work_st
 	local_bh_disable();
 	sd = this_cpu_ptr(&softnet_data);
 
-	rps_lock_irq_disable(sd);
+	backlog_lock_irq_disable(sd);
 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
 			__skb_unlink(skb, &sd->input_pkt_queue);
@@ -5909,7 +5909,7 @@ static void flush_backlog(struct work_st
 			input_queue_head_incr(sd);
 		}
 	}
-	rps_unlock_irq_enable(sd);
+	backlog_unlock_irq_enable(sd);
 
 	skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
 		if (skb->dev->reg_state == NETREG_UNREGISTERING) {
@@ -5927,14 +5927,14 @@ static bool flush_required(int cpu)
 	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
 	bool do_flush;
 
-	rps_lock_irq_disable(sd);
+	backlog_lock_irq_disable(sd);
 
 	/* as insertion into process_queue happens with the rps lock held,
 	 * process_queue access may race only with dequeue
 	 */
 	do_flush = !skb_queue_empty(&sd->input_pkt_queue) ||
 		   !skb_queue_empty_lockless(&sd->process_queue);
-	rps_unlock_irq_enable(sd);
+	backlog_unlock_irq_enable(sd);
 
 	return do_flush;
 #endif
@@ -6049,7 +6049,7 @@ static int process_backlog(struct napi_s
 
 		}
 
-		rps_lock_irq_disable(sd);
+		backlog_lock_irq_disable(sd);
 		if (skb_queue_empty(&sd->input_pkt_queue)) {
 			/*
 			 * Inline a custom version of __napi_complete().
@@ -6065,7 +6065,7 @@ static int process_backlog(struct napi_s
 			skb_queue_splice_tail_init(&sd->input_pkt_queue,
 						   &sd->process_queue);
 		}
-		rps_unlock_irq_enable(sd);
+		backlog_unlock_irq_enable(sd);
 	}
 
 	return work;
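As a rough illustration of the locking scheme described in the commit
message, the following user-space sketch (not part of the patch; the
names and the pthread-based locking are invented for the example) models
what the renamed helpers do: take the per-CPU queue lock only when
another CPU may touch the backlog, i.e. when RPS is enabled or backlog
threads are in use, and otherwise rely on purely CPU-local access, which
the kernel enforces by disabling interrupts rather than by taking the
lock.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct backlog_queue {
	pthread_mutex_t lock;   /* stands in for sd->input_pkt_queue.lock */
	int queued;             /* stands in for the queued sk_buff count */
	bool cross_cpu;         /* models CONFIG_RPS || use_backlog_threads() */
};

/* Analogue of backlog_lock_irq_save(): lock only if another CPU may race. */
static void backlog_lock(struct backlog_queue *q)
{
	if (q->cross_cpu)
		pthread_mutex_lock(&q->lock);
	/* else: in the kernel, local_irq_save() is sufficient here */
}

/* Analogue of backlog_unlock_irq_restore(). */
static void backlog_unlock(struct backlog_queue *q)
{
	if (q->cross_cpu)
		pthread_mutex_unlock(&q->lock);
	/* else: local_irq_restore() in the kernel */
}

int main(void)
{
	struct backlog_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.queued = 0,
		.cross_cpu = true,   /* e.g. backlog threads enabled */
	};

	backlog_lock(&q);
	q.queued++;              /* enqueue_to_backlog() analogue */
	backlog_unlock(&q);

	printf("queued: %d\n", q.queued);
	return 0;
}

The sketch can be built with "cc -pthread"; it only demonstrates the
conditional-locking pattern, not the kernel's interrupt handling.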