From 3dc115ac6d93561730456d579c53d24a6e4e9804 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 21 Feb 2018 10:39:54 +0100
Subject: [PATCH 355/365] net: use task_struct instead of CPU number as the
 queue owner on -RT

In commit ("net: move xmit_recursion to per-task variable on -RT") the
recursion level was changed to be per-task since we can get preempted in
BH on -RT. The lock owner should consequently be recorded as the task
that holds the lock and not the CPU. Otherwise we trigger the "Dead loop
on virtual device" warning on SMP systems.

Cc: stable-rt@vger.kernel.org
Reported-by: Kurt Kanzenbach <kurt.kanzenbach@linutronix.de>
Tested-by: Kurt Kanzenbach <kurt.kanzenbach@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
(cherry picked from commit d3a66ffd1c4f0253076069b10a8223e7b6e80e38)
Signed-off-by: Julia Cartwright <julia@ni.com>
---
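A note below the fold (git-am ignores text between the "---" above and the
diffstat): the false positive described in the commit message is easy to see
in isolation. The stand-alone user-space C sketch below mimics only the owner
bookkeeping; struct task, struct txq, looks_recursive_by_cpu()/_by_task() and
the tasks "A"/"B" are made-up names for this illustration, not kernel code.

#include <stdio.h>

/* Toy stand-in for a task; the kernel would use struct task_struct. */
struct task { const char *name; };

/* Toy stand-in for struct netdev_queue's owner bookkeeping. */
struct txq {
        int owner_cpu;           /* pre-patch: CPU number, -1 when unlocked */
        struct task *owner_task; /* post-patch: owning task, NULL when unlocked */
};

/* Pre-patch style recursion check: owner keyed on the CPU number. */
static int looks_recursive_by_cpu(const struct txq *q, int cpu)
{
        return q->owner_cpu == cpu;
}

/* Post-patch style recursion check: owner keyed on the task instead. */
static int looks_recursive_by_task(const struct txq *q, const struct task *t)
{
        return q->owner_task == t;
}

int main(void)
{
        struct task a = { "A" }, b = { "B" };
        struct txq q = { .owner_cpu = -1, .owner_task = NULL };

        /* Task A, running on CPU 0, takes the xmit lock. */
        q.owner_cpu = 0;
        q.owner_task = &a;

        /*
         * On -RT, BH is preemptible, so task B may run on CPU 0 while A
         * still holds the lock. B is not recursing, yet the CPU-based
         * check claims it is -- the "Dead loop on virtual device" false
         * positive. The task-based check gets it right.
         */
        printf("CPU-based check flags B:  %d (false positive)\n",
               looks_recursive_by_cpu(&q, 0));
        printf("task-based check flags B: %d\n",
               looks_recursive_by_task(&q, &b));
        return 0;
}

Compiled with cc, the CPU-based check reports 1 for task B (wrongly flagged
as recursion) while the task-based check reports 0; that is the behavioral
change the hunks below implement via netdev_queue_set_owner() and
netdev_queue_clear_owner().
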
 include/linux/netdevice.h | 54 ++++++++++++++++++++++++++++++++++-----
 net/core/dev.c            |  6 ++++-
 2 files changed, 53 insertions(+), 7 deletions(-)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index f91549292fdc..5decbf0980b0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -594,7 +594,11 @@ struct netdev_queue {
 	 * write-mostly part
 	 */
 	spinlock_t		_xmit_lock ____cacheline_aligned_in_smp;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	struct task_struct	*xmit_lock_owner;
+#else
 	int			xmit_lock_owner;
+#endif
 	/*
 	 * Time (in jiffies) of last Tx
 	 */
@@ -3625,41 +3629,79 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
 	return (1U << debug_value) - 1;
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+	txq->xmit_lock_owner = current;
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = NULL;
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+	if (txq->xmit_lock_owner != NULL)
+		return true;
+	return false;
+}
+
+#else
+
+static inline void netdev_queue_set_owner(struct netdev_queue *txq, int cpu)
+{
+	txq->xmit_lock_owner = cpu;
+}
+
+static inline void netdev_queue_clear_owner(struct netdev_queue *txq)
+{
+	txq->xmit_lock_owner = -1;
+}
+
+static inline bool netdev_queue_has_owner(struct netdev_queue *txq)
+{
+	if (txq->xmit_lock_owner != -1)
+		return true;
+	return false;
+}
+#endif
+
 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
 {
 	spin_lock(&txq->_xmit_lock);
-	txq->xmit_lock_owner = cpu;
+	netdev_queue_set_owner(txq, cpu);
 }
 
 static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
 {
 	spin_lock_bh(&txq->_xmit_lock);
-	txq->xmit_lock_owner = smp_processor_id();
+	netdev_queue_set_owner(txq, smp_processor_id());
 }
 
 static inline bool __netif_tx_trylock(struct netdev_queue *txq)
 {
 	bool ok = spin_trylock(&txq->_xmit_lock);
 	if (likely(ok))
-		txq->xmit_lock_owner = smp_processor_id();
+		netdev_queue_set_owner(txq, smp_processor_id());
 	return ok;
 }
 
 static inline void __netif_tx_unlock(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(txq);
 	spin_unlock(&txq->_xmit_lock);
 }
 
 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
 {
-	txq->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(txq);
 	spin_unlock_bh(&txq->_xmit_lock);
 }
 
 static inline void txq_trans_update(struct netdev_queue *txq)
 {
-	if (txq->xmit_lock_owner != -1)
+	if (netdev_queue_has_owner(txq))
 		txq->trans_start = jiffies;
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 2db59a992414..51da1bac90c1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3461,7 +3461,11 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
 	if (dev->flags & IFF_UP) {
 		int cpu = smp_processor_id(); /* ok because BHs are off */
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+		if (txq->xmit_lock_owner != current) {
+#else
 		if (txq->xmit_lock_owner != cpu) {
+#endif
 			if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT))
 				goto recursion_alert;
 
@@ -7193,7 +7197,7 @@ static void netdev_init_one_queue(struct net_device *dev,
 	/* Initialize queue lock */
 	spin_lock_init(&queue->_xmit_lock);
 	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
-	queue->xmit_lock_owner = -1;
+	netdev_queue_clear_owner(queue);
 	netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
 	queue->dev = dev;
 #ifdef CONFIG_BQL
-- 
2.28.0