From 7f1abf5a3fdf6ba8b9de86c98d3ba30238ba3b90 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 22 Feb 2012 12:03:30 +0100
Subject: [PATCH 230/365] seqlock: Prevent rt starvation

If a low prio writer gets preempted while holding the seqlock write
locked, a high prio reader spins forever on RT.

To prevent this let the reader grab the spinlock, so it blocks and
eventually boosts the writer. This way the writer can proceed and
endless spinning is prevented.

For seqcount writers we disable preemption over the update code
path. Thanks to Al Viro for disentangling some VFS code to make that
possible.

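For illustration, a typical seqlock user that benefits from this looks
roughly like the sketch below; the foo_* names are made up, only the
seqlock API calls are the real ones:

  static DEFINE_SEQLOCK(foo_lock);
  static u64 foo_a, foo_b;

  /* Writer side: serialized by the lock embedded in the seqlock. */
  static void foo_update(u64 a, u64 b)
  {
  	write_seqlock(&foo_lock);
  	foo_a = a;
  	foo_b = b;
  	write_sequnlock(&foo_lock);
  }

  /*
   * Reader side: retries while a write is in flight. On RT,
   * read_seqbegin() now waits for foo_lock to be released instead of
   * spinning on the sequence count, so a preempted writer can be
   * boosted and finish.
   */
  static u64 foo_read(void)
  {
  	unsigned seq;
  	u64 a, b;

  	do {
  		seq = read_seqbegin(&foo_lock);
  		a = foo_a;
  		b = foo_b;
  	} while (read_seqretry(&foo_lock, seq));

  	return a + b;
  }
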
Nicholas Mc Guire:
- spin_lock+unlock => spin_unlock_wait
- __write_seqcount_begin => __raw_write_seqcount_begin

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/seqlock.h | 56 ++++++++++++++++++++++++++++++++---------
 include/net/dst.h       |  2 +-
 include/net/neighbour.h |  4 +--
 3 files changed, 47 insertions(+), 15 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 1613fe5c668e..d0813d9d4491 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -220,20 +220,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
 	return __read_seqcount_retry(s, start);
 }
 
-
-
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+static inline void __raw_write_seqcount_begin(seqcount_t *s)
 {
 	s->sequence++;
 	smp_wmb();
 }
 
-static inline void raw_write_seqcount_end(seqcount_t *s)
+static inline void raw_write_seqcount_begin(seqcount_t *s)
+{
+	preempt_disable_rt();
+	__raw_write_seqcount_begin(s);
+}
+
+static inline void __raw_write_seqcount_end(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence++;
 }
 
+static inline void raw_write_seqcount_end(seqcount_t *s)
+{
+	__raw_write_seqcount_end(s);
+	preempt_enable_rt();
+}
+
 /**
  * raw_write_seqcount_barrier - do a seq write barrier
  * @s: pointer to seqcount_t
@@ -435,10 +445,32 @@ typedef struct {
 /*
  * Read side functions for starting and finalizing a read side section.
  */
+#ifndef CONFIG_PREEMPT_RT_FULL
 static inline unsigned read_seqbegin(const seqlock_t *sl)
 {
 	return read_seqcount_begin(&sl->seqcount);
 }
+#else
+/*
+ * Starvation safe read side for RT
+ */
+static inline unsigned read_seqbegin(seqlock_t *sl)
+{
+	unsigned ret;
+
+repeat:
+	ret = ACCESS_ONCE(sl->seqcount.sequence);
+	if (unlikely(ret & 1)) {
+		/*
+		 * Take the lock and let the writer proceed (i.e. evtl
+		 * boost it), otherwise we could loop here forever.
+		 */
+		spin_unlock_wait(&sl->lock);
+		goto repeat;
+	}
+	return ret;
+}
+#endif
 
 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 {
@@ -453,36 +485,36 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock(&sl->lock);
 }
 
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock_bh(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock_bh(&sl->lock);
 }
 
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock_irq(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock_irq(&sl->lock);
 }
 
@@ -491,7 +523,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 	unsigned long flags;
 
 	spin_lock_irqsave(&sl->lock, flags);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 	return flags;
 }
 
@@ -501,7 +533,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 
diff --git a/include/net/dst.h b/include/net/dst.h
index 12247c034206..37d79ab602ca 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -452,7 +452,7 @@ static inline void dst_confirm(struct dst_entry *dst)
 static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
 				   struct sk_buff *skb)
 {
-	const struct hh_cache *hh;
+	struct hh_cache *hh;
 
 	if (dst->pending_confirm) {
 		unsigned long now = jiffies;
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index a68a460fa4f3..96a930867144 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -446,7 +446,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
 }
 #endif
 
-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
+static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
 {
 	unsigned int hh_alen = 0;
 	unsigned int seq;
@@ -519,7 +519,7 @@ struct neighbour_cb {
 
 #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
 
-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
+static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
 				     const struct net_device *dev)
 {
 	unsigned int seq;
-- 
2.28.0