From 59b3bf55eaeec69d41407f222029d2b91b26e671 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (VMware)" <rostedt@goodmis.org>
Date: Wed, 22 Nov 2017 07:31:19 -0500
Subject: [PATCH 329/365] Revert "memcontrol: Prevent scheduling while atomic
 in cgroup code"

The commit "memcontrol: Prevent scheduling while atomic in cgroup code"
fixed this issue:

   refill_stock()
      get_cpu_var()
      drain_stock()
         res_counter_uncharge()
            res_counter_uncharge_until()
               spin_lock() <== boom

But commit 3e32cb2e0a12b ("mm: memcontrol: lockless page counters") replaced
the calls to res_counter_uncharge() in drain_stock() with the lockless
function page_counter_uncharge(). There is no more spin lock there and no
more reason to have that local lock.
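
To make that concrete: page_counter_uncharge() boils down to an atomic
subtraction per hierarchy level, so nothing on the drain_stock() path can
spin or sleep anymore. A minimal user-space sketch of that idea follows;
the names are illustrative only, not the actual mm/memcontrol.c or page
counter symbols (any C11 compiler builds it):

   #include <stdatomic.h>
   #include <stdio.h>

   /* Illustrative stand-in for the lockless page counter. */
   struct counter_sketch {
           _Atomic long usage;            /* pages currently charged */
           struct counter_sketch *parent; /* hierarchy, may be NULL */
   };

   /*
    * Lockless uncharge: one atomic subtract per level, no spinlock,
    * so it is safe with interrupts disabled, also on PREEMPT_RT.
    */
   static void sketch_uncharge(struct counter_sketch *c, long nr_pages)
   {
           for (; c; c = c->parent)
                   atomic_fetch_sub_explicit(&c->usage, nr_pages,
                                             memory_order_relaxed);
   }

   int main(void)
   {
           struct counter_sketch root  = { 128, NULL };
           struct counter_sketch child = {  32, &root };

           sketch_uncharge(&child, 16);
           printf("child=%ld root=%ld\n",
                  atomic_load(&child.usage), atomic_load(&root.usage));
           return 0;
   }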

Cc: stable-rt@vger.kernel.org
Reported-by: Haiyang HY1 Tan <tanhy1@lenovo.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
[bigeasy: That upstream commit appeared in v3.19 and the patch in
question in v3.18.7-rt2, and v3.18 seems still to be maintained. So I
guess that v3.18 would need the locallocks that we are about to remove
here. I am not sure if any earlier versions have the patch
backported.
The stable tag here is because Haiyang reported (and debugged) a crash
in 4.4-RT with this patch applied (which has get_cpu_light() instead
of the locallocks it gained in v4.9-RT).
https://lkml.kernel.org/r/05AA4EC5C6EC1D48BE2CDCFF3AE0B8A637F78A15@CNMAILEX04.lenovo.com
]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 mm/memcontrol.c | 13 ++++++-------
 1 file changed, 6 insertions(+), 7 deletions(-)
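
The hunks below all touch memcg's per-CPU charge stock: a per-CPU cache
of pre-charged pages that lets the hot path avoid the shared page
counter, and that is only manipulated with interrupts disabled. A hedged
user-space analogue of that pattern (names such as sketch_consume_stock()
and SKETCH_CHARGE_BATCH are made up here, and _Thread_local stands in for
the kernel's per-CPU data plus local_irq_save()):

   #include <stdatomic.h>
   #include <stdbool.h>
   #include <stdio.h>

   #define SKETCH_CHARGE_BATCH 32            /* analogue of CHARGE_BATCH */

   static _Atomic long shared_usage;         /* analogue of the page counter */
   static _Thread_local long stock_nr_pages; /* analogue of memcg_stock_pcp */

   /* Hot path: satisfy a charge from the local stock, no atomics at all. */
   static bool sketch_consume_stock(long nr_pages)
   {
           if (nr_pages > SKETCH_CHARGE_BATCH || stock_nr_pages < nr_pages)
                   return false;
           stock_nr_pages -= nr_pages;
           return true;
   }

   /* Slow path: charge the shared counter once, park the pages locally. */
   static void sketch_refill_stock(long nr_pages)
   {
           atomic_fetch_add_explicit(&shared_usage, nr_pages,
                                     memory_order_relaxed);
           stock_nr_pages += nr_pages;
   }

   int main(void)
   {
           sketch_refill_stock(SKETCH_CHARGE_BATCH);
           printf("consumed=%d stock=%ld shared=%ld\n",
                  sketch_consume_stock(8), stock_nr_pages,
                  atomic_load(&shared_usage));
           return 0;
   }

With page_counter_uncharge() lockless, draining that stock never takes a
sleeping lock, so disabling interrupts is again sufficient protection.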

diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 106918b6b30b..1c39495b1677 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1717,7 +1717,6 @@ struct memcg_stock_pcp {
 #define FLUSHING_CACHED_CHARGE 0
 };
 static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
-static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll);
 static DEFINE_MUTEX(percpu_charge_mutex);
 
 /**
@@ -1740,7 +1739,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	if (nr_pages > CHARGE_BATCH)
 		return ret;
 
-	local_lock_irqsave(memcg_stock_ll, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
@@ -1748,7 +1747,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 		ret = true;
 	}
 
-	local_unlock_irqrestore(memcg_stock_ll, flags);
+	local_irq_restore(flags);
 
 	return ret;
 }
@@ -1775,13 +1774,13 @@ static void drain_local_stock(struct work_struct *dummy)
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_lock_irqsave(memcg_stock_ll, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 
-	local_unlock_irqrestore(memcg_stock_ll, flags);
+	local_irq_restore(flags);
 }
 
 /*
@@ -1793,7 +1792,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	struct memcg_stock_pcp *stock;
 	unsigned long flags;
 
-	local_lock_irqsave(memcg_stock_ll, flags);
+	local_irq_save(flags);
 
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
@@ -1802,7 +1801,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	}
 	stock->nr_pages += nr_pages;
 
-	local_unlock_irqrestore(memcg_stock_ll, flags);
+	local_irq_restore(flags);
 }
 
 /*
-- 
2.28.0