From ed55077023eb04a6634786286e58d78ece0a7ab6 Mon Sep 17 00:00:00 2001
From: Ingo Molnar <mingo@elte.hu>
Date: Fri, 3 Jul 2009 08:30:13 -0500
Subject: [PATCH 103/365] mm/vmstat: Protect per cpu variables with preempt
 disable on RT

Disable preemption on -RT for the vmstat code. On vanilla the code runs in
IRQ-off regions while on -RT it is not. "preempt_disable" ensures that the
same resources are not updated in parallel due to preemption.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
 include/linux/vmstat.h |  4 ++++
 mm/vmstat.c            | 12 ++++++++++++
 2 files changed, 16 insertions(+)

diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 613771909b6e..e28c5a43229d 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -33,7 +33,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states);
  */
 static inline void __count_vm_event(enum vm_event_item item)
 {
+	preempt_disable_rt();
 	raw_cpu_inc(vm_event_states.event[item]);
+	preempt_enable_rt();
 }
 
 static inline void count_vm_event(enum vm_event_item item)
@@ -43,7 +45,9 @@ static inline void count_vm_event(enum vm_event_item item)
 
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
+	preempt_disable_rt();
 	raw_cpu_add(vm_event_states.event[item], delta);
+	preempt_enable_rt();
 }
 
 static inline void count_vm_events(enum vm_event_item item, long delta)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index e60435d556e3..ea660046761b 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -245,6 +245,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 	long x;
 	long t;
 
+	preempt_disable_rt();
 	x = delta + __this_cpu_read(*p);
 
 	t = __this_cpu_read(pcp->stat_threshold);
@@ -254,6 +255,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
+	preempt_enable_rt();
 }
 EXPORT_SYMBOL(__mod_zone_page_state);
 
@@ -265,6 +267,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 	long x;
 	long t;
 
+	preempt_disable_rt();
 	x = delta + __this_cpu_read(*p);
 
 	t = __this_cpu_read(pcp->stat_threshold);
@@ -274,6 +277,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item,
 		x = 0;
 	}
 	__this_cpu_write(*p, x);
+	preempt_enable_rt();
 }
 EXPORT_SYMBOL(__mod_node_page_state);
 
@@ -306,6 +310,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 	s8 __percpu *p = pcp->vm_stat_diff + item;
 	s8 v, t;
 
+	preempt_disable_rt();
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
@@ -314,6 +319,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
 		zone_page_state_add(v + overstep, zone, item);
 		__this_cpu_write(*p, -overstep);
 	}
+	preempt_enable_rt();
 }
 
 void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -322,6 +328,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	s8 v, t;
 
+	preempt_disable_rt();
 	v = __this_cpu_inc_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v > t)) {
@@ -330,6 +337,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 		node_page_state_add(v + overstep, pgdat, item);
 		__this_cpu_write(*p, -overstep);
 	}
+	preempt_enable_rt();
 }
 
 void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
@@ -350,6 +358,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 	s8 __percpu *p = pcp->vm_stat_diff + item;
 	s8 v, t;
 
+	preempt_disable_rt();
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
@@ -358,6 +367,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
 		zone_page_state_add(v - overstep, zone, item);
 		__this_cpu_write(*p, overstep);
 	}
+	preempt_enable_rt();
 }
 
 void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
@@ -366,6 +376,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 	s8 __percpu *p = pcp->vm_node_stat_diff + item;
 	s8 v, t;
 
+	preempt_disable_rt();
 	v = __this_cpu_dec_return(*p);
 	t = __this_cpu_read(pcp->stat_threshold);
 	if (unlikely(v < - t)) {
@@ -374,6 +385,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
 		node_page_state_add(v - overstep, pgdat, item);
 		__this_cpu_write(*p, overstep);
 	}
+	preempt_enable_rt();
 }
 
 void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
-- 
2.28.0