tegrakernel/kernel/kernel-4.9/rt-patches/0264-arm-Enable-highmem-for...

From 6b7cc73f405aab3a36417002e19fa935f5ec8e0b Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 13 Feb 2013 11:03:11 +0100
Subject: [PATCH 264/365] arm: Enable highmem for rt

fixup highmem for ARM.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
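
Background: the highmem fixup is needed because the kmap_atomic() paths below
switch from preempt_disable()/preempt_enable() to the _nort variants, so a
kmap_atomic() section can be preempted on PREEMPT_RT_FULL. The _nort helpers
are defined by a companion patch in the RT series; roughly, they behave like
this sketch:

	/* Sketch of the _nort helpers (provided elsewhere in the RT series,
	 * shown here only for reference): */
	#ifdef CONFIG_PREEMPT_RT_FULL
	# define preempt_disable_nort()	barrier()
	# define preempt_enable_nort()	barrier()
	#else
	# define preempt_disable_nort()	preempt_disable()
	# define preempt_enable_nort()	preempt_enable()
	#endif

Since a preempted task may still own per-CPU fixmap slots, switch_to() has to
hand those mappings over at context-switch time; that is what the new
switch_kmaps() below implements.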
---
 arch/arm/include/asm/switch_to.h |  8 +++++
 arch/arm/mm/highmem.c            | 56 +++++++++++++++++++++++++++-----
 include/linux/highmem.h          |  1 +
 3 files changed, 57 insertions(+), 8 deletions(-)
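
Note on the task_struct fields used below: current->kmap_pte[] and
current->kmap_idx are not added by this patch; they come from the RT series'
kmap_atomic scheduling patch. A minimal sketch of the assumed fields (names as
used by the hunks below, array sized by KM_TYPE_NR):

	/* Sketch only - the real declaration lives in <linux/sched.h> as
	 * modified by a companion RT patch, under the same config guard. */
	struct task_struct {
		/* ... */
	#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
		int	kmap_idx;		/* kmap_atomic() nesting depth */
		pte_t	kmap_pte[KM_TYPE_NR];	/* saved PTE per fixmap slot */
	#endif
		/* ... */
	};

At every context switch, switch_kmaps() clears the outgoing task's fixmap slots
and replays the incoming task's saved PTEs, which turns the per-CPU kmap_atomic
slots into effectively per-task state while preemption stays enabled.
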
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index 12ebfcc1d539..c962084605bc 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
@@ -3,6 +3,13 @@
 
 #include <linux/thread_info.h>
 
+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
+#else
+static inline void
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
+#endif
+
 /*
  * For v7 SMP cores running a preemptible kernel we may be pre-empted
  * during a TLB maintenance operation, so execute an inner-shareable dsb
@@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info
 #define switch_to(prev,next,last)					\
 do {									\
 	__complete_pending_tlbi();					\
+	switch_kmaps(prev, next);					\
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
 } while (0)
 
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index eb4b225d28c9..542692dbd40a 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr)
 	return *ptep;
 }
 
+static unsigned int fixmap_idx(int type)
+{
+	return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+}
+
 void *kmap(struct page *page)
 {
 	might_sleep();
@@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap);
 
 void *kmap_atomic(struct page *page)
 {
+	pte_t pte = mk_pte(page, kmap_prot);
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
 	int type;
 
-	preempt_disable();
+	preempt_disable_nort();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
@@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page)
 
 	type = kmap_atomic_idx_push();
 
-	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+	idx = fixmap_idx(type);
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	/*
@@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page)
 	 * in place, so the contained TLB flush ensures the TLB is updated
 	 * with the new mapping.
 	 */
-	set_fixmap_pte(idx, mk_pte(page, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_fixmap_pte(idx, pte);
 
 	return (void *)vaddr;
 }
@@ -106,10 +115,13 @@ void __kunmap_atomic(void *kvaddr)
 
 	if (kvaddr >= (void *)FIXADDR_START) {
 		type = kmap_atomic_idx();
-		idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+		idx = fixmap_idx(type);
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+#ifdef CONFIG_PREEMPT_RT_FULL
+		current->kmap_pte[type] = __pte(0);
+#endif
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(idx));
 #else
@@ -122,28 +134,56 @@ void __kunmap_atomic(void *kvaddr)
 		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
 	}
 	pagefault_enable();
-	preempt_enable();
+	preempt_enable_nort();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
 void *kmap_atomic_pfn(unsigned long pfn)
 {
+	pte_t pte = pfn_pte(pfn, kmap_prot);
 	unsigned long vaddr;
 	int idx, type;
 	struct page *page = pfn_to_page(pfn);
 
-	preempt_disable();
+	preempt_disable_nort();
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
 	type = kmap_atomic_idx_push();
-	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+	idx = fixmap_idx(type);
 	vaddr = __fix_to_virt(idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
 #endif
-	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_fixmap_pte(idx, pte);
 
 	return (void *)vaddr;
 }
+#if defined CONFIG_PREEMPT_RT_FULL
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+	int i;
+
+	/*
+	 * Clear @prev's kmap_atomic mappings
+	 */
+	for (i = 0; i < prev_p->kmap_idx; i++) {
+		int idx = fixmap_idx(i);
+
+		set_fixmap_pte(idx, __pte(0));
+	}
+	/*
+	 * Restore @next_p's kmap_atomic mappings
+	 */
+	for (i = 0; i < next_p->kmap_idx; i++) {
+		int idx = fixmap_idx(i);
+
+		if (!pte_none(next_p->kmap_pte[i]))
+			set_fixmap_pte(idx, next_p->kmap_pte[i]);
+	}
+}
+#endif
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 2f56829181ee..a2a50f599ea9 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -7,6 +7,7 @@
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
+#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
 
--
2.28.0