From 730dd6e7a0d54f25ffde85be6dd31fdcac74ac01 Mon Sep 17 00:00:00 2001
From: Anders Roxell <anders.roxell@linaro.org>
Date: Thu, 14 May 2015 17:52:17 +0200
Subject: [PATCH 304/304] arch/arm64: Add lazy preempt support

arm64 is missing support for PREEMPT_RT. The main feature which is
lacking is support for lazy preemption. The arch-specific entry code,
thread information structure definitions, and associated data tables
have to be extended to provide this support. Then the Kconfig file has
to be extended to indicate the support is available, and also to
indicate that support for full RT preemption is now available.

Change-Id: I14be0f2747e4b745a7210fa9e74d0a5ee3a00a02
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
---
 arch/arm64/Kconfig                   |  1 +
 arch/arm64/include/asm/thread_info.h |  8 +++++++-
 arch/arm64/kernel/asm-offsets.c      |  1 +
 arch/arm64/kernel/entry.S            | 12 +++++++++---
 arch/arm64/kernel/signal.c           |  2 +-
 5 files changed, 19 insertions(+), 5 deletions(-)
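
Note for reviewers (not part of the patch): the preemption check added to
el1_irq in entry.S below is easier to follow as plain C. The sketch is
illustrative only; "struct ti_sketch" and should_preempt() are invented
names, while the field and flag names mirror the ones the patch touches.

/*
 * Illustrative sketch only -- not code from this patch.  It restates the
 * decision the el1_irq assembly encodes: preempt immediately when
 * TIF_NEED_RESCHED is set, but honour TIF_NEED_RESCHED_LAZY only while
 * the lazy-preempt counter is zero.
 */
#include <stdbool.h>

#define TIF_NEED_RESCHED	1
#define TIF_NEED_RESCHED_LAZY	4
#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)

struct ti_sketch {			/* stand-in for struct thread_info */
	unsigned long flags;
	int preempt_count;		/* 0 => preemptable */
	int preempt_lazy_count;		/* 0 => lazily preemptable */
};

static bool should_preempt(const struct ti_sketch *ti)
{
	if (ti->preempt_count)			/* preemption disabled */
		return false;
	if (ti->flags & _TIF_NEED_RESCHED)	/* hard reschedule request */
		return true;
	if (ti->preempt_lazy_count)		/* lazy preemption disabled */
		return false;
	return !!(ti->flags & _TIF_NEED_RESCHED_LAZY);	/* lazy request */
}

The signal.c hunk applies the same policy on the return-to-user path by
testing _TIF_NEED_RESCHED_MASK, which covers both flags.
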
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index e40339726039..1e97dd7efb5a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -92,6 +92,7 @@ config ARM64
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_LAZY
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_RCU_TABLE_FREE
 	select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 5b96df618873..f7c4271d565c 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -51,6 +51,8 @@ struct thread_info {
 	u64			ttbr0;		/* saved TTBR0_EL1 */
 #endif
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
+	int			preempt_lazy_count; /* 0 => preemptable, <0 => bug */
+	int			cpu;		/* cpu */
 };
 
 #define INIT_THREAD_INFO(tsk)						\
@@ -85,6 +87,7 @@ struct thread_info {
 #define TIF_NEED_RESCHED	1
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_FOREIGN_FPSTATE	3	/* CPU's FP state is not current's */
+#define TIF_NEED_RESCHED_LAZY	4
 #define TIF_NOHZ		7
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
@@ -102,6 +105,7 @@ struct thread_info {
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_FOREIGN_FPSTATE	(1 << TIF_FOREIGN_FPSTATE)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_NOHZ		(1 << TIF_NOHZ)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
@@ -111,7 +115,9 @@ struct thread_info {
 #define _TIF_DEPRECATED_WARN	(1 << TIF_DEPRECATED_WARN)
 
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
+				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+				 _TIF_NEED_RESCHED_LAZY)
+#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
 
 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index b2debecb51e2..ae495cc3cdb2 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -39,6 +39,7 @@ int main(void)
   BLANK();
   DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
   DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
+  DEFINE(TSK_TI_PREEMPT_LAZY,	offsetof(struct task_struct, thread_info.preempt_lazy_count));
   DEFINE(TSK_TI_ADDR_LIMIT,	offsetof(struct task_struct, thread_info.addr_limit));
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
   DEFINE(TSK_TI_TTBR0,		offsetof(struct task_struct, thread_info.ttbr0));
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 09bf87b0a3a7..91241924ab1d 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -564,11 +564,16 @@ el1_irq:
 
 #ifdef CONFIG_PREEMPT
 	ldr	w24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
-	cbnz	w24, 1f				// preempt count != 0
+	cbnz	w24, 2f				// preempt count != 0
 	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get flags
-	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
-	bl	el1_preempt
+	tbnz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
+
+	ldr	w24, [tsk, #TSK_TI_PREEMPT_LAZY]// get preempt lazy count
+	cbnz	w24, 2f				// preempt lazy count != 0
+	tbz	x0, #TIF_NEED_RESCHED_LAZY, 2f	// needs rescheduling?
 1:
+	bl	el1_preempt
+2:
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
@@ -582,6 +587,7 @@ el1_preempt:
 1:	bl	preempt_schedule_irq		// irq en/disable is done inside
 	ldr	x0, [tsk, #TSK_TI_FLAGS]	// get new tasks TI_FLAGS
 	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
+	tbnz	x0, #TIF_NEED_RESCHED_LAZY, 1b	// needs rescheduling?
 	ret	x24
 #endif
 
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 404dd67080b9..639dc6d12e72 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -409,7 +409,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
 	 */
 	trace_hardirqs_off();
 	do {
-		if (thread_flags & _TIF_NEED_RESCHED) {
+		if (thread_flags & _TIF_NEED_RESCHED_MASK) {
 			schedule();
 		} else {
 			local_irq_enable();
-- 
2.28.0