From 34f1161d0dc863f8a59c03857c2ccd856ca47d7e Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 17 Feb 2017 19:44:39 +0100
Subject: [PATCH 002/365] lockdep: Handle statically initialized PER_CPU locks
 properly

If a PER_CPU struct which contains a spin_lock is statically initialized
via:

DEFINE_PER_CPU(struct foo, bla) = {
	.lock = __SPIN_LOCK_UNLOCKED(bla.lock)
};

then lockdep assigns a separate key to each lock because the logic for
assigning a key to statically initialized locks is to use the address as
the key. With per CPU locks the address is obviously different on each CPU.

That's wrong, because all locks should have the same key.

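For illustration: the fallback in look_up_lock_class() which causes this
(it is the code removed by the lockdep.c hunk below) was simply:

	if (unlikely(!lock->key))
		lock->key = (void *)lock;

With a DEFINE_PER_CPU lock each CPU's copy sits at a different address,
so every copy ends up registered under its own lock class.
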
To solve this the following modifications are required:

1) Extend the is_kernel/module_percpu_addr() functions to hand back the
   canonical address of the per CPU address, i.e. the per CPU address
   minus the per CPU offset (see the sketch after this list).

2) Check the lock address with these functions and if the per CPU check
   matches use the returned canonical address as the lock key, so all per
   CPU locks have the same key.

3) Move the static_obj(key) check into look_up_lock_class() so this check
   can be avoided for statically initialized per CPU locks. That's
   required because the canonical address fails the static_obj(key) check
   for obvious reasons.

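A sketch of the canonical address computation from point #1, using the
same variable names as the mm/percpu.c hunk below (illustration only,
not additional patch content):

	void *start = per_cpu_ptr(base, cpu);	/* this CPU's copy of the area */
	void *va = (void *)addr;

	/* offset into the per CPU area - identical for every CPU's copy */
	*can_addr = (unsigned long) (va - start);
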
Reported-by: Mike Galbraith <efault@gmx.de>
Cc: stable-rt@vger.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 include/linux/module.h   |  1 +
 include/linux/percpu.h   |  1 +
 kernel/locking/lockdep.c | 33 +++++++++++++++++++++++----------
 kernel/module.c          | 31 +++++++++++++++++++------------
 mm/percpu.c              | 37 +++++++++++++++++++++++--------------
 5 files changed, 67 insertions(+), 36 deletions(-)

diff --git a/include/linux/module.h b/include/linux/module.h
index 49f0aae0a5db..ee6f07ffa6e4 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -501,6 +501,7 @@ static inline int module_is_live(struct module *mod)
 struct module *__module_text_address(unsigned long addr);
 struct module *__module_address(unsigned long addr);
 bool is_module_address(unsigned long addr);
+bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
 bool is_module_percpu_address(unsigned long addr);
 bool is_module_text_address(unsigned long addr);

diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 56939d3f6e53..491b3f5a5f8a 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -110,6 +110,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 #endif

 extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
 extern bool is_kernel_percpu_address(unsigned long addr);

 #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9f56e3fac795..736ab5cf03aa 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -658,6 +658,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	struct lockdep_subclass_key *key;
 	struct hlist_head *hash_head;
 	struct lock_class *class;
+	bool is_static = false;

 	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
 		debug_locks_off();
@@ -671,10 +672,23 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)

 	/*
 	 * Static locks do not have their class-keys yet - for them the key
-	 * is the lock object itself:
+	 * is the lock object itself. If the lock is in the per cpu area,
+	 * the canonical address of the lock (per cpu offset removed) is
+	 * used.
 	 */
-	if (unlikely(!lock->key))
-		lock->key = (void *)lock;
+	if (unlikely(!lock->key)) {
+		unsigned long can_addr, addr = (unsigned long)lock;
+
+		if (__is_kernel_percpu_address(addr, &can_addr))
+			lock->key = (void *)can_addr;
+		else if (__is_module_percpu_address(addr, &can_addr))
+			lock->key = (void *)can_addr;
+		else if (static_obj(lock))
+			lock->key = (void *)lock;
+		else
+			return ERR_PTR(-EINVAL);
+		is_static = true;
+	}

 	/*
 	 * NOTE: the class-key must be unique. For dynamic locks, a static
@@ -706,7 +720,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 		}
 	}

-	return NULL;
+	return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL);
 }

 /*
@@ -724,19 +738,18 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

 	class = look_up_lock_class(lock, subclass);
-	if (likely(class))
+	if (likely(!IS_ERR_OR_NULL(class)))
 		goto out_set_class_cache;

 	/*
 	 * Debug-check: all keys must be persistent!
-	 */
-	if (!static_obj(lock->key)) {
+	 */
+	if (IS_ERR(class)) {
 		debug_locks_off();
 		printk("INFO: trying to register non-static key.\n");
 		printk("the code is fine but needs lockdep annotation.\n");
 		printk("turning off the locking correctness validator.\n");
 		dump_stack();
-
 		return NULL;
 	}

@@ -3421,7 +3434,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 	 * Clearly if the lock hasn't been acquired _ever_, we're not
 	 * holding it either, so report failure.
 	 */
-	if (!class)
+	if (IS_ERR_OR_NULL(class))
 		return 0;

 	/*
@@ -4173,7 +4186,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 	 * If the class exists we look it up and zap it:
 	 */
 	class = look_up_lock_class(lock, j);
-	if (class)
+	if (!IS_ERR_OR_NULL(class))
 		zap_class(class);
 	}
 	/*
diff --git a/kernel/module.c b/kernel/module.c
index edda7158fc24..d7a4d0bcf7b6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -660,16 +660,7 @@ static void percpu_modcopy(struct module *mod,
 		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
 }

-/**
- * is_module_percpu_address - test whether address is from module static percpu
- * @addr: address to test
- *
- * Test whether @addr belongs to module static percpu area.
- *
- * RETURNS:
- * %true if @addr is from module static percpu area
- */
-bool is_module_percpu_address(unsigned long addr)
+bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
 {
 	struct module *mod;
 	unsigned int cpu;
@@ -683,9 +674,11 @@ bool is_module_percpu_address(unsigned long addr)
 			continue;
 		for_each_possible_cpu(cpu) {
 			void *start = per_cpu_ptr(mod->percpu, cpu);
+			void *va = (void *)addr;

-			if ((void *)addr >= start &&
-			    (void *)addr < start + mod->percpu_size) {
+			if (va >= start && va < start + mod->percpu_size) {
+				if (can_addr)
+					*can_addr = (unsigned long) (va - start);
 				preempt_enable();
 				return true;
 			}
@@ -696,6 +689,20 @@ bool is_module_percpu_address(unsigned long addr)
 	return false;
 }

+/**
+ * is_module_percpu_address - test whether address is from module static percpu
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to module static percpu area.
+ *
+ * RETURNS:
+ * %true if @addr is from module static percpu area
+ */
+bool is_module_percpu_address(unsigned long addr)
+{
+	return __is_module_percpu_address(addr, NULL);
+}
+
 #else /* ... !CONFIG_SMP */

 static inline void __percpu *mod_percpu(struct module *mod)
diff --git a/mm/percpu.c b/mm/percpu.c
index 0462a2a00f05..16bb5f561e53 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1284,18 +1284,7 @@ void free_percpu(void __percpu *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);

-/**
- * is_kernel_percpu_address - test whether address is from static percpu area
- * @addr: address to test
- *
- * Test whether @addr belongs to in-kernel static percpu area. Module
- * static percpu areas are not considered. For those, use
- * is_module_percpu_address().
- *
- * RETURNS:
- * %true if @addr is from in-kernel static percpu area, %false otherwise.
- */
-bool is_kernel_percpu_address(unsigned long addr)
+bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
 {
 #ifdef CONFIG_SMP
 	const size_t static_size = __per_cpu_end - __per_cpu_start;
@@ -1304,15 +1293,35 @@ bool is_kernel_percpu_address(unsigned long addr)

 	for_each_possible_cpu(cpu) {
 		void *start = per_cpu_ptr(base, cpu);
+		void *va = (void *)addr;

-		if ((void *)addr >= start && (void *)addr < start + static_size)
+		if (va >= start && va < start + static_size) {
+			if (can_addr)
+				*can_addr = (unsigned long) (va - start);
 			return true;
-	}
+		}
+	}
 #endif
 	/* on UP, can't distinguish from other static vars, always false */
 	return false;
 }

+/**
+ * is_kernel_percpu_address - test whether address is from static percpu area
+ * @addr: address to test
+ *
+ * Test whether @addr belongs to in-kernel static percpu area. Module
+ * static percpu areas are not considered. For those, use
+ * is_module_percpu_address().
+ *
+ * RETURNS:
+ * %true if @addr is from in-kernel static percpu area, %false otherwise.
+ */
+bool is_kernel_percpu_address(unsigned long addr)
+{
+	return __is_kernel_percpu_address(addr, NULL);
+}
+
 /**
  * per_cpu_ptr_to_phys - convert translated percpu address to physical address
  * @addr: the address to be converted to physical address
--
2.28.0