From 7f110092fc2b685e27c8f6af72f56ae73f8bf452 Mon Sep 17 00:00:00 2001
From: Mike Galbraith <umgwanakikbuti@gmail.com>
Date: Sun, 2 Nov 2014 08:31:37 +0100
Subject: [PATCH 207/365] x86: UV: raw_spinlock conversion

Shrug. Lots of hobbyists have a beast in their basement, right?
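
On PREEMPT_RT, spinlock_t becomes a sleeping lock and may not be taken
in contexts that cannot sleep. The BAU locks are acquired in the TLB
shootdown path, and the RTC timer-head lock is taken with interrupts
disabled in the clockevent code, so convert them all to raw_spinlock_t.
uv_read_rtc() additionally gains a preempt_disable()/preempt_enable()
pair so that the per-blade MMR offset is computed and read on the same
CPU.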

Signed-off-by: Mike Galbraith <mgalbraith@suse.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 arch/x86/include/asm/uv/uv_bau.h | 14 +++++++-------
 arch/x86/platform/uv/tlb_uv.c    | 26 +++++++++++++-------------
 arch/x86/platform/uv/uv_time.c   | 21 +++++++++++++--------
 3 files changed, 33 insertions(+), 28 deletions(-)

diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index 57ab86d94d64..35d25e27180f 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -624,9 +624,9 @@ struct bau_control {
 	cycles_t		send_message;
 	cycles_t		period_end;
 	cycles_t		period_time;
-	spinlock_t		uvhub_lock;
-	spinlock_t		queue_lock;
-	spinlock_t		disable_lock;
+	raw_spinlock_t		uvhub_lock;
+	raw_spinlock_t		queue_lock;
+	raw_spinlock_t		disable_lock;
 	/* tunables */
 	int			max_concurr;
 	int			max_concurr_const;
@@ -815,15 +815,15 @@ static inline int atom_asr(short i, struct atomic_short *v)
  * to be lowered below the current 'v'. atomic_add_unless can only stop
  * on equal.
  */
-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
 {
-	spin_lock(lock);
+	raw_spin_lock(lock);
 	if (atomic_read(v) >= u) {
-		spin_unlock(lock);
+		raw_spin_unlock(lock);
 		return 0;
 	}
 	atomic_inc(v);
-	spin_unlock(lock);
+	raw_spin_unlock(lock);
 	return 1;
 }
 
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 16d4967d59ea..cb5382be4681 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -748,9 +748,9 @@ static void destination_plugged(struct bau_desc *bau_desc,
 
 	quiesce_local_uvhub(hmaster);
 
-	spin_lock(&hmaster->queue_lock);
+	raw_spin_lock(&hmaster->queue_lock);
 	reset_with_ipi(&bau_desc->distribution, bcp);
-	spin_unlock(&hmaster->queue_lock);
+	raw_spin_unlock(&hmaster->queue_lock);
 
 	end_uvhub_quiesce(hmaster);
 
@@ -770,9 +770,9 @@ static void destination_timeout(struct bau_desc *bau_desc,
 
 	quiesce_local_uvhub(hmaster);
 
-	spin_lock(&hmaster->queue_lock);
+	raw_spin_lock(&hmaster->queue_lock);
 	reset_with_ipi(&bau_desc->distribution, bcp);
-	spin_unlock(&hmaster->queue_lock);
+	raw_spin_unlock(&hmaster->queue_lock);
 
 	end_uvhub_quiesce(hmaster);
 
@@ -793,7 +793,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
 	cycles_t tm1;
 
 	hmaster = bcp->uvhub_master;
-	spin_lock(&hmaster->disable_lock);
+	raw_spin_lock(&hmaster->disable_lock);
 	if (!bcp->baudisabled) {
 		stat->s_bau_disabled++;
 		tm1 = get_cycles();
@@ -806,7 +806,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat)
 			}
 		}
 	}
-	spin_unlock(&hmaster->disable_lock);
+	raw_spin_unlock(&hmaster->disable_lock);
 }
 
 static void count_max_concurr(int stat, struct bau_control *bcp,
@@ -869,7 +869,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2,
  */
 static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
 {
-	spinlock_t *lock = &hmaster->uvhub_lock;
+	raw_spinlock_t *lock = &hmaster->uvhub_lock;
 	atomic_t *v;
 
 	v = &hmaster->active_descriptor_count;
@@ -1002,7 +1002,7 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
 	struct bau_control *hmaster;
 
 	hmaster = bcp->uvhub_master;
-	spin_lock(&hmaster->disable_lock);
+	raw_spin_lock(&hmaster->disable_lock);
 	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
 		stat->s_bau_reenabled++;
 		for_each_present_cpu(tcpu) {
@@ -1014,10 +1014,10 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat)
 				tbcp->period_giveups = 0;
 			}
 		}
-		spin_unlock(&hmaster->disable_lock);
+		raw_spin_unlock(&hmaster->disable_lock);
 		return 0;
 	}
-	spin_unlock(&hmaster->disable_lock);
+	raw_spin_unlock(&hmaster->disable_lock);
 	return -1;
 }
 
@@ -1940,9 +1940,9 @@ static void __init init_per_cpu_tunables(void)
 		bcp->cong_reps			= congested_reps;
 		bcp->disabled_period		= sec_2_cycles(disabled_period);
 		bcp->giveup_limit		= giveup_limit;
-		spin_lock_init(&bcp->queue_lock);
-		spin_lock_init(&bcp->uvhub_lock);
-		spin_lock_init(&bcp->disable_lock);
+		raw_spin_lock_init(&bcp->queue_lock);
+		raw_spin_lock_init(&bcp->uvhub_lock);
+		raw_spin_lock_init(&bcp->disable_lock);
 	}
 }
 
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index b333fc45f9ec..8b85916e6986 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
 
 /* There is one of these allocated per node */
 struct uv_rtc_timer_head {
-	spinlock_t	lock;
+	raw_spinlock_t	lock;
 	/* next cpu waiting for timer, local node relative: */
 	int		next_cpu;
 	/* number of cpus on this node: */
@@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers(void)
 			uv_rtc_deallocate_timers();
 			return -ENOMEM;
 		}
-		spin_lock_init(&head->lock);
+		raw_spin_lock_init(&head->lock);
 		head->ncpus = uv_blade_nr_possible_cpus(bid);
 		head->next_cpu = -1;
 		blade_info[bid] = head;
@@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
 	unsigned long flags;
 	int next_cpu;
 
-	spin_lock_irqsave(&head->lock, flags);
+	raw_spin_lock_irqsave(&head->lock, flags);
 
 	next_cpu = head->next_cpu;
 	*t = expires;
@@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires)
 		if (uv_setup_intr(cpu, expires)) {
 			*t = ULLONG_MAX;
 			uv_rtc_find_next_timer(head, pnode);
-			spin_unlock_irqrestore(&head->lock, flags);
+			raw_spin_unlock_irqrestore(&head->lock, flags);
 			return -ETIME;
 		}
 	}
 
-	spin_unlock_irqrestore(&head->lock, flags);
+	raw_spin_unlock_irqrestore(&head->lock, flags);
 
 	return 0;
 }
@@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
 	unsigned long flags;
 	int rc = 0;
 
-	spin_lock_irqsave(&head->lock, flags);
+	raw_spin_lock_irqsave(&head->lock, flags);
 
 	if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
 		rc = 1;
@@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
 		uv_rtc_find_next_timer(head, pnode);
 	}
 
-	spin_unlock_irqrestore(&head->lock, flags);
+	raw_spin_unlock_irqrestore(&head->lock, flags);
 
 	return rc;
 }
@@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, int force)
 static cycle_t uv_read_rtc(struct clocksource *cs)
 {
 	unsigned long offset;
+	cycle_t cycles;
 
+	preempt_disable();
 	if (uv_get_min_hub_revision_id() == 1)
 		offset = 0;
 	else
 		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
 
-	return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+	cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+	preempt_enable();
+
+	return cycles;
 }
 
 /*
-- 
2.28.0