From d7eee5fc20f86a2ceaeb6dbcf42881a79294740a Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Tue, 6 Apr 2010 16:51:31 +0200
Subject: [PATCH 214/365] md: raid5: Make raid5_percpu handling RT aware

__raid_run_ops() disables preemption with get_cpu() around the access
to the raid5_percpu variables. That causes scheduling while atomic
spews on RT.

Serialize the access to the percpu data with a lock and keep the code
preemptible.

Reported-by: Udo van den Heuvel
Signed-off-by: Thomas Gleixner
Tested-by: Udo van den Heuvel
---
 drivers/md/raid5.c | 8 +++++---
 drivers/md/raid5.h | 1 +
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index e4d1b3b16976..14f15ae1fbf9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1934,8 +1934,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 	struct raid5_percpu *percpu;
 	unsigned long cpu;
 
-	cpu = get_cpu();
+	cpu = get_cpu_light();
 	percpu = per_cpu_ptr(conf->percpu, cpu);
+	spin_lock(&percpu->lock);
 	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
 		ops_run_biofill(sh);
 		overlap_clear++;
@@ -1991,7 +1992,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
 			if (test_and_clear_bit(R5_Overlap, &dev->flags))
 				wake_up(&sh->raid_conf->wait_for_overlap);
 	}
-	put_cpu();
+	spin_unlock(&percpu->lock);
+	put_cpu_light();
 }
 
 static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
@@ -6425,6 +6427,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node)
 			__func__, cpu);
 		return -ENOMEM;
 	}
+	spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
 	return 0;
 }
 
@@ -6435,7 +6438,6 @@ static int raid5_alloc_percpu(struct r5conf *conf)
 	conf->percpu = alloc_percpu(struct raid5_percpu);
 	if (!conf->percpu)
 		return -ENOMEM;
-
 	err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node);
 	if (!err) {
 		conf->scribble_disks = max(conf->raid_disks,
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 57ec49f0839e..0739604990b7 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -504,6 +504,7 @@ struct r5conf {
 	int			recovery_disabled;
 	/* per cpu variables */
 	struct raid5_percpu {
+		spinlock_t	lock;		/* Protection for -RT */
 		struct page	*spare_page; /* Used when checking P/Q in raid6 */
 		struct flex_array *scribble;   /* space for constructing buffer
 					       * lists and performing address
-- 
2.28.0
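
[Editor's note] For readers unfamiliar with the RT pattern used here, below is a
minimal, simplified sketch of the locking scheme the patch introduces. It assumes
the RT tree's get_cpu_light()/put_cpu_light() helpers; raid_run_ops_sketch() and
do_raid_ops() are illustrative stand-ins, not the actual raid5 code.

/*
 * Simplified sketch of the per-CPU locking pattern from this patch.
 * get_cpu_light() only pins the task to the current CPU; unlike
 * get_cpu() it does not disable preemption on PREEMPT_RT, so the
 * (sleeping) spinlock below can be taken without triggering a
 * "scheduling while atomic" splat.
 */
static void raid_run_ops_sketch(struct r5conf *conf, struct stripe_head *sh,
				unsigned long ops_request)
{
	struct raid5_percpu *percpu;
	unsigned long cpu;

	cpu = get_cpu_light();
	percpu = per_cpu_ptr(conf->percpu, cpu);

	/* Serialize all users of this CPU's scratch buffers. */
	spin_lock(&percpu->lock);
	do_raid_ops(sh, ops_request, percpu);	/* placeholder for the stripe ops */
	spin_unlock(&percpu->lock);

	put_cpu_light();
}

The lock itself is initialized per CPU in raid456_cpu_up_prepare(), as shown in the
third hunk of the patch.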