From 61751ce1ba547bd0ef0457be194e8aee3c81d577 Mon Sep 17 00:00:00 2001
From: Mike Galbraith
Date: Thu, 20 Oct 2016 11:15:22 +0200
Subject: [PATCH 311/365] drivers/zram: Don't disable preemption in zcomp_stream_get/put()

In v4.7, the driver switched to percpu compression streams, disabling
preemption via get/put_cpu_ptr(). Use a per-zcomp_strm lock here. We
also have to fix a lock order issue in zram_decompress_page() such
that zs_map_object() nests inside of zcomp_stream_put() as it does in
zram_bvec_write().

Signed-off-by: Mike Galbraith
[bigeasy: get_locked_var() -> per zcomp_strm lock]
Signed-off-by: Sebastian Andrzej Siewior
---
 drivers/block/zram/zcomp.c    | 12 ++++++++++--
 drivers/block/zram/zcomp.h    |  1 +
 drivers/block/zram/zram_drv.c |  6 +++---
 3 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c
index 4b5cd3a7b2b6..fa8329ad79fd 100644
--- a/drivers/block/zram/zcomp.c
+++ b/drivers/block/zram/zcomp.c
@@ -118,12 +118,19 @@ ssize_t zcomp_available_show(const char *comp, char *buf)
 
 struct zcomp_strm *zcomp_stream_get(struct zcomp *comp)
 {
-	return *get_cpu_ptr(comp->stream);
+	struct zcomp_strm *zstrm;
+
+	zstrm = *this_cpu_ptr(comp->stream);
+	spin_lock(&zstrm->zcomp_lock);
+	return zstrm;
 }
 
 void zcomp_stream_put(struct zcomp *comp)
 {
-	put_cpu_ptr(comp->stream);
+	struct zcomp_strm *zstrm;
+
+	zstrm = *this_cpu_ptr(comp->stream);
+	spin_unlock(&zstrm->zcomp_lock);
 }
 
 int zcomp_compress(struct zcomp_strm *zstrm,
@@ -174,6 +181,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp,
 			pr_err("Can't allocate a compression stream\n");
 			return NOTIFY_BAD;
 		}
+		spin_lock_init(&zstrm->zcomp_lock);
 		*per_cpu_ptr(comp->stream, cpu) = zstrm;
 		break;
 	case CPU_DEAD:
diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h
index 478cac2ed465..f7a6efdc3285 100644
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -14,6 +14,7 @@ struct zcomp_strm {
 	/* compression/decompression buffer */
 	void *buffer;
 	struct crypto_comp *tfm;
+	spinlock_t zcomp_lock;
 };
 
 /* dynamic per-device compression frontend */
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index c864ad57348a..a9765ffa322a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -577,6 +577,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	struct zram_meta *meta = zram->meta;
 	unsigned long handle;
 	unsigned int size;
+	struct zcomp_strm *zstrm;
 
 	zram_lock_table(&meta->table[index]);
 	handle = meta->table[index].handle;
@@ -588,16 +589,15 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 		return 0;
 	}
 
+	zstrm = zcomp_stream_get(zram->comp);
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
 	if (size == PAGE_SIZE) {
 		memcpy(mem, cmem, PAGE_SIZE);
 	} else {
-		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
-
 		ret = zcomp_decompress(zstrm, cmem, size, mem);
-		zcomp_stream_put(zram->comp);
 	}
 	zs_unmap_object(meta->mem_pool, handle);
+	zcomp_stream_put(zram->comp);
 	zram_unlock_table(&meta->table[index]);
 
 	/* Should NEVER happen. Return bio error if it does. */
-- 
2.28.0
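
Note for readers: below is a minimal user-space C analogue of the locking
pattern introduced by this patch, not kernel code and not part of the patch
itself. One stream per slot, each guarded by its own lock, so "getting" a
stream serializes users of that stream without disabling preemption. All
names here (stream_get/stream_put, struct strm, NR_SLOTS) are invented for
the sketch; the kernel side uses this_cpu_ptr(), spin_lock() on
zstrm->zcomp_lock and spin_lock_init() as shown in the diff.

/*
 * Illustrative user-space analogue only -- NOT the kernel implementation.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define NR_SLOTS 4

struct strm {
	pthread_mutex_t lock;   /* plays the role of zcomp_lock */
	char buffer[64];        /* per-stream scratch buffer */
};

static struct strm streams[NR_SLOTS];

/* Take only this slot's lock; other slots remain usable in parallel. */
static struct strm *stream_get(int slot)
{
	struct strm *zstrm = &streams[slot % NR_SLOTS];

	pthread_mutex_lock(&zstrm->lock);
	return zstrm;
}

static void stream_put(struct strm *zstrm)
{
	pthread_mutex_unlock(&zstrm->lock);
}

int main(void)
{
	int i;

	for (i = 0; i < NR_SLOTS; i++)
		pthread_mutex_init(&streams[i].lock, NULL); /* cf. spin_lock_init() */

	/* get -> work on the stream's buffer -> put, as in the driver paths */
	struct strm *zstrm = stream_get(0);

	snprintf(zstrm->buffer, sizeof(zstrm->buffer), "decompress via slot 0");
	printf("%s\n", zstrm->buffer);
	stream_put(zstrm);

	return 0;
}

The shape mirrors the zram_decompress_page() reordering above: the stream is
taken before the buffer is mapped and released only after it is unmapped, so
the critical section is bounded by a single per-stream lock rather than by a
preemption-disabled region.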