From 474b471d3b2670c710a4b7db7ce855e270857b5f Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 15 Jan 2016 16:33:34 +0100
Subject: [PATCH 288/365] net/core: protect users of napi_alloc_cache against
 reentrance

On -RT the code running in BH can not be moved to another CPU, so CPU
local variables remain local. However, the code can be preempted, and
another task may then enter BH on the same CPU and access the same
napi_alloc_cache variable.
This patch ensures that each user of napi_alloc_cache uses a local lock.

Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
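For reference, a minimal sketch of the locking pattern applied below,
assuming the locallock API from the -RT patch set
(include/linux/locallock.h); the per-CPU variable and the lock mirror
the ones in net/core/skbuff.c, while napi_cache_user() is a
hypothetical caller:

	static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
	static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);

	/* hypothetical user of the per-CPU cache */
	static void napi_cache_user(void)
	{
		struct napi_alloc_cache *nc;

		/*
		 * On !RT this reduces to a preempt-disabled per-CPU
		 * access; on RT it takes a per-CPU sleeping spinlock,
		 * so a task that preempts BH on the same CPU blocks
		 * here instead of corrupting the cache.
		 */
		nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
		/* touch nc->page / nc->skb_cache only in this section */
		put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
	}

Note that __napi_alloc_skb() also snapshots nc->page.pfmemalloc into a
local variable while the lock is held, since dereferencing nc after
put_locked_var() would again race with other users of the cache.
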
 net/core/skbuff.c | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c89c40271856..4b3ff49e1230 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -362,6 +362,7 @@ struct napi_alloc_cache {
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
@@ -393,9 +394,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
 
 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
+	void *data;
 
-	return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	data = __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	return data;
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -494,6 +499,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	struct napi_alloc_cache *nc;
 	struct sk_buff *skb;
 	void *data;
+	bool pfmemalloc;
 
 	len += NET_SKB_PAD + NET_IP_ALIGN;
 
@@ -516,7 +522,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	data = __alloc_page_frag(&nc->page, len, gfp_mask);
+	pfmemalloc = nc->page.pfmemalloc;
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	if (unlikely(!data))
 		return NULL;
 
@@ -527,7 +536,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	}
 
 	/* use OR instead of assignment to avoid clearing of bits in mask */
-	if (nc->page.pfmemalloc)
+	if (pfmemalloc)
 		skb->pfmemalloc = 1;
 	skb->head_frag = 1;
 
@@ -771,23 +780,26 @@ EXPORT_SYMBOL(consume_skb);
 
 void __kfree_skb_flush(void)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* flush skb_cache if containing objects */
 	if (nc->skb_count) {
 		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
 				     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 
 static inline void _kfree_skb_defer(struct sk_buff *skb)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
 	/* drop skb->head and call any destructors for packet */
 	skb_release_all(skb);
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* record skb to CPU local list */
 	nc->skb_cache[nc->skb_count++] = skb;
 
@@ -802,6 +814,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb)
 				     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 void __kfree_skb_defer(struct sk_buff *skb)
 {
-- 
2.28.0