/*
* Based on arch/arm/include/asm/tlbflush.h
*
* Copyright (C) 1999-2003 Russell King
* Copyright (C) 2012 ARM Ltd.
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ASM_TLBFLUSH_H
#define __ASM_TLBFLUSH_H
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/mmu.h>
/*
* Raw TLBI operations.
*
* Where necessary, use the __tlbi() macro to avoid asm()
* boilerplate. Drivers and most kernel code should use the TLB
* management routines in preference to the macro below.
*
* The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
* on whether a particular TLBI operation takes an argument or
* not. The macro handles invoking the asm with or without the
* register argument as appropriate.
*/
#define __TLBI_0(op, arg) asm ("tlbi " #op)
#define __TLBI_1(op, arg) asm ("tlbi " #op ", %0" : : "r" (arg))
#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)
#define __tlbi(op, ...) __TLBI_N(op, ##__VA_ARGS__, 1, 0)
#define __tlbi_user(op, arg) do { \
if (arm64_kernel_unmapped_at_el0()) \
__tlbi(op, (arg) | USER_ASID_FLAG); \
} while (0)
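/*
* For example (illustrative only), the two forms expand to
*
* __tlbi(vmalle1);          asm ("tlbi vmalle1")
* __tlbi(vale1is, addr);    asm ("tlbi vale1is, %0" : : "r" (addr))
*
* __tlbi_user() repeats the TLBI with USER_ASID_FLAG ORed into the
* argument, but only when the kernel is unmapped at EL0 (KPTI), since
* user mappings are then tagged with a separate ASID.
*/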
/*
* TLB Management
* ==============
*
* The TLB specific code is expected to perform whatever tests it needs
* to determine if it should invalidate the TLB for each call. Start
* addresses are inclusive and end addresses are exclusive; it is safe to
* round these addresses down.
*
* flush_tlb_all()
*
* Invalidate the entire TLB.
*
* flush_tlb_mm(mm)
*
* Invalidate all TLB entries in a particular address space.
* - mm - mm_struct describing address space
*
* flush_tlb_range(vma,start,end)
*
* Invalidate a range of TLB entries in the specified address
* space.
* - vma - vm_area_struct describing the address range
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*
* flush_tlb_page(vma,vaddr)
*
* Invalidate the specified page in the specified address range.
* - vma - vm_area_struct describing the address range
* - vaddr - virtual address (may not be aligned)
*
* flush_tlb_kernel_range(start,end)
*
* Invalidate the TLB entries covering the specified range of
* kernel virtual addresses.
* - start - start address (may not be aligned)
* - end - end address (exclusive, may not be aligned)
*/
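/*
* Illustrative example (not part of the original text): after changing a
* single user PTE, a caller would typically invalidate just that page,
*
* flush_tlb_page(vma, address);
*
* while tearing down a larger user mapping uses
* flush_tlb_range(vma, start, end), and kernel-only mappings (e.g.
* vmalloc teardown) use flush_tlb_kernel_range(start, end).
*/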
/*
* Non-shared TLB invalidation is only valid here if there are no other PEs in
* the shareability domain.
*/
#ifdef CONFIG_ARM64_NON_SHARED_TLBI
#define __DSB_FOR_TLBI(...) dsb(nsh##__VA_ARGS__)
#define __TLBI(TYPE, ...) __tlbi(TYPE, ##__VA_ARGS__)
#define __TLBI_USER(TYPE, ...) __tlbi_user(TYPE, ##__VA_ARGS__)
#else
#define __DSB_FOR_TLBI(...) dsb(ish##__VA_ARGS__)
#define __TLBI(TYPE, ...) __tlbi(TYPE##is, ##__VA_ARGS__)
#define __TLBI_USER(TYPE, ...) __tlbi_user(TYPE##is, ##__VA_ARGS__)
#endif
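/*
* For example, with CONFIG_ARM64_NON_SHARED_TLBI enabled,
* __TLBI(vale1, addr) emits "tlbi vale1" and __DSB_FOR_TLBI(st) emits
* dsb(nshst), i.e. local-only invalidation and barriers. Otherwise the
* inner-shareable forms are used ("tlbi vale1is", dsb(ishst)), which
* broadcast the invalidation to all PEs in the inner shareable domain.
*/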
static inline void local_flush_tlb_all(void)
{
dsb(nshst);
__tlbi(vmalle1);
dsb(nsh);
isb();
}
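/*
* local_flush_tlb_all() deliberately keeps the non-shareable barriers and
* the non-broadcast TLBI, so it only affects the calling CPU;
* flush_tlb_all() below uses the config-selected helpers and therefore
* broadcasts to the inner shareable domain unless
* CONFIG_ARM64_NON_SHARED_TLBI is set.
*/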
static inline void flush_tlb_all(void)
{
__DSB_FOR_TLBI(st);
__TLBI(vmalle1);
__DSB_FOR_TLBI();
isb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
unsigned long asid = ASID(mm) << 48;
__DSB_FOR_TLBI(st);
__TLBI(aside1, asid);
__TLBI_USER(aside1, asid);
__DSB_FOR_TLBI();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long uaddr)
{
unsigned long addr = uaddr >> 12 | (ASID(vma->vm_mm) << 48);
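/*
* TLBI by-VA argument format: bits [43:0] hold VA[55:12] (hence the
* shift right by 12) and bits [63:48] hold the ASID, which is why the
* ASID is shifted up by 48 here and in the range helpers below.
*/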
__DSB_FOR_TLBI(st);
__TLBI(vale1, addr);
__TLBI_USER(vale1, addr);
__DSB_FOR_TLBI();
}
/*
* This is meant to avoid soft lock-ups on large TLB flushing ranges and not
* necessarily a performance improvement.
*/
#define MAX_TLB_RANGE (1024UL << PAGE_SHIFT)
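/*
* Worked example: with 4K pages (PAGE_SHIFT == 12) this is 1024 pages,
* i.e. 4MB; ranges larger than that fall back to flush_tlb_mm() or
* flush_tlb_all() rather than issuing thousands of per-page TLBIs.
*/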
static inline void __flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end,
bool last_level)
{
unsigned long asid = ASID(vma->vm_mm) << 48;
unsigned long addr;
if ((end - start) > MAX_TLB_RANGE) {
flush_tlb_mm(vma->vm_mm);
return;
}
start = asid | (start >> 12);
end = asid | (end >> 12);
__DSB_FOR_TLBI(st);
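/*
* start/end are now encoded as (ASID << 48) | (VA >> 12), so one page
* corresponds to 1 << (PAGE_SHIFT - 12) in this encoding; that is the
* loop stride. vale1 ("last level") invalidates only the final-level
* entry, preserving cached intermediate (walk cache) entries, while
* vae1 drops those as well.
*/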
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) {
if (last_level) {
__TLBI(vale1, addr);
__TLBI_USER(vale1, addr);
} else {
__TLBI(vae1, addr);
__TLBI_USER(vae1, addr);
}
}
__DSB_FOR_TLBI();
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
__flush_tlb_range(vma, start, end, false);
}
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
unsigned long addr;
if ((end - start) > MAX_TLB_RANGE) {
flush_tlb_all();
return;
}
start >>= 12;
end >>= 12;
__DSB_FOR_TLBI(st);
for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
__TLBI(vaae1, addr);
__DSB_FOR_TLBI();
isb();
}
/*
* Used to invalidate the TLB (walk caches) corresponding to intermediate page
* table levels (pgd/pud/pmd).
*/
static inline void __flush_tlb_pgtable(struct mm_struct *mm,
unsigned long uaddr)
{
unsigned long addr = uaddr >> 12 | (ASID(mm) << 48);
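/*
* vae1 (rather than vale1) is used so that cached intermediate-level
* (walk cache) entries for this address are invalidated too, not just
* the leaf entry.
*/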
__TLBI(vae1, addr);
__TLBI_USER(vae1, addr);
__DSB_FOR_TLBI();
}
#endif /* __ASSEMBLY__ */
#endif /* __ASM_TLBFLUSH_H */