/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski
 *	Michal Nazarewicz
 *	Aneesh Kumar K.V
 *	Joonsoo Kim
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the License.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <linux/delay.h>
#include <linux/buffer_head.h>
#include <linux/dma-contiguous.h>
#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <trace/events/cma.h>

#include "cma.h"

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;
static DEFINE_MUTEX(cma_mutex);

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is represented in order_per_bits.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static int __init cma_activate_area(struct cma *cma)
{
	int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
	unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
	unsigned i = cma->count >> pageblock_order;
	struct zone *zone;

	cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!cma->bitmap) {
		cma->count = 0;
		return -ENOMEM;
	}

	WARN_ON_ONCE(!pfn_valid(pfn));
	zone = page_zone(pfn_to_page(pfn));

	do {
		unsigned j;

		base_pfn = pfn;
		for (j = pageblock_nr_pages; j; --j, pfn++) {
			WARN_ON_ONCE(!pfn_valid(pfn));
			/*
			 * alloc_contig_range() requires the pfn range
			 * specified to be in the same zone.  Make this
			 * simple by forcing the entire CMA reserved range
			 * to be in the same zone.
			 */
			if (page_zone(pfn_to_page(pfn)) != zone)
				goto err;
		}
		init_cma_reserved_pageblock(pfn_to_page(base_pfn));
	} while (--i);

	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return 0;

err:
	kfree(cma->bitmap);
	cma->count = 0;
	return -EINVAL;
}

static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = cma_activate_area(&cma_areas[i]);

		if (ret)
			return ret;
	}

	return 0;
}
core_initcall(cma_init_reserved_areas);
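/*
 * Worked example of the order_per_bit granularity used by the bitmap
 * helpers above (illustrative numbers only, not part of the API): with
 * order_per_bit = 2, each bitmap bit covers 1 << 2 = 4 pages.  A request
 * for 10 pages then needs cma_bitmap_pages_to_bits() =
 * ALIGN(10, 4) >> 2 = 3 bits, and an allocation aligned to order 3
 * (8 pages) must start at a bit index with
 * cma_bitmap_aligned_mask() = (1 << (3 - 2)) - 1 = 0x1 clear, i.e. at
 * an even bit.
 */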
/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area.
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 struct cma **res_cma)
{
	struct cma *cma;
	phys_addr_t alignment;

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];
	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
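/*
 * Minimal usage sketch for cma_init_reserved_mem() (illustrative and
 * compiled out; the hook name and the use of struct reserved_mem mirror
 * how a device-tree reserved-memory setup hook elsewhere in the tree
 * would call it).  The region must already be reserved in memblock and
 * satisfy the alignment checks above.
 */
#if 0
static int __init example_rmem_cma_setup(struct reserved_mem *rmem)
{
	struct cma *cma;
	int err;

	/* order_per_bit == 0: one bitmap bit per page */
	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma);
	if (err)
		return err;

	rmem->priv = cma;
	return 0;
}
#endif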
/**
 * cma_declare_contiguous() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero.
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @res_cma: Pointer to store the created cma region.
 *
 * This function reserves memory from early allocator.  It should be
 * called by arch specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.  This function allows creation of custom reserved areas.
 *
 * If @fixed is true, reserve contiguous area at exactly @base.  If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, struct cma **res_cma)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

#ifdef CONFIG_X86
	/*
	 * high_memory isn't direct mapped memory so retrieving its physical
	 * address isn't appropriate.  But it would be useful to check the
	 * physical address of the highmem boundary so it's justifiable to
	 * get the physical address from it.  On x86 there is a validation
	 * check for this case, so the following workaround is needed to
	 * avoid it.
	 */
	highmem_start = __pa_nodebug(high_memory);
#else
	highmem_start = __pa(high_memory);
#endif
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy
	 * algorithm; a later request for contiguous memory would then fail,
	 * which is not what we want.  Align the area to avoid this.
	 */
	alignment = max(alignment,  (phys_addr_t)PAGE_SIZE <<
			  max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the requested region must not cross
	 * the low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its
	 * effective value will be the memblock end.  Set it explicitly to
	 * simplify further checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same
		 * zone.  If the requested region crosses the low/high memory
		 * boundary, try allocating from high memory first and fall
		 * back to low memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range(size, alignment,
						    highmem_start, limit,
						    MEMBLOCK_NONE);
			limit = highmem_start;
		}

		if (!addr) {
			addr = memblock_alloc_range(size, alignment, base,
						    limit, MEMBLOCK_NONE);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible.
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
	if (ret)
		goto free_mem;

	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
	return 0;

free_mem:
	memblock_free(base, size);
err:
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
	return ret;
}
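/*
 * Minimal usage sketch for cma_declare_contiguous() (illustrative and
 * compiled out; the wrapper name is hypothetical).  This mirrors how
 * arch code typically reserves a default area early in boot: any base,
 * any limit, zero (default) alignment, one bitmap bit per page, not
 * fixed.
 */
#if 0
static struct cma *example_area;

static void __init example_reserve_cma(phys_addr_t size)
{
	if (cma_declare_contiguous(0, size, 0, 0, 0, false, &example_area))
		pr_warn("example: CMA reservation of %pa bytes failed\n",
			&size);
}
#endif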
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
	return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned long start = (unsigned long) page_address(page);
	unsigned long end = start + size;
	int err;

	err = apply_to_page_range(&init_mm, start, size, __dma_update_pte,
				  &prot);
	if (err)
		pr_err("***%s: error=%d, pfn=%lx\n",
			__func__, err, page_to_pfn(page));
	dsb(sy);
	flush_tlb_kernel_range(start, end);
}

static void __dma_clear_buffer(struct page *page, size_t size)
{
	void *ptr;

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 * The zeroing can be skipped for a VPR resize, as VPR memory is not
	 * accessible by the CPU for either read or write.  Since VPR's
	 * coherent device is the only device that has a heap resize
	 * notifier, and only when resize is enabled,
	 * dma_contiguous_should_replace_page() returns true if and only if
	 * the cma is VPR and resize is enabled.
	 */
	ptr = page_address(page);
	if (ptr) {
		if (!dma_contiguous_should_replace_page(page))
			memset(ptr, 0, size);
		__dma_flush_area(ptr, size);
		/* outer_flush_range() is not present on arm64 */
		/* outer_flush_range(__pa(ptr), __pa(ptr) + size); */
	}
}

struct page *cma_alloc_at(struct cma *cma, size_t count, unsigned int align,
			  phys_addr_t at_addr, bool map_non_cached)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	struct page *page = NULL;
	int ret;
	unsigned long start_pfn = __phys_to_pfn(at_addr);

	if (!cma || !cma->count)
		return NULL;

	pr_debug("%s(cma %p, count %zu, align %d)\n", __func__, (void *)cma,
		 count, align);

	if (!count)
		return NULL;

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		return NULL;

	if (start_pfn && start_pfn < cma->base_pfn)
		return NULL;
	start = start_pfn ? start_pfn - cma->base_pfn : start;

	for (;;) {
		unsigned long timeout = jiffies + msecs_to_jiffies(8000);
		int retries = 0;

		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno ||
		    (start_pfn && start != bitmap_no)) {
			mutex_unlock(&cma->lock);
			break;
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here.  We've marked this region
		 * for our exclusive use.  If the migration fails we will take
		 * the lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
retry:
		mutex_lock(&cma_mutex);
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
		mutex_unlock(&cma_mutex);
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);

		if (start_pfn && time_before(jiffies, timeout)) {
			/*
			 * Possible migration contention from
			 * __get_user_pages().  Retry after a bit of sleep.
			 */
			if (retries >= 5) {
				msleep(retries > 10 ? 3 : 1);
				invalidate_bh_lrus();
			} else {
				cond_resched();
			}
			retries++;
			goto retry;
		} else if (ret != -EBUSY || start_pfn) {
			break;
		}

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));
		/* try again with a bit different memory target */
		start = bitmap_no + mask + 1;
	}

	trace_cma_alloc(pfn, page, count, align);

	pr_debug("%s(): returned %p\n", __func__, page);

	if (page) {
		__dma_remap(page, count << PAGE_SHIFT,
			    pgprot_writecombine(PAGE_KERNEL));
		__dma_clear_buffer(page, count << PAGE_SHIFT);
		if (map_non_cached)
			__dma_remap(page, count << PAGE_SHIFT,
				    pgprot_noncached(PAGE_KERNEL));
	}

	return page;
}

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 *
 * This function allocates pages from a specific contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align)
{
	return cma_alloc_at(cma, count, align, 0, false);
}
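/*
 * Illustrative pairing of cma_alloc() with cma_release() (compiled out;
 * the caller is hypothetical).  @align is an order, so align == 2 asks
 * for a start pfn aligned to 4 pages.
 */
#if 0
static int example_use_cma(struct cma *cma)
{
	size_t count = 16;
	struct page *page;

	page = cma_alloc(cma, count, 2);	/* 16 pages, 4-page aligned */
	if (!page)
		return -ENOMEM;

	/* ... use the buffer via page_address(page) ... */

	if (!cma_release(cma, page, count))
		return -EINVAL;

	return 0;
}
#endif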
/**
 * cma_release() - release allocated pages
 * @cma:   Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when provided pages do not belong to contiguous area and
 * true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p)\n", __func__, (void *)pages);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);

	__dma_remap((struct page *)pages, count << PAGE_SHIFT,
		    PAGE_KERNEL_EXEC);
	free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(pfn, pages, count);

	return true;
}

#ifdef CONFIG_DMA_CMA
int dma_get_contiguous_stats(struct device *dev,
			     struct dma_contiguous_stats *stats)
{
	struct cma *cma = NULL;

	if (!dev || !stats)
		return -EINVAL;

	if (dev->cma_area)
		cma = dev->cma_area;

	if (!cma)
		return -EINVAL;

	stats->size = (cma->count) << PAGE_SHIFT;
	stats->base = (cma->base_pfn) << PAGE_SHIFT;

	return 0;
}

#define MAX_REPLACE_DEV 16
static struct device *replace_dev_list[MAX_REPLACE_DEV];
static atomic_t replace_dev_count;

bool dma_contiguous_should_replace_page(struct page *page)
{
	int i;
	ulong pfn;
	struct cma *cma;
	struct device *dev;
	int count = atomic_read(&replace_dev_count);

	if (!page)
		return false;
	pfn = page_to_pfn(page);

	/*
	 * Never walk past the end of the array; the counter can transiently
	 * exceed MAX_REPLACE_DEV in dma_contiguous_enable_replace_pages().
	 */
	if (count > MAX_REPLACE_DEV)
		count = MAX_REPLACE_DEV;

	for (i = 0; i < count; i++) {
		dev = replace_dev_list[i];
		if (!dev)
			continue;
		cma = dev->cma_area;
		if (!cma)
			continue;
		if (pfn >= cma->base_pfn && pfn < cma->base_pfn + cma->count)
			return true;
	}

	return false;
}

/*
 * Enable replacing pages during get_user_pages().  Any reference count
 * held on a CMA page by get_user_pages() makes the page non-migratable
 * and can cause CMA allocation failures.  Enabling replacement forces
 * get_user_pages() to substitute non-CMA pages for the CMA pages.
 */
int dma_contiguous_enable_replace_pages(struct device *dev)
{
	int idx;
	struct cma *cma;

	if (!dev)
		return -EINVAL;

	idx = atomic_inc_return(&replace_dev_count);
	if (idx > MAX_REPLACE_DEV) {
		/* roll back so the counter cannot run past the array */
		atomic_dec(&replace_dev_count);
		return -EINVAL;
	}
	replace_dev_list[idx - 1] = dev;

	cma = dev->cma_area;
	if (cma)
		pr_info("enabled page replacement for spfn=%lx, epfn=%lx\n",
			cma->base_pfn, cma->base_pfn + cma->count);

	return 0;
}
#endif /* CONFIG_DMA_CMA */
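/*
 * Illustrative driver-probe usage of the CONFIG_DMA_CMA page-replacement
 * hook above (compiled out; the probe function is hypothetical).  Once
 * enabled, get_user_pages() on this device's CMA range replaces CMA
 * pages with non-CMA pages so that long-lived references cannot block
 * later contiguous allocations.
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
	int err = dma_contiguous_enable_replace_pages(&pdev->dev);

	if (err)
		dev_warn(&pdev->dev,
			 "CMA page replacement unavailable: %d\n", err);

	return 0;
}
#endif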