Merge tag 'mm-stable-2023-06-24-19-15' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull mm updates from Andrew Morton:

 - Yosry Ahmed brought back some cgroup v1 stats in OOM logs

 - Yosry has also eliminated cgroup's atomic rstat flushing

 - Nhat Pham adds the new cachestat() syscall. It provides userspace
   with the ability to query pagecache status - a similar concept to
   mincore() but more powerful and with improved usability (a usage
   sketch follows this log)

 - Mel Gorman provides more optimizations for compaction, reducing the
   prevalence of page rescanning

 - Lorenzo Stoakes has done some maintenance work on the
   get_user_pages() interface

 - Liam Howlett continues with cleanups and maintenance work to the
   maple tree code. Peng Zhang also does some work on maple tree

 - Johannes Weiner has done some cleanup work on the compaction code

 - David Hildenbrand has contributed additional selftests for
   get_user_pages()

 - Thomas Gleixner has contributed some maintenance and optimization
   work for the vmalloc code

 - Baolin Wang has provided some compaction cleanups

 - SeongJae Park continues maintenance work on the DAMON code

 - Huang Ying has done some maintenance on the swap code's usage of
   device refcounting

 - Christoph Hellwig has some cleanups for the filemap/directio code

 - Ryan Roberts provides two patch series which yield some
   rationalization of the kernel's access to pte entries - use the
   provided APIs rather than open-coding accesses

 - Lorenzo Stoakes has some fixes to the interaction between pagecache
   and directio access to file mappings

 - John Hubbard has a series of fixes to the MM selftesting code

 - ZhangPeng continues the folio conversion campaign

 - Hugh Dickins has been working on the pagetable handling code, mainly
   with a view to reducing the load on the mmap_lock

 - Catalin Marinas has reduced the arm64 kmalloc() minimum alignment
   from 128 to 8

 - Domenico Cerasuolo has improved the zswap reclaim mechanism by
   reorganizing the LRU management

 - Matthew Wilcox provides some fixups to make gfs2 work better with
   the buffer_head code

 - Vishal Moola also has done some folio conversion work

 - Matthew Wilcox has removed the remnants of the pagevec code - their
   functionality is migrated over to struct folio_batch

* tag 'mm-stable-2023-06-24-19-15' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (380 commits)
  mm/hugetlb: remove hugetlb_set_page_subpool()
  mm: nommu: correct the range of mmap_sem_read_lock in task_mem()
  hugetlb: revert use of page_cache_next_miss()
  Revert "page cache: fix page_cache_next/prev_miss off by one"
  mm/vmscan: fix root proactive reclaim unthrottling unbalanced node
  mm: memcg: rename and document global_reclaim()
  mm: kill [add|del]_page_to_lru_list()
  mm: compaction: convert to use a folio in isolate_migratepages_block()
  mm: zswap: fix double invalidate with exclusive loads
  mm: remove unnecessary pagevec includes
  mm: remove references to pagevec
  mm: rename invalidate_mapping_pagevec to mapping_try_invalidate
  mm: remove struct pagevec
  net: convert sunrpc from pagevec to folio_batch
  i915: convert i915_gpu_error to use a folio_batch
  pagevec: rename fbatch_count()
  mm: remove check_move_unevictable_pages()
  drm: convert drm_gem_put_pages() to use a folio_batch
  i915: convert shmem_sg_free_table() to use a folio_batch
  scatterlist: add sg_set_folio()
  ...
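The cachestat() syscall mentioned above can be exercised from userspace along the following lines. This is a minimal sketch rather than the merged selftest: it assumes the 6.5-era UAPI (struct cachestat_range and struct cachestat from <linux/mman.h>, all counts reported in pages, len == 0 meaning "to end of file") and invokes the syscall directly, since glibc had no wrapper at merge time; 451 is the x86-64 syscall number.

/* Minimal userspace sketch of cachestat(); assumes the 6.5-era UAPI. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>		/* struct cachestat_range, struct cachestat */

#ifndef __NR_cachestat
#define __NR_cachestat 451	/* x86-64 value; check your architecture */
#endif

int main(int argc, char **argv)
{
	/* off = 0, len = 0: statistics for the whole file */
	struct cachestat_range range = { 0, 0 };
	struct cachestat cs;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;

	/* no glibc wrapper at merge time, so call it directly */
	if (syscall(__NR_cachestat, fd, &range, &cs, 0) != 0) {
		perror("cachestat");
		return 1;
	}

	/* all counts are in pages */
	printf("cached %llu dirty %llu writeback %llu evicted %llu recently_evicted %llu\n",
	       (unsigned long long)cs.nr_cache,
	       (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback,
	       (unsigned long long)cs.nr_evicted,
	       (unsigned long long)cs.nr_recently_evicted);
	close(fd);
	return 0;
}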
// SPDX-License-Identifier: GPL-2.0
/*
 * DMA memory management for framework level HCD code (hc_driver)
 *
 * This implementation plugs in through generic "usb_bus" level methods,
 * and should work with all USB controllers, regardless of bus type.
 *
 * Released under the GPLv2 only.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/genalloc.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>


/*
 * DMA-Coherent Buffers
 */

/* FIXME tune these based on pool statistics ... */
static size_t pool_max[HCD_BUFFER_POOLS] = {
	32, 128, 512, 2048,
};
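
/*
 * Runs once during USB core initialization (hence the __init
 * annotation): widen or disable the smallest pool so that no pool
 * ends up smaller than ARCH_DMA_MINALIGN.
 */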
void __init usb_init_pool_max(void)
{
	/*
	 * The pool_max values must never be smaller than
	 * ARCH_DMA_MINALIGN.
	 */
	if (ARCH_DMA_MINALIGN <= 32)
		; /* Original value is okay */
	else if (ARCH_DMA_MINALIGN <= 64)
		pool_max[0] = 64;
	else if (ARCH_DMA_MINALIGN <= 128)
		pool_max[0] = 0; /* Don't use this pool */
	else
		BUILD_BUG(); /* We don't allow this */
}

/* SETUP primitives */

/**
 * hcd_buffer_create - initialize buffer pools
 * @hcd: the bus whose buffer pools are to be initialized
 *
 * Context: task context, might sleep
 *
 * Call this as part of initializing a host controller that uses the dma
 * memory allocators. It initializes some pools of dma-coherent memory that
 * will be shared by all drivers using that controller.
 *
 * Call hcd_buffer_destroy() to clean up after using those pools.
 *
 * Return: 0 if successful. A negative errno value otherwise.
 */
int hcd_buffer_create(struct usb_hcd *hcd)
{
	char		name[16];
	int		i, size;

	if (hcd->localmem_pool || !hcd_uses_dma(hcd))
		return 0;

	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		size = pool_max[i];
		if (!size)
			continue;
		snprintf(name, sizeof(name), "buffer-%d", size);
		hcd->pool[i] = dma_pool_create(name, hcd->self.sysdev,
					       size, size, 0);
		if (!hcd->pool[i]) {
			hcd_buffer_destroy(hcd);
			return -ENOMEM;
		}
	}
	return 0;
}


/**
 * hcd_buffer_destroy - deallocate buffer pools
 * @hcd: the bus whose buffer pools are to be destroyed
 *
 * Context: task context, might sleep
 *
 * This frees the buffer pools created by hcd_buffer_create().
 */
void hcd_buffer_destroy(struct usb_hcd *hcd)
{
	int i;

	if (!IS_ENABLED(CONFIG_HAS_DMA))
		return;

	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		dma_pool_destroy(hcd->pool[i]);
		hcd->pool[i] = NULL;
	}
}


/* sometimes alloc/free could use kmalloc with GFP_DMA, for
 * better sharing and to leverage mm/slab.c intelligence.
 */
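
/*
 * hcd_buffer_alloc() picks the cheapest suitable backing store: the
 * HCD's local memory pool if it has one, plain kmalloc() for PIO-only
 * hosts (with *dma set to an all-ones sentinel), the smallest dma_pool
 * that fits, or dma_alloc_coherent() for anything larger than the
 * biggest pool.
 */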
void *hcd_buffer_alloc(
	struct usb_bus		*bus,
	size_t			size,
	gfp_t			mem_flags,
	dma_addr_t		*dma
)
{
	struct usb_hcd		*hcd = bus_to_hcd(bus);
	int			i;

	if (size == 0)
		return NULL;

	if (hcd->localmem_pool)
		return gen_pool_dma_alloc(hcd->localmem_pool, size, dma);

	/* some USB hosts just use PIO */
	if (!hcd_uses_dma(hcd)) {
		*dma = ~(dma_addr_t) 0;
		return kmalloc(size, mem_flags);
	}

	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		if (size <= pool_max[i])
			return dma_pool_alloc(hcd->pool[i], mem_flags, dma);
	}
	return dma_alloc_coherent(hcd->self.sysdev, size, dma, mem_flags);
}
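
/*
 * hcd_buffer_free() must be passed the same size that was used for the
 * allocation, so it can route the buffer back to whichever allocator
 * hcd_buffer_alloc() chose.
 */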
void hcd_buffer_free(
	struct usb_bus		*bus,
	size_t			size,
	void			*addr,
	dma_addr_t		dma
)
{
	struct usb_hcd		*hcd = bus_to_hcd(bus);
	int			i;

	if (!addr)
		return;

	if (hcd->localmem_pool) {
		gen_pool_free(hcd->localmem_pool, (unsigned long)addr, size);
		return;
	}

	if (!hcd_uses_dma(hcd)) {
		kfree(addr);
		return;
	}

	for (i = 0; i < HCD_BUFFER_POOLS; i++) {
		if (size <= pool_max[i]) {
			dma_pool_free(hcd->pool[i], addr, dma);
			return;
		}
	}
	dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}
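
/*
 * The _pages variants hand out page-aligned coherent memory; PIO-only
 * hosts fall back to __get_free_pages() and flag the DMA address with
 * DMA_MAPPING_ERROR instead of the all-ones sentinel used above.
 */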
void *hcd_buffer_alloc_pages(struct usb_hcd *hcd,
		size_t size, gfp_t mem_flags, dma_addr_t *dma)
{
	if (size == 0)
		return NULL;

	if (hcd->localmem_pool)
		return gen_pool_dma_alloc_align(hcd->localmem_pool,
				size, dma, PAGE_SIZE);

	/* some USB hosts just use PIO */
	if (!hcd_uses_dma(hcd)) {
		*dma = DMA_MAPPING_ERROR;
		return (void *)__get_free_pages(mem_flags,
				get_order(size));
	}

	return dma_alloc_coherent(hcd->self.sysdev,
			size, dma, mem_flags);
}

void hcd_buffer_free_pages(struct usb_hcd *hcd,
		size_t size, void *addr, dma_addr_t dma)
{
	if (!addr)
		return;

	if (hcd->localmem_pool) {
		gen_pool_free(hcd->localmem_pool,
				(unsigned long)addr, size);
		return;
	}

	if (!hcd_uses_dma(hcd)) {
		free_pages((unsigned long)addr, get_order(size));
		return;
	}

	dma_free_coherent(hcd->self.sysdev, size, addr, dma);
}