/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 */
|
|
|
|
|
|
|
|
#ifndef _ASM_IOMMU_H
|
|
|
|
#define _ASM_IOMMU_H
|
2005-12-16 14:43:46 -07:00
|
|
|
#ifdef __KERNEL__
|
2005-10-05 19:06:20 -07:00
|
|
|
|
2006-10-29 22:15:59 -07:00
|
|
|
#include <linux/compiler.h>
|
2005-10-05 19:06:20 -07:00
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/device.h>
|
2020-09-22 06:31:03 -07:00
|
|
|
#include <linux/dma-map-ops.h>
|
2007-10-18 23:40:25 -07:00
|
|
|
#include <linux/bitops.h>
|
2007-05-03 05:28:32 -07:00
|
|
|
#include <asm/machdep.h>
|
2006-10-29 22:15:59 -07:00
|
|
|
#include <asm/types.h>
|
2015-03-30 22:00:48 -07:00
|
|
|
#include <asm/pci-bridge.h>
|
2018-07-05 09:24:57 -07:00
|
|
|
#include <asm/asm-const.h>
|
2006-10-29 22:15:59 -07:00
|
|
|
|
2013-12-09 00:17:01 -07:00
|
|
|
/*
 * IOMMU page geometry: a fixed 4K flavour, plus per-table variants that
 * derive the page size from iommu_table::it_page_shift.
 */
#define IOMMU_PAGE_SHIFT_4K      12
#define IOMMU_PAGE_SIZE_4K       (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
#define IOMMU_PAGE_MASK_4K       (~((1 << IOMMU_PAGE_SHIFT_4K) - 1))
#define IOMMU_PAGE_ALIGN_4K(addr) ALIGN(addr, IOMMU_PAGE_SIZE_4K)

#define IOMMU_PAGE_SIZE(tblptr) (ASM_CONST(1) << (tblptr)->it_page_shift)
#define IOMMU_PAGE_MASK(tblptr) (~((1 << (tblptr)->it_page_shift) - 1))
#define IOMMU_PAGE_ALIGN(addr, tblptr) ALIGN(addr, IOMMU_PAGE_SIZE(tblptr))

/* Device-tree property names describing 64-bit DDR DMA windows */
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"
#define DMA64_PROPNAME "linux,dma64-ddr-window-info"

#define MIN_DDW_VPMEM_DMA_WINDOW SZ_2G

/* Boot time flags */
extern int iommu_is_off;
extern int iommu_force_on;
|
2006-10-29 22:15:59 -07:00
|
|
|
|
2015-06-04 23:35:06 -07:00
|
|
|
/* Per-table TCE manipulation callbacks, provided by the platform backend. */
struct iommu_table_ops {
	/*
	 * When called with direction==DMA_NONE, it is equal to clear().
	 * uaddr is a linear map address.
	 */
	int (*set)(struct iommu_table *tbl,
			long index, long npages,
			unsigned long uaddr,
			enum dma_data_direction direction,
			unsigned long attrs);
#ifdef CONFIG_IOMMU_API
	/*
	 * Exchanges existing TCE with new TCE plus direction bits;
	 * returns old TCE and DMA direction mask.
	 * @tce is a physical address.
	 */
	int (*xchg_no_kill)(struct iommu_table *tbl,
			long index,
			unsigned long *hpa,
			enum dma_data_direction *direction);

	/* Invalidate a range of TCE cache entries after xchg_no_kill() */
	void (*tce_kill)(struct iommu_table *tbl,
			unsigned long index,
			unsigned long pages);

	__be64 *(*useraddrptr)(struct iommu_table *tbl, long index, bool alloc);
#endif
	void (*clear)(struct iommu_table *tbl,
			long index, long npages);
	/* get() returns a physical address */
	unsigned long (*get)(struct iommu_table *tbl, long index);
	void (*flush)(struct iommu_table *tbl);
	void (*free)(struct iommu_table *tbl);
};
|
|
|
|
|
|
|
|
/* These are used by VIO */
extern struct iommu_table_ops iommu_table_lpar_multi_ops;
extern struct iommu_table_ops iommu_table_pseries_ops;
|
|
|
|
|
2005-10-05 19:06:20 -07:00
|
|
|
/*
|
|
|
|
* IOMAP_MAX_ORDER defines the largest contiguous block
|
|
|
|
* of dma space we can get. IOMAP_MAX_ORDER = 13
|
|
|
|
* allows up to 2**12 pages (4096 * 4096) = 16 MB
|
|
|
|
*/
|
2006-10-29 22:15:59 -07:00
|
|
|
#define IOMAP_MAX_ORDER 13
|
2005-10-05 19:06:20 -07:00
|
|
|
|
2012-06-07 11:14:48 -07:00
|
|
|
#define IOMMU_POOL_HASHBITS 2
|
|
|
|
#define IOMMU_NR_POOLS (1 << IOMMU_POOL_HASHBITS)
|
|
|
|
|
|
|
|
struct iommu_pool {
|
|
|
|
unsigned long start;
|
|
|
|
unsigned long end;
|
|
|
|
unsigned long hint;
|
|
|
|
spinlock_t lock;
|
|
|
|
} ____cacheline_aligned_in_smp;
|
|
|
|
|
2005-10-05 19:06:20 -07:00
|
|
|
struct iommu_table {
|
|
|
|
unsigned long it_busno; /* Bus number this table belongs to */
|
|
|
|
unsigned long it_size; /* Size of iommu table in entries */
|
2015-06-04 23:35:19 -07:00
|
|
|
unsigned long it_indirect_levels;
|
|
|
|
unsigned long it_level_size;
|
2015-06-04 23:35:22 -07:00
|
|
|
unsigned long it_allocated_size;
|
2005-10-05 19:06:20 -07:00
|
|
|
unsigned long it_offset; /* Offset into global table */
|
|
|
|
unsigned long it_base; /* mapped address of tce table */
|
|
|
|
unsigned long it_index; /* which iommu table this is */
|
|
|
|
unsigned long it_type; /* type: PCI or Virtual Bus */
|
|
|
|
unsigned long it_blocksize; /* Entries in each block (cacheline) */
|
2012-06-07 11:14:48 -07:00
|
|
|
unsigned long poolsize;
|
|
|
|
unsigned long nr_pools;
|
|
|
|
struct iommu_pool large_pool;
|
|
|
|
struct iommu_pool pools[IOMMU_NR_POOLS];
|
2005-10-05 19:06:20 -07:00
|
|
|
unsigned long *it_map; /* A simple allocation bitmap for now */
|
2013-12-09 00:17:02 -07:00
|
|
|
unsigned long it_page_shift;/* table iommu page size */
|
2015-06-04 23:35:09 -07:00
|
|
|
struct list_head it_group_list;/* List of iommu_table_group_link */
|
2018-07-03 23:13:46 -07:00
|
|
|
__be64 *it_userspace; /* userspace view of the table */
|
2015-06-04 23:35:06 -07:00
|
|
|
struct iommu_table_ops *it_ops;
|
2017-03-21 21:21:50 -07:00
|
|
|
struct kref it_kref;
|
2018-07-03 23:13:49 -07:00
|
|
|
int it_nid;
|
2019-07-17 22:11:39 -07:00
|
|
|
unsigned long it_reserved_start; /* Start of not-DMA-able (MMIO) area */
|
|
|
|
unsigned long it_reserved_end;
|
2005-10-05 19:06:20 -07:00
|
|
|
};
|
|
|
|
|
2018-10-15 03:08:41 -07:00
|
|
|
/*
 * Look up the userspace-view pointer for a TCE entry via the table's
 * useraddrptr() callback.  The _RO variant never allocates (alloc=false).
 */
#define IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), false))
#define IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry) \
		((tbl)->it_ops->useraddrptr((tbl), (entry), true))
|
2015-06-04 23:35:25 -07:00
|
|
|
|
2013-12-09 00:17:03 -07:00
|
|
|
/* Pure 2^n version of get_order */
|
|
|
|
static inline __attribute_const__
|
|
|
|
int get_iommu_order(unsigned long size, struct iommu_table *tbl)
|
|
|
|
{
|
|
|
|
return __ilog2((size - 1) >> tbl->it_page_shift) + 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2005-10-05 19:06:20 -07:00
|
|
|
struct scatterlist;
|
|
|
|
|
2015-06-23 22:25:22 -07:00
|
|
|
#ifdef CONFIG_PPC64
|
|
|
|
|
|
|
|
static inline void set_iommu_table_base(struct device *dev,
|
|
|
|
struct iommu_table *base)
|
2009-09-21 01:26:35 -07:00
|
|
|
{
|
2015-06-23 22:25:22 -07:00
|
|
|
dev->archdata.iommu_table_base = base;
|
2009-09-21 01:26:35 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
static inline void *get_iommu_table_base(struct device *dev)
|
|
|
|
{
|
2015-06-23 22:25:22 -07:00
|
|
|
return dev->archdata.iommu_table_base;
|
2009-09-21 01:26:35 -07:00
|
|
|
}
|
|
|
|
|
2015-06-23 22:25:22 -07:00
|
|
|
extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
|
|
|
|
|
2017-03-21 21:21:50 -07:00
|
|
|
extern struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl);
|
|
|
|
extern int iommu_tce_table_put(struct iommu_table *tbl);
|
2005-10-05 19:06:20 -07:00
|
|
|
|
|
|
|
/* Initializes an iommu_table based in values set in the passed-in
|
|
|
|
* structure
|
|
|
|
*/
|
2019-07-17 22:11:39 -07:00
|
|
|
extern struct iommu_table *iommu_init_table(struct iommu_table *tbl,
|
|
|
|
int nid, unsigned long res_start, unsigned long res_end);
|
2021-08-16 23:39:20 -07:00
|
|
|
bool iommu_table_in_use(struct iommu_table *tbl);
|
2024-06-24 05:38:21 -07:00
|
|
|
extern void iommu_table_reserve_pages(struct iommu_table *tbl,
|
|
|
|
unsigned long res_start, unsigned long res_end);
|
|
|
|
extern void iommu_table_clear(struct iommu_table *tbl);
|
2019-07-17 22:11:39 -07:00
|
|
|
|
2015-06-04 23:35:26 -07:00
|
|
|
#define IOMMU_TABLE_GROUP_MAX_TABLES 2
|
2015-06-04 23:35:08 -07:00
|
|
|
|
2015-06-04 23:35:10 -07:00
|
|
|
struct iommu_table_group;
|
|
|
|
|
|
|
|
struct iommu_table_group_ops {
|
2015-06-04 23:35:22 -07:00
|
|
|
unsigned long (*get_table_size)(
|
|
|
|
__u32 page_shift,
|
|
|
|
__u64 window_size,
|
|
|
|
__u32 levels);
|
2015-06-04 23:35:20 -07:00
|
|
|
long (*create_table)(struct iommu_table_group *table_group,
|
|
|
|
int num,
|
|
|
|
__u32 page_shift,
|
|
|
|
__u64 window_size,
|
|
|
|
__u32 levels,
|
|
|
|
struct iommu_table **ptbl);
|
|
|
|
long (*set_window)(struct iommu_table_group *table_group,
|
|
|
|
int num,
|
|
|
|
struct iommu_table *tblnew);
|
|
|
|
long (*unset_window)(struct iommu_table_group *table_group,
|
|
|
|
int num);
|
2015-06-04 23:35:10 -07:00
|
|
|
/* Switch ownership from platform code to external user (e.g. VFIO) */
|
powerpc/iommu: Reimplement the iommu_table_group_ops for pSeries
PPC64 IOMMU API defines iommu_table_group_ops which handles DMA
windows for PEs, their ownership transfer, create/set/unset the TCE
tables for the Dynamic DMA wundows(DDW). VFIOS uses these APIs for
support on POWER.
The commit 9d67c9433509 ("powerpc/iommu: Add "borrowing"
iommu_table_group_ops") implemented partial support for this API with
"borrow" mechanism wherein the DMA windows if created already by the
host driver, they would be available for VFIO to use. Also, it didn't
have the support to control/modify the window size or the IO page
size.
The current patch implements all the necessary iommu_table_group_ops
APIs there by avoiding the "borrrowing". So, just the way it is on the
PowerNV platform, with this patch the iommu table group ownership is
transferred to the VFIO PPC subdriver, the iommu table, DMA windows
creation/deletion all driven through the APIs.
The pSeries uses the query-pe-dma-window, create-pe-dma-window and
reset-pe-dma-window RTAS calls for DMA window creation, deletion and
reset to defaul. The RTAs calls do show some minor differences to the
way things are to be handled on the pSeries which are listed below.
* On pSeries, the default DMA window size is "fixed" cannot be custom
sized as requested by the user. For non-SRIOV VFs, It is fixed at 2GB
and for SRIOV VFs, its variable sized based on the capacity assigned
to it during the VF assignment to the LPAR. So, for the default DMA
window alone the size if requested less than tce32_size, the smaller
size is enforced using the iommu table->it_size.
* The DMA start address for 32-bit window is 0, and for the 64-bit
window in case of PowerNV is hardcoded to TVE select (bit 59) at 512PiB
offset. This address is returned at the time of create_table() API call
(even before the window is created), the subsequent set_window() call
actually opens the DMA window. On pSeries, the DMA start address for
32-bit window is known from the 'ibm,dma-window' DT property. However,
the 64-bit window start address is not known until the create-pe-dma
RTAS call is made. So, the create_table() which returns the DMA window
start address actually opens the DMA window and returns the DMA start
address as returned by the Hypervisor for the create-pe-dma RTAS call.
* The reset-pe-dma RTAS call resets the DMA windows and restores the
default DMA window, however it does not clear the TCE table entries
if there are any. In case of ownership transfer from platform domain
which used direct mapping, the patch chooses remove-pe-dma instead of
reset-pe for the 64-bit window intentionally so that the
clear_dma_window() is called.
Other than the DMA window management changes mentioned above, the
patch also brings back the userspace view for the single level TCE
as it existed before commit 090bad39b237a ("powerpc/powernv: Add
indirect levels to it_userspace") along with the relavent
refactoring.
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/171923275958.1397.907964437142542242.stgit@linux.ibm.com
2024-06-24 05:39:23 -07:00
|
|
|
long (*take_ownership)(struct iommu_table_group *table_group, struct device *dev);
|
2015-06-04 23:35:10 -07:00
|
|
|
/* Switch ownership from external user (e.g. VFIO) back to core */
|
powerpc/iommu: Reimplement the iommu_table_group_ops for pSeries
PPC64 IOMMU API defines iommu_table_group_ops which handles DMA
windows for PEs, their ownership transfer, create/set/unset the TCE
tables for the Dynamic DMA wundows(DDW). VFIOS uses these APIs for
support on POWER.
The commit 9d67c9433509 ("powerpc/iommu: Add "borrowing"
iommu_table_group_ops") implemented partial support for this API with
"borrow" mechanism wherein the DMA windows if created already by the
host driver, they would be available for VFIO to use. Also, it didn't
have the support to control/modify the window size or the IO page
size.
The current patch implements all the necessary iommu_table_group_ops
APIs there by avoiding the "borrrowing". So, just the way it is on the
PowerNV platform, with this patch the iommu table group ownership is
transferred to the VFIO PPC subdriver, the iommu table, DMA windows
creation/deletion all driven through the APIs.
The pSeries uses the query-pe-dma-window, create-pe-dma-window and
reset-pe-dma-window RTAS calls for DMA window creation, deletion and
reset to defaul. The RTAs calls do show some minor differences to the
way things are to be handled on the pSeries which are listed below.
* On pSeries, the default DMA window size is "fixed" cannot be custom
sized as requested by the user. For non-SRIOV VFs, It is fixed at 2GB
and for SRIOV VFs, its variable sized based on the capacity assigned
to it during the VF assignment to the LPAR. So, for the default DMA
window alone the size if requested less than tce32_size, the smaller
size is enforced using the iommu table->it_size.
* The DMA start address for 32-bit window is 0, and for the 64-bit
window in case of PowerNV is hardcoded to TVE select (bit 59) at 512PiB
offset. This address is returned at the time of create_table() API call
(even before the window is created), the subsequent set_window() call
actually opens the DMA window. On pSeries, the DMA start address for
32-bit window is known from the 'ibm,dma-window' DT property. However,
the 64-bit window start address is not known until the create-pe-dma
RTAS call is made. So, the create_table() which returns the DMA window
start address actually opens the DMA window and returns the DMA start
address as returned by the Hypervisor for the create-pe-dma RTAS call.
* The reset-pe-dma RTAS call resets the DMA windows and restores the
default DMA window, however it does not clear the TCE table entries
if there are any. In case of ownership transfer from platform domain
which used direct mapping, the patch chooses remove-pe-dma instead of
reset-pe for the 64-bit window intentionally so that the
clear_dma_window() is called.
Other than the DMA window management changes mentioned above, the
patch also brings back the userspace view for the single level TCE
as it existed before commit 090bad39b237a ("powerpc/powernv: Add
indirect levels to it_userspace") along with the relavent
refactoring.
Signed-off-by: Shivaprasad G Bhat <sbhat@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/171923275958.1397.907964437142542242.stgit@linux.ibm.com
2024-06-24 05:39:23 -07:00
|
|
|
void (*release_ownership)(struct iommu_table_group *table_group, struct device *dev);
|
2015-06-04 23:35:10 -07:00
|
|
|
};
|
|
|
|
|
2015-06-04 23:35:09 -07:00
|
|
|
struct iommu_table_group_link {
|
|
|
|
struct list_head next;
|
|
|
|
struct rcu_head rcu;
|
|
|
|
struct iommu_table_group *table_group;
|
|
|
|
};
|
|
|
|
|
2015-06-04 23:35:08 -07:00
|
|
|
struct iommu_table_group {
|
2015-06-04 23:35:20 -07:00
|
|
|
/* IOMMU properties */
|
|
|
|
__u32 tce32_start;
|
|
|
|
__u32 tce32_size;
|
|
|
|
__u64 pgsizes; /* Bitmap of supported page sizes */
|
|
|
|
__u32 max_dynamic_windows_supported;
|
|
|
|
__u32 max_levels;
|
|
|
|
|
2015-06-04 23:35:08 -07:00
|
|
|
struct iommu_group *group;
|
|
|
|
struct iommu_table *tables[IOMMU_TABLE_GROUP_MAX_TABLES];
|
2015-06-04 23:35:10 -07:00
|
|
|
struct iommu_table_group_ops *ops;
|
2015-06-04 23:35:08 -07:00
|
|
|
};
|
|
|
|
|
2013-11-20 23:43:14 -07:00
|
|
|
#ifdef CONFIG_IOMMU_API
|
2015-06-04 23:35:08 -07:00
|
|
|
|
|
|
|
extern void iommu_register_group(struct iommu_table_group *table_group,
				 int pci_domain_number, unsigned long pe_num);
extern int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev);
extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
		struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction);
extern void iommu_tce_kill(struct iommu_table *tbl,
		unsigned long entry, unsigned long pages);
int dev_has_iommu_table(struct device *dev, void *data);
|
2023-03-06 10:30:20 -07:00
|
|
|
|
2013-11-20 23:43:14 -07:00
|
|
|
#else
|
2015-06-04 23:35:08 -07:00
|
|
|
/* No-op stubs used when CONFIG_IOMMU_API is disabled. */
static inline void iommu_register_group(struct iommu_table_group *table_group,
					int pci_domain_number,
					unsigned long pe_num)
{
}

static inline int iommu_add_device(struct iommu_table_group *table_group,
		struct device *dev)
{
	return 0;
}

static inline int dev_has_iommu_table(struct device *dev, void *data)
{
	return 0;
}
|
2013-11-20 23:43:14 -07:00
|
|
|
#endif /* !CONFIG_IOMMU_API */
|
|
|
|
|
2019-02-13 00:01:04 -07:00
|
|
|
u64 dma_iommu_get_required_mask(struct device *dev);
|
2015-06-23 22:25:22 -07:00
|
|
|
#else
|
|
|
|
|
|
|
|
static inline void *get_iommu_table_base(struct device *dev)
|
|
|
|
{
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline int dma_iommu_dma_supported(struct device *dev, u64 mask)
|
|
|
|
{
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif /* CONFIG_PPC64 */
|
|
|
|
|
2014-11-05 07:28:30 -07:00
|
|
|
extern int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
|
|
|
|
struct scatterlist *sglist, int nelems,
|
|
|
|
unsigned long mask,
|
|
|
|
enum dma_data_direction direction,
|
2016-08-03 13:46:00 -07:00
|
|
|
unsigned long attrs);
|
2014-11-05 07:28:30 -07:00
|
|
|
extern void ppc_iommu_unmap_sg(struct iommu_table *tbl,
|
|
|
|
struct scatterlist *sglist,
|
|
|
|
int nelems,
|
|
|
|
enum dma_data_direction direction,
|
2016-08-03 13:46:00 -07:00
|
|
|
unsigned long attrs);
|
2005-10-05 19:06:20 -07:00
|
|
|
|
2008-02-04 23:28:08 -07:00
|
|
|
extern void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
|
|
|
|
size_t size, dma_addr_t *dma_handle,
|
|
|
|
unsigned long mask, gfp_t flag, int node);
|
2005-10-05 19:06:20 -07:00
|
|
|
extern void iommu_free_coherent(struct iommu_table *tbl, size_t size,
|
2006-11-10 23:25:02 -07:00
|
|
|
void *vaddr, dma_addr_t dma_handle);
|
2008-10-27 13:38:08 -07:00
|
|
|
extern dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
|
|
|
|
struct page *page, unsigned long offset,
|
|
|
|
size_t size, unsigned long mask,
|
|
|
|
enum dma_data_direction direction,
|
2016-08-03 13:46:00 -07:00
|
|
|
unsigned long attrs);
|
2008-10-27 13:38:08 -07:00
|
|
|
extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
|
|
|
|
size_t size, enum dma_data_direction direction,
|
2016-08-03 13:46:00 -07:00
|
|
|
unsigned long attrs);
|
2005-10-05 19:06:20 -07:00
|
|
|
|
2021-12-16 15:00:27 -07:00
|
|
|
void __init iommu_init_early_pSeries(void);
|
2015-03-30 22:00:48 -07:00
|
|
|
extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
|
2007-02-04 15:36:55 -07:00
|
|
|
extern void iommu_init_early_pasemi(void);
|
2005-10-05 19:06:20 -07:00
|
|
|
|
2007-05-03 05:28:32 -07:00
|
|
|
#if defined(CONFIG_PPC64) && defined(CONFIG_PM)
/* Re-program the IOMMU after a power-management resume, if the
 * platform provides a hook via ppc_md.
 */
static inline void iommu_restore(void)
{
	if (ppc_md.iommu_restore)
		ppc_md.iommu_restore();
}
#endif
|
2005-10-05 19:06:20 -07:00
|
|
|
|
2013-05-20 20:33:09 -07:00
|
|
|
/* The API to support IOMMU operations for VFIO */
extern int iommu_tce_check_ioba(unsigned long page_shift,
		unsigned long offset, unsigned long size,
		unsigned long ioba, unsigned long npages);
extern int iommu_tce_check_gpa(unsigned long page_shift,
		unsigned long gpa);

/* Validate parameters against a table's own offset/size/page-shift. */
#define iommu_tce_clear_param_check(tbl, ioba, tce_value, npages) \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), (npages)) || (tce_value))
#define iommu_tce_put_param_check(tbl, ioba, gpa)                 \
		(iommu_tce_check_ioba((tbl)->it_page_shift,       \
				(tbl)->it_offset, (tbl)->it_size, \
				(ioba), 1) ||                     \
		iommu_tce_check_gpa((tbl)->it_page_shift, (gpa)))

extern void iommu_flush_tce(struct iommu_table *tbl);

extern enum dma_data_direction iommu_tce_direction(unsigned long tce);
extern unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir);

#ifdef CONFIG_PPC_CELL_NATIVE
extern bool iommu_fixed_is_weak;
#else
#define iommu_fixed_is_weak false
#endif
|
|
|
|
|
2019-02-13 00:01:33 -07:00
|
|
|
extern const struct dma_map_ops dma_iommu_ops;
|
|
|
|
|
2005-12-16 14:43:46 -07:00
|
|
|
#endif /* __KERNEL__ */
|
2005-10-05 19:06:20 -07:00
|
|
|
#endif /* _ASM_IOMMU_H */
|