iommu/amd: Rename struct amd_io_pgtable iopt to pgtbl
There is struct protection_domain iopt and struct amd_io_pgtable iopt. The
next patches are going to want to write domain.iopt.iopt.xx, which is quite
unnatural to read. Give one of them a different name: amd_io_pgtable has
fewer references, so rename its member to pgtbl, matching pgtbl_cfg.

Suggested-by: Alejandro Jimenez <alejandro.j.jimenez@oracle.com>
Reviewed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/6-v2-831cdc4d00f3+1a315-amd_iopgtbl_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 670b57796c
parent 1ed2d21d47
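For orientation, here is a minimal sketch of the struct nesting behind the rename. These are abbreviated, userspace-compilable approximations, not the full kernel definitions (the real ones live in include/linux/io-pgtable.h and drivers/iommu/amd/amd_iommu_types.h):

/* Sketch only: abbreviated approximations of the kernel structures. */
typedef unsigned long long u64;

struct io_pgtable_ops { int dummy; };	/* map_pages/unmap_pages/... callbacks */
struct io_pgtable_cfg { int dummy; };	/* pgsize_bitmap, ias, oas, tlb, ... */

struct io_pgtable {
	struct io_pgtable_cfg cfg;
	struct io_pgtable_ops ops;
};

struct amd_io_pgtable {
	struct io_pgtable_cfg pgtbl_cfg;
	struct io_pgtable pgtbl;	/* renamed from "iop" by this commit */
	int mode;
	u64 *root;
	u64 *pgd;			/* v2 pgtable pgd pointer */
};

struct protection_domain {
	/* ... */
	struct amd_io_pgtable iop;	/* accesses now read domain->iop.pgtbl.xx */
};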
@@ -519,7 +519,7 @@ struct amd_irte_ops;
 #define AMD_IOMMU_FLAG_TRANS_PRE_ENABLED (1 << 0)
 
 #define io_pgtable_to_data(x) \
-	container_of((x), struct amd_io_pgtable, iop)
+	container_of((x), struct amd_io_pgtable, pgtbl)
 
 #define io_pgtable_ops_to_data(x) \
 	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))
@@ -540,7 +540,7 @@ struct gcr3_tbl_info {
 
 struct amd_io_pgtable {
 	struct io_pgtable_cfg pgtbl_cfg;
-	struct io_pgtable iop;
+	struct io_pgtable pgtbl;
 	int mode;
 	u64 *root;
 	u64 *pgd; /* v2 pgtable pgd pointer */
@@ -541,7 +541,7 @@ static int iommu_v1_read_and_clear_dirty(struct io_pgtable_ops *ops,
  */
 static void v1_free_pgtable(struct io_pgtable *iop)
 {
-	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
+	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
 	LIST_HEAD(freelist);
 
 	if (pgtable->mode == PAGE_MODE_NONE)
@@ -569,12 +569,12 @@ static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE;
 	cfg->tlb = &v1_flush_ops;
 
-	pgtable->iop.ops.map_pages = iommu_v1_map_pages;
-	pgtable->iop.ops.unmap_pages = iommu_v1_unmap_pages;
-	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;
-	pgtable->iop.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;
+	pgtable->pgtbl.ops.map_pages = iommu_v1_map_pages;
+	pgtable->pgtbl.ops.unmap_pages = iommu_v1_unmap_pages;
+	pgtable->pgtbl.ops.iova_to_phys = iommu_v1_iova_to_phys;
+	pgtable->pgtbl.ops.read_and_clear_dirty = iommu_v1_read_and_clear_dirty;
 
-	return &pgtable->iop;
+	return &pgtable->pgtbl;
 }
 
 struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
@@ -234,7 +234,7 @@ static int iommu_v2_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
 			      int prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *pdom = io_pgtable_ops_to_domain(ops);
-	struct io_pgtable_cfg *cfg = &pdom->iop.iop.cfg;
+	struct io_pgtable_cfg *cfg = &pdom->iop.pgtbl.cfg;
 	u64 *pte;
 	unsigned long map_size;
 	unsigned long mapped_size = 0;
@@ -281,7 +281,7 @@ static unsigned long iommu_v2_unmap_pages(struct io_pgtable_ops *ops,
 					   struct iommu_iotlb_gather *gather)
 {
 	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
-	struct io_pgtable_cfg *cfg = &pgtable->iop.cfg;
+	struct io_pgtable_cfg *cfg = &pgtable->pgtbl.cfg;
 	unsigned long unmap_size;
 	unsigned long unmapped = 0;
 	size_t size = pgcount << __ffs(pgsize);
@@ -346,7 +346,7 @@ static const struct iommu_flush_ops v2_flush_ops = {
 
 static void v2_free_pgtable(struct io_pgtable *iop)
 {
-	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
+	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, pgtbl);
 
 	if (!pgtable || !pgtable->pgd)
 		return;
@@ -369,16 +369,16 @@ static struct io_pgtable *v2_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
 	if (get_pgtable_level() == PAGE_MODE_5_LEVEL)
 		ias = 57;
 
-	pgtable->iop.ops.map_pages = iommu_v2_map_pages;
-	pgtable->iop.ops.unmap_pages = iommu_v2_unmap_pages;
-	pgtable->iop.ops.iova_to_phys = iommu_v2_iova_to_phys;
+	pgtable->pgtbl.ops.map_pages = iommu_v2_map_pages;
+	pgtable->pgtbl.ops.unmap_pages = iommu_v2_unmap_pages;
+	pgtable->pgtbl.ops.iova_to_phys = iommu_v2_iova_to_phys;
 
 	cfg->pgsize_bitmap = AMD_IOMMU_PGSIZES_V2,
 	cfg->ias = ias,
 	cfg->oas = IOMMU_OUT_ADDR_BIT_SIZE,
 	cfg->tlb = &v2_flush_ops;
 
-	return &pgtable->iop;
+	return &pgtable->pgtbl;
 }
 
 struct io_pgtable_init_fns io_pgtable_amd_iommu_v2_init_fns = {
@@ -2258,7 +2258,7 @@ void protection_domain_free(struct protection_domain *domain)
 	WARN_ON(!list_empty(&domain->dev_list));
 
 	if (domain->iop.pgtbl_cfg.tlb)
-		free_io_pgtable_ops(&domain->iop.iop.ops);
+		free_io_pgtable_ops(&domain->iop.pgtbl.ops);
 
 	if (domain->id)
 		domain_id_free(domain->id);
@@ -2366,7 +2366,7 @@ static struct iommu_domain *do_iommu_domain_alloc(unsigned int type,
 	domain->domain.geometry.aperture_start = 0;
 	domain->domain.geometry.aperture_end = dma_max_address();
 	domain->domain.geometry.force_aperture = true;
-	domain->domain.pgsize_bitmap = domain->iop.iop.cfg.pgsize_bitmap;
+	domain->domain.pgsize_bitmap = domain->iop.pgtbl.cfg.pgsize_bitmap;
 
 	if (iommu) {
 		domain->domain.type = type;
@@ -2510,7 +2510,7 @@ static int amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
 				    unsigned long iova, size_t size)
 {
 	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
 
 	if (ops->map_pages)
 		domain_flush_np_cache(domain, iova, size);
@@ -2522,7 +2522,7 @@ static int amd_iommu_map_pages(struct iommu_domain *dom, unsigned long iova,
 			       int iommu_prot, gfp_t gfp, size_t *mapped)
 {
 	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
 	int prot = 0;
 	int ret = -EINVAL;
 
@@ -2569,7 +2569,7 @@ static size_t amd_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
 				    struct iommu_iotlb_gather *gather)
 {
 	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
 	size_t r;
 
 	if ((domain->pd_mode == PD_MODE_V1) &&
@@ -2588,7 +2588,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 					   dma_addr_t iova)
 {
 	struct protection_domain *domain = to_pdomain(dom);
-	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &domain->iop.pgtbl.ops;
 
 	return ops->iova_to_phys(ops, iova);
 }
@@ -2666,7 +2666,7 @@ static int amd_iommu_read_and_clear_dirty(struct iommu_domain *domain,
 					  struct iommu_dirty_bitmap *dirty)
 {
 	struct protection_domain *pdomain = to_pdomain(domain);
-	struct io_pgtable_ops *ops = &pdomain->iop.iop.ops;
+	struct io_pgtable_ops *ops = &pdomain->iop.pgtbl.ops;
 	unsigned long lflags;
 
 	if (!ops || !ops->read_and_clear_dirty)
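A side note on the container_of() updates in the hunks above: the macro's third argument names the embedded member, so it must track the rename, otherwise the enclosing-structure pointer would be computed from the wrong offset. A hypothetical, standalone userspace demo (container_of approximated with offsetof; not kernel code) of that mechanism:

#include <stddef.h>
#include <stdio.h>

/* Userspace approximation of the kernel's container_of(): recover the
 * address of the enclosing structure from a pointer to one of its members. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct io_pgtable { int dummy; };

struct amd_io_pgtable {
	int mode;
	struct io_pgtable pgtbl;	/* renamed from "iop" by this commit */
};

int main(void)
{
	struct amd_io_pgtable table = { .mode = 3 };
	struct io_pgtable *iop = &table.pgtbl;

	/* The member name here must match the struct definition, or the
	 * computed offset (and thus the recovered pointer) is wrong. */
	struct amd_io_pgtable *back = container_of(iop, struct amd_io_pgtable, pgtbl);
	printf("mode = %d\n", back->mode);	/* prints: mode = 3 */
	return 0;
}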