iommu/amd: Add SVA domain support
- Allocate the SVA domain and set up the mmu notifier. In the free path,
  unregister the mmu notifier and free the protection domain.

- Add the mmu notifier callback functions. They retrieve the SVA protection
  domain and invalidate the IO/TLB.

Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20240418103400.6229-16-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit a5a91e5484 (parent 80af5a4520)
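For context, the diff below only adds the driver-side plumbing; a device driver consumes it through the generic IOMMU SVA API, which ends up calling the domain_alloc_sva() op added here. The following is a minimal, hypothetical driver-side sketch (not part of this patch, error handling abbreviated) of how that support would be exercised:

/*
 * Hypothetical driver code: bind the current process's mm to a device.
 * The core SVA code allocates an SVA domain via the domain_alloc_sva()
 * op added in this patch; the registered mmu notifier then invalidates
 * the IO/TLB for the bound PASID when the address space changes.
 */
#include <linux/iommu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>

static int example_bind_current_mm(struct device *dev, u32 *pasid)
{
	struct iommu_sva *handle;

	/* Allocates (or reuses) an SVA domain and attaches it to a PASID */
	handle = iommu_sva_bind_device(dev, current->mm);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* PASID the device must place in its DMA transactions */
	*pasid = iommu_sva_get_pasid(handle);

	/* ... program the device with *pasid and do work ... */

	iommu_sva_unbind_device(handle);
	return 0;
}

The notifier callbacks added below (arch_invalidate_secondary_tlbs and release) are what keep the device's translations coherent with the CPU page tables for such a binding.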
@@ -45,6 +45,11 @@ extern enum io_pgtable_fmt amd_iommu_pgtable;
 extern int amd_iommu_gpt_level;
 
+/* Protection domain ops */
+struct protection_domain *protection_domain_alloc(unsigned int type);
+void protection_domain_free(struct protection_domain *domain);
+struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+						struct mm_struct *mm);
+void amd_iommu_domain_free(struct iommu_domain *dom);
 int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
 			    struct device *dev, ioasid_t pasid);
 void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid);
@@ -586,6 +586,7 @@ struct protection_domain {
 	unsigned dev_cnt;		/* devices assigned to this domain */
 	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
 
+	struct mmu_notifier mn;	/* mmu notifier for the SVA domain */
 	struct list_head dev_data_list; /* List of pdom_dev_data */
 };
 
@@ -2280,7 +2280,7 @@ static void cleanup_domain(struct protection_domain *domain)
 	WARN_ON(domain->dev_cnt != 0);
 }
 
-static void protection_domain_free(struct protection_domain *domain)
+void protection_domain_free(struct protection_domain *domain)
 {
 	if (!domain)
 		return;
@@ -2323,7 +2323,7 @@ static int protection_domain_init_v2(struct protection_domain *pdom)
 	return 0;
 }
 
-static struct protection_domain *protection_domain_alloc(unsigned int type)
+struct protection_domain *protection_domain_alloc(unsigned int type)
 {
 	struct io_pgtable_ops *pgtbl_ops;
 	struct protection_domain *domain;
@@ -2346,6 +2346,7 @@ static struct protection_domain *protection_domain_alloc(unsigned int type)
 	switch (type) {
 	/* No need to allocate io pgtable ops in passthrough mode */
 	case IOMMU_DOMAIN_IDENTITY:
+	case IOMMU_DOMAIN_SVA:
 		return domain;
 	case IOMMU_DOMAIN_DMA:
 		pgtable = amd_iommu_pgtable;
@@ -2465,7 +2466,7 @@ amd_iommu_domain_alloc_user(struct device *dev, u32 flags,
 	return do_iommu_domain_alloc(type, dev, flags);
 }
 
-static void amd_iommu_domain_free(struct iommu_domain *dom)
+void amd_iommu_domain_free(struct iommu_domain *dom)
 {
 	struct protection_domain *domain;
 	unsigned long flags;
@@ -2833,6 +2834,7 @@ static int amd_iommu_dev_enable_feature(struct device *dev,
 
 	switch (feat) {
 	case IOMMU_DEV_FEAT_IOPF:
+	case IOMMU_DEV_FEAT_SVA:
 		break;
 	default:
 		ret = -EINVAL;
@@ -2848,6 +2850,7 @@ static int amd_iommu_dev_disable_feature(struct device *dev,
 
 	switch (feat) {
 	case IOMMU_DEV_FEAT_IOPF:
+	case IOMMU_DEV_FEAT_SVA:
 		break;
 	default:
 		ret = -EINVAL;
@@ -2860,6 +2863,7 @@ const struct iommu_ops amd_iommu_ops = {
 	.capable = amd_iommu_capable,
 	.domain_alloc = amd_iommu_domain_alloc,
 	.domain_alloc_user = amd_iommu_domain_alloc_user,
+	.domain_alloc_sva = amd_iommu_domain_alloc_sva,
 	.probe_device = amd_iommu_probe_device,
 	.release_device = amd_iommu_release_device,
 	.probe_finalize = amd_iommu_probe_finalize,
@@ -56,6 +56,49 @@ static void remove_pdom_dev_pasid(struct protection_domain *pdom,
 	}
 }
 
+static void sva_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
+					       struct mm_struct *mm,
+					       unsigned long start, unsigned long end)
+{
+	struct pdom_dev_data *pdom_dev_data;
+	struct protection_domain *sva_pdom;
+	unsigned long flags;
+
+	sva_pdom = container_of(mn, struct protection_domain, mn);
+
+	spin_lock_irqsave(&sva_pdom->lock, flags);
+
+	for_each_pdom_dev_data(pdom_dev_data, sva_pdom) {
+		amd_iommu_dev_flush_pasid_pages(pdom_dev_data->dev_data,
+						pdom_dev_data->pasid,
+						start, end - start);
+	}
+
+	spin_unlock_irqrestore(&sva_pdom->lock, flags);
+}
+
+static void sva_mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
+{
+	struct pdom_dev_data *pdom_dev_data, *next;
+	struct protection_domain *sva_pdom;
+	unsigned long flags;
+
+	sva_pdom = container_of(mn, struct protection_domain, mn);
+
+	spin_lock_irqsave(&sva_pdom->lock, flags);
+
+	/* Assume dev_data_list contains same PASID with different devices */
+	for_each_pdom_dev_data_safe(pdom_dev_data, next, sva_pdom)
+		remove_dev_pasid(pdom_dev_data);
+
+	spin_unlock_irqrestore(&sva_pdom->lock, flags);
+}
+
+static const struct mmu_notifier_ops sva_mn = {
+	.arch_invalidate_secondary_tlbs = sva_arch_invalidate_secondary_tlbs,
+	.release = sva_mn_release,
+};
+
 int iommu_sva_set_dev_pasid(struct iommu_domain *domain,
 			    struct device *dev, ioasid_t pasid)
 {
@@ -120,3 +163,40 @@ void amd_iommu_remove_dev_pasid(struct device *dev, ioasid_t pasid)
 
 	spin_unlock_irqrestore(&sva_pdom->lock, flags);
 }
+
+static void iommu_sva_domain_free(struct iommu_domain *domain)
+{
+	struct protection_domain *sva_pdom = to_pdomain(domain);
+
+	if (sva_pdom->mn.ops)
+		mmu_notifier_unregister(&sva_pdom->mn, domain->mm);
+
+	amd_iommu_domain_free(domain);
+}
+
+static const struct iommu_domain_ops amd_sva_domain_ops = {
+	.set_dev_pasid = iommu_sva_set_dev_pasid,
+	.free	       = iommu_sva_domain_free
+};
+
+struct iommu_domain *amd_iommu_domain_alloc_sva(struct device *dev,
+						struct mm_struct *mm)
+{
+	struct protection_domain *pdom;
+	int ret;
+
+	pdom = protection_domain_alloc(IOMMU_DOMAIN_SVA);
+	if (!pdom)
+		return ERR_PTR(-ENOMEM);
+
+	pdom->domain.ops = &amd_sva_domain_ops;
+	pdom->mn.ops = &sva_mn;
+
+	ret = mmu_notifier_register(&pdom->mn, mm);
+	if (ret) {
+		protection_domain_free(pdom);
+		return ERR_PTR(ret);
+	}
+
+	return &pdom->domain;
+}