
iommu/amd: Add support for enable/disable IOPF

Return success from the enable_feature(IOPF) path, as this interface is going
away. Instead, enable/disable IOPF support in the attach/detach device paths.

In the attach device path, if the device is capable of PRI, add it to the
per-IOMMU IOPF queue and enable PPR support in the IOMMU. The device is still
attached to the domain even if enabling PRI or adding it to the IOPF queue
fails, since the device can continue to work without PRI support.
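
pdev_enable_caps() and pdev_disable_cap_pri() are the driver's existing
helpers (see the do_attach() hunk below). For readers unfamiliar with them,
here is a rough sketch of what enabling PRI on the PCI side involves, using
the generic PCI core helpers; the function, the request count and the error
handling are illustrative, not the driver's actual code:

    #include <linux/pci.h>
    #include <linux/pci-ats.h>

    /* Illustrative only: PRI depends on ATS being enabled first. */
    static void example_enable_pri(struct pci_dev *pdev)
    {
            if (!pci_ats_supported(pdev) || pci_enable_ats(pdev, PAGE_SHIFT))
                    return;

            /* 32 outstanding page requests is an arbitrary example value. */
            if (pci_pri_supported(pdev) && pci_enable_pri(pdev, 32))
                    pci_info(pdev, "PRI enable failed, continuing without IOPF\n");
    }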

The detach device path follows this sequence (see the annotated sketch after
the list):
  - Flush the queue for the given device
  - Disable PPR support in DTE[devid]
  - Remove device from IOPF queue
  - Disable device PRI
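
An annotated restatement of that ordering, mapping the four steps onto the
detach_device() hunk in the diff below; all names come from the diff, only
the step comments are added here:

    bool ppr = dev_data->ppr;

    if (ppr) {
            /* 1. Complete faults still queued for this device. */
            iopf_queue_flush_dev(dev);
            /* 2. Clear ppr before do_detach() so that the rewritten
             *    DTE[devid] has PPR disabled.
             */
            dev_data->ppr = false;
    }

    do_detach(dev_data);

    /* 3. Drop the device from the per-IOMMU IOPF queue. */
    if (ppr)
            amd_iommu_iopf_remove_device(iommu, dev_data);

    /* 4. Finally disable PRI (and the other PCI caps) on the device. */
    if (dev_is_pci(dev))
            pdev_disable_caps(to_pci_dev(dev));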

Also add IOMMU_IOPF as a dependency of the AMD_IOMMU driver.

Co-developed-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20240418103400.6229-13-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Vasant Hegde, 2024-04-18 10:33:57 +00:00, committed by Joerg Roedel
commit c4cb231111 (parent 978d626b8f)
4 changed files with 77 additions and 8 deletions

drivers/iommu/amd/Kconfig

@@ -10,6 +10,7 @@ config AMD_IOMMU
 	select IOMMU_API
 	select IOMMU_IOVA
 	select IOMMU_IO_PGTABLE
+	select IOMMU_IOPF
 	select IOMMUFD_DRIVER if IOMMUFD
 	depends on X86_64 && PCI && ACPI && HAVE_CMPXCHG_DOUBLE
 	help

drivers/iommu/amd/amd_iommu.h

@@ -51,6 +51,10 @@ int amd_iommu_iopf_init(struct amd_iommu *iommu);
 void amd_iommu_iopf_uninit(struct amd_iommu *iommu);
 void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
 			     struct iommu_page_response *resp);
+int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
+			      struct iommu_dev_data *dev_data);
+void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
+				  struct iommu_dev_data *dev_data);
 
 /* GCR3 setup */
 int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,

drivers/iommu/amd/iommu.c

@@ -2057,8 +2057,17 @@ static int do_attach(struct iommu_dev_data *dev_data,
 		if (ret)
 			return ret;
 
-		if (pdev)
+		if (pdev) {
 			pdev_enable_caps(pdev);
+
+			/*
+			 * Device can continue to function even if IOPF
+			 * enablement failed. Hence in error path just
+			 * disable device PRI support.
+			 */
+			if (amd_iommu_iopf_add_device(iommu, dev_data))
+				pdev_disable_cap_pri(pdev);
+		}
 	} else if (pdev) {
 		pdev_enable_cap_ats(pdev);
 	}
@@ -2130,12 +2139,11 @@ out:
  */
 static void detach_device(struct device *dev)
 {
-	struct protection_domain *domain;
-	struct iommu_dev_data *dev_data;
+	struct iommu_dev_data *dev_data = dev_iommu_priv_get(dev);
+	struct protection_domain *domain = dev_data->domain;
+	struct amd_iommu *iommu = get_amd_iommu_from_dev_data(dev_data);
 	unsigned long flags;
-
-	dev_data = dev_iommu_priv_get(dev);
-	domain = dev_data->domain;
+	bool ppr = dev_data->ppr;
 
 	spin_lock_irqsave(&domain->lock, flags);
@@ -2150,8 +2158,19 @@ static void detach_device(struct device *dev)
 	if (WARN_ON(!dev_data->domain))
 		goto out;
 
+	if (ppr) {
+		iopf_queue_flush_dev(dev);
+
+		/* Updated here so that it gets reflected in DTE */
+		dev_data->ppr = false;
+	}
+
 	do_detach(dev_data);
 
+	/* Remove IOPF handler */
+	if (ppr)
+		amd_iommu_iopf_remove_device(iommu, dev_data);
+
 	if (dev_is_pci(dev))
 		pdev_disable_caps(to_pci_dev(dev));
@@ -2814,9 +2833,11 @@ static const struct iommu_dirty_ops amd_dirty_ops = {
 static int amd_iommu_dev_enable_feature(struct device *dev,
 					enum iommu_dev_features feat)
 {
-	int ret;
+	int ret = 0;
 
 	switch (feat) {
+	case IOMMU_DEV_FEAT_IOPF:
+		break;
 	default:
 		ret = -EINVAL;
 		break;
@@ -2827,9 +2848,11 @@ static int amd_iommu_dev_enable_feature(struct device *dev,
 static int amd_iommu_dev_disable_feature(struct device *dev,
 					 enum iommu_dev_features feat)
 {
-	int ret;
+	int ret = 0;
 
 	switch (feat) {
+	case IOMMU_DEV_FEAT_IOPF:
+		break;
 	default:
 		ret = -EINVAL;
 		break;

drivers/iommu/amd/ppr.c

@@ -243,3 +243,44 @@ void amd_iommu_page_response(struct device *dev, struct iopf_fault *evt,
 {
 	amd_iommu_complete_ppr(dev, resp->pasid, resp->code, resp->grpid);
 }
+
+int amd_iommu_iopf_add_device(struct amd_iommu *iommu,
+			      struct iommu_dev_data *dev_data)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	if (!dev_data->pri_enabled)
+		return ret;
+
+	raw_spin_lock_irqsave(&iommu->lock, flags);
+
+	if (!iommu->iopf_queue) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = iopf_queue_add_device(iommu->iopf_queue, dev_data->dev);
+	if (ret)
+		goto out_unlock;
+
+	dev_data->ppr = true;
+
+out_unlock:
+	raw_spin_unlock_irqrestore(&iommu->lock, flags);
+	return ret;
+}
+
+/* Its assumed that caller has verified that device was added to iopf queue */
+void amd_iommu_iopf_remove_device(struct amd_iommu *iommu,
+				  struct iommu_dev_data *dev_data)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&iommu->lock, flags);
+
+	iopf_queue_remove_device(iommu->iopf_queue, dev_data->dev);
+	dev_data->ppr = false;
+
+	raw_spin_unlock_irqrestore(&iommu->lock, flags);
+}