iommu/vt-d: Refactor IOTLB and Dev-IOTLB flush for batching

Extract the IOTLB and Dev-IOTLB invalidation logic out of the cache tag
flush interfaces into dedicated helper functions. This prepares the
codebase for upcoming changes that batch cache invalidations.

To let the new helpers call the qi_flush_*() interfaces directly, the
iommu->flush.flush_iotlb indirection and quirk_extra_dev_tlb_flush() are
open coded in them, and __iommu_flush_iotlb() is made non-static.

No functional changes are intended.

Co-developed-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Signed-off-by: Tina Zhang <tina.zhang@intel.com>
Link: https://lore.kernel.org/r/20240815065221.50328-3-tina.zhang@intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit 3297d047cd
parent f701c9f36b
Author:    Tina Zhang <tina.zhang@intel.com> (2024-09-02 10:27:22 +08:00)
Committer: Joerg Roedel <jroedel@suse.de>
3 changed files with 83 additions and 67 deletions
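
The quirk_extra_dev_tlb_flush() call that the commit message describes as being opened up handles devices that need every Dev-IOTLB invalidation to be issued twice; the new helpers fold that behaviour into their info->dtlb_extra_inval checks instead of calling the quirk. For reference, the quirk looks roughly like the sketch below (a paraphrase for illustration, not part of this patch):

/*
 * Paraphrase of quirk_extra_dev_tlb_flush(): on affected devices the
 * Dev-IOTLB invalidation must be sent a second time. The new helpers
 * open code this via their info->dtlb_extra_inval checks.
 */
void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
			       unsigned long address, unsigned long mask,
			       u32 pasid, u16 qdep)
{
	u16 sid;

	if (likely(!info->dtlb_extra_inval))
		return;

	sid = PCI_DEVID(info->bus, info->devfn);
	if (pasid == IOMMU_NO_PASID)
		qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
				   qdep, address, mask);
	else
		qi_flush_dev_iotlb_pasid(info->iommu, sid, info->pfsid,
					 pasid, qdep, address, mask);
}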

@@ -255,6 +255,78 @@ static unsigned long calculate_psi_aligned_address(unsigned long start,
	return ALIGN_DOWN(start, VTD_PAGE_SIZE << mask);
}

static void cache_tag_flush_iotlb(struct dmar_domain *domain, struct cache_tag *tag,
				  unsigned long addr, unsigned long pages,
				  unsigned long mask, int ih)
{
	struct intel_iommu *iommu = tag->iommu;
	u64 type = DMA_TLB_PSI_FLUSH;

	if (domain->use_first_level) {
		qi_flush_piotlb(iommu, tag->domain_id, tag->pasid, addr, pages, ih);
		return;
	}

	/*
	 * Fallback to domain selective flush if no PSI support or the size
	 * is too big.
	 */
	if (!cap_pgsel_inv(iommu->cap) ||
	    mask > cap_max_amask_val(iommu->cap) || pages == -1) {
		addr = 0;
		mask = 0;
		ih = 0;
		type = DMA_TLB_DSI_FLUSH;
	}

	if (ecap_qis(iommu->ecap))
		qi_flush_iotlb(iommu, tag->domain_id, addr | ih, mask, type);
	else
		__iommu_flush_iotlb(iommu, tag->domain_id, addr | ih, mask, type);
}

static void cache_tag_flush_devtlb_psi(struct dmar_domain *domain, struct cache_tag *tag,
				       unsigned long addr, unsigned long mask)
{
	struct intel_iommu *iommu = tag->iommu;
	struct device_domain_info *info;
	u16 sid;

	info = dev_iommu_priv_get(tag->dev);
	sid = PCI_DEVID(info->bus, info->devfn);

	if (tag->pasid == IOMMU_NO_PASID) {
		qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
				   addr, mask);
		if (info->dtlb_extra_inval)
			qi_flush_dev_iotlb(iommu, sid, info->pfsid,
					   info->ats_qdep, addr, mask);
		return;
	}

	qi_flush_dev_iotlb_pasid(iommu, sid, info->pfsid, tag->pasid,
				 info->ats_qdep, addr, mask);
	if (info->dtlb_extra_inval)
		qi_flush_dev_iotlb_pasid(iommu, sid, info->pfsid, tag->pasid,
					 info->ats_qdep, addr, mask);
}

static void cache_tag_flush_devtlb_all(struct dmar_domain *domain, struct cache_tag *tag)
{
	struct intel_iommu *iommu = tag->iommu;
	struct device_domain_info *info;
	u16 sid;

	info = dev_iommu_priv_get(tag->dev);
	sid = PCI_DEVID(info->bus, info->devfn);

	qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
			   MAX_AGAW_PFN_WIDTH);
	if (info->dtlb_extra_inval)
		qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep, 0,
				   MAX_AGAW_PFN_WIDTH);
}

/*
 * Invalidates a range of IOVA from @start (inclusive) to @end (inclusive)
 * when the memory mappings in the target domain have been modified.
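
Note that cache_tag_flush_iotlb() adds pages == -1 to the domain-selective fallback condition, which the old range-flush code did not have. That is what lets cache_tag_flush_all() below reuse the same helper: a full flush of a second-level domain passes pages == -1 and takes the DSI path, matching the old unconditional DSI flush, while first-level domains go through qi_flush_piotlb(), which takes the page count directly. The full-domain call then reduces to:

	/* Full-domain flush: pages == -1 forces the DSI fallback. */
	cache_tag_flush_iotlb(domain, tag, 0, -1, 0, 0);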
@@ -270,30 +342,10 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
	spin_lock_irqsave(&domain->cache_lock, flags);
	list_for_each_entry(tag, &domain->cache_tags, node) {
		struct intel_iommu *iommu = tag->iommu;
		struct device_domain_info *info;
		u16 sid;
		switch (tag->type) {
		case CACHE_TAG_IOTLB:
		case CACHE_TAG_NESTING_IOTLB:
			if (domain->use_first_level) {
				qi_flush_piotlb(iommu, tag->domain_id,
						tag->pasid, addr, pages, ih);
			} else {
				/*
				 * Fallback to domain selective flush if no
				 * PSI support or the size is too big.
				 */
				if (!cap_pgsel_inv(iommu->cap) ||
				    mask > cap_max_amask_val(iommu->cap))
					iommu->flush.flush_iotlb(iommu, tag->domain_id,
								 0, 0, DMA_TLB_DSI_FLUSH);
				else
					iommu->flush.flush_iotlb(iommu, tag->domain_id,
								 addr | ih, mask,
								 DMA_TLB_PSI_FLUSH);
			}
			cache_tag_flush_iotlb(domain, tag, addr, pages, mask, ih);
			break;
		case CACHE_TAG_NESTING_DEVTLB:
			/*
@@ -307,18 +359,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
			mask = MAX_AGAW_PFN_WIDTH;
			fallthrough;
		case CACHE_TAG_DEVTLB:
			info = dev_iommu_priv_get(tag->dev);
			sid = PCI_DEVID(info->bus, info->devfn);
			if (tag->pasid == IOMMU_NO_PASID)
				qi_flush_dev_iotlb(iommu, sid, info->pfsid,
						   info->ats_qdep, addr, mask);
			else
				qi_flush_dev_iotlb_pasid(iommu, sid, info->pfsid,
							 tag->pasid, info->ats_qdep,
							 addr, mask);
			quirk_extra_dev_tlb_flush(info, addr, mask, tag->pasid, info->ats_qdep);
			cache_tag_flush_devtlb_psi(domain, tag, addr, mask);
			break;
		}
@@ -338,29 +379,14 @@ void cache_tag_flush_all(struct dmar_domain *domain)
	spin_lock_irqsave(&domain->cache_lock, flags);
	list_for_each_entry(tag, &domain->cache_tags, node) {
		struct intel_iommu *iommu = tag->iommu;
		struct device_domain_info *info;
		u16 sid;
		switch (tag->type) {
		case CACHE_TAG_IOTLB:
		case CACHE_TAG_NESTING_IOTLB:
			if (domain->use_first_level)
				qi_flush_piotlb(iommu, tag->domain_id,
						tag->pasid, 0, -1, 0);
			else
				iommu->flush.flush_iotlb(iommu, tag->domain_id,
							 0, 0, DMA_TLB_DSI_FLUSH);
			cache_tag_flush_iotlb(domain, tag, 0, -1, 0, 0);
			break;
		case CACHE_TAG_DEVTLB:
		case CACHE_TAG_NESTING_DEVTLB:
			info = dev_iommu_priv_get(tag->dev);
			sid = PCI_DEVID(info->bus, info->devfn);
			qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
					   0, MAX_AGAW_PFN_WIDTH);
			quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH,
						  IOMMU_NO_PASID, info->ats_qdep);
			cache_tag_flush_devtlb_all(domain, tag);
			break;
		}
@@ -399,20 +425,8 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
		}
		if (tag->type == CACHE_TAG_IOTLB ||
		    tag->type == CACHE_TAG_NESTING_IOTLB) {
			/*
			 * Fallback to domain selective flush if no
			 * PSI support or the size is too big.
			 */
			if (!cap_pgsel_inv(iommu->cap) ||
			    mask > cap_max_amask_val(iommu->cap))
				iommu->flush.flush_iotlb(iommu, tag->domain_id,
							 0, 0, DMA_TLB_DSI_FLUSH);
			else
				iommu->flush.flush_iotlb(iommu, tag->domain_id,
							 addr, mask,
							 DMA_TLB_PSI_FLUSH);
		}
		    tag->type == CACHE_TAG_NESTING_IOTLB)
			cache_tag_flush_iotlb(domain, tag, addr, pages, mask, 0);
		trace_cache_tag_flush_range_np(tag, start, end, addr, pages, mask);
	}

@@ -1184,9 +1184,8 @@ static void __iommu_flush_context(struct intel_iommu *iommu,
	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
}

/* return value determine if we need a write buffer flush */
static void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did,
				u64 addr, unsigned int size_order, u64 type)
void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
			 unsigned int size_order, u64 type)
{
	int tlb_offset = ecap_iotlb_offset(iommu->ecap);
	u64 val = 0, val_iva = 0;
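
Dropping the static qualifier here (and declaring the function in the header below) lets cache_tag_flush_iotlb() reach the register-based flush directly when queued invalidation is unavailable, instead of going through the iommu->flush.flush_iotlb pointer. That indirection is wired up during driver initialization roughly as in the sketch below (a paraphrase of existing driver code, untouched by this patch); the new helper simply checks ecap_qis() to pick between the two paths itself.

	if (dmar_enable_qi(iommu)) {
		/* Queued invalidation could not be enabled: register-based flush. */
		iommu->flush.flush_context = __iommu_flush_context;
		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
	} else {
		iommu->flush.flush_context = qi_flush_context;
		iommu->flush.flush_iotlb = qi_flush_iotlb;
	}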

@@ -1206,6 +1206,9 @@ void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
		   unsigned int count, unsigned long options);
void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
			 unsigned int size_order, u64 type);

/*
 * Options used in qi_submit_sync:
 * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
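
The qi_submit_sync() declaration above is what the planned batching is expected to build on: it already accepts an array of descriptors and a count, so several invalidations can be handed to hardware in one submission. A purely illustrative sketch follows, assuming struct qi_desc and the QI_IOTLB_* encoding macros from this header; the helper name is hypothetical and the read/write drain bits are omitted for brevity.

/*
 * Hypothetical example: queue two domain-selective IOTLB invalidations
 * and submit them to hardware with a single qi_submit_sync() call.
 */
static void example_batched_dsi_flush(struct intel_iommu *iommu,
				      u16 did1, u16 did2)
{
	struct qi_desc desc[2] = {};

	desc[0].qw0 = QI_IOTLB_DID(did1) | QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) |
		      QI_IOTLB_TYPE;
	desc[1].qw0 = QI_IOTLB_DID(did2) | QI_IOTLB_GRAN(DMA_TLB_DSI_FLUSH) |
		      QI_IOTLB_TYPE;

	/* One submission and one completion wait cover both flushes. */
	qi_submit_sync(iommu, desc, 2, 0);
}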