iommu/arm-smmu-v3: Enable HTTU for stage1 with io-pgtable mapping
If the io-pgtable quirk flag indicates support for hardware update of the
dirty state, enable the HA/HD bits in the SMMU CD and also set the DBM bit
in the page descriptor.

Now report the dirty page tracking capability of SMMUv3 and select
IOMMUFD_DRIVER for ARM_SMMU_V3 if IOMMUFD is enabled.

Co-developed-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Signed-off-by: Kunkun Jiang <jiangkunkun@huawei.com>
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Link: https://lore.kernel.org/r/20240703101604.2576-6-shameerali.kolothum.thodi@huawei.com
Signed-off-by: Will Deacon <will@kernel.org>
commit 25c776dd03
parent eb054d67b2
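For context, a minimal sketch of how a stage-1 page-table allocation could opt
in to the new quirk. The helper name example_alloc_s1_pgtbl() and its exact
wiring are illustrative assumptions, not code from this commit; the feature
bits and alloc_io_pgtable_ops() are existing kernel interfaces.

#include <linux/errno.h>
#include <linux/io-pgtable.h>

#include "arm-smmu-v3.h"

/*
 * Illustrative helper (not part of this commit): request hardware
 * Access/Dirty updates only when the SMMU reports HTTU dirty support
 * and coherent table walks, then allocate the stage-1 page table.
 */
static int example_alloc_s1_pgtbl(struct arm_smmu_device *smmu,
                                  struct io_pgtable_cfg *pgtbl_cfg,
                                  struct io_pgtable_ops **ops, void *cookie)
{
        const u32 httu = ARM_SMMU_FEAT_HD | ARM_SMMU_FEAT_COHERENCY;

        if ((smmu->features & httu) == httu)
                pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_HD;

        *ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, pgtbl_cfg, cookie);
        return *ops ? 0 : -ENOMEM;
}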
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
@@ -394,6 +394,7 @@ config ARM_SMMU_V3
        select IOMMU_API
        select IOMMU_IO_PGTABLE_LPAE
        select GENERIC_MSI_IRQ
+       select IOMMUFD_DRIVER if IOMMUFD
        help
          Support for implementations of the ARM System MMU architecture
          version 3 providing translation support to a PCIe root complex.
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1352,6 +1352,12 @@ void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
                CTXDESC_CD_0_ASET |
                FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid)
                );
+
+       /* To enable dirty flag update, set both Access flag and dirty state update */
+       if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_HD)
+               target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_HA |
+                                              CTXDESC_CD_0_TCR_HD);
+
        target->data[1] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
                                      CTXDESC_CD_1_TTB0_MASK);
        target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair);
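A note on the pairing above: hardware Dirty state update (HD) is only
meaningful when hardware Access flag update (HA) is also enabled, which is why
the CD gains both TCR bits together. As a rough illustration, the helper below
is hypothetical and not part of this commit; it only decodes the CD word that
the hunk above fills in.

/*
 * Hypothetical helper for illustration only: report whether a context
 * descriptor has both hardware Access (HA) and Dirty (HD) updates enabled.
 */
static inline bool example_cd_has_httu(const struct arm_smmu_cd *cd)
{
        u64 val = le64_to_cpu(cd->data[0]);

        return (val & CTXDESC_CD_0_TCR_HA) && (val & CTXDESC_CD_0_TCR_HD);
}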
@@ -2235,6 +2241,13 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = {
        .tlb_add_page   = arm_smmu_tlb_inv_page_nosync,
 };
 
+static bool arm_smmu_dbm_capable(struct arm_smmu_device *smmu)
+{
+       u32 features = (ARM_SMMU_FEAT_HD | ARM_SMMU_FEAT_COHERENCY);
+
+       return (smmu->features & features) == features;
+}
+
 /* IOMMU API */
 static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
 {
@@ -2247,6 +2260,8 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
        case IOMMU_CAP_NOEXEC:
        case IOMMU_CAP_DEFERRED_FLUSH:
                return true;
+       case IOMMU_CAP_DIRTY_TRACKING:
+               return arm_smmu_dbm_capable(master->smmu);
        default:
                return false;
        }
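With the new IOMMU_CAP_DIRTY_TRACKING case above, other kernel code can probe
for the feature through the generic capability interface. A minimal sketch
(the wrapper function is a placeholder; device_iommu_capable() is the existing
IOMMU core API):

#include <linux/iommu.h>

/*
 * Sketch: ask the IOMMU core whether the device's IOMMU can track
 * dirty pages in hardware before attempting to use dirty tracking.
 */
static bool example_supports_dirty_tracking(struct device *dev)
{
        return device_iommu_capable(dev, IOMMU_CAP_DIRTY_TRACKING);
}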
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -304,6 +304,9 @@ struct arm_smmu_cd {
 #define CTXDESC_CD_0_TCR_IPS           GENMASK_ULL(34, 32)
 #define CTXDESC_CD_0_TCR_TBI0          (1ULL << 38)
 
+#define CTXDESC_CD_0_TCR_HA            (1UL << 43)
+#define CTXDESC_CD_0_TCR_HD            (1UL << 42)
+
 #define CTXDESC_CD_0_AA64              (1UL << 41)
 #define CTXDESC_CD_0_S                 (1UL << 44)
 #define CTXDESC_CD_0_R                 (1UL << 45)
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
@@ -440,6 +440,8 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
                pte = ARM_LPAE_PTE_nG;
                if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
                        pte |= ARM_LPAE_PTE_AP_RDONLY;
+               else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD)
+                       pte |= ARM_LPAE_PTE_DBM;
                if (!(prot & IOMMU_PRIV))
                        pte |= ARM_LPAE_PTE_AP_UNPRIV;
        } else {
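For reference, the DBM encoding the hunk above relies on: with DBM set, the
SMMU may clear the read-only bit (AP[2]) on a write instead of raising a
permission fault, so a descriptor reads as hardware-dirty when DBM is set and
AP[2] is clear. The check below is a hypothetical sketch, not code from this
commit; the bit positions follow the VMSAv8-64 leaf descriptor layout and the
names are local to the sketch.

#include <linux/types.h>

/* Illustrative copies of ARM_LPAE_PTE_AP_RDONLY / ARM_LPAE_PTE_DBM */
#define EX_LPAE_PTE_AP_RDONLY  (1ULL << 7)     /* AP[2]: write-protected */
#define EX_LPAE_PTE_DBM        (1ULL << 51)    /* Dirty Bit Modifier */

/*
 * A leaf descriptor is hardware-dirty when DBM is set and the SMMU has
 * cleared the read-only bit on the first write.
 */
static inline bool example_pte_hw_dirty(u64 pte)
{
        return (pte & (EX_LPAE_PTE_DBM | EX_LPAE_PTE_AP_RDONLY)) ==
               EX_LPAE_PTE_DBM;
}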
@@ -929,7 +931,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
 
        if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
                            IO_PGTABLE_QUIRK_ARM_TTBR1 |
-                           IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
+                           IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
+                           IO_PGTABLE_QUIRK_ARM_HD))
                return NULL;
 
        data = arm_lpae_alloc_pgtable(cfg);