PCI/MSI: Move mask and unmask helpers to msi.h
The upcoming support for per device MSI interrupt domains needs to share
some of the inline helpers with the MSI implementation.

Move them to the header file.

Signed-off-by: Ahmed S. Darwish <darwi@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Acked-by: Bjorn Helgaas <bhelgaas@google.com>
Link: https://lore.kernel.org/r/20221111122014.640052354@linutronix.de
commit c93fd5266c (parent db537dd3bf)
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -16,7 +16,7 @@
 static int pci_msi_enable = 1;
 int pci_msi_ignore_mask;
 
-static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
+void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
 {
         raw_spinlock_t *lock = &to_pci_dev(desc->dev)->msi_lock;
         unsigned long flags;
@@ -32,65 +32,6 @@ static noinline void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 s
         raw_spin_unlock_irqrestore(lock, flags);
 }
 
-static inline void pci_msi_mask(struct msi_desc *desc, u32 mask)
-{
-        pci_msi_update_mask(desc, 0, mask);
-}
-
-static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask)
-{
-        pci_msi_update_mask(desc, mask, 0);
-}
-
-static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
-{
-        return desc->pci.mask_base + desc->msi_index * PCI_MSIX_ENTRY_SIZE;
-}
-
-/*
- * This internal function does not flush PCI writes to the device. All
- * users must ensure that they read from the device before either assuming
- * that the device state is up to date, or returning out of this file.
- * It does not affect the msi_desc::msix_ctrl cache either. Use with care!
- */
-static void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
-{
-        void __iomem *desc_addr = pci_msix_desc_addr(desc);
-
-        if (desc->pci.msi_attrib.can_mask)
-                writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
-}
-
-static inline void pci_msix_mask(struct msi_desc *desc)
-{
-        desc->pci.msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
-        pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
-        /* Flush write to device */
-        readl(desc->pci.mask_base);
-}
-
-static inline void pci_msix_unmask(struct msi_desc *desc)
-{
-        desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
-        pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
-}
-
-static void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
-{
-        if (desc->pci.msi_attrib.is_msix)
-                pci_msix_mask(desc);
-        else
-                pci_msi_mask(desc, mask);
-}
-
-static void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
-{
-        if (desc->pci.msi_attrib.is_msix)
-                pci_msix_unmask(desc);
-        else
-                pci_msi_unmask(desc, mask);
-}
-
 /**
  * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts
  * @data: pointer to irqdata associated to that interrupt
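A note for readers: the clear/set convention of pci_msi_update_mask(), which this
hunk exports from msi.c, can be modeled outside the kernel. The following is a
minimal, host-compilable sketch, not kernel code: all names are invented for the
illustration, and the msi_desc bookkeeping, the raw spinlock and the config-space
write are reduced to a plain variable and a printf. Only the bit manipulation
mirrors the real helper.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Stand-in for the cached desc->pci.msi_mask field (assumption: the
 * locking and the pci_write_config_dword() call are omitted here). */
static u32 msi_mask;

/* Mirrors the bit logic of pci_msi_update_mask(): first clear the bits
 * in 'clear', then set the bits in 'set', then push the result out. */
static void msi_update_mask(u32 clear, u32 set)
{
        msi_mask &= ~clear;
        msi_mask |= set;
        printf("write mask register: 0x%08x\n", (unsigned int)msi_mask);
}

/* Like pci_msi_mask(): masking a vector means setting its mask bit */
static void msi_mask_vectors(u32 mask)
{
        msi_update_mask(0, mask);
}

/* Like pci_msi_unmask(): unmasking means clearing the mask bit */
static void msi_unmask_vectors(u32 mask)
{
        msi_update_mask(mask, 0);
}

int main(void)
{
        msi_mask_vectors(0x0f);   /* mask vectors 0-3   -> 0x0000000f */
        msi_unmask_vectors(0x01); /* unmask vector 0    -> 0x0000000e */
        return 0;
}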
--- a/drivers/pci/msi/msi.h
+++ b/drivers/pci/msi/msi.h
@@ -8,6 +8,83 @@
 int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
 void pci_msi_teardown_msi_irqs(struct pci_dev *dev);
 
+/* Mask/unmask helpers */
+void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set);
+
+static inline void pci_msi_mask(struct msi_desc *desc, u32 mask)
+{
+        pci_msi_update_mask(desc, 0, mask);
+}
+
+static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask)
+{
+        pci_msi_update_mask(desc, mask, 0);
+}
+
+static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
+{
+        return desc->pci.mask_base + desc->msi_index * PCI_MSIX_ENTRY_SIZE;
+}
+
+/*
+ * This internal function does not flush PCI writes to the device. All
+ * users must ensure that they read from the device before either assuming
+ * that the device state is up to date, or returning out of this file.
+ * It does not affect the msi_desc::msix_ctrl cache either. Use with care!
+ */
+static inline void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
+{
+        void __iomem *desc_addr = pci_msix_desc_addr(desc);
+
+        if (desc->pci.msi_attrib.can_mask)
+                writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+}
+
+static inline void pci_msix_mask(struct msi_desc *desc)
+{
+        desc->pci.msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+        pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
+        /* Flush write to device */
+        readl(desc->pci.mask_base);
+}
+
+static inline void pci_msix_unmask(struct msi_desc *desc)
+{
+        desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+        pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
+}
+
+static inline void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
+{
+        if (desc->pci.msi_attrib.is_msix)
+                pci_msix_mask(desc);
+        else
+                pci_msi_mask(desc, mask);
+}
+
+static inline void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
+{
+        if (desc->pci.msi_attrib.is_msix)
+                pci_msix_unmask(desc);
+        else
+                pci_msi_unmask(desc, mask);
+}
+
+/*
+ * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
+ * mask all MSI interrupts by clearing the MSI enable bit does not work
+ * reliably as devices without an INTx disable bit will then generate a
+ * level IRQ which will never be cleared.
+ */
+static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
+{
+        /* Don't shift by >= width of type */
+        if (desc->pci.msi_attrib.multi_cap >= 5)
+                return 0xffffffff;
+        return (1 << (1 << desc->pci.msi_attrib.multi_cap)) - 1;
+}
+
 /* Legacy (!IRQDOMAIN) fallbacks */
 #ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
 int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
 void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev);
@@ -23,17 +100,3 @@ static inline void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev)
         WARN_ON_ONCE(1);
 }
 #endif
-
-/*
- * PCI 2.3 does not specify mask bits for each MSI interrupt. Attempting to
- * mask all MSI interrupts by clearing the MSI enable bit does not work
- * reliably as devices without an INTx disable bit will then generate a
- * level IRQ which will never be cleared.
- */
-static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
-{
-        /* Don't shift by >= width of type */
-        if (desc->pci.msi_attrib.multi_cap >= 5)
-                return 0xffffffff;
-        return (1 << (1 << desc->pci.msi_attrib.multi_cap)) - 1;
-}
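As a worked example of the msi_multi_mask() arithmetic moved above: multi_cap
holds the log2-encoded Multiple Message Capable value, so a function that
advertises 2^multi_cap vectors gets a mask with that many low bits set, and the
">= 5" guard avoids an undefined shift by 32 on a 32-bit type. Below is a small
stand-alone sketch (hypothetical names, same math), compilable on any host:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t u32;

/* Same arithmetic as msi_multi_mask(): multi_cap is log2 of the
 * number of MSI vectors the device supports. */
static u32 multi_mask(unsigned int multi_cap)
{
        /* Don't shift by >= width of type: 1 << 5 == 32 vectors */
        if (multi_cap >= 5)
                return 0xffffffff;
        return (1u << (1 << multi_cap)) - 1;
}

int main(void)
{
        printf("multi_cap=0 -> 0x%08x\n", (unsigned int)multi_mask(0));
        printf("multi_cap=3 -> 0x%08x\n", (unsigned int)multi_mask(3));
        printf("multi_cap=5 -> 0x%08x\n", (unsigned int)multi_mask(5));
        return 0;
}

For multi_cap values 0, 3 and 5 this prints 0x00000001, 0x000000ff and
0xffffffff, i.e. masks covering one, eight and all thirty-two vectors.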