- Fix a case for sifive-plic where an interrupt gets disabled *and*
  masked and remains masked when it gets reenabled later

- Plug a small race in GIC-v4 where userspace can force an affinity
  change of a virtual CPU (vPE) in its unmapping path

- Do not mix the two sets of ocelot irqchip's registers in the mask
  calculation of the main interrupt sticky register

- Other smaller fixlets and cleanups

-----BEGIN PGP SIGNATURE-----
iQIzBAABCgAdFiEEzv7L6UO9uDPlPSfHEsHwGGHeVUoFAmcU5d0ACgkQEsHwGGHe
VUrNARAAtS7zkD4zQcJx2r0sr/jkLoWszP3+6Xgz/DKiT6+MUn7An+GmwvCPwZYd
rVqQBBpt6Fzfn8tYY53lLC9C5zvp04aXfKv7/qNsvxI3XmDozwkjQ3pu83TYhXsn
4dWZyUlk3BtyY9Au4aoK0c1X7zaxoGaHs+kQa5PJvPr7bW+fL1RqkpdyOzb3G3+A
rj/KdKz42B54cKCSfQ0321tz/9Ts24ewr8vIhIoVDuHbhZ+gQc9GueZuK75Lm2iT
YZZuBXYUueIESwkRnK2PtpTJn9Q2hF/z52SqPr33D2jCcMFk1WuscBG2kMie/ifL
HZyVE1ynhg8RRJRMxJ2H14aWNZbYa+PnFoK9B2oAquUDRJ/ef7laTpjXQjyYgH/X
xjNdW/lm/yklxc1vmVvPmfGtP0joc17cYix8rGLxymH7oOvvcOxhJRwWmUCLuC78
y1LRwPZxgbC4iK1Rar9IfIzsVMgWQUoGDY3NgoA95xiBcYrfrXjHFgIasKTn5tJd
sQKA4DOlVNotCplWf+Vo801CvXQSr7vra+5apcEYJOTTUEfnPT6LcNXh9S5obpsq
aB0pMgT1xWimIWCwLvwEVYwkKEeYW+TskuM3x1Movzk4BSW1yNZUseoBl1jXPclg
xxSuCNnc5gCn5CbYBt4qrVR+vNga+TYbvvD3KfnDUkWSZKqsjbg=
=LwSw
-----END PGP SIGNATURE-----

Merge tag 'irq_urgent_for_v6.12_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Borislav Petkov:

 - Fix a case for sifive-plic where an interrupt gets disabled *and*
   masked and remains masked when it gets reenabled later

 - Plug a small race in GIC-v4 where userspace can force an affinity
   change of a virtual CPU (vPE) in its unmapping path

 - Do not mix the two sets of ocelot irqchip's registers in the mask
   calculation of the main interrupt sticky register

 - Other smaller fixlets and cleanups

* tag 'irq_urgent_for_v6.12_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqchip/renesas-rzg2l: Fix missing put_device
  irqchip/riscv-intc: Fix SMP=n boot with ACPI
  irqchip/sifive-plic: Unmask interrupt in plic_irq_enable()
  irqchip/gic-v4: Don't allow a VMOVP on a dying VPE
  irqchip/sifive-plic: Return error code on failure
  irqchip/riscv-imsic: Fix output text of base address
  irqchip/ocelot: Comment sticky register clearing code
  irqchip/ocelot: Fix trigger register address
  irqchip: Remove obsolete config ARM_GIC_V3_ITS_PCI
commit 949c9ef59b
@@ -45,13 +45,6 @@ config ARM_GIC_V3_ITS
 	select IRQ_MSI_LIB
 	default ARM_GIC_V3
 
-config ARM_GIC_V3_ITS_PCI
-	bool
-	depends on ARM_GIC_V3_ITS
-	depends on PCI
-	depends on PCI_MSI
-	default ARM_GIC_V3_ITS
-
 config ARM_GIC_V3_ITS_FSL_MC
 	bool
 	depends on ARM_GIC_V3_ITS
@@ -797,8 +797,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
 	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
 
 	if (!desc->its_vmapp_cmd.valid) {
+		alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
 		if (is_v4_1(its)) {
-			alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
 			its_encode_alloc(cmd, alloc);
 			/*
 			 * Unmapping a VPE is self-synchronizing on GICv4.1,
@@ -817,13 +817,13 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
 	its_encode_vpt_addr(cmd, vpt_addr);
 	its_encode_vpt_size(cmd, LPI_NRBITS - 1);
 
+	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
+
 	if (!is_v4_1(its))
 		goto out;
 
 	vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
 
-	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
-
 	its_encode_alloc(cmd, alloc);
 
 	/*
@@ -3806,6 +3806,13 @@ static int its_vpe_set_affinity(struct irq_data *d,
 	struct cpumask *table_mask;
 	unsigned long flags;
 
+	/*
+	 * Check if we're racing against a VPE being destroyed, for
+	 * which we don't want to allow a VMOVP.
+	 */
+	if (!atomic_read(&vpe->vmapp_count))
+		return -EINVAL;
+
 	/*
 	 * Changing affinity is mega expensive, so let's be as lazy as
 	 * we can and only do it if we really have to. Also, if mapped
@@ -4463,9 +4470,8 @@ static int its_vpe_init(struct its_vpe *vpe)
 	raw_spin_lock_init(&vpe->vpe_lock);
 	vpe->vpe_id = vpe_id;
 	vpe->vpt_page = vpt_page;
-	if (gic_rdists->has_rvpeid)
-		atomic_set(&vpe->vmapp_count, 0);
-	else
+	atomic_set(&vpe->vmapp_count, 0);
+	if (!gic_rdists->has_rvpeid)
 		vpe->vpe_proxy_event = -1;
 
 	return 0;
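The three GIC-v3 ITS hunks above fit together: its_build_vmapp_cmd() now increments the vPE's vmapp_count on every map and decrements it on every unmap regardless of is_v4_1(), and its_vpe_set_affinity() refuses a VMOVP with -EINVAL once that count has reached zero, i.e. once the vPE is being torn down. Below is a minimal userspace model of that guard using C11 atomics; the names vpe_map(), vpe_unmap() and vpe_set_affinity() are invented for illustration and are not the kernel API.

/* Minimal userspace model of the vmapp_count guard, not kernel code. */
#include <stdatomic.h>
#include <stdio.h>

struct vpe {
	atomic_int vmapp_count;	/* how many ITSes currently map this vPE */
};

static void vpe_map(struct vpe *v)
{
	/* VMAPP (valid): count the new mapping, like atomic_fetch_inc() */
	atomic_fetch_add(&v->vmapp_count, 1);
}

static void vpe_unmap(struct vpe *v)
{
	/* VMAPP (invalid): drop the mapping, like atomic_dec_return() */
	atomic_fetch_sub(&v->vmapp_count, 1);
}

static int vpe_set_affinity(struct vpe *v)
{
	/* Refuse a VMOVP once the vPE is being destroyed (count == 0). */
	if (!atomic_load(&v->vmapp_count))
		return -1;	/* stands in for -EINVAL */
	return 0;		/* here the real code would issue the VMOVP */
}

int main(void)
{
	struct vpe v = { .vmapp_count = 0 };

	vpe_map(&v);
	printf("mapped:   set_affinity -> %d\n", vpe_set_affinity(&v));
	vpe_unmap(&v);
	printf("unmapped: set_affinity -> %d\n", vpe_set_affinity(&v));
	return 0;
}

Compiled with any C11 compiler, the second set_affinity call fails because the only mapping has already been dropped, which is exactly the race window the new check closes.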
@@ -37,7 +37,7 @@ static struct chip_props ocelot_props = {
 	.reg_off_ena_clr = 0x1c,
 	.reg_off_ena_set = 0x20,
 	.reg_off_ident = 0x38,
-	.reg_off_trigger = 0x5c,
+	.reg_off_trigger = 0x4,
 	.n_irq = 24,
 };
 
@@ -70,7 +70,7 @@ static struct chip_props jaguar2_props = {
 	.reg_off_ena_clr = 0x1c,
 	.reg_off_ena_set = 0x20,
 	.reg_off_ident = 0x38,
-	.reg_off_trigger = 0x5c,
+	.reg_off_trigger = 0x4,
 	.n_irq = 29,
 };
 
@@ -84,6 +84,12 @@ static void ocelot_irq_unmask(struct irq_data *data)
 	u32 val;
 
 	irq_gc_lock(gc);
+	/*
+	 * Clear sticky bits for edge mode interrupts.
+	 * Serval has only one trigger register replication, but the adjacent
+	 * register is always read as zero, so there's no need to handle this
+	 * case separately.
+	 */
 	val = irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(p, 0)) |
 	      irq_reg_readl(gc, ICPU_CFG_INTR_INTR_TRIGGER(p, 1));
 	if (!(val & mask))
@@ -8,6 +8,7 @@
  */
 
 #include <linux/bitfield.h>
+#include <linux/cleanup.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/io.h>
@@ -530,12 +531,12 @@ static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
 static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *parent,
 				  const struct irq_chip *irq_chip)
 {
+	struct platform_device *pdev = of_find_device_by_node(node);
+	struct device *dev __free(put_device) = pdev ? &pdev->dev : NULL;
 	struct irq_domain *irq_domain, *parent_domain;
-	struct platform_device *pdev;
 	struct reset_control *resetn;
 	int ret;
 
-	pdev = of_find_device_by_node(node);
 	if (!pdev)
 		return -ENODEV;
 
@@ -591,6 +592,17 @@ static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *
 
 	register_syscore_ops(&rzg2l_irqc_syscore_ops);
 
+	/*
+	 * Prevent the cleanup function from invoking put_device by assigning
+	 * NULL to dev.
+	 *
+	 * make coccicheck will complain about missing put_device calls, but
+	 * those are false positives, as dev will be automatically "put" via
+	 * __free_put_device on the failing path.
+	 * On the successful path we don't actually want to "put" dev.
+	 */
+	dev = NULL;
+
 	return 0;
 
 pm_put:
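The dev = NULL assignment documented in the hunk above leans on the scope-based cleanup helpers pulled in via linux/cleanup.h: a variable declared with __free(put_device) is automatically "put" on every exit path unless it is cleared first. A rough userspace analogue, built on the GCC/Clang cleanup attribute that those helpers wrap, is sketched below; struct resource, put_resource() and setup() are made-up names for illustration, not the kernel interfaces.

/* Userspace sketch of the "auto-put unless claimed" pattern (GCC/Clang only). */
#include <stdio.h>
#include <stdlib.h>

struct resource { int id; };

static void put_resource(struct resource **p)
{
	if (*p) {		/* skipped when the pointer was NULLed ("claimed") */
		printf("putting resource %d\n", (*p)->id);
		free(*p);
	}
}

static int setup(int fail)
{
	/* Dropped automatically on every return path, like __free(put_device). */
	__attribute__((cleanup(put_resource))) struct resource *res =
		malloc(sizeof(*res));

	if (!res)
		return -1;
	res->id = 42;

	if (fail)
		return -1;	/* cleanup runs here: the reference is put */

	/* Success: keep the reference alive past this scope, so disarm the
	 * cleanup. (The allocation is intentionally retained, mirroring the
	 * driver keeping its device reference.) */
	res = NULL;
	return 0;
}

int main(void)
{
	printf("failing path: %d\n", setup(1));
	printf("success path: %d\n", setup(0));
	return 0;
}

The failing path prints "putting resource 42" while the successful path keeps the reference, which mirrors why the driver only NULLs dev once initialization can no longer fail.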
@@ -341,7 +341,7 @@ int imsic_irqdomain_init(void)
 		imsic->fwnode, global->hart_index_bits, global->guest_index_bits);
 	pr_info("%pfwP: group-index-bits: %d, group-index-shift: %d\n",
 		imsic->fwnode, global->group_index_bits, global->group_index_shift);
-	pr_info("%pfwP: per-CPU IDs %d at base PPN %pa\n",
+	pr_info("%pfwP: per-CPU IDs %d at base address %pa\n",
 		imsic->fwnode, global->nr_ids, &global->base_addr);
 	pr_info("%pfwP: total %d interrupts available\n",
 		imsic->fwnode, num_possible_cpus() * (global->nr_ids - 1));
@@ -265,7 +265,7 @@ struct rintc_data {
 };
 
 static u32 nr_rintc;
-static struct rintc_data *rintc_acpi_data[NR_CPUS];
+static struct rintc_data **rintc_acpi_data;
 
 #define for_each_matching_plic(_plic_id) \
 	unsigned int _plic; \
@@ -329,13 +329,30 @@ int acpi_rintc_get_imsic_mmio_info(u32 index, struct resource *res)
 	return 0;
 }
 
+static int __init riscv_intc_acpi_match(union acpi_subtable_headers *header,
+					const unsigned long end)
+{
+	return 0;
+}
+
 static int __init riscv_intc_acpi_init(union acpi_subtable_headers *header,
 				       const unsigned long end)
 {
 	struct acpi_madt_rintc *rintc;
 	struct fwnode_handle *fn;
+	int count;
 	int rc;
 
+	if (!rintc_acpi_data) {
+		count = acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, riscv_intc_acpi_match, 0);
+		if (count <= 0)
+			return -EINVAL;
+
+		rintc_acpi_data = kcalloc(count, sizeof(*rintc_acpi_data), GFP_KERNEL);
+		if (!rintc_acpi_data)
+			return -ENOMEM;
+	}
+
 	rintc = (struct acpi_madt_rintc *)header;
 	rintc_acpi_data[nr_rintc] = kzalloc(sizeof(*rintc_acpi_data[0]), GFP_KERNEL);
 	if (!rintc_acpi_data[nr_rintc])
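The hunk above replaces the fixed rintc_acpi_data[NR_CPUS] array with one sized by an extra counting pass over the MADT: riscv_intc_acpi_match() matches every RINTC entry without storing anything, and the resulting count feeds kcalloc(). (NR_CPUS is 1 when CONFIG_SMP=n, which is presumably why ACPI boot broke with the fixed array.) A small userspace model of the count-then-allocate idea follows; the entry type and parse_entries() helper are invented stand-ins for the ACPI table walker, not its real interface.

/* Userspace model of "count the entries first, then allocate exactly that many". */
#include <stdio.h>
#include <stdlib.h>

struct entry { int hart_id; };

/* Stand-in for walking the MADT: invokes cb() for each RINTC-like entry and
 * returns how many entries the callback accepted (returned 0 for). */
static int parse_entries(const struct entry *tbl, int n,
			 int (*cb)(const struct entry *))
{
	int count = 0;

	for (int i = 0; i < n; i++)
		if (!cb(&tbl[i]))
			count++;
	return count;
}

/* First pass: match everything, store nothing (like riscv_intc_acpi_match). */
static int match_only(const struct entry *e) { (void)e; return 0; }

int main(void)
{
	const struct entry madt[] = { { 0 }, { 1 }, { 2 }, { 3 } };
	struct entry **data;

	int count = parse_entries(madt, 4, match_only);
	if (count <= 0)
		return 1;

	data = calloc(count, sizeof(*data));	/* like kcalloc() in the fix */
	if (!data)
		return 1;

	printf("allocated room for %d entries (independent of NR_CPUS)\n", count);
	free(data);
	return 0;
}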
@@ -126,16 +126,6 @@ static inline void plic_irq_toggle(const struct cpumask *mask,
 	}
 }
 
-static void plic_irq_enable(struct irq_data *d)
-{
-	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
-}
-
-static void plic_irq_disable(struct irq_data *d)
-{
-	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
-}
-
 static void plic_irq_unmask(struct irq_data *d)
 {
 	struct plic_priv *priv = irq_data_get_irq_chip_data(d);
@@ -150,6 +140,17 @@ static void plic_irq_mask(struct irq_data *d)
 	writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
 }
 
+static void plic_irq_enable(struct irq_data *d)
+{
+	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
+	plic_irq_unmask(d);
+}
+
+static void plic_irq_disable(struct irq_data *d)
+{
+	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
+}
+
 static void plic_irq_eoi(struct irq_data *d)
 {
 	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
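plic_irq_enable() is moved below plic_irq_mask()/plic_irq_unmask() so that it can also call plic_irq_unmask(), which is the fix summarized in the merge message: an interrupt that had been disabled *and* masked used to stay masked after being re-enabled. The toy model below only illustrates why enable must clear the mask as well; the two booleans are a simplification for illustration, not the PLIC's actual enable/priority registers.

/* Toy model: an interrupt is delivered only if it is enabled and not masked. */
#include <stdbool.h>
#include <stdio.h>

struct irq { bool enabled; bool masked; };

static bool delivers(const struct irq *i) { return i->enabled && !i->masked; }

/* Old behaviour: enable did not touch the mask state. */
static void enable_old(struct irq *i) { i->enabled = true; }

/* Fixed behaviour: enable also unmasks, like plic_irq_enable() calling
 * plic_irq_unmask() after plic_irq_toggle(..., 1). */
static void enable_new(struct irq *i) { i->enabled = true; i->masked = false; }

static void disable_and_mask(struct irq *i) { i->enabled = false; i->masked = true; }

int main(void)
{
	struct irq a = { false, false }, b = { false, false };

	disable_and_mask(&a);
	enable_old(&a);
	printf("old enable: delivers=%d (stuck masked)\n", delivers(&a));

	disable_and_mask(&b);
	enable_new(&b);
	printf("new enable: delivers=%d\n", delivers(&b));
	return 0;
}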
@@ -626,8 +627,10 @@ static int plic_probe(struct fwnode_handle *fwnode)
 
 		handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
 					       sizeof(*handler->enable_save), GFP_KERNEL);
-		if (!handler->enable_save)
+		if (!handler->enable_save) {
+			error = -ENOMEM;
 			goto fail_cleanup_contexts;
+		}
done:
 		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
 			plic_toggle(handler, hwirq, 0);
@@ -639,8 +642,10 @@ done:
 
 	priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1,
 			&plic_irqdomain_ops, priv);
-	if (WARN_ON(!priv->irqdomain))
+	if (WARN_ON(!priv->irqdomain)) {
+		error = -ENOMEM;
 		goto fail_cleanup_contexts;
+	}
 
 	/*
 	 * We can have multiple PLIC instances so setup global state
@@ -66,10 +66,12 @@ struct its_vpe {
 				bool	enabled;
 				bool	group;
 			} sgi_config[16];
-			atomic_t vmapp_count;
 		};
 	};
 
+	/* Track the VPE being mapped */
+	atomic_t vmapp_count;
+
 	/*
 	 * Ensures mutual exclusion between affinity setting of the
 	 * vPE and vLPI operations using vpe->col_idx.