Misc fixes:

 - Fix x86 IRQ vector leak caused by a CPU offlining race

 - Fix build failure in the riscv-imsic irqchip driver caused by an
   API-change semantic conflict

 - Fix use-after-free in irq_find_at_or_after()

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-----BEGIN PGP SIGNATURE-----

iQJFBAABCgAvFiEEBpT5eoXrXCwVQwEKEnMQ0APhK1gFAmZRwMURHG1pbmdvQGtl
cm5lbC5vcmcACgkQEnMQ0APhK1h/zQ//TTrgyXi6+1xXY4R0LDU45j+wavMTMkq3
kM3eUeyXgy+FDtvLRVaYgEAYbtuR4LGFN9qmVuEHJPZQwpi3AFlnGFUFjFUvyE43
xJuOtHoxFv3mj09VgRGsjZvzp8bxYSkEn3h0ryTWGUHzR+QmoQmYWrU6HExgXw3R
+s8pvi14g6R/+PAy05cF0k1J7aeSsYaOfd38D/XnpyhuhXvPMS2eHgovV6I5Qhk4
5lV6rzJv8XlKxVr7bOYJkRePE3z0HMtx0G7eo8eYERBQapHede18V8imv4OpUiua
vmG8cFhF4Lq9KFdEtiVuf1X9/XH3PoEKTGA81oqQ9lLN9USx7ME/Peg6U5ezvEkp
YmQx2LS12DWqYp5PZQTN0CHnfmMLgksmyGELM3JE/dFFCVh4HdpMrh+2wLwWGRJ3
JLzAJh3YwcPhayLpNVgsSF9AtLKTkDoS0bHd43mHnB6VaEKkus8zbeuCxYAsUeMJ
5wCZw3xQjTZEaMMNd1hJN5O/9TX2of+T6Z4C4cacMBmwpD7vX5oXmDYLE/wUHw6m
9Z67fvOvTdIf3MkYSqjGXFKD1JobL/PmwCfaaGUQFVJkbX5WVNDk6C1zgs5FhmuY
U/AcYfadbNdLVXrN3VLnX6Gmb7gFPShOAE1GgXGeszSReI4pbOUy2zopRGAEWSZS
fRu8nyveGjw=
=vxJh
-----END PGP SIGNATURE-----

Merge tag 'irq-urgent-2024-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Ingo Molnar:

 - Fix x86 IRQ vector leak caused by a CPU offlining race

 - Fix build failure in the riscv-imsic irqchip driver caused by an
   API-change semantic conflict

 - Fix use-after-free in irq_find_at_or_after()

* tag 'irq-urgent-2024-05-25' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  genirq/irqdesc: Prevent use-after-free in irq_find_at_or_after()
  genirq/cpuhotplug, x86/vector: Prevent vector leak during CPU offline
  irqchip/riscv-imsic: Fixup riscv_ipi_set_virq_range() conflict
commit a0db36ed57
arch/x86/kernel/apic/vector.c

@@ -1035,7 +1035,8 @@ static void __vector_schedule_cleanup(struct apic_chip_data *apicd)
 			add_timer_on(&cl->timer, cpu);
 		}
 	} else {
-		apicd->prev_vector = 0;
+		pr_warn("IRQ %u schedule cleanup for offline CPU %u\n", apicd->irq, cpu);
+		free_moved_vector(apicd);
 	}
 	raw_spin_unlock(&vector_lock);
 }
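For illustration, here is a small standalone C model of the pattern this hunk establishes: if the CPU picked to run the deferred cleanup is already offline, the vector is reclaimed immediately instead of merely forgotten, because the cleanup timer can never fire on a dead CPU. All names in the sketch (moved_vector, cpu_is_online, the CPU numbers) are illustrative stand-ins, not kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct apic_chip_data. */
struct moved_vector {
    unsigned int irq;
    unsigned int prev_vector;   /* vector still reserved on the old CPU */
};

/* Pretend CPU 3 has been offlined. */
static bool cpu_is_online(unsigned int cpu)
{
    return cpu != 3;
}

static void free_moved_vector(struct moved_vector *mv)
{
    printf("IRQ %u: vector %u released\n", mv->irq, mv->prev_vector);
    mv->prev_vector = 0;
}

static void schedule_cleanup(struct moved_vector *mv, unsigned int cpu)
{
    if (cpu_is_online(cpu)) {
        /* A real implementation would arm a timer on that CPU. */
        printf("IRQ %u: cleanup timer armed on CPU %u\n", mv->irq, cpu);
    } else {
        /*
         * The fix: reclaim the vector right away.  Only clearing
         * prev_vector, as the old code did, leaks the reservation.
         */
        fprintf(stderr, "IRQ %u schedule cleanup for offline CPU %u\n",
                mv->irq, cpu);
        free_moved_vector(mv);
    }
}

int main(void)
{
    struct moved_vector mv = { .irq = 42, .prev_vector = 0x31 };

    schedule_cleanup(&mv, 3);   /* offline target: freed, not leaked */
    return 0;
}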
@@ -1072,6 +1073,7 @@ void irq_complete_move(struct irq_cfg *cfg)
  */
 void irq_force_complete_move(struct irq_desc *desc)
 {
+	unsigned int cpu = smp_processor_id();
 	struct apic_chip_data *apicd;
 	struct irq_data *irqd;
 	unsigned int vector;
@@ -1096,10 +1098,11 @@ void irq_force_complete_move(struct irq_desc *desc)
 		goto unlock;
 
 	/*
-	 * If prev_vector is empty, no action required.
+	 * If prev_vector is empty or the descriptor is neither currently
+	 * nor previously on the outgoing CPU no action required.
 	 */
 	vector = apicd->prev_vector;
-	if (!vector)
+	if (!vector || (apicd->cpu != cpu && apicd->prev_cpu != cpu))
 		goto unlock;
 
 	/*
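A hedged sketch of the strengthened bail-out condition above: forced cleanup now runs only when the outgoing CPU is the vector's current or previous home, so an unrelated CPU going down cannot release a vector that is still live elsewhere. The struct and values below are made up for illustration; they merely mirror the fields the check reads.

#include <stdbool.h>
#include <stdio.h>

struct apicd_model {
    unsigned int cpu;           /* current target CPU */
    unsigned int prev_cpu;      /* CPU the vector moved away from */
    unsigned int prev_vector;   /* 0 means no cleanup pending */
};

static bool needs_forced_cleanup(const struct apicd_model *a,
                                 unsigned int this_cpu)
{
    if (!a->prev_vector)
        return false;   /* nothing pending: bail out */
    if (a->cpu != this_cpu && a->prev_cpu != this_cpu)
        return false;   /* outgoing CPU is not involved: bail out */
    return true;
}

int main(void)
{
    struct apicd_model a = { .cpu = 1, .prev_cpu = 0, .prev_vector = 0x31 };

    /* CPU 2 going offline must not touch a vector owned by CPUs 0/1. */
    printf("cpu 2: %d\n", needs_forced_cleanup(&a, 2));    /* prints 0 */
    printf("cpu 0: %d\n", needs_forced_cleanup(&a, 0));    /* prints 1 */
    return 0;
}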
kernel/irq/cpuhotplug.c

@@ -69,6 +69,14 @@ static bool migrate_one_irq(struct irq_desc *desc)
 		return false;
 	}
 
+	/*
+	 * Complete an eventually pending irq move cleanup. If this
+	 * interrupt was moved in hard irq context, then the vectors need
+	 * to be cleaned up. It can't wait until this interrupt actually
+	 * happens and this CPU was involved.
+	 */
+	irq_force_complete_move(desc);
+
 	/*
 	 * No move required, if:
 	 * - Interrupt is per cpu
@@ -87,14 +95,6 @@ static bool migrate_one_irq(struct irq_desc *desc)
 		return false;
 	}
 
-	/*
-	 * Complete an eventually pending irq move cleanup. If this
-	 * interrupt was moved in hard irq context, then the vectors need
-	 * to be cleaned up. It can't wait until this interrupt actually
-	 * happens and this CPU was involved.
-	 */
-	irq_force_complete_move(desc);
-
 	/*
 	 * If there is a setaffinity pending, then try to reuse the pending
 	 * mask, so the last change of the affinity does not get lost. If
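The two hunks above move the irq_force_complete_move() call ahead of the "no move required" early returns. A minimal standalone sketch of why that ordering matters (function and field names here are invented for illustration): if the cleanup only runs after the early-return checks, an interrupt that takes one of those early exits never releases its pending previous vector.

#include <stdbool.h>
#include <stdio.h>

struct irq_model {
    bool per_cpu;               /* one of the early-return conditions */
    unsigned int prev_vector;   /* pending cleanup, 0 if none */
};

static void force_complete_move(struct irq_model *d)
{
    if (d->prev_vector) {
        printf("released pending vector %u\n", d->prev_vector);
        d->prev_vector = 0;
    }
}

/* Fixed ordering: cleanup happens before any early return. */
static bool migrate_one_irq_model(struct irq_model *d)
{
    force_complete_move(d);

    if (d->per_cpu)
        return false;   /* the old ordering leaked prev_vector here */

    printf("migrating interrupt\n");
    return true;
}

int main(void)
{
    struct irq_model d = { .per_cpu = true, .prev_vector = 0x31 };

    migrate_one_irq_model(&d);  /* vector released despite the early exit */
    return 0;
}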
kernel/irq/irqdesc.c

@@ -160,7 +160,10 @@ static int irq_find_free_area(unsigned int from, unsigned int cnt)
 static unsigned int irq_find_at_or_after(unsigned int offset)
 {
 	unsigned long index = offset;
-	struct irq_desc *desc = mt_find(&sparse_irqs, &index, nr_irqs);
+	struct irq_desc *desc;
+
+	guard(rcu)();
+	desc = mt_find(&sparse_irqs, &index, nr_irqs);
 
 	return desc ? irq_desc_get_irq(desc) : nr_irqs;
 }
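The guard(rcu)() above comes from the kernel's scope-based cleanup helpers in <linux/cleanup.h>: it takes rcu_read_lock() where it is declared and drops the lock automatically when the scope ends, so the mt_find() walk and the descriptor it returns are used under RCU protection, closing the use-after-free window against a concurrent free. Below is a hedged userspace model of the mechanism built on the same compiler feature, __attribute__((cleanup)); the fake_* functions only print and are not the real RCU API.

#include <stdio.h>

static void fake_rcu_read_lock(void)   { puts("rcu_read_lock()"); }
static void fake_rcu_read_unlock(void) { puts("rcu_read_unlock()"); }

struct rcu_guard { int unused; };

static void rcu_guard_exit(struct rcu_guard *g)
{
    (void)g;
    fake_rcu_read_unlock();     /* runs automatically at scope exit */
}

/* Simplified analogue of guard(rcu)() from <linux/cleanup.h>. */
#define guard_rcu() \
    struct rcu_guard __rcu_scope __attribute__((cleanup(rcu_guard_exit))) = { 0 }; \
    fake_rcu_read_lock()

static unsigned int lookup(void)
{
    guard_rcu();                    /* lock taken here ... */
    puts("mt_find() under RCU");    /* ... tree walk is protected ... */
    return 0;                       /* ... unlock fires on return */
}

int main(void)
{
    return (int)lookup();
}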