/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>

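/* Reported as the "ERR" line in /proc/interrupts (see show_other_interrupts) */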
atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*generic_interrupt_extension)(void) = NULL;
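
/*
 * Illustrative use (hypothetical names, not code in this file): a
 * platform driver that owns GENERIC_INTERRUPT_VECTOR installs its
 * handler here, and smp_generic_interrupt() below will call it:
 *
 *	static void example_platform_handler(void)
 *	{
 *		(dispatch the platform-specific event)
 *	}
 *
 *	generic_interrupt_extension = example_platform_handler;
 */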

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing:
 */
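/*
 * Illustrative layout (made-up values, approximate alignment; the exact
 * column widths come from prec and the "%10u " conversions below):
 *
 *	           CPU0       CPU1
 *	  0:         42          0   IO-APIC-edge      timer
 *	NMI:          0          0   Non-maskable interrupts
 *	LOC:      54321      54000   Local timer interrupts
 *	ERR:          0
 */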
static int show_other_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance counter interrupts\n");
	seq_printf(p, "%*s: ", prec, "PND");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
	seq_printf(p, "  Performance pending work\n");
#endif
	if (generic_interrupt_extension) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->generic_irqs);
		seq_printf(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
# ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
# endif
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j, prec;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > nr_irqs)
		return 0;
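
	/* Label width: enough columns to print nr_irqs in decimal, 3..10 */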
	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;

	if (i == nr_irqs)
		return show_other_interrupts(p, prec);

	/* print header */
	if (i == 0) {
		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	desc = irq_to_desc(i);
	if (!desc)
		return 0;

	spin_lock_irqsave(&desc->lock, flags);
	for_each_online_cpu(j)
		any_count |= kstat_irqs_cpu(i, j);
	action = desc->action;
	if (!action && !any_count)
		goto out;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
	seq_printf(p, " %8s", desc->chip->name);
	seq_printf(p, "-%-8s", desc->name);

	if (action) {
		seq_printf(p, "  %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
out:
	spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

/*
 * /proc/stat helpers
 */
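/* Summed into the "intr" line of /proc/stat by the generic proc code */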
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_pending_irqs;
#endif
	if (generic_interrupt_extension)
		sum += irq_stats(cpu)->generic_irqs;
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
	sum += irq_stats(cpu)->irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += irq_stats(cpu)->irq_thermal_count;
# ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
# endif
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
	sum += atomic_read(&irq_mis_count);
#endif
	return sum;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/* high bit used in ret_from_ code; the entry code pushes ~vector */
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	exit_idle();
	irq_enter();
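
	/* Per-CPU vector_irq[] maps a hardware vector back to its irq number */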
	irq = __get_cpu_var(vector_irq)[vector];

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

		if (printk_ratelimit())
			pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
				__func__, smp_processor_id(), vector, irq);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

/*
 * Handler for GENERIC_INTERRUPT_VECTOR.
 */
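/*
 * The vector is acked on the local APIC unconditionally, and the event
 * is counted in irq_stats even when no extension handler is installed.
 */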
void smp_generic_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();

	exit_idle();

	irq_enter();

	inc_irq_stat(generic_irqs);

	if (generic_interrupt_extension)
		generic_interrupt_extension();

	irq_exit();

	set_irq_regs(old_regs);
}
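
/* vector_used_by_percpu_irq() itself is implemented in the bitness-specific irq code */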
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);