/*
 * Loongson-3 HPET driver: clocksource and per-cpu clock_event_device
 * backed by the SBX00 HPET, configured through SMBus PCI config MMIO.
 * (arch/mips/loongson64/loongson-3/hpet.c)
 */
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/hpet.h>
#include <asm/time.h>
#define SMBUS_CFG_BASE (loongson_sysconf.ht_control_base + 0x0300a000)
#define SMBUS_PCI_REG40 0x40
#define SMBUS_PCI_REG64 0x64
#define SMBUS_PCI_REGB4 0xb4
#define HPET_MIN_CYCLES 16
#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12)
/*
 * The comparator value we write can already be behind the running counter
 * by the time it reaches the hardware: an NMI may hit between the counter
 * read and the CMP write, and the CMP write itself may be delayed by a
 * couple of HPET cycles.  Instead of reading CMP back (slow for the common
 * case), treat any programmed delta closer than HPET_MIN_CYCLES as already
 * expired and return -ETIME, which avoids waiting ~306s for a 32-bit
 * counter wraparound.  Borrowed from the x86 hpet driver.
 */
/* Serializes read-modify-write sequences on the HPET/SMBus registers. */
static DEFINE_SPINLOCK(hpet_lock);
/* Per-cpu clock event device; registered by setup_hpet_timer() for the calling cpu. */
DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
/* Read a 32-bit SMBus controller config register at @offset. */
static unsigned int smbus_read(int offset)
{
	volatile unsigned int *reg;

	reg = (volatile unsigned int *)(SMBUS_CFG_BASE + offset);
	return *reg;
}
/* Write @data to the 32-bit SMBus controller config register at @offset. */
static void smbus_write(int offset, int data)
{
	volatile unsigned int *reg;

	reg = (volatile unsigned int *)(SMBUS_CFG_BASE + offset);
	*reg = data;
}
/* Set @bit in the SMBus config register at @offset (read-modify-write). */
static void smbus_enable(int offset, int bit)
{
	smbus_write(offset, smbus_read(offset) | bit);
}
/* Read a 32-bit HPET register at @offset from the MMIO window. */
static int hpet_read(int offset)
{
	volatile unsigned int *reg;

	reg = (volatile unsigned int *)(HPET_MMIO_ADDR + offset);
	return *reg;
}
/* Write @data to the 32-bit HPET register at @offset. */
static void hpet_write(int offset, int data)
{
	volatile unsigned int *reg;

	reg = (volatile unsigned int *)(HPET_MMIO_ADDR + offset);
	*reg = data;
}
/* Set HPET_CFG_ENABLE so the main counter runs. */
static void hpet_start_counter(void)
{
	hpet_write(HPET_CFG, hpet_read(HPET_CFG) | HPET_CFG_ENABLE);
}
/* Clear HPET_CFG_ENABLE to halt the main counter. */
static void hpet_stop_counter(void)
{
	hpet_write(HPET_CFG, hpet_read(HPET_CFG) & ~HPET_CFG_ENABLE);
}
/*
 * Zero the 64-bit main counter with two 32-bit writes (low word, then
 * high word).  Caller must have stopped the counter first.
 */
static void hpet_reset_counter(void)
{
	hpet_write(HPET_COUNTER, 0);
	hpet_write(HPET_COUNTER + 4, 0);
}
/*
 * Restart the main counter from zero.  The counter must be stopped
 * before it may be written, hence the stop/reset/start sequence.
 */
static void hpet_restart_counter(void)
{
	hpet_stop_counter();
	hpet_reset_counter();
	hpet_start_counter();
}
/*
 * Legacy replacement interrupt routing is not used on Loongson-3;
 * kept as a stub so the resume/setup paths mirror the generic driver.
 */
static void hpet_enable_legacy_int(void)
{
	/* Do nothing on Loongson-3 */
}
/*
 * Switch timer0 to periodic mode: with the main counter stopped,
 * program the per-tick period (HPET_COMPARE_VAL) into the comparator,
 * then restart the counter.  Returns 0 (clockevents callback contract).
 */
static int hpet_set_state_periodic(struct clock_event_device *evt)
{
	int cfg;

	spin_lock(&hpet_lock);
	pr_info("set clock event to periodic mode!\n");
	/* stop counter */
	hpet_stop_counter();

	/* enables the timer0 to generate a periodic interrupt */
	cfg = hpet_read(HPET_T0_CFG);
	cfg &= ~HPET_TN_LEVEL;
	cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | HPET_TN_SETVAL |
		HPET_TN_32BIT;
	hpet_write(HPET_T0_CFG, cfg);

	/*
	 * Set the comparator.  With HPET_TN_SETVAL armed, the second write
	 * loads the periodic accumulator; udelay(1) keeps the two writes
	 * apart.  NOTE(review): this double-write sequence mirrors the x86
	 * hpet driver — confirm it is required by the SBX00 as well.
	 */
	hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
	udelay(1);
	hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);

	/* start counter */
	hpet_start_counter();
	spin_unlock(&hpet_lock);
	return 0;
}
/* Disable timer0 interrupt generation.  Returns 0. */
static int hpet_set_state_shutdown(struct clock_event_device *evt)
{
	int val;

	spin_lock(&hpet_lock);
	val = hpet_read(HPET_T0_CFG) & ~HPET_TN_ENABLE;
	hpet_write(HPET_T0_CFG, val);
	spin_unlock(&hpet_lock);

	return 0;
}
/*
 * Switch timer0 to oneshot mode; the comparator itself is programmed
 * later by hpet_next_event().  Returns 0.
 */
static int hpet_set_state_oneshot(struct clock_event_device *evt)
{
	int val;

	spin_lock(&hpet_lock);
	pr_info("set clock event to one shot mode!\n");

	/*
	 * Timer0 type bit:
	 *   1 : periodic interrupt
	 *   0 : non-periodic (oneshot) interrupt
	 */
	val = hpet_read(HPET_T0_CFG);
	val &= ~HPET_TN_PERIODIC;
	val |= HPET_TN_ENABLE | HPET_TN_32BIT;
	hpet_write(HPET_T0_CFG, val);
	spin_unlock(&hpet_lock);

	return 0;
}
/*
 * Clockevents resume hook.  Only re-runs the (no-op on Loongson-3)
 * legacy interrupt enable; counter state is restored by hpet_resume().
 */
static int hpet_tick_resume(struct clock_event_device *evt)
{
	spin_lock(&hpet_lock);
	hpet_enable_legacy_int();
	spin_unlock(&hpet_lock);
	return 0;
}
static int hpet_next_event(unsigned long delta,
struct clock_event_device *evt)
{
u32 cnt;
s32 res;
cnt = hpet_read(HPET_COUNTER);
cnt += (u32) delta;
hpet_write(HPET_T0_CMP, cnt);
res = (s32)(cnt - hpet_read(HPET_COUNTER));
MIPS: hpet: Choose a safe value for the ETIME check This patch is borrowed from x86 hpet driver and explaind below: Due to the overly intelligent design of HPETs, we need to workaround the problem that the compare value which we write is already behind the actual counter value at the point where the value hits the real compare register. This happens for two reasons: 1) We read out the counter, add the delta and write the result to the compare register. When a NMI hits between the read out and the write then the counter can be ahead of the event already. 2) The write to the compare register is delayed by up to two HPET cycles in AMD chipsets. We can work around this by reading back the compare register to make sure that the written value has hit the hardware. But that is bad performance wise for the normal case where the event is far enough in the future. As we already know that the write can be delayed by up to two cycles we can avoid the read back of the compare register completely if we make the decision whether the delta has elapsed already or not based on the following calculation: cmp = event - actual_count; If cmp is less than 64 HPET clock cycles, then we decide that the event has happened already and return -ETIME. That covers the above #1 and #2 problems which would cause a wait for HPET wraparound (~306 seconds). Signed-off-by: Huacai Chen <chenhc@lemote.com> Cc: Aurelien Jarno <aurelien@aurel32.net> Cc: Steven J. Hill <Steven.Hill@imgtec.com> Cc: Fuxin Zhang <zhangfx@lemote.com> Cc: Zhangjin Wu <wuzhangjin@gmail.com> Cc: Huacai Chen <chenhc@lemote.com> Cc: linux-mips@linux-mips.org Cc: stable@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/12162/ Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
2016-01-21 06:09:50 -07:00
return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
/*
 * Timer0 interrupt handler: ack the status bit and fire the current
 * cpu's clock event handler.  Returns IRQ_NONE if timer0 did not raise
 * the interrupt.
 */
static irqreturn_t hpet_irq_handler(int irq, void *data)
{
	struct clock_event_device *cd;

	if (!(hpet_read(HPET_STATUS) & HPET_T0_IRS))
		return IRQ_NONE;

	/* clear the TIMER0 irq status register */
	hpet_write(HPET_STATUS, HPET_T0_IRS);

	cd = &per_cpu(hpet_clockevent_device, smp_processor_id());
	cd->event_handler(cd);

	return IRQ_HANDLED;
}
static struct irqaction hpet_irq = {
.handler = hpet_irq_handler,
genirq: Remove the deprecated 'IRQF_DISABLED' request_irq() flag entirely The IRQF_DISABLED flag is a NOOP and has been scheduled for removal since Linux v2.6.36 by commit 6932bf37bed4 ("genirq: Remove IRQF_DISABLED from core code"). According to commit e58aa3d2d0cc ("genirq: Run irq handlers with interrupts disabled"), running IRQ handlers with interrupts enabled can cause stack overflows when the interrupt line of the issuing device is still active. This patch ends the grace period for IRQF_DISABLED (i.e., SA_INTERRUPT in older versions of Linux) and removes the definition and all remaining usages of this flag. There's still a few non-functional references left in the kernel source: - The bigger hunk in Documentation/scsi/ncr53c8xx.txt is removed entirely as IRQF_DISABLED is gone now; the usage in older kernel versions (including the old SA_INTERRUPT flag) should be discouraged. The trouble of using IRQF_SHARED is a general problem and not specific to any driver. - I left the reference in Documentation/PCI/MSI-HOWTO.txt untouched since it has already been removed in linux-next. - All remaining references are changelogs that I suggest to keep. Signed-off-by: Valentin Rothberg <valentinrothberg@gmail.com> Cc: Afzal Mohammed <afzal@ti.com> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Brian Norris <computersforpeace@gmail.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Dan Carpenter <dan.carpenter@oracle.com> Cc: David Woodhouse <dwmw2@infradead.org> Cc: Ewan Milne <emilne@redhat.com> Cc: Eyal Perry <eyalpe@mellanox.com> Cc: Felipe Balbi <balbi@ti.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: H. 
Peter Anvin <hpa@zytor.com> Cc: Hannes Reinecke <hare@suse.de> Cc: Hongliang Tao <taohl@lemote.com> Cc: Huacai Chen <chenhc@lemote.com> Cc: Jiri Kosina <jkosina@suse.cz> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Keerthy <j-keerthy@ti.com> Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Nishanth Menon <nm@ti.com> Cc: Paul Bolle <pebolle@tiscali.nl> Cc: Peter Ujfalusi <peter.ujfalusi@ti.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Quentin Lambert <lambert.quentin@gmail.com> Cc: Rajendra Nayak <rnayak@ti.com> Cc: Ralf Baechle <ralf@linux-mips.org> Cc: Santosh Shilimkar <santosh.shilimkar@ti.com> Cc: Sricharan R <r.sricharan@ti.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Lindgren <tony@atomide.com> Cc: Zhou Wang <wangzhou1@hisilicon.com> Cc: iss_storagedev@hp.com Cc: linux-mips@linux-mips.org Cc: linux-mtd@lists.infradead.org Link: http://lkml.kernel.org/r/1425565425-12604-1-git-send-email-valentinrothberg@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
2015-03-05 07:23:08 -07:00
.flags = IRQF_NOBALANCING | IRQF_TIMER,
.name = "hpet",
};
/*
 * HPET address assignment and irq setup should be done by the BIOS,
 * but PMON does not do it, so we set things up here directly.
 * The operation below would be the normal way; unfortunately the
 * hpet_setup() process runs before PCI is initialized, so we poke the
 * SMBus device's config space through MMIO instead:
 *
 * {
 *	struct pci_dev *pdev;
 *
 *	pdev = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
 *	pci_write_config_word(pdev, SMBUS_PCI_REGB4, HPET_ADDR);
 *
 *	...
 * }
 */
static void hpet_setup(void)
{
	/* set hpet base address */
	smbus_write(SMBUS_PCI_REGB4, HPET_ADDR);
	/* enable decoding of access to HPET MMIO */
	smbus_enable(SMBUS_PCI_REG40, (1 << 28));
	/* HPET irq enable */
	smbus_enable(SMBUS_PCI_REG64, (1 << 10));
	hpet_enable_legacy_int();
}
/*
 * Configure the HPET hardware and register this cpu's clock_event_device,
 * then install the timer0 interrupt handler.
 */
void __init setup_hpet_timer(void)
{
	struct clock_event_device *cd;
	unsigned int cpu = smp_processor_id();

	hpet_setup();

	cd = &per_cpu(hpet_clockevent_device, cpu);
	cd->name = "hpet";
	cd->rating = 100;
	cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
	cd->set_state_shutdown = hpet_set_state_shutdown;
	cd->set_state_periodic = hpet_set_state_periodic;
	cd->set_state_oneshot = hpet_set_state_oneshot;
	cd->tick_resume = hpet_tick_resume;
	cd->set_next_event = hpet_next_event;
	cd->irq = HPET_T0_IRQ;
	cd->cpumask = cpumask_of(cpu);
	clockevent_set_clock(cd, HPET_FREQ);
	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
	cd->max_delta_ticks = 0x7fffffff;
	/* floor matches the -ETIME safety margin in hpet_next_event() */
	cd->min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA, cd);
	cd->min_delta_ticks = HPET_MIN_PROG_DELTA;

	clockevents_register_device(cd);
	setup_irq(HPET_T0_IRQ, &hpet_irq);
	pr_info("hpet clock event device register\n");
}
/* Clocksource read callback: the low 32 bits of the main counter. */
static u64 hpet_read_counter(struct clocksource *cs)
{
	return (u64)hpet_read(HPET_COUNTER);
}
/* Clocksource suspend hook: nothing to save; hpet_resume() reinitializes. */
static void hpet_suspend(struct clocksource *cs)
{
}
/*
 * Clocksource resume hook: redo SMBus-side HPET configuration (lost
 * across suspend) before restarting the counter from zero.
 */
static void hpet_resume(struct clocksource *cs)
{
	hpet_setup();
	hpet_restart_counter();
}
/* HPET clocksource; mult is computed at init from HPET_FREQ and shift. */
static struct clocksource csrc_hpet = {
	.name = "hpet",
	/* mips clocksource rating is less than 300, so hpet is better. */
	.rating = 300,
	.read = hpet_read_counter,
	.mask = CLOCKSOURCE_MASK(32),
	/* oneshot mode works normally with this flag */
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
	.suspend = hpet_suspend,
	.resume = hpet_resume,
	.mult = 0,	/* filled in by init_hpet_clocksource() */
	.shift = 10,
};
/*
 * Compute the clocksource mult for HPET_FREQ and register it.
 * Returns 0 on success or a negative errno from the clocksource core.
 */
int __init init_hpet_clocksource(void)
{
	csrc_hpet.mult = clocksource_hz2mult(HPET_FREQ, csrc_hpet.shift);
	return clocksource_register_hz(&csrc_hpet, HPET_FREQ);
}
arch_initcall(init_hpet_clocksource);