2009-02-28 02:44:28 -07:00
|
|
|
/*
|
|
|
|
* This file is subject to the terms and conditions of the GNU General Public
|
|
|
|
* License. See the file "COPYING" in the main directory of this archive
|
|
|
|
* for more details.
|
|
|
|
*
|
|
|
|
* Copyright (C) 1994 by Waldorf Electronics
|
|
|
|
* Copyright (C) 1995 - 2000, 01, 03 by Ralf Baechle
|
|
|
|
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
|
2014-03-31 16:57:28 -07:00
|
|
|
* Copyright (C) 2007, 2014 Maciej W. Rozycki
|
2009-02-28 02:44:28 -07:00
|
|
|
*/
|
2017-08-23 11:17:48 -07:00
|
|
|
#include <linux/delay.h>
|
2016-08-21 12:58:15 -07:00
|
|
|
#include <linux/export.h>
|
2009-02-28 02:44:28 -07:00
|
|
|
#include <linux/param.h>
|
|
|
|
#include <linux/smp.h>
|
2014-04-06 13:42:49 -07:00
|
|
|
#include <linux/stringify.h>
|
2009-02-28 02:44:28 -07:00
|
|
|
|
2014-04-06 13:42:49 -07:00
|
|
|
#include <asm/asm.h>
|
2009-02-28 02:44:28 -07:00
|
|
|
#include <asm/compiler.h>
|
|
|
|
|
2014-03-31 16:57:28 -07:00
|
|
|
/*
 * Constraint for the "1" operand of the delay loop's subtract.
 * Normally the constant can be an immediate ("I"), letting the
 * compiler emit a subtract-immediate.  With the DADDI errata
 * workaround enabled the immediate form is presumably unsafe to
 * emit directly, so the constant is forced into a register ("r")
 * instead — NOTE(review): confirm against the CPU_DADDI_WORKAROUNDS
 * handling elsewhere in the tree.
 */
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
#define GCC_DADDI_IMM_ASM() "I"
#else
#define GCC_DADDI_IMM_ASM() "r"
#endif
|
|
|
|
|
2020-03-25 00:45:29 -07:00
|
|
|
#ifndef CONFIG_HAVE_PLAT_DELAY
|
|
|
|
|
2012-09-28 11:34:10 -07:00
|
|
|
/*
 * __delay - busy-wait for @loops iterations of the calibrated loop.
 *
 * Implemented in inline assembly so the iteration cost is a fixed,
 * known number of instructions regardless of compiler optimization.
 * The subtract sits in the branch delay slot of "bnez" (.set noreorder
 * keeps the assembler from reordering it), so each pass decrements the
 * counter and loops until it reaches zero.  LONG_SUBU expands to the
 * native-word subtract (subu or dsubu), and GCC_DADDI_IMM_ASM() picks
 * an immediate or register constraint for the constant 1 depending on
 * the DADDI-workaround configuration.
 */
void __delay(unsigned long loops)
{
	__asm__ __volatile__ (
	"	.set	noreorder				\n"
	"	.align	3					\n"
	"1:	bnez	%0, 1b					\n"
	"	 " __stringify(LONG_SUBU) "	%0, %1		\n"
	"	.set	reorder					\n"
	: "=r" (loops)
	: GCC_DADDI_IMM_ASM() (1), "0" (loops));
}
EXPORT_SYMBOL(__delay);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Division by multiplication: you don't have to worry about
|
|
|
|
* loss of precision.
|
|
|
|
*
|
2013-01-22 04:59:30 -07:00
|
|
|
* Use only for very small delays ( < 1 msec). Should probably use a
|
2009-02-28 02:44:28 -07:00
|
|
|
* lookup table, really, as the multiplications take much too long with
|
|
|
|
* short delays. This is a "reasonable" implementation, though (and the
|
|
|
|
* first constant multiplications gets optimized away if the delay is
|
|
|
|
* a constant)
|
|
|
|
*/
|
|
|
|
|
|
|
|
void __udelay(unsigned long us)
|
|
|
|
{
|
2010-03-10 08:16:04 -07:00
|
|
|
unsigned int lpj = raw_current_cpu_data.udelay_val;
|
2009-02-28 02:44:28 -07:00
|
|
|
|
2009-06-08 19:12:48 -07:00
|
|
|
__delay((us * 0x000010c7ull * HZ * lpj) >> 32);
|
2009-02-28 02:44:28 -07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(__udelay);
|
|
|
|
|
|
|
|
void __ndelay(unsigned long ns)
|
|
|
|
{
|
2010-03-10 08:16:04 -07:00
|
|
|
unsigned int lpj = raw_current_cpu_data.udelay_val;
|
2009-02-28 02:44:28 -07:00
|
|
|
|
2009-06-08 19:12:48 -07:00
|
|
|
__delay((ns * 0x00000005ull * HZ * lpj) >> 32);
|
2009-02-28 02:44:28 -07:00
|
|
|
}
|
|
|
|
EXPORT_SYMBOL(__ndelay);
|
2020-03-25 00:45:29 -07:00
|
|
|
|
|
|
|
#endif
|