/* atomic.S: These things are too big to do inline.
 *
 * Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
 */

#include <asm/asi.h>
#include <asm/backoff.h>

	.text

	/* Two versions of the atomic routines, one that
	 * does not return a value and does not perform
	 * memory barriers, and a second which returns
	 * a value and does the barriers.
	 */

.globl atomic_add
|
|
|
|
.type atomic_add,#function
|
|
|
|
atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
|
2007-10-15 16:41:44 -07:00
|
|
|
BACKOFF_SETUP(%o2)
|
2005-04-16 15:20:36 -07:00
|
|
|
1: lduw [%o1], %g1
|
|
|
|
add %g1, %o0, %g7
|
|
|
|
cas [%o1], %g1, %g7
|
|
|
|
cmp %g1, %g7
|
2010-08-18 22:53:26 -07:00
|
|
|
bne,pn %icc, BACKOFF_LABEL(2f, 1b)
|
2005-04-16 15:20:36 -07:00
|
|
|
nop
|
|
|
|
retl
|
|
|
|
nop
|
2007-10-15 16:41:44 -07:00
|
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
2005-04-16 15:20:36 -07:00
|
|
|
.size atomic_add, .-atomic_add
|
|
|
|
|
|
|
|
.globl atomic_sub
|
|
|
|
.type atomic_sub,#function
|
|
|
|
atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
|
2007-10-15 16:41:44 -07:00
|
|
|
BACKOFF_SETUP(%o2)
|
2005-04-16 15:20:36 -07:00
|
|
|
1: lduw [%o1], %g1
|
|
|
|
sub %g1, %o0, %g7
|
|
|
|
cas [%o1], %g1, %g7
|
|
|
|
cmp %g1, %g7
|
2010-08-18 22:53:26 -07:00
|
|
|
bne,pn %icc, BACKOFF_LABEL(2f, 1b)
|
2005-04-16 15:20:36 -07:00
|
|
|
nop
|
|
|
|
retl
|
|
|
|
nop
|
2007-10-15 16:41:44 -07:00
|
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
2005-04-16 15:20:36 -07:00
|
|
|
.size atomic_sub, .-atomic_sub
|
|
|
|
|
|
|
|
.globl atomic_add_ret
|
|
|
|
.type atomic_add_ret,#function
|
|
|
|
atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
|
2007-10-15 16:41:44 -07:00
|
|
|
BACKOFF_SETUP(%o2)
|
2005-04-16 15:20:36 -07:00
|
|
|
1: lduw [%o1], %g1
|
|
|
|
add %g1, %o0, %g7
|
|
|
|
cas [%o1], %g1, %g7
|
|
|
|
cmp %g1, %g7
|
2010-08-18 22:53:26 -07:00
|
|
|
bne,pn %icc, BACKOFF_LABEL(2f, 1b)
|
2010-08-18 01:03:37 -07:00
|
|
|
add %g1, %o0, %g1
|
2005-04-16 15:20:36 -07:00
|
|
|
retl
|
2010-08-18 01:03:37 -07:00
|
|
|
sra %g1, 0, %o0
|
2007-10-15 16:41:44 -07:00
|
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
2005-04-16 15:20:36 -07:00
|
|
|
.size atomic_add_ret, .-atomic_add_ret
|
|
|
|
|
|
|
|
.globl atomic_sub_ret
|
|
|
|
.type atomic_sub_ret,#function
|
|
|
|
atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
|
2007-10-15 16:41:44 -07:00
|
|
|
BACKOFF_SETUP(%o2)
|
2005-04-16 15:20:36 -07:00
|
|
|
1: lduw [%o1], %g1
|
|
|
|
sub %g1, %o0, %g7
|
|
|
|
cas [%o1], %g1, %g7
|
|
|
|
cmp %g1, %g7
|
2010-08-18 22:53:26 -07:00
|
|
|
bne,pn %icc, BACKOFF_LABEL(2f, 1b)
|
2010-08-18 01:03:37 -07:00
|
|
|
sub %g1, %o0, %g1
|
2005-04-16 15:20:36 -07:00
|
|
|
retl
|
2010-08-18 01:03:37 -07:00
|
|
|
sra %g1, 0, %o0
|
2007-10-15 16:41:44 -07:00
|
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
2005-04-16 15:20:36 -07:00
|
|
|
.size atomic_sub_ret, .-atomic_sub_ret
|
|
|
|
|
|
|
|
.globl atomic64_add
|
|
|
|
.type atomic64_add,#function
|
|
|
|
atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
|
2007-10-15 16:41:44 -07:00
|
|
|
BACKOFF_SETUP(%o2)
|
2005-04-16 15:20:36 -07:00
|
|
|
1: ldx [%o1], %g1
|
|
|
|
add %g1, %o0, %g7
|
|
|
|
casx [%o1], %g1, %g7
|
|
|
|
cmp %g1, %g7
|
2010-08-18 22:53:26 -07:00
|
|
|
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
|
2005-04-16 15:20:36 -07:00
|
|
|
nop
|
|
|
|
retl
|
|
|
|
nop
|
2007-10-15 16:41:44 -07:00
|
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
2005-04-16 15:20:36 -07:00
|
|
|
.size atomic64_add, .-atomic64_add
|
|
|
|
|
|
|
|
.globl atomic64_sub
|
|
|
|
.type atomic64_sub,#function
|
|
|
|
atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
|
2007-10-15 16:41:44 -07:00
|
|
|
BACKOFF_SETUP(%o2)
|
2005-04-16 15:20:36 -07:00
|
|
|
1: ldx [%o1], %g1
|
|
|
|
sub %g1, %o0, %g7
|
|
|
|
casx [%o1], %g1, %g7
|
|
|
|
cmp %g1, %g7
|
2010-08-18 22:53:26 -07:00
|
|
|
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
|
2005-04-16 15:20:36 -07:00
|
|
|
nop
|
|
|
|
retl
|
|
|
|
nop
|
2007-10-15 16:41:44 -07:00
|
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
2005-04-16 15:20:36 -07:00
|
|
|
.size atomic64_sub, .-atomic64_sub
|
|
|
|
|
|
|
|
.globl atomic64_add_ret
|
|
|
|
.type atomic64_add_ret,#function
|
|
|
|
atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
|
2007-10-15 16:41:44 -07:00
|
|
|
BACKOFF_SETUP(%o2)
|
2005-04-16 15:20:36 -07:00
|
|
|
1: ldx [%o1], %g1
|
|
|
|
add %g1, %o0, %g7
|
|
|
|
casx [%o1], %g1, %g7
|
|
|
|
cmp %g1, %g7
|
2010-08-18 22:53:26 -07:00
|
|
|
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
|
[SPARC64]: Avoid membar instructions in delay slots.
In particular, avoid membar instructions in the delay
slot of a jmpl instruction.
UltraSPARC-I, II, IIi, and IIe have a bug, documented in
the UltraSPARC-IIi User's Manual, Appendix K, Erratum 51
The long and short of it is that if the IMU unit misses
on a branch or jmpl, and there is a store buffer synchronizing
membar in the delay slot, the chip can stop fetching instructions.
If interrupts are enabled or some other trap is enabled, the
chip will unwedge itself, but performance will suffer.
We already had a workaround for this bug in a few spots, but
it's better to have the entire tree sanitized for this rule.
Signed-off-by: David S. Miller <davem@davemloft.net>
2005-06-27 15:42:04 -07:00
|
|
|
nop
|
2010-08-18 01:03:37 -07:00
|
|
|
retl
|
|
|
|
add %g1, %o0, %o0
|
2007-10-15 16:41:44 -07:00
|
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
2005-04-16 15:20:36 -07:00
|
|
|
.size atomic64_add_ret, .-atomic64_add_ret
|
|
|
|
|
|
|
|
.globl atomic64_sub_ret
|
|
|
|
.type atomic64_sub_ret,#function
|
|
|
|
atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
|
2007-10-15 16:41:44 -07:00
|
|
|
BACKOFF_SETUP(%o2)
|
2005-04-16 15:20:36 -07:00
|
|
|
1: ldx [%o1], %g1
|
|
|
|
sub %g1, %o0, %g7
|
|
|
|
casx [%o1], %g1, %g7
|
|
|
|
cmp %g1, %g7
|
2010-08-18 22:53:26 -07:00
|
|
|
bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
|
[SPARC64]: Avoid membar instructions in delay slots.
In particular, avoid membar instructions in the delay
slot of a jmpl instruction.
UltraSPARC-I, II, IIi, and IIe have a bug, documented in
the UltraSPARC-IIi User's Manual, Appendix K, Erratum 51
The long and short of it is that if the IMU unit misses
on a branch or jmpl, and there is a store buffer synchronizing
membar in the delay slot, the chip can stop fetching instructions.
If interrupts are enabled or some other trap is enabled, the
chip will unwedge itself, but performance will suffer.
We already had a workaround for this bug in a few spots, but
it's better to have the entire tree sanitized for this rule.
Signed-off-by: David S. Miller <davem@davemloft.net>
2005-06-27 15:42:04 -07:00
|
|
|
nop
|
2010-08-18 01:03:37 -07:00
|
|
|
retl
|
|
|
|
sub %g1, %o0, %o0
|
2007-10-15 16:41:44 -07:00
|
|
|
2: BACKOFF_SPIN(%o2, %o3, 1b)
|
2005-04-16 15:20:36 -07:00
|
|
|
.size atomic64_sub_ret, .-atomic64_sub_ret
|