linux/arch/arm64/lib/copy_in_user.S
Catalin Marinas 0aea86a217 arm64: User access library functions
This patch adds support for various user access functions. These
functions use the standard LDR/STR instructions and not the LDRT/STRT
variants in order to allow kernel addresses (after set_fs(KERNEL_DS)).

Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Olof Johansson <olof@lixom.net>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
2012-09-17 13:42:11 +01:00
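
The LDR/STR (rather than LDRT/STRT) choice mentioned above is what lets these routines operate on kernel addresses once the address limit has been widened. Below is a minimal, hypothetical sketch of that historical pattern, using the get_fs()/set_fs()/KERNEL_DS and copy_in_user() interfaces as they existed at the time of this commit; the function name and structure are illustrative only.

#include <linux/uaccess.h>

/* Hypothetical caller: widen the address limit so the user-access
 * helpers accept kernel addresses, copy, then restore the old limit. */
static unsigned long kernel_ds_copy_example(void __user *dst,
                                            const void __user *src,
                                            unsigned long len)
{
        mm_segment_t old_fs = get_fs();
        unsigned long not_copied;

        set_fs(KERNEL_DS);              /* uaccess now accepts kernel addresses */
        not_copied = copy_in_user(dst, src, len);
        set_fs(old_fs);                 /* restore the previous limit */

        return not_copied;              /* 0 means everything was copied */
}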


/*
 * Copy from user space to user space
 *
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <asm/assembler.h>

/*
 * Copy from user space to user space (alignment handled by the hardware)
 *
 * Parameters:
 *      x0 - to
 *      x1 - from
 *      x2 - n
 * Returns:
 *      x0 - bytes not copied
 */
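// Each USER(9f, insn) below executes insn and records an exception-table
// entry for it, so a faulting user access branches to label 9 in the
// .fixup section at the end of this file instead of oopsing the kernel.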
ENTRY(__copy_in_user)
        add     x4, x0, x2              // upper user buffer boundary
        subs    x2, x2, #8
        b.mi    2f
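        // Doubleword loop: copy 8 bytes per iteration until fewer than
        // 8 bytes remain.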
1:
USER(9f, ldr    x3, [x1], #8    )
        subs    x2, x2, #8
USER(9f, str    x3, [x0], #8    )
        b.pl    1b
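        // Tail: copy any remaining word, halfword and final byte.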
2:      adds    x2, x2, #4
        b.mi    3f
USER(9f, ldr    w3, [x1], #4    )
        sub     x2, x2, #4
USER(9f, str    w3, [x0], #4    )
3:      adds    x2, x2, #2
        b.mi    4f
USER(9f, ldrh   w3, [x1], #2    )
        sub     x2, x2, #2
USER(9f, strh   w3, [x0], #2    )
4:      adds    x2, x2, #1
        b.mi    5f
USER(9f, ldrb   w3, [x1]        )
USER(9f, strb   w3, [x0]        )
5:      mov     x0, #0
        ret
ENDPROC(__copy_in_user)
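
        // Fault handler: the USER() exception-table entries redirect a
        // faulting access here. x4 was set to the end of the destination
        // buffer on entry, so x4 - x0 is the number of bytes not copied.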
        .section .fixup,"ax"
        .align  2
9:      sub     x0, x4, x0              // bytes not copied
        ret
        .previous
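
For context, a sketch of how C code typically reaches __copy_in_user: a copy_in_user() wrapper validates both user pointers with access_ok() before calling the unchecked assembly routine above. The wrapper below is an illustrative reconstruction, not the exact in-tree definition from arch/arm64/include/asm/uaccess.h, but it shows the calling convention used here: x0 = to, x1 = from, x2 = n, return value = bytes not copied.

#include <linux/uaccess.h>

/* Illustrative wrapper (hypothetical): check that both buffers are
 * accessible user memory, then let the assembly routine do the copy.
 * Returns 0 on success, or the number of bytes that could not be
 * copied. */
static inline unsigned long __must_check
copy_in_user_sketch(void __user *to, const void __user *from, unsigned long n)
{
        if (access_ok(VERIFY_READ, from, n) &&
            access_ok(VERIFY_WRITE, to, n))
                n = __copy_in_user(to, from, n);
        return n;
}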