/*
 * User address space access functions.
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/uaccess.h>
#include <linux/export.h>

/*
 * We rely on the nested NMI work to allow atomic faults from the NMI path; the
 * nested NMI paths are careful to preserve CR2.
 */
unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	if (__range_not_ok(from, n, TASK_SIZE))
		return n;

	/*
	 * Even though this function is typically called from NMI/IRQ context,
	 * disable pagefaults so that its behaviour is consistent even when
	 * called from other contexts.
	 */
	pagefault_disable();
	ret = __copy_from_user_inatomic(to, from, n);
	pagefault_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(copy_from_user_nmi);
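
/*
 * Illustrative sketch, not part of the original file: a typical caller runs
 * in NMI context (e.g. a sampling interrupt unwinding a user-space stack)
 * and treats the return value as the number of bytes that could not be
 * copied. The struct and function names below are hypothetical and only
 * demonstrate the calling convention.
 */
struct user_frame_sketch {
	unsigned long next_fp;	/* saved frame pointer of the next frame */
	unsigned long ret_addr;	/* return address of the current frame */
};

static inline unsigned long
read_user_frame_sketch(const void __user *fp, struct user_frame_sketch *frame)
{
	/* Returns 0 on success, or the number of bytes left uncopied. */
	return copy_from_user_nmi(frame, fp, sizeof(*frame));
}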