Merge mm-hotfixes-stable into mm-stable to pick up depended-upon changes.
commit 63773d2b59

 .mailmap | 3 +++
@@ -70,6 +70,8 @@ Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang@unisoc.com>
Baolin Wang <baolin.wang@linux.alibaba.com> <baolin.wang7@gmail.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
Ben Dooks <ben-linux@fluff.org> <ben.dooks@simtec.co.uk>
Ben Dooks <ben-linux@fluff.org> <ben.dooks@sifive.com>
Ben Gardner <bgardner@wabtec.com>
Ben M Cahill <ben.m.cahill@intel.com>
Ben Widawsky <bwidawsk@kernel.org> <ben@bwidawsk.net>
@@ -233,6 +235,7 @@ Jisheng Zhang <jszhang@kernel.org> <Jisheng.Zhang@synaptics.com>
Johan Hovold <johan@kernel.org> <jhovold@gmail.com>
Johan Hovold <johan@kernel.org> <johan@hovoldconsulting.com>
John Crispin <john@phrozen.org> <blogic@openwrt.org>
John Keeping <john@keeping.me.uk> <john@metanate.com>
John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
John Stultz <johnstul@us.ibm.com>
<jon.toppins+linux@gmail.com> <jtoppins@cumulusnetworks.com>
@@ -35,7 +35,7 @@ Documentation written by Tom Zanussi
in place of an explicit value field - this is simply a count of
event hits. If 'values' isn't specified, an implicit 'hitcount'
value will be automatically created and used as the only value.
Keys can be any field, or the special string 'stacktrace', which
Keys can be any field, or the special string 'common_stacktrace', which
will use the event's kernel stacktrace as the key. The keywords
'keys' or 'key' can be used to specify keys, and the keywords
'values', 'vals', or 'val' can be used to specify values. Compound
@@ -54,7 +54,7 @@ Documentation written by Tom Zanussi
'compatible' if the fields named in the trigger share the same
number and type of fields and those fields also have the same names.
Note that any two events always share the compatible 'hitcount' and
'stacktrace' fields and can therefore be combined using those
'common_stacktrace' fields and can therefore be combined using those
fields, however pointless that may be.

'hist' triggers add a 'hist' file to each event's subdirectory.
@@ -547,9 +547,9 @@ Extended error information
the hist trigger display symbolic call_sites, we can have the hist
trigger additionally display the complete set of kernel stack traces
that led to each call_site. To do that, we simply use the special
value 'stacktrace' for the key parameter::
value 'common_stacktrace' for the key parameter::

# echo 'hist:keys=stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \
# echo 'hist:keys=common_stacktrace:values=bytes_req,bytes_alloc:sort=bytes_alloc' > \
/sys/kernel/tracing/events/kmem/kmalloc/trigger

The above trigger will use the kernel stack trace in effect when an
@@ -561,9 +561,9 @@ Extended error information
every callpath to a kmalloc for a kernel compile)::

# cat /sys/kernel/tracing/events/kmem/kmalloc/hist
# trigger info: hist:keys=stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active]
# trigger info: hist:keys=common_stacktrace:vals=bytes_req,bytes_alloc:sort=bytes_alloc:size=2048 [active]

{ stacktrace:
{ common_stacktrace:
__kmalloc_track_caller+0x10b/0x1a0
kmemdup+0x20/0x50
hidraw_report_event+0x8a/0x120 [hid]
@@ -581,7 +581,7 @@ Extended error information
cpu_startup_entry+0x315/0x3e0
rest_init+0x7c/0x80
} hitcount: 3 bytes_req: 21 bytes_alloc: 24
{ stacktrace:
{ common_stacktrace:
__kmalloc_track_caller+0x10b/0x1a0
kmemdup+0x20/0x50
hidraw_report_event+0x8a/0x120 [hid]
@@ -596,7 +596,7 @@ Extended error information
do_IRQ+0x5a/0xf0
ret_from_intr+0x0/0x30
} hitcount: 3 bytes_req: 21 bytes_alloc: 24
{ stacktrace:
{ common_stacktrace:
kmem_cache_alloc_trace+0xeb/0x150
aa_alloc_task_context+0x27/0x40
apparmor_cred_prepare+0x1f/0x50
@@ -608,7 +608,7 @@ Extended error information
.
.
.
{ stacktrace:
{ common_stacktrace:
__kmalloc+0x11b/0x1b0
i915_gem_execbuffer2+0x6c/0x2c0 [i915]
drm_ioctl+0x349/0x670 [drm]
@@ -616,7 +616,7 @@ Extended error information
SyS_ioctl+0x81/0xa0
system_call_fastpath+0x12/0x6a
} hitcount: 17726 bytes_req: 13944120 bytes_alloc: 19593808
{ stacktrace:
{ common_stacktrace:
__kmalloc+0x11b/0x1b0
load_elf_phdrs+0x76/0xa0
load_elf_binary+0x102/0x1650
@@ -625,7 +625,7 @@ Extended error information
SyS_execve+0x3a/0x50
return_from_execve+0x0/0x23
} hitcount: 33348 bytes_req: 17152128 bytes_alloc: 20226048
{ stacktrace:
{ common_stacktrace:
kmem_cache_alloc_trace+0xeb/0x150
apparmor_file_alloc_security+0x27/0x40
security_file_alloc+0x16/0x20
@@ -636,7 +636,7 @@ Extended error information
SyS_open+0x1e/0x20
system_call_fastpath+0x12/0x6a
} hitcount: 4766422 bytes_req: 9532844 bytes_alloc: 38131376
{ stacktrace:
{ common_stacktrace:
__kmalloc+0x11b/0x1b0
seq_buf_alloc+0x1b/0x50
seq_read+0x2cc/0x370
@@ -1026,7 +1026,7 @@ Extended error information
First we set up an initially paused stacktrace trigger on the
netif_receive_skb event::

# echo 'hist:key=stacktrace:vals=len:pause' > \
# echo 'hist:key=common_stacktrace:vals=len:pause' > \
/sys/kernel/tracing/events/net/netif_receive_skb/trigger

Next, we set up an 'enable_hist' trigger on the sched_process_exec
@@ -1060,9 +1060,9 @@ Extended error information
$ wget https://www.kernel.org/pub/linux/kernel/v3.x/patch-3.19.xz

# cat /sys/kernel/tracing/events/net/netif_receive_skb/hist
# trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused]
# trigger info: hist:keys=common_stacktrace:vals=len:sort=hitcount:size=2048 [paused]

{ stacktrace:
{ common_stacktrace:
__netif_receive_skb_core+0x46d/0x990
__netif_receive_skb+0x18/0x60
netif_receive_skb_internal+0x23/0x90
@@ -1079,7 +1079,7 @@ Extended error information
kthread+0xd2/0xf0
ret_from_fork+0x42/0x70
} hitcount: 85 len: 28884
{ stacktrace:
{ common_stacktrace:
__netif_receive_skb_core+0x46d/0x990
__netif_receive_skb+0x18/0x60
netif_receive_skb_internal+0x23/0x90
@@ -1097,7 +1097,7 @@ Extended error information
irq_thread+0x11f/0x150
kthread+0xd2/0xf0
} hitcount: 98 len: 664329
{ stacktrace:
{ common_stacktrace:
__netif_receive_skb_core+0x46d/0x990
__netif_receive_skb+0x18/0x60
process_backlog+0xa8/0x150
@@ -1115,7 +1115,7 @@ Extended error information
inet_sendmsg+0x64/0xa0
sock_sendmsg+0x3d/0x50
} hitcount: 115 len: 13030
{ stacktrace:
{ common_stacktrace:
__netif_receive_skb_core+0x46d/0x990
__netif_receive_skb+0x18/0x60
netif_receive_skb_internal+0x23/0x90
@@ -1142,14 +1142,14 @@ Extended error information
into the histogram. In order to avoid having to set everything up
again, we can just clear the histogram first::

# echo 'hist:key=stacktrace:vals=len:clear' >> \
# echo 'hist:key=common_stacktrace:vals=len:clear' >> \
/sys/kernel/tracing/events/net/netif_receive_skb/trigger

Just to verify that it is in fact cleared, here's what we now see in
the hist file::

# cat /sys/kernel/tracing/events/net/netif_receive_skb/hist
# trigger info: hist:keys=stacktrace:vals=len:sort=hitcount:size=2048 [paused]
# trigger info: hist:keys=common_stacktrace:vals=len:sort=hitcount:size=2048 [paused]

Totals:
Hits: 0
@@ -1485,12 +1485,12 @@ Extended error information

And here's an example that shows how to combine histogram data from
any two events even if they don't share any 'compatible' fields
other than 'hitcount' and 'stacktrace'. These commands create a
other than 'hitcount' and 'common_stacktrace'. These commands create a
couple of triggers named 'bar' using those fields::

# echo 'hist:name=bar:key=stacktrace:val=hitcount' > \
# echo 'hist:name=bar:key=common_stacktrace:val=hitcount' > \
/sys/kernel/tracing/events/sched/sched_process_fork/trigger
# echo 'hist:name=bar:key=stacktrace:val=hitcount' > \
# echo 'hist:name=bar:key=common_stacktrace:val=hitcount' > \
/sys/kernel/tracing/events/net/netif_rx/trigger

And displaying the output of either shows some interesting if
@@ -1501,16 +1501,16 @@ Extended error information

# event histogram
#
# trigger info: hist:name=bar:keys=stacktrace:vals=hitcount:sort=hitcount:size=2048 [active]
# trigger info: hist:name=bar:keys=common_stacktrace:vals=hitcount:sort=hitcount:size=2048 [active]
#

{ stacktrace:
{ common_stacktrace:
kernel_clone+0x18e/0x330
kernel_thread+0x29/0x30
kthreadd+0x154/0x1b0
ret_from_fork+0x3f/0x70
} hitcount: 1
{ stacktrace:
{ common_stacktrace:
netif_rx_internal+0xb2/0xd0
netif_rx_ni+0x20/0x70
dev_loopback_xmit+0xaa/0xd0
@@ -1528,7 +1528,7 @@ Extended error information
call_cpuidle+0x3b/0x60
cpu_startup_entry+0x22d/0x310
} hitcount: 1
{ stacktrace:
{ common_stacktrace:
netif_rx_internal+0xb2/0xd0
netif_rx_ni+0x20/0x70
dev_loopback_xmit+0xaa/0xd0
@@ -1543,7 +1543,7 @@ Extended error information
SyS_sendto+0xe/0x10
entry_SYSCALL_64_fastpath+0x12/0x6a
} hitcount: 2
{ stacktrace:
{ common_stacktrace:
netif_rx_internal+0xb2/0xd0
netif_rx+0x1c/0x60
loopback_xmit+0x6c/0xb0
@@ -1561,7 +1561,7 @@ Extended error information
sock_sendmsg+0x38/0x50
___sys_sendmsg+0x14e/0x270
} hitcount: 76
{ stacktrace:
{ common_stacktrace:
netif_rx_internal+0xb2/0xd0
netif_rx+0x1c/0x60
loopback_xmit+0x6c/0xb0
@@ -1579,7 +1579,7 @@ Extended error information
sock_sendmsg+0x38/0x50
___sys_sendmsg+0x269/0x270
} hitcount: 77
{ stacktrace:
{ common_stacktrace:
netif_rx_internal+0xb2/0xd0
netif_rx+0x1c/0x60
loopback_xmit+0x6c/0xb0
@@ -1597,7 +1597,7 @@ Extended error information
sock_sendmsg+0x38/0x50
SYSC_sendto+0xef/0x170
} hitcount: 88
{ stacktrace:
{ common_stacktrace:
kernel_clone+0x18e/0x330
SyS_clone+0x19/0x20
entry_SYSCALL_64_fastpath+0x12/0x6a
@@ -1949,7 +1949,7 @@ uninterruptible state::

# cd /sys/kernel/tracing
# echo 's:block_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events
# echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace if prev_state == 2' >> events/sched/sched_switch/trigger
# echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=common_stacktrace if prev_state == 2' >> events/sched/sched_switch/trigger
# echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(block_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger
# echo 1 > events/synthetic/block_lat/enable
# cat trace
@@ -5,6 +5,11 @@ KCSAN_SANITIZE := n

targets += trampoline_$(BITS).o purgatory.ro

# When profile-guided optimization is enabled, llvm emits two different
# overlapping text sections, which is not supported by kexec. Remove profile
# optimization flags.
KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))

LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined

$(obj)/purgatory.ro: $(obj)/trampoline_$(BITS).o FORCE
@@ -35,6 +35,11 @@ CFLAGS_sha256.o := -D__DISABLE_EXPORTS
CFLAGS_string.o := -D__DISABLE_EXPORTS
CFLAGS_ctype.o := -D__DISABLE_EXPORTS

# When profile-guided optimization is enabled, llvm emits two different
# overlapping text sections, which is not supported by kexec. Remove profile
# optimization flags.
KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))

# When linking purgatory.ro with -r unresolved symbols are not checked,
# also link a purgatory.chk binary without -r to check for unresolved symbols.
PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
@@ -773,8 +773,6 @@
.octa 0x3F893781E95FE1576CDA64D2BA0CB204

#ifdef CONFIG_AS_GFNI
.section .rodata.cst8, "aM", @progbits, 8
.align 8
/* AES affine: */
#define tf_aff_const BV8(1, 1, 0, 0, 0, 1, 1, 0)
.Ltf_aff_bitmatrix:
@@ -14,6 +14,11 @@ $(obj)/sha256.o: $(srctree)/lib/crypto/sha256.c FORCE

CFLAGS_sha256.o := -D__DISABLE_EXPORTS

# When profile-guided optimization is enabled, llvm emits two different
# overlapping text sections, which is not supported by kexec. Remove profile
# optimization flags.
KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%,$(KBUILD_CFLAGS))

# When linking purgatory.ro with -r unresolved symbols are not checked,
# also link a purgatory.chk binary without -r to check for unresolved symbols.
PURGATORY_LDFLAGS := -e purgatory_start -z nodefaultlib
@@ -12,7 +12,6 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/iosys-map.h>
@@ -207,9 +206,7 @@ static long udmabuf_create(struct miscdevice *device,
struct udmabuf *ubuf;
struct dma_buf *buf;
pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
struct page *page, *hpage = NULL;
pgoff_t subpgoff, maxsubpgs;
struct hstate *hpstate;
struct page *page;
int seals, ret = -EINVAL;
u32 i, flags;
@@ -245,7 +242,7 @@ static long udmabuf_create(struct miscdevice *device,
if (!memfd)
goto err;
mapping = memfd->f_mapping;
if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
if (!shmem_mapping(mapping))
goto err;
seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
if (seals == -EINVAL)
@@ -256,48 +253,16 @@ static long udmabuf_create(struct miscdevice *device,
goto err;
pgoff = list[i].offset >> PAGE_SHIFT;
pgcnt = list[i].size >> PAGE_SHIFT;
if (is_file_hugepages(memfd)) {
hpstate = hstate_file(memfd);
pgoff = list[i].offset >> huge_page_shift(hpstate);
subpgoff = (list[i].offset &
~huge_page_mask(hpstate)) >> PAGE_SHIFT;
maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
}
for (pgidx = 0; pgidx < pgcnt; pgidx++) {
if (is_file_hugepages(memfd)) {
if (!hpage) {
hpage = find_get_page_flags(mapping, pgoff,
FGP_ACCESSED);
if (!hpage) {
ret = -EINVAL;
goto err;
}
}
page = hpage + subpgoff;
get_page(page);
subpgoff++;
if (subpgoff == maxsubpgs) {
put_page(hpage);
hpage = NULL;
subpgoff = 0;
pgoff++;
}
} else {
page = shmem_read_mapping_page(mapping,
pgoff + pgidx);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto err;
}
page = shmem_read_mapping_page(mapping, pgoff + pgidx);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto err;
}
ubuf->pages[pgbuf++] = page;
}
fput(memfd);
memfd = NULL;
if (hpage) {
put_page(hpage);
hpage = NULL;
}
}
exp_info.ops = &udmabuf_ops;
@@ -132,7 +132,7 @@
#define ATC_DST_PIP BIT(12) /* Destination Picture-in-Picture enabled */
#define ATC_SRC_DSCR_DIS BIT(16) /* Src Descriptor fetch disable */
#define ATC_DST_DSCR_DIS BIT(20) /* Dst Descriptor fetch disable */
#define ATC_FC GENMASK(22, 21) /* Choose Flow Controller */
#define ATC_FC GENMASK(23, 21) /* Choose Flow Controller */
#define ATC_FC_MEM2MEM 0x0 /* Mem-to-Mem (DMA) */
#define ATC_FC_MEM2PER 0x1 /* Mem-to-Periph (DMA) */
#define ATC_FC_PER2MEM 0x2 /* Periph-to-Mem (DMA) */
@@ -153,8 +153,6 @@
#define ATC_AUTO BIT(31) /* Auto multiple buffer tx enable */

/* Bitfields in CFG */
#define ATC_PER_MSB(h) ((0x30U & (h)) >> 4) /* Extract most significant bits of a handshaking identifier */

#define ATC_SRC_PER GENMASK(3, 0) /* Channel src rq associated with periph handshaking ifc h */
#define ATC_DST_PER GENMASK(7, 4) /* Channel dst rq associated with periph handshaking ifc h */
#define ATC_SRC_REP BIT(8) /* Source Replay Mod */
@@ -181,10 +179,15 @@
#define ATC_DPIP_HOLE GENMASK(15, 0)
#define ATC_DPIP_BOUNDARY GENMASK(25, 16)

#define ATC_SRC_PER_ID(id) (FIELD_PREP(ATC_SRC_PER_MSB, (id)) | \
FIELD_PREP(ATC_SRC_PER, (id)))
#define ATC_DST_PER_ID(id) (FIELD_PREP(ATC_DST_PER_MSB, (id)) | \
FIELD_PREP(ATC_DST_PER, (id)))
#define ATC_PER_MSB GENMASK(5, 4) /* Extract MSBs of a handshaking identifier */
#define ATC_SRC_PER_ID(id) \
({ typeof(id) _id = (id); \
FIELD_PREP(ATC_SRC_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \
FIELD_PREP(ATC_SRC_PER, _id); })
#define ATC_DST_PER_ID(id) \
({ typeof(id) _id = (id); \
FIELD_PREP(ATC_DST_PER_MSB, FIELD_GET(ATC_PER_MSB, _id)) | \
FIELD_PREP(ATC_DST_PER, _id); })
@@ -1102,6 +1102,8 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
NULL,
src_addr, dst_addr,
xt, xt->sgl);
if (!first)
return NULL;

/* Length of the block is (BLEN+1) microblocks. */
for (i = 0; i < xt->numf - 1; i++)
@@ -1132,8 +1134,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
src_addr, dst_addr,
xt, chunk);
if (!desc) {
list_splice_tail_init(&first->descs_list,
&atchan->free_descs_list);
if (first)
list_splice_tail_init(&first->descs_list,
&atchan->free_descs_list);
return NULL;
}
@@ -277,7 +277,6 @@ static int idxd_cdev_open(struct inode *inode, struct file *filp)
if (wq_dedicated(wq)) {
rc = idxd_wq_set_pasid(wq, pasid);
if (rc < 0) {
iommu_sva_unbind_device(sva);
dev_err(dev, "wq set pasid failed: %d\n", rc);
goto failed_set_pasid;
}
@@ -1050,7 +1050,7 @@ static bool _trigger(struct pl330_thread *thrd)
return true;
}

static bool _start(struct pl330_thread *thrd)
static bool pl330_start_thread(struct pl330_thread *thrd)
{
switch (_state(thrd)) {
case PL330_STATE_FAULT_COMPLETING:
@@ -1702,7 +1702,7 @@ static int pl330_update(struct pl330_dmac *pl330)
thrd->req_running = -1;

/* Get going again ASAP */
_start(thrd);
pl330_start_thread(thrd);

/* For now, just make a list of callbacks to be done */
list_add_tail(&descdone->rqd, &pl330->req_done);
@@ -2089,7 +2089,7 @@ static void pl330_tasklet(struct tasklet_struct *t)
} else {
/* Make sure the PL330 Channel thread is active */
spin_lock(&pch->thread->dmac->lock);
_start(pch->thread);
pl330_start_thread(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
}
@@ -2107,7 +2107,7 @@ static void pl330_tasklet(struct tasklet_struct *t)
if (power_down) {
pch->active = true;
spin_lock(&pch->thread->dmac->lock);
_start(pch->thread);
pl330_start_thread(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
power_down = false;
}
@@ -5527,7 +5527,7 @@ static int udma_probe(struct platform_device *pdev)
return ret;
}

static int udma_pm_suspend(struct device *dev)
static int __maybe_unused udma_pm_suspend(struct device *dev)
{
struct udma_dev *ud = dev_get_drvdata(dev);
struct dma_device *dma_dev = &ud->ddev;
@@ -5549,7 +5549,7 @@ static int udma_pm_suspend(struct device *dev)
return 0;
}

static int udma_pm_resume(struct device *dev)
static int __maybe_unused udma_pm_resume(struct device *dev)
{
struct udma_dev *ud = dev_get_drvdata(dev);
struct dma_device *dma_dev = &ud->ddev;
@@ -1828,7 +1828,7 @@ int dm_cache_metadata_abort(struct dm_cache_metadata *cmd)
* Replacement block manager (new_bm) is created and old_bm destroyed outside of
* cmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
* shrinker associated with the block manager's bufio client vs cmd root_lock).
* - must take shrinker_mutex without holding cmd->root_lock
* - must take shrinker_rwsem without holding cmd->root_lock
*/
new_bm = dm_block_manager_create(cmd->bdev, DM_CACHE_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
CACHE_MAX_CONCURRENT_LOCKS);
@@ -1887,7 +1887,7 @@ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
* Replacement block manager (new_bm) is created and old_bm destroyed outside of
* pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
* shrinker associated with the block manager's bufio client vs pmd root_lock).
* - must take shrinker_mutex without holding pmd->root_lock
* - must take shrinker_rwsem without holding pmd->root_lock
*/
new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
THIN_MAX_CONCURRENT_LOCKS);
@@ -70,7 +70,7 @@ static int phy_g12a_mipi_dphy_analog_power_on(struct phy *phy)
HHI_MIPI_CNTL1_BANDGAP);

regmap_write(priv->regmap, HHI_MIPI_CNTL2,
FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x459) |
FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL0, 0x45a) |
FIELD_PREP(HHI_MIPI_CNTL2_DIF_TX_CTL1, 0x2680));

reg = DSI_LANE_CLK;
@@ -237,11 +237,11 @@ static int mtk_hdmi_pll_calc(struct mtk_hdmi_phy *hdmi_phy, struct clk_hw *hw,
*/
if (tmds_clk < 54 * MEGA)
txposdiv = 8;
else if (tmds_clk >= 54 * MEGA && tmds_clk < 148.35 * MEGA)
else if (tmds_clk >= 54 * MEGA && (tmds_clk * 100) < 14835 * MEGA)
txposdiv = 4;
else if (tmds_clk >= 148.35 * MEGA && tmds_clk < 296.7 * MEGA)
else if ((tmds_clk * 100) >= 14835 * MEGA && (tmds_clk * 10) < 2967 * MEGA)
txposdiv = 2;
else if (tmds_clk >= 296.7 * MEGA && tmds_clk <= 594 * MEGA)
else if ((tmds_clk * 10) >= 2967 * MEGA && tmds_clk <= 594 * MEGA)
txposdiv = 1;
else
return -EINVAL;
@@ -324,12 +324,12 @@ static int mtk_hdmi_pll_drv_setting(struct clk_hw *hw)
clk_channel_bias = 0x34; /* 20mA */
impedance_en = 0xf;
impedance = 0x36; /* 100ohm */
} else if (pixel_clk >= 74.175 * MEGA && pixel_clk <= 300 * MEGA) {
} else if (((u64)pixel_clk * 1000) >= 74175 * MEGA && pixel_clk <= 300 * MEGA) {
data_channel_bias = 0x34; /* 20mA */
clk_channel_bias = 0x2c; /* 16mA */
impedance_en = 0xf;
impedance = 0x36; /* 100ohm */
} else if (pixel_clk >= 27 * MEGA && pixel_clk < 74.175 * MEGA) {
} else if (pixel_clk >= 27 * MEGA && ((u64)pixel_clk * 1000) < 74175 * MEGA) {
data_channel_bias = 0x14; /* 10mA */
clk_channel_bias = 0x14; /* 10mA */
impedance_en = 0x0;
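The two hunks above replace floating-point thresholds such as 148.35 * MEGA with rescaled integer comparisons, since kernel code cannot rely on floating-point arithmetic. A minimal standalone C sketch of the same rescaling trick (hypothetical values, not the driver code):

#include <stdio.h>

#define MEGA 1000000ULL

int main(void)
{
	/* Hypothetical TMDS clock in Hz; 148.35 MHz is the boundary of interest. */
	unsigned long long tmds_clk = 148350000ULL;

	/*
	 * "tmds_clk < 148.35 MHz" is rewritten as "tmds_clk * 100 < 14835 * MEGA",
	 * so both sides stay integers and no FPU is needed.
	 */
	if (tmds_clk * 100 < 14835 * MEGA)
		printf("below 148.35 MHz\n");
	else
		printf("at or above 148.35 MHz\n");

	return 0;
}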
@@ -2472,7 +2472,7 @@ static int qmp_combo_com_init(struct qmp_combo *qmp)
ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
if (ret) {
dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
goto err_unlock;
goto err_decrement_count;
}

ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
@@ -2522,7 +2522,8 @@ err_assert_reset:
reset_control_bulk_assert(cfg->num_resets, qmp->resets);
err_disable_regulators:
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
err_unlock:
err_decrement_count:
qmp->init_count--;
mutex_unlock(&qmp->phy_mutex);

return ret;
@@ -379,7 +379,7 @@ static int qmp_pcie_msm8996_com_init(struct qmp_phy *qphy)
ret = regulator_bulk_enable(cfg->num_vregs, qmp->vregs);
if (ret) {
dev_err(qmp->dev, "failed to enable regulators, err=%d\n", ret);
goto err_unlock;
goto err_decrement_count;
}

ret = reset_control_bulk_assert(cfg->num_resets, qmp->resets);
@@ -409,7 +409,8 @@ err_assert_reset:
reset_control_bulk_assert(cfg->num_resets, qmp->resets);
err_disable_regulators:
regulator_bulk_disable(cfg->num_vregs, qmp->vregs);
err_unlock:
err_decrement_count:
qmp->init_count--;
mutex_unlock(&qmp->phy_mutex);

return ret;
@@ -115,11 +115,11 @@ struct phy_override_seq {
*
* @cfg_ahb_clk: AHB2PHY interface clock
* @ref_clk: phy reference clock
* @iface_clk: phy interface clock
* @phy_reset: phy reset control
* @vregs: regulator supplies bulk data
* @phy_initialized: if PHY has been initialized correctly
* @mode: contains the current mode the PHY is in
* @update_seq_cfg: tuning parameters for phy init
*/
struct qcom_snps_hsphy {
struct phy *phy;
@@ -1805,7 +1805,11 @@ static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,
{
int ret = default_wake_function(wq_entry, mode, sync, key);

list_del_init(&wq_entry->entry);
/*
* Pairs with list_empty_careful in ep_poll, and ensures future loop
* iterations see the cause of this wakeup.
*/
list_del_init_careful(&wq_entry->entry);
return ret;
}
@@ -285,6 +285,14 @@ void nilfs_btnode_abort_change_key(struct address_space *btnc,
if (nbh == NULL) { /* blocksize == pagesize */
xa_erase_irq(&btnc->i_pages, newkey);
unlock_page(ctxt->bh->b_page);
} else
brelse(nbh);
} else {
/*
* When canceling a buffer that a prepare operation has
* allocated to copy a node block to another location, use
* nilfs_btnode_delete() to initialize and release the buffer
* so that the buffer flags will not be in an inconsistent
* state when it is reallocated.
*/
nilfs_btnode_delete(nbh);
}
}
@@ -370,7 +370,15 @@ void nilfs_clear_dirty_pages(struct address_space *mapping, bool silent)
struct folio *folio = fbatch.folios[i];

folio_lock(folio);
nilfs_clear_dirty_page(&folio->page, silent);

/*
* This folio may have been removed from the address
* space by truncation or invalidation when the lock
* was acquired. Skip processing in that case.
*/
if (likely(folio->mapping == mapping))
nilfs_clear_dirty_page(&folio->page, silent);

folio_unlock(folio);
}
folio_batch_release(&fbatch);
@@ -101,6 +101,12 @@ int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *segbuf)
if (unlikely(!bh))
return -ENOMEM;

lock_buffer(bh);
if (!buffer_uptodate(bh)) {
memset(bh->b_data, 0, bh->b_size);
set_buffer_uptodate(bh);
}
unlock_buffer(bh);
nilfs_segbuf_add_segsum_buffer(segbuf, bh);
return 0;
}
@@ -981,10 +981,13 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
unsigned int isz, srsz;

bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root;

lock_buffer(bh_sr);
raw_sr = (struct nilfs_super_root *)bh_sr->b_data;
isz = nilfs->ns_inode_size;
srsz = NILFS_SR_BYTES(isz);

raw_sr->sr_sum = 0; /* Ensure initialization within this update */
raw_sr->sr_bytes = cpu_to_le16(srsz);
raw_sr->sr_nongc_ctime
= cpu_to_le64(nilfs_doing_gc() ?
@@ -998,6 +1001,8 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci,
nilfs_write_inode_common(nilfs->ns_sufile, (void *)raw_sr +
NILFS_SR_SUFILE_OFFSET(isz), 1);
memset((void *)raw_sr + srsz, 0, nilfs->ns_blocksize - srsz);
set_buffer_uptodate(bh_sr);
unlock_buffer(bh_sr);
}

static void nilfs_redirty_inodes(struct list_head *head)
@@ -1780,6 +1785,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
list_for_each_entry(segbuf, logs, sb_list) {
list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
b_assoc_buffers) {
clear_buffer_uptodate(bh);
if (bh->b_page != bd_page) {
if (bd_page)
end_page_writeback(bd_page);
@@ -1791,6 +1797,7 @@ static void nilfs_abort_logs(struct list_head *logs, int err)
b_assoc_buffers) {
clear_buffer_async_write(bh);
if (bh == segbuf->sb_super_root) {
clear_buffer_uptodate(bh);
if (bh->b_page != bd_page) {
end_page_writeback(bd_page);
bd_page = bh->b_page;
@@ -779,6 +779,15 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
goto out_header;

sui->ncleansegs -= nsegs - newnsegs;

/*
* If the sufile is successfully truncated, immediately adjust
* the segment allocation space while locking the semaphore
* "mi_sem" so that nilfs_sufile_alloc() never allocates
* segments in the truncated space.
*/
sui->allocmax = newnsegs - 1;
sui->allocmin = 0;
}

kaddr = kmap_atomic(header_bh->b_page);
@@ -372,10 +372,31 @@ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
goto out;
}
nsbp = (void *)nsbh->b_data + offset;
memset(nsbp, 0, nilfs->ns_blocksize);

lock_buffer(nsbh);
if (sb2i >= 0) {
/*
* The position of the second superblock only changes by 4KiB,
* which is larger than the maximum superblock data size
* (= 1KiB), so there is no need to use memmove() to allow
* overlap between source and destination.
*/
memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);

/*
* Zero fill after copy to avoid overwriting in case of move
* within the same block.
*/
memset(nsbh->b_data, 0, offset);
memset((void *)nsbp + nilfs->ns_sbsize, 0,
nsbh->b_size - offset - nilfs->ns_sbsize);
} else {
memset(nsbh->b_data, 0, nsbh->b_size);
}
set_buffer_uptodate(nsbh);
unlock_buffer(nsbh);

if (sb2i >= 0) {
memcpy(nsbp, nilfs->ns_sbp[sb2i], nilfs->ns_sbsize);
brelse(nilfs->ns_sbh[sb2i]);
nilfs->ns_sbh[sb2i] = nsbh;
nilfs->ns_sbp[sb2i] = nsbp;
@@ -405,6 +405,18 @@ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
100));
}

/**
* nilfs_max_segment_count - calculate the maximum number of segments
* @nilfs: nilfs object
*/
static u64 nilfs_max_segment_count(struct the_nilfs *nilfs)
{
u64 max_count = U64_MAX;

do_div(max_count, nilfs->ns_blocks_per_segment);
return min_t(u64, max_count, ULONG_MAX);
}

void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
{
nilfs->ns_nsegments = nsegs;
@@ -414,6 +426,8 @@ void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
struct nilfs_super_block *sbp)
{
u64 nsegments, nblocks;

if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
nilfs_err(nilfs->ns_sb,
"unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
@@ -457,7 +471,34 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
return -EINVAL;
}

nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
nsegments = le64_to_cpu(sbp->s_nsegments);
if (nsegments > nilfs_max_segment_count(nilfs)) {
nilfs_err(nilfs->ns_sb,
"segment count %llu exceeds upper limit (%llu segments)",
(unsigned long long)nsegments,
(unsigned long long)nilfs_max_segment_count(nilfs));
return -EINVAL;
}

nblocks = sb_bdev_nr_blocks(nilfs->ns_sb);
if (nblocks) {
u64 min_block_count = nsegments * nilfs->ns_blocks_per_segment;
/*
* To avoid failing to mount early device images without a
* second superblock, exclude that block count from the
* "min_block_count" calculation.
*/

if (nblocks < min_block_count) {
nilfs_err(nilfs->ns_sb,
"total number of segment blocks %llu exceeds device size (%llu blocks)",
(unsigned long long)min_block_count,
(unsigned long long)nblocks);
return -EINVAL;
}
}

nilfs_set_nsegments(nilfs, nsegments);
nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
return 0;
}
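The new nilfs_max_segment_count() above bounds the on-disk segment count so that the later multiplication nsegments * ns_blocks_per_segment cannot overflow a u64 before it is compared against the device size. A rough userspace sketch of the same overflow guard (illustrative only; names and values are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Largest segment count for which count * blocks_per_segment fits in a u64. */
static uint64_t max_segment_count(uint64_t blocks_per_segment)
{
	return UINT64_MAX / blocks_per_segment;
}

int main(void)
{
	uint64_t blocks_per_segment = 2048;	/* hypothetical layout value */
	uint64_t nsegments = 1ULL << 40;	/* hypothetical on-disk value */

	if (nsegments > max_segment_count(blocks_per_segment)) {
		fprintf(stderr, "segment count exceeds upper limit\n");
		return 1;
	}

	/* Safe: cannot overflow because of the check above. */
	printf("minimum device size: %llu blocks\n",
	       (unsigned long long)(nsegments * blocks_per_segment));
	return 0;
}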
@@ -2100,14 +2100,20 @@ static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
struct ocfs2_space_resv sr;
int change_size = 1;
int cmd = OCFS2_IOC_RESVSP64;
int ret = 0;

if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
if (!ocfs2_writes_unwritten_extents(osb))
return -EOPNOTSUPP;

if (mode & FALLOC_FL_KEEP_SIZE)
if (mode & FALLOC_FL_KEEP_SIZE) {
change_size = 0;
} else {
ret = inode_newsize_ok(inode, offset + len);
if (ret)
return ret;
}

if (mode & FALLOC_FL_PUNCH_HOLE)
cmd = OCFS2_IOC_UNRESVSP64;
@@ -952,8 +952,10 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
for (type = 0; type < OCFS2_MAXQUOTAS; type++) {
if (!sb_has_quota_loaded(sb, type))
continue;
oinfo = sb_dqinfo(sb, type)->dqi_priv;
cancel_delayed_work_sync(&oinfo->dqi_sync_work);
if (!sb_has_quota_suspended(sb, type)) {
oinfo = sb_dqinfo(sb, type)->dqi_priv;
cancel_delayed_work_sync(&oinfo->dqi_sync_work);
}
inode = igrab(sb->s_dquot.files[type]);
/* Turn off quotas. This will remove all dquot structures from
* memory and so they will be automatically synced to global
@@ -54,7 +54,7 @@ static char *sb_writers_name[SB_FREEZE_LEVELS] = {
* One thing we have to be careful of with a per-sb shrinker is that we don't
* drop the last active reference to the superblock from within the shrinker.
* If that happens we could trigger unregistering the shrinker from within the
* shrinker path and that leads to deadlock on the shrinker_mutex. Hence we
* shrinker path and that leads to deadlock on the shrinker_rwsem. Hence we
* take a passive reference to the superblock to avoid this from occurring.
*/
static unsigned long super_cache_scan(struct shrinker *shrink,
@@ -1322,6 +1322,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
bool basic_ioctls;
unsigned long start, end, vma_end;
struct vma_iterator vmi;
pgoff_t pgoff;

user_uffdio_register = (struct uffdio_register __user *) arg;

@@ -1449,6 +1450,8 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,

vma_iter_set(&vmi, start);
prev = vma_prev(&vmi);
if (vma->vm_start < start)
prev = vma;

ret = 0;
for_each_vma_range(vmi, vma, end) {
@@ -1472,8 +1475,9 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx,
vma_end = min(end, vma->vm_end);

new_flags = (vma->vm_flags & ~__VM_UFFD_FLAGS) | vm_flags;
pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma->anon_vma, vma->vm_file, pgoff,
vma_policy(vma),
((struct vm_userfaultfd_ctx){ ctx }),
anon_vma_name(vma));
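The register path (and the matching unregister hunk below) now recomputes the file offset before calling vma_merge(): when only part of a VMA beginning at start is affected, the merge candidate's pgoff must be shifted by the pages skipped rather than reusing vma->vm_pgoff. A small sketch of that arithmetic (hypothetical values, outside the kernel):

#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	unsigned long vm_start = 0x7f0000000000UL;	/* hypothetical VMA start */
	unsigned long vm_pgoff = 0x100UL;		/* file offset of vm_start, in pages */
	unsigned long start = vm_start + (16UL << PAGE_SHIFT);	/* range begins 16 pages in */

	/* Same formula as the fix: offset the pgoff by the pages skipped. */
	unsigned long pgoff = vm_pgoff + ((start - vm_start) >> PAGE_SHIFT);

	printf("pgoff for the split range: 0x%lx\n", pgoff);	/* prints 0x110 */
	return 0;
}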
@@ -1553,6 +1557,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
unsigned long start, end, vma_end;
const void __user *buf = (void __user *)arg;
struct vma_iterator vmi;
pgoff_t pgoff;

ret = -EFAULT;
if (copy_from_user(&uffdio_unregister, buf, sizeof(uffdio_unregister)))
@@ -1615,6 +1620,9 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,

vma_iter_set(&vmi, start);
prev = vma_prev(&vmi);
if (vma->vm_start < start)
prev = vma;

ret = 0;
for_each_vma_range(vmi, vma, end) {
cond_resched();
@@ -1652,8 +1660,9 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx,
uffd_wp_range(vma, start, vma_end - start, false);

new_flags = vma->vm_flags & ~__VM_UFFD_FLAGS;
pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
prev = vma_merge(&vmi, mm, prev, start, vma_end, new_flags,
vma->anon_vma, vma->vm_file, vma->vm_pgoff,
vma->anon_vma, vma->vm_file, pgoff,
vma_policy(vma),
NULL_VM_UFFD_CTX, anon_vma_name(vma));
if (prev) {
@@ -2566,12 +2566,6 @@ static inline int deny_write_access(struct file *file)
struct inode *inode = file_inode(file);
return atomic_dec_unless_positive(&inode->i_writecount) ? 0 : -ETXTBSY;
}
static inline int exclusive_deny_write_access(struct file *file)
{
int old = 0;
struct inode *inode = file_inode(file);
return atomic_try_cmpxchg(&inode->i_writecount, &old, -1) ? 0 : -ETXTBSY;
}
static inline void put_write_access(struct inode * inode)
{
atomic_dec(&inode->i_writecount);
@@ -806,6 +806,7 @@ enum {
FILTER_TRACE_FN,
FILTER_COMM,
FILTER_CPU,
FILTER_STACKTRACE,
};

extern int trace_event_raw_init(struct trace_event_call *call);
@@ -17,9 +17,10 @@

#ifdef CONFIG_USER_EVENTS
struct user_event_mm {
struct list_head link;
struct list_head mms_link;
struct list_head enablers;
struct mm_struct *mm;
/* Used for one-shot lists, protected by event_mutex */
struct user_event_mm *next;
refcount_t refcnt;
refcount_t tasks;
@@ -68,7 +68,7 @@ DECLARE_EVENT_CLASS(writeback_folio_template,
strscpy_pad(__entry->name,
bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
NULL), 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
__entry->ino = (mapping && mapping->host) ? mapping->host->i_ino : 0;
__entry->index = folio->index;
),
@@ -901,10 +901,22 @@ static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
}

offset = ALIGN(offset, align);

/*
* Check if the segment contains the entry point, if so,
* calculate the value of image->start based on it.
* If the compiler has produced more than one .text section
* (Eg: .text.hot), they are generally after the main .text
* section, and they shall not be used to calculate
* image->start. So do not re-calculate image->start if it
* is not set to the initial value, and warn the user so they
* have a chance to fix their purgatory's linker script.
*/
if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
pi->ehdr->e_entry < (sechdrs[i].sh_addr
+ sechdrs[i].sh_size)) {
+ sechdrs[i].sh_size) &&
!WARN_ON(kbuf->image->start != pi->ehdr->e_entry)) {
kbuf->image->start -= sechdrs[i].sh_addr;
kbuf->image->start += kbuf->mem + offset;
}
@@ -3057,53 +3057,11 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
return load_module(&info, uargs, 0);
}

static int file_init_module(struct file *file, const char __user * uargs, int flags)
SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
struct load_info info = { };
void *buf = NULL;
int len;

len = kernel_read_file(file, 0, &buf, INT_MAX, NULL,
READING_MODULE);
if (len < 0) {
mod_stat_inc(&failed_kreads);
mod_stat_add_long(len, &invalid_kread_bytes);
return len;
}

if (flags & MODULE_INIT_COMPRESSED_FILE) {
int err = module_decompress(&info, buf, len);
vfree(buf); /* compressed data is no longer needed */
if (err) {
mod_stat_inc(&failed_decompress);
mod_stat_add_long(len, &invalid_decompress_bytes);
return err;
}
} else {
info.hdr = buf;
info.len = len;
}

return load_module(&info, uargs, flags);
}

/*
* kernel_read_file() will already deny write access, but module
* loading wants _exclusive_ access to the file, so we do that
* here, along with basic sanity checks.
*/
static int prepare_file_for_module_load(struct file *file)
{
if (!file || !(file->f_mode & FMODE_READ))
return -EBADF;
if (!S_ISREG(file_inode(file)->i_mode))
return -EINVAL;
return exclusive_deny_write_access(file);
}

SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
{
struct fd f;
int err;

err = may_init_module();
@@ -3117,14 +3075,28 @@ SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
|MODULE_INIT_COMPRESSED_FILE))
return -EINVAL;

f = fdget(fd);
err = prepare_file_for_module_load(f.file);
if (!err) {
err = file_init_module(f.file, uargs, flags);
allow_write_access(f.file);
len = kernel_read_file_from_fd(fd, 0, &buf, INT_MAX, NULL,
READING_MODULE);
if (len < 0) {
mod_stat_inc(&failed_kreads);
mod_stat_add_long(len, &invalid_kread_bytes);
return len;
}
fdput(f);
return err;

if (flags & MODULE_INIT_COMPRESSED_FILE) {
err = module_decompress(&info, buf, len);
vfree(buf); /* compressed data is no longer needed */
if (err) {
mod_stat_inc(&failed_decompress);
mod_stat_add_long(len, &invalid_decompress_bytes);
return err;
}
} else {
info.hdr = buf;
info.len = len;
}

return load_module(&info, uargs, flags);
}

/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
@@ -60,6 +60,7 @@
*/
bool ring_buffer_expanded;

#ifdef CONFIG_FTRACE_STARTUP_TEST
/*
* We need to change this state when a selftest is running.
* A selftest will lurk into the ring-buffer to count the
@@ -75,7 +76,6 @@ static bool __read_mostly tracing_selftest_running;
*/
bool __read_mostly tracing_selftest_disabled;

#ifdef CONFIG_FTRACE_STARTUP_TEST
void __init disable_tracing_selftest(const char *reason)
{
if (!tracing_selftest_disabled) {
@@ -83,6 +83,9 @@ void __init disable_tracing_selftest(const char *reason)
pr_info("Ftrace startup test is disabled due to %s\n", reason);
}
}
#else
#define tracing_selftest_running 0
#define tracing_selftest_disabled 0
#endif

/* Pipe tracepoints to printk */
@@ -1051,7 +1054,10 @@ int __trace_array_puts(struct trace_array *tr, unsigned long ip,
if (!(tr->trace_flags & TRACE_ITER_PRINTK))
return 0;

if (unlikely(tracing_selftest_running || tracing_disabled))
if (unlikely(tracing_selftest_running && tr == &global_trace))
return 0;

if (unlikely(tracing_disabled))
return 0;

alloc = sizeof(*entry) + size + 2; /* possible \n added */
@@ -2041,6 +2047,24 @@ static int run_tracer_selftest(struct tracer *type)
return 0;
}

static int do_run_tracer_selftest(struct tracer *type)
{
int ret;

/*
* Tests can take a long time, especially if they are run one after the
* other, as does happen during bootup when all the tracers are
* registered. This could cause the soft lockup watchdog to trigger.
*/
cond_resched();

tracing_selftest_running = true;
ret = run_tracer_selftest(type);
tracing_selftest_running = false;

return ret;
}

static __init int init_trace_selftests(void)
{
struct trace_selftests *p, *n;
@@ -2092,6 +2116,10 @@ static inline int run_tracer_selftest(struct tracer *type)
{
return 0;
}
static inline int do_run_tracer_selftest(struct tracer *type)
{
return 0;
}
#endif /* CONFIG_FTRACE_STARTUP_TEST */

static void add_tracer_options(struct trace_array *tr, struct tracer *t);
@@ -2127,8 +2155,6 @@ int __init register_tracer(struct tracer *type)

mutex_lock(&trace_types_lock);

tracing_selftest_running = true;

for (t = trace_types; t; t = t->next) {
if (strcmp(type->name, t->name) == 0) {
/* already found */
@@ -2157,7 +2183,7 @@ int __init register_tracer(struct tracer *type)
/* store the tracer for __set_tracer_option */
type->flags->trace = type;

ret = run_tracer_selftest(type);
ret = do_run_tracer_selftest(type);
if (ret < 0)
goto out;

@@ -2166,7 +2192,6 @@ int __init register_tracer(struct tracer *type)
add_tracer_options(&global_trace, type);

out:
tracing_selftest_running = false;
mutex_unlock(&trace_types_lock);

if (ret || !default_bootup_tracer)
@@ -3490,7 +3515,7 @@ __trace_array_vprintk(struct trace_buffer *buffer,
unsigned int trace_ctx;
char *tbuffer;

if (tracing_disabled || tracing_selftest_running)
if (tracing_disabled)
return 0;

/* Don't pollute graph traces with trace_vprintk internals */
@@ -3538,6 +3563,9 @@ __printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
if (tracing_selftest_running && tr == &global_trace)
return 0;

return __trace_array_vprintk(tr->array_buffer.buffer, ip, fmt, args);
}

@@ -5752,7 +5780,7 @@ static const char readme_msg[] =
"\t table using the key(s) and value(s) named, and the value of a\n"
"\t sum called 'hitcount' is incremented. Keys and values\n"
"\t correspond to fields in the event's format description. Keys\n"
"\t can be any field, or the special string 'stacktrace'.\n"
"\t can be any field, or the special string 'common_stacktrace'.\n"
"\t Compound keys consisting of up to two fields can be specified\n"
"\t by the 'keys' keyword. Values must correspond to numeric\n"
"\t fields. Sort keys consisting of up to two fields can be\n"
@@ -194,6 +194,8 @@ static int trace_define_generic_fields(void)
__generic_field(int, common_cpu, FILTER_CPU);
__generic_field(char *, COMM, FILTER_COMM);
__generic_field(char *, comm, FILTER_COMM);
__generic_field(char *, stacktrace, FILTER_STACKTRACE);
__generic_field(char *, STACKTRACE, FILTER_STACKTRACE);

return ret;
}
@@ -1364,7 +1364,7 @@ static const char *hist_field_name(struct hist_field *field,
if (field->field)
field_name = field->field->name;
else
field_name = "stacktrace";
field_name = "common_stacktrace";
} else if (field->flags & HIST_FIELD_FL_HITCOUNT)
field_name = "hitcount";

@@ -2367,7 +2367,7 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
hist_data->enable_timestamps = true;
if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
hist_data->attrs->ts_in_usecs = true;
} else if (strcmp(field_name, "stacktrace") == 0) {
} else if (strcmp(field_name, "common_stacktrace") == 0) {
*flags |= HIST_FIELD_FL_STACKTRACE;
} else if (strcmp(field_name, "common_cpu") == 0)
*flags |= HIST_FIELD_FL_CPU;
@@ -2378,11 +2378,15 @@ parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
if (!field || !field->size) {
/*
* For backward compatibility, if field_name
* was "cpu", then we treat this the same as
* common_cpu. This also works for "CPU".
* was "cpu" or "stacktrace", then we treat this
* the same as common_cpu and common_stacktrace
* respectively. This also works for "CPU", and
* "STACKTRACE".
*/
if (field && field->filter_type == FILTER_CPU) {
*flags |= HIST_FIELD_FL_CPU;
} else if (field && field->filter_type == FILTER_STACKTRACE) {
*flags |= HIST_FIELD_FL_STACKTRACE;
} else {
hist_err(tr, HIST_ERR_FIELD_NOT_FOUND,
errpos(field_name));
@@ -4238,13 +4242,19 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
goto out;
}

/* Some types cannot be a value */
if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) {
hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
ret = -EINVAL;
/* values and variables should not have some modifiers */
if (hist_field->flags & HIST_FIELD_FL_VAR) {
/* Variable */
if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2))
goto err;
} else {
/* Value */
if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT |
HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 |
HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET |
HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE))
goto err;
}

hist_data->fields[val_idx] = hist_field;
@@ -4256,6 +4266,9 @@ static int __create_val_field(struct hist_trigger_data *hist_data,
ret = -EINVAL;
out:
return ret;
err:
hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str));
return -EINVAL;
}

static int create_val_field(struct hist_trigger_data *hist_data,
@@ -5385,7 +5398,7 @@ static void hist_trigger_print_key(struct seq_file *m,
if (key_field->field)
seq_printf(m, "%s.stacktrace", key_field->field->name);
else
seq_puts(m, "stacktrace:\n");
seq_puts(m, "common_stacktrace:\n");
hist_trigger_stacktrace_print(m,
key + key_field->offset,
HIST_STACKTRACE_DEPTH);
@@ -5968,7 +5981,7 @@ static int event_hist_trigger_print(struct seq_file *m,
if (field->field)
seq_printf(m, "%s.stacktrace", field->field->name);
else
seq_puts(m, "stacktrace");
seq_puts(m, "common_stacktrace");
} else
hist_field_print(m, field);
}
@@ -96,12 +96,12 @@ struct user_event {
* these to track enablement sites that are tied to an event.
*/
struct user_event_enabler {
struct list_head link;
struct list_head mm_enablers_link;
struct user_event *event;
unsigned long addr;

/* Track enable bit, flags, etc. Aligned for bitops. */
unsigned int values;
unsigned long values;
};

/* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
@@ -116,7 +116,9 @@ struct user_event_enabler {
/* Only duplicate the bit value */
#define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK

#define ENABLE_BITOPS(e) ((unsigned long *)&(e)->values)
#define ENABLE_BITOPS(e) (&(e)->values)

#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))

/* Used for asynchronous faulting in of pages */
struct user_event_enabler_fault {
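The values field becomes unsigned long and the cast in ENABLE_BITOPS() goes away because the kernel's set_bit()/clear_bit() helpers operate on unsigned long words; aliasing a 32-bit field as unsigned long * touches bytes beyond the field on 64-bit targets. A minimal userspace illustration of the size mismatch (assumption: LP64, not kernel code):

#include <stdio.h>

struct bad {
	unsigned int values;		/* 4 bytes */
	unsigned int neighbour;		/* would be clobbered by 8-byte word ops */
};

struct good {
	unsigned long values;		/* full machine word, safe for bitops */
};

int main(void)
{
	printf("sizeof(unsigned int)  = %zu\n", sizeof(unsigned int));
	printf("sizeof(unsigned long) = %zu\n", sizeof(unsigned long));
	printf("word ops via (unsigned long *)&bad.values would span %zu bytes, "
	       "but the field only has %zu\n",
	       sizeof(unsigned long), sizeof(((struct bad *)0)->values));
	return 0;
}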
@@ -153,7 +155,7 @@ struct user_event_file_info {
#define VALIDATOR_REL (1 << 1)

struct user_event_validator {
struct list_head link;
struct list_head user_event_link;
int offset;
int flags;
};
@@ -259,7 +261,7 @@ error:

static void user_event_enabler_destroy(struct user_event_enabler *enabler)
{
list_del_rcu(&enabler->link);
list_del_rcu(&enabler->mm_enablers_link);

/* No longer tracking the event via the enabler */
refcount_dec(&enabler->event->refcnt);
@@ -423,9 +425,9 @@ static int user_event_enabler_write(struct user_event_mm *mm,

/* Update bit atomically, user tracers must be atomic as well */
if (enabler->event && enabler->event->status)
set_bit(enabler->values & ENABLE_VAL_BIT_MASK, ptr);
set_bit(ENABLE_BIT(enabler), ptr);
else
clear_bit(enabler->values & ENABLE_VAL_BIT_MASK, ptr);
clear_bit(ENABLE_BIT(enabler), ptr);

kunmap_local(kaddr);
unpin_user_pages_dirty_lock(&page, 1, true);
@@ -437,11 +439,9 @@ static bool user_event_enabler_exists(struct user_event_mm *mm,
unsigned long uaddr, unsigned char bit)
{
struct user_event_enabler *enabler;
struct user_event_enabler *next;

list_for_each_entry_safe(enabler, next, &mm->enablers, link) {
if (enabler->addr == uaddr &&
(enabler->values & ENABLE_VAL_BIT_MASK) == bit)
list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
return true;
}

@@ -451,23 +451,36 @@ static bool user_event_enabler_exists(struct user_event_mm *mm,
static void user_event_enabler_update(struct user_event *user)
{
struct user_event_enabler *enabler;
struct user_event_mm *mm = user_event_mm_get_all(user);
struct user_event_mm *next;
struct user_event_mm *mm;
int attempt;

lockdep_assert_held(&event_mutex);

/*
* We need to build a one-shot list of all the mms that have an
* enabler for the user_event passed in. This list is only valid
* while holding the event_mutex. The only reason for this is due
* to the global mm list being RCU protected and we use methods
* which can wait (mmap_read_lock and pin_user_pages_remote).
*
* NOTE: user_event_mm_get_all() increments the ref count of each
* mm that is added to the list to prevent removal timing windows.
* We must always put each mm after they are used, which may wait.
*/
mm = user_event_mm_get_all(user);

while (mm) {
next = mm->next;
mmap_read_lock(mm->mm);
rcu_read_lock();

list_for_each_entry_rcu(enabler, &mm->enablers, link) {
list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
if (enabler->event == user) {
attempt = 0;
user_event_enabler_write(mm, enabler, true, &attempt);
}
}

rcu_read_unlock();
mmap_read_unlock(mm->mm);
user_event_mm_put(mm);
mm = next;
@@ -495,7 +508,9 @@ static bool user_event_enabler_dup(struct user_event_enabler *orig,
enabler->values = orig->values & ENABLE_VAL_DUP_MASK;

refcount_inc(&enabler->event->refcnt);
list_add_rcu(&enabler->link, &mm->enablers);

/* Enablers not exposed yet, RCU not required */
list_add(&enabler->mm_enablers_link, &mm->enablers);

return true;
}
@@ -513,6 +528,14 @@ static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
struct user_event_enabler *enabler;
struct user_event_mm *mm;

/*
* We use the mm->next field to build a one-shot list from the global
* RCU protected list. To build this list the event_mutex must be held.
* This lets us build a list without requiring allocs that could fail
* when user based events are most wanted for diagnostics.
*/
lockdep_assert_held(&event_mutex);

/*
* We do not want to block fork/exec while enablements are being
* updated, so we use RCU to walk the current tasks that have used
@@ -525,23 +548,24 @@ static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
*/
rcu_read_lock();

list_for_each_entry_rcu(mm, &user_event_mms, link)
list_for_each_entry_rcu(enabler, &mm->enablers, link)
list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
if (enabler->event == user) {
mm->next = found;
found = user_event_mm_get(mm);
break;
}
}
}

rcu_read_unlock();

return found;
}

static struct user_event_mm *user_event_mm_create(struct task_struct *t)
static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
{
struct user_event_mm *user_mm;
unsigned long flags;

user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);

@@ -553,12 +577,6 @@ static struct user_event_mm *user_event_mm_create(struct task_struct *t)
refcount_set(&user_mm->refcnt, 1);
refcount_set(&user_mm->tasks, 1);

spin_lock_irqsave(&user_event_mms_lock, flags);
list_add_rcu(&user_mm->link, &user_event_mms);
spin_unlock_irqrestore(&user_event_mms_lock, flags);

t->user_event_mm = user_mm;

/*
* The lifetime of the memory descriptor can slightly outlast
* the task lifetime if a ref to the user_event_mm is taken
@@ -572,6 +590,17 @@ static struct user_event_mm *user_event_mm_create(struct task_struct *t)
return user_mm;
}

static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
{
unsigned long flags;

spin_lock_irqsave(&user_event_mms_lock, flags);
list_add_rcu(&user_mm->mms_link, &user_event_mms);
spin_unlock_irqrestore(&user_event_mms_lock, flags);

t->user_event_mm = user_mm;
}

static struct user_event_mm *current_user_event_mm(void)
{
struct user_event_mm *user_mm = current->user_event_mm;
|
||||
@ -579,10 +608,12 @@ static struct user_event_mm *current_user_event_mm(void)
|
||||
if (user_mm)
|
||||
goto inc;
|
||||
|
||||
user_mm = user_event_mm_create(current);
|
||||
user_mm = user_event_mm_alloc(current);
|
||||
|
||||
if (!user_mm)
|
||||
goto error;
|
||||
|
||||
user_event_mm_attach(user_mm, current);
|
||||
inc:
|
||||
refcount_inc(&user_mm->refcnt);
|
||||
error:
|
||||
@ -593,7 +624,7 @@ static void user_event_mm_destroy(struct user_event_mm *mm)
|
||||
{
|
||||
struct user_event_enabler *enabler, *next;
|
||||
|
||||
list_for_each_entry_safe(enabler, next, &mm->enablers, link)
|
||||
list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
|
||||
user_event_enabler_destroy(enabler);
|
||||
|
||||
mmdrop(mm->mm);
|
||||
@ -630,7 +661,7 @@ void user_event_mm_remove(struct task_struct *t)
|
||||
|
||||
/* Remove the mm from the list, so it can no longer be enabled */
|
||||
spin_lock_irqsave(&user_event_mms_lock, flags);
|
||||
list_del_rcu(&mm->link);
|
||||
list_del_rcu(&mm->mms_link);
|
||||
spin_unlock_irqrestore(&user_event_mms_lock, flags);
|
||||
|
||||
/*
|
||||
@ -670,7 +701,7 @@ void user_event_mm_remove(struct task_struct *t)
|
||||
|
||||
void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
|
||||
{
|
||||
struct user_event_mm *mm = user_event_mm_create(t);
|
||||
struct user_event_mm *mm = user_event_mm_alloc(t);
|
||||
struct user_event_enabler *enabler;
|
||||
|
||||
if (!mm)
|
||||
@ -678,16 +709,18 @@ void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
list_for_each_entry_rcu(enabler, &old_mm->enablers, link)
|
||||
list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
|
||||
if (!user_event_enabler_dup(enabler, mm))
|
||||
goto error;
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
user_event_mm_attach(mm, t);
|
||||
return;
|
||||
error:
|
||||
rcu_read_unlock();
|
||||
user_event_mm_remove(t);
|
||||
user_event_mm_destroy(mm);
|
||||
}
|
||||
|
||||
static bool current_user_event_enabler_exists(unsigned long uaddr,
|
||||
@ -748,7 +781,7 @@ retry:
|
||||
*/
|
||||
if (!*write_result) {
|
||||
refcount_inc(&enabler->event->refcnt);
|
||||
list_add_rcu(&enabler->link, &user_mm->enablers);
|
||||
list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
|
||||
}
|
||||
|
||||
mutex_unlock(&event_mutex);
|
||||
@ -904,8 +937,8 @@ static void user_event_destroy_validators(struct user_event *user)
|
||||
struct user_event_validator *validator, *next;
|
||||
struct list_head *head = &user->validators;
|
||||
|
||||
list_for_each_entry_safe(validator, next, head, link) {
|
||||
list_del(&validator->link);
|
||||
list_for_each_entry_safe(validator, next, head, user_event_link) {
|
||||
list_del(&validator->user_event_link);
|
||||
kfree(validator);
|
||||
}
|
||||
}
|
||||
@ -959,7 +992,7 @@ add_validator:
|
||||
validator->offset = offset;
|
||||
|
||||
/* Want sequential access when validating */
|
||||
list_add_tail(&validator->link, &user->validators);
|
||||
list_add_tail(&validator->user_event_link, &user->validators);
|
||||
|
||||
add_field:
|
||||
field->type = type;
|
||||
@ -1349,7 +1382,7 @@ static int user_event_validate(struct user_event *user, void *data, int len)
|
||||
void *pos, *end = data + len;
|
||||
u32 loc, offset, size;
|
||||
|
||||
list_for_each_entry(validator, head, link) {
|
||||
list_for_each_entry(validator, head, user_event_link) {
|
||||
pos = data + validator->offset;
|
||||
|
||||
/* Already done min_size check, no bounds check here */
|
||||
@ -2270,9 +2303,9 @@ static long user_events_ioctl_unreg(unsigned long uarg)
|
||||
*/
|
||||
mutex_lock(&event_mutex);
|
||||
|
||||
list_for_each_entry_safe(enabler, next, &mm->enablers, link)
|
||||
list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
|
||||
if (enabler->addr == reg.disable_addr &&
|
||||
(enabler->values & ENABLE_VAL_BIT_MASK) == reg.disable_bit) {
|
||||
ENABLE_BIT(enabler) == reg.disable_bit) {
|
||||
set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
|
||||
|
||||
if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
|
||||
@ -2281,6 +2314,7 @@ static long user_events_ioctl_unreg(unsigned long uarg)
|
||||
/* Removed at least one */
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&event_mutex);
|
||||
|
||||
|
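The comment in user_event_enabler_update()/user_event_mm_get_all() above describes a pattern worth spelling out: a snapshot of an RCU-protected list is chained through a spare next pointer while a reference is taken on every element, so that the later walk may use operations that sleep. The sketch below only illustrates that pattern in plain user-space C; the ctx, get_ctx and put_ctx names are hypothetical and it is not the kernel code.

    #include <stdio.h>

    /* Hypothetical element: lives on a long-lived list and carries a spare
     * 'next' pointer used only for one-shot snapshots. */
    struct ctx {
        int id;
        int refs;
        struct ctx *list_next;  /* the long-lived list */
        struct ctx *next;       /* snapshot chain, valid for one walk only */
    };

    static struct ctx *get_ctx(struct ctx *c) { c->refs++; return c; }
    static void put_ctx(struct ctx *c) { c->refs--; }

    /* Build the snapshot without allocating: chain matching elements and
     * take a reference on each so they cannot vanish while we later sleep. */
    static struct ctx *snapshot(struct ctx *head, int want_id)
    {
        struct ctx *found = NULL;

        for (struct ctx *c = head; c; c = c->list_next) {
            if (c->id == want_id) {
                c->next = found;
                found = get_ctx(c);
            }
        }
        return found;
    }

    int main(void)
    {
        struct ctx a = { .id = 1 }, b = { .id = 1, .list_next = &a };
        struct ctx *snap = snapshot(&b, 1);

        /* Walk the snapshot; blocking work would be safe at this point. */
        for (struct ctx *c = snap; c; ) {
            struct ctx *next = c->next;
            printf("ctx %d (refs=%d)\n", c->id, c->refs);
            put_ctx(c);         /* always drop the snapshot reference */
            c = next;
        }
        return 0;
    }
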
@ -1652,6 +1652,8 @@ static enum hrtimer_restart timerlat_irq(struct hrtimer *timer)
osnoise_stop_tracing();
notify_new_max_latency(diff);

wake_up_process(tlat->kthread);

return HRTIMER_NORESTART;
}
}

@ -848,6 +848,12 @@ trace_selftest_startup_function_graph(struct tracer *trace,
}

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
/*
* These tests can take some time to run. Make sure on non PREEMPT
* kernels, we do not trigger the softlockup detector.
*/
cond_resched();

tracing_reset_online_cpus(&tr->array_buffer);
set_graph_array(tr);

@ -869,6 +875,8 @@ trace_selftest_startup_function_graph(struct tracer *trace,
if (ret)
goto out;

cond_resched();

ret = register_ftrace_graph(&fgraph_ops);
if (ret) {
warn_failed_init_tracer(trace, ret);
@ -891,6 +899,8 @@ trace_selftest_startup_function_graph(struct tracer *trace,
if (ret)
goto out;

cond_resched();

tracing_start();

if (!ret && !count) {

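For context, the cond_resched() calls being added here are the usual way long-running loops avoid tripping the softlockup watchdog on non-preemptible kernels. A minimal kernel-style sketch of that idiom (the do_one_step() helper is hypothetical) might look like:

    #include <linux/sched.h>

    /* Sketch only: yield periodically inside a long loop so a !PREEMPT
     * kernel does not report a soft lockup while the work completes. */
    static void run_long_selftest(unsigned int nr_steps)
    {
        unsigned int i;

        for (i = 0; i < nr_steps; i++) {
            do_one_step(i);     /* hypothetical expensive work */
            cond_resched();     /* give other tasks a chance to run */
        }
    }
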
@ -27,6 +27,8 @@
#include <linux/string.h>
#include <linux/xarray.h>

#include "radix-tree.h"

/*
* Radix tree node cache.
*/

8 lib/radix-tree.h Normal file
@ -0,0 +1,8 @@
// SPDX-License-Identifier: GPL-2.0+
/* radix-tree helpers that are only shared with xarray */

struct kmem_cache;
struct rcu_head;

extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

@ -369,7 +369,7 @@ vm_map_ram_test(void)
int i;

map_nr_pages = nr_pages > 0 ? nr_pages:1;
pages = kmalloc(map_nr_pages * sizeof(struct page), GFP_KERNEL);
pages = kcalloc(map_nr_pages, sizeof(struct page *), GFP_KERNEL);
if (!pages)
return -1;

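Besides correcting the element size (struct page * rather than struct page), the switch to kcalloc() follows the usual idiom for array allocations: the memory is zeroed and the request fails cleanly if the count times the element size would overflow. A small hedged sketch of that idiom, not tied to this file:

    #include <linux/slab.h>

    /* Sketch: allocate an array of n page pointers the idiomatic way. */
    static struct page **alloc_page_array(unsigned int n)
    {
        /* kcalloc(n, size, ...) returns NULL if n * size would overflow. */
        return kcalloc(n, sizeof(struct page *), GFP_KERNEL);
    }
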
@ -12,6 +12,8 @@
#include <linux/slab.h>
#include <linux/xarray.h>

#include "radix-tree.h"

/*
* Coding conventions in this file:
*
@ -247,10 +249,6 @@ void *xas_load(struct xa_state *xas)
}
EXPORT_SYMBOL_GPL(xas_load);

/* Move the radix tree node cache here */
extern struct kmem_cache *radix_tree_node_cachep;
extern void radix_tree_node_rcu_free(struct rcu_head *head);

#define XA_RCU_FREE ((struct xarray *)1)

static void xa_node_free(struct xa_node *node)

@ -551,6 +551,8 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
return -EINVAL;
if (attrs->min_nr_regions > attrs->max_nr_regions)
return -EINVAL;
if (attrs->sample_interval > attrs->aggr_interval)
return -EINVAL;

damon_update_monitoring_results(ctx, attrs);
ctx->attrs = *attrs;

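For illustration, the new check rejects configurations in which a single sample would be longer than the window it is aggregated over. A hedged sketch of an attrs setup that damon_set_attrs() would now refuse with -EINVAL (field names per struct damon_attrs are assumed here, only sample_interval, aggr_interval, min_nr_regions and max_nr_regions appear in the hunk above):

    #include <linux/damon.h>

    /* Sketch: sampling every 10ms but aggregating every 5ms is invalid,
     * since sample_interval > aggr_interval. */
    static struct damon_attrs bad_attrs = {
        .sample_interval = 10000,       /* microseconds */
        .aggr_interval = 5000,
        .min_nr_regions = 10,
        .max_nr_regions = 1000,
    };
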
26 mm/filemap.c
@ -1728,7 +1728,9 @@ bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
*
* Return: The index of the gap if found, otherwise an index outside the
* range specified (in which case 'return - index >= max_scan' will be true).
* In the rare case of index wrap-around, 0 will be returned.
* In the rare case of index wrap-around, 0 will be returned. 0 will also
* be returned if index == 0 and there is a gap at the index. We can not
* wrap-around if passed index == 0.
*/
pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan)
@ -1738,12 +1740,13 @@ pgoff_t page_cache_next_miss(struct address_space *mapping,
while (max_scan--) {
void *entry = xas_next(&xas);
if (!entry || xa_is_value(entry))
break;
if (xas.xa_index == 0)
break;
return xas.xa_index;
if (xas.xa_index == 0 && index != 0)
return xas.xa_index;
}

return xas.xa_index;
/* No gaps in range and no wrap-around, return index beyond range */
return xas.xa_index + 1;
}
EXPORT_SYMBOL(page_cache_next_miss);

@ -1764,7 +1767,9 @@ EXPORT_SYMBOL(page_cache_next_miss);
*
* Return: The index of the gap if found, otherwise an index outside the
* range specified (in which case 'index - return >= max_scan' will be true).
* In the rare case of wrap-around, ULONG_MAX will be returned.
* In the rare case of wrap-around, ULONG_MAX will be returned. ULONG_MAX
* will also be returned if index == ULONG_MAX and there is a gap at the
* index. We can not wrap-around if passed index == ULONG_MAX.
*/
pgoff_t page_cache_prev_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan)
@ -1774,12 +1779,13 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping,
while (max_scan--) {
void *entry = xas_prev(&xas);
if (!entry || xa_is_value(entry))
break;
if (xas.xa_index == ULONG_MAX)
break;
return xas.xa_index;
if (xas.xa_index == ULONG_MAX && index != ULONG_MAX)
return xas.xa_index;
}

return xas.xa_index;
/* No gaps in range and no wrap-around, return index beyond range */
return xas.xa_index - 1;
}
EXPORT_SYMBOL(page_cache_prev_miss);

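Given the return convention documented above ('return - index >= max_scan' when no gap is found), a caller that only wants to know whether a hole exists in the scanned range can be sketched as below; this helper is illustrative and not part of the patch:

    #include <linux/pagemap.h>

    /* Sketch: true if [index, index + max_scan) contains a hole in the cache. */
    static bool range_has_hole(struct address_space *mapping,
                               pgoff_t index, unsigned long max_scan)
    {
        pgoff_t gap = page_cache_next_miss(mapping, index, max_scan);

        /* Per the documentation above, gap - index >= max_scan means no
         * gap was found inside the scanned range. */
        return gap - index < max_scan;
    }
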
@ -380,6 +380,7 @@ static int gup_test_release(struct inode *inode, struct file *file)
static const struct file_operations gup_test_fops = {
.open = nonseekable_open,
.unlocked_ioctl = gup_test_ioctl,
.compat_ioctl = compat_ptr_ioctl,
.release = gup_test_release,
};

@ -2089,7 +2089,6 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr,
TTU_IGNORE_MLOCK | TTU_BATCH_FLUSH);

xas_lock_irq(&xas);
xas_set(&xas, index);

VM_BUG_ON_PAGE(page != xas_load(&xas), page);

@ -371,12 +371,15 @@ SYSCALL_DEFINE2(memfd_create,

inode->i_mode &= ~0111;
file_seals = memfd_file_seals_ptr(file);
*file_seals &= ~F_SEAL_SEAL;
*file_seals |= F_SEAL_EXEC;
if (file_seals) {
*file_seals &= ~F_SEAL_SEAL;
*file_seals |= F_SEAL_EXEC;
}
} else if (flags & MFD_ALLOW_SEALING) {
/* MFD_EXEC and MFD_ALLOW_SEALING are set */
file_seals = memfd_file_seals_ptr(file);
*file_seals &= ~F_SEAL_SEAL;
if (file_seals)
*file_seals &= ~F_SEAL_SEAL;
}

fd_install(fd, file);

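The guarded file_seals update matters for ordinary user-space callers of this flag combination. The following is a plain user-space usage example (not part of the patch) of the sealing path the hunk distinguishes:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <fcntl.h>
    #include <stdio.h>

    int main(void)
    {
        /* Sealing explicitly allowed: the kernel clears F_SEAL_SEAL. */
        int fd = memfd_create("demo", MFD_CLOEXEC | MFD_ALLOW_SEALING);

        if (fd < 0) {
            perror("memfd_create");
            return 1;
        }
        /* Add seals of our own, e.g. forbid growing or shrinking the file. */
        if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK) < 0)
            perror("F_ADD_SEALS");
        return 0;
    }
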
@ -824,7 +824,7 @@ static int do_mprotect_pkey(unsigned long start, size_t len,
}
tlb_finish_mmu(&tlb);

if (!error && vma_iter_end(&vmi) < end)
if (!error && tmp < end)
error = -ENOMEM;

out:

@ -5,12 +5,10 @@
#include <linux/seq_file.h>
#include <linux/shrinker.h>
#include <linux/memcontrol.h>
#include <linux/srcu.h>

/* defined in vmscan.c */
extern struct mutex shrinker_mutex;
extern struct rw_semaphore shrinker_rwsem;
extern struct list_head shrinker_list;
extern struct srcu_struct shrinker_srcu;

static DEFINE_IDA(shrinker_debugfs_ida);
static struct dentry *shrinker_debugfs_root;
@ -51,13 +49,18 @@ static int shrinker_debugfs_count_show(struct seq_file *m, void *v)
struct mem_cgroup *memcg;
unsigned long total;
bool memcg_aware;
int ret = 0, nid, srcu_idx;
int ret, nid;

count_per_node = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
if (!count_per_node)
return -ENOMEM;

srcu_idx = srcu_read_lock(&shrinker_srcu);
ret = down_read_killable(&shrinker_rwsem);
if (ret) {
kfree(count_per_node);
return ret;
}
rcu_read_lock();

memcg_aware = shrinker->flags & SHRINKER_MEMCG_AWARE;

@ -88,7 +91,8 @@ static int shrinker_debugfs_count_show(struct seq_file *m, void *v)
}
} while ((memcg = mem_cgroup_iter(NULL, memcg, NULL)) != NULL);

srcu_read_unlock(&shrinker_srcu, srcu_idx);
rcu_read_unlock();
up_read(&shrinker_rwsem);

kfree(count_per_node);
return ret;
@ -111,8 +115,9 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file,
.gfp_mask = GFP_KERNEL,
};
struct mem_cgroup *memcg = NULL;
int nid, srcu_idx;
int nid;
char kbuf[72];
ssize_t ret;

read_len = size < (sizeof(kbuf) - 1) ? size : (sizeof(kbuf) - 1);
if (copy_from_user(kbuf, buf, read_len))
@ -141,7 +146,11 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file,
return -EINVAL;
}

srcu_idx = srcu_read_lock(&shrinker_srcu);
ret = down_read_killable(&shrinker_rwsem);
if (ret) {
mem_cgroup_put(memcg);
return ret;
}

sc.nid = nid;
sc.memcg = memcg;
@ -150,7 +159,7 @@ static ssize_t shrinker_debugfs_scan_write(struct file *file,

shrinker->scan_objects(shrinker, &sc);

srcu_read_unlock(&shrinker_srcu, srcu_idx);
up_read(&shrinker_rwsem);
mem_cgroup_put(memcg);

return size;
@ -168,7 +177,7 @@ int shrinker_debugfs_add(struct shrinker *shrinker)
char buf[128];
int id;

lockdep_assert_held(&shrinker_mutex);
lockdep_assert_held(&shrinker_rwsem);

/* debugfs isn't initialized yet, add debugfs entries later. */
if (!shrinker_debugfs_root)
@ -211,7 +220,7 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
if (!new)
return -ENOMEM;

mutex_lock(&shrinker_mutex);
down_write(&shrinker_rwsem);

old = shrinker->name;
shrinker->name = new;
@ -229,7 +238,7 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
shrinker->debugfs_entry = entry;
}

mutex_unlock(&shrinker_mutex);
up_write(&shrinker_rwsem);

kfree_const(old);

@ -242,7 +251,7 @@ struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
{
struct dentry *entry = shrinker->debugfs_entry;

lockdep_assert_held(&shrinker_mutex);
lockdep_assert_held(&shrinker_rwsem);

kfree_const(shrinker->name);
shrinker->name = NULL;
@ -271,14 +280,14 @@ static int __init shrinker_debugfs_init(void)
shrinker_debugfs_root = dentry;

/* Create debugfs entries for shrinkers registered at boot */
mutex_lock(&shrinker_mutex);
down_write(&shrinker_rwsem);
list_for_each_entry(shrinker, &shrinker_list, list)
if (!shrinker->debugfs_entry) {
ret = shrinker_debugfs_add(shrinker);
if (ret)
break;
}
mutex_unlock(&shrinker_mutex);
up_write(&shrinker_rwsem);

return ret;
}

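The shift from SRCU back to shrinker_rwsem in these debugfs handlers relies on the killable lock primitive, so a reader stuck behind a long-running writer can still be killed. A generic sketch of that pattern, with a hypothetical lock and handler name rather than the real shrinker_rwsem paths, is:

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(demo_rwsem);      /* stand-in for shrinker_rwsem */

    static int demo_read_handler(void)
    {
        int ret;

        /* Returns -EINTR if a fatal signal arrives while waiting. */
        ret = down_read_killable(&demo_rwsem);
        if (ret)
            return ret;

        /* ... read-side work that must exclude writers ... */

        up_read(&demo_rwsem);
        return 0;
    }
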
17 mm/vmalloc.c
@ -3148,11 +3148,20 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
* allocation request, free them via vfree() if any.
*/
if (area->nr_pages != nr_small_pages) {
/* vm_area_alloc_pages() can also fail due to a fatal signal */
if (!fatal_signal_pending(current))
/*
* vm_area_alloc_pages() can fail due to insufficient memory but
* also:-
*
* - a pending fatal signal
* - insufficient huge page-order pages
*
* Since we always retry allocations at order-0 in the huge page
* case a warning for either is spurious.
*/
if (!fatal_signal_pending(current) && page_order == 0)
warn_alloc(gfp_mask, NULL,
"vmalloc error: size %lu, page order %u, failed to allocate pages",
area->nr_pages * PAGE_SIZE, page_order);
"vmalloc error: size %lu, failed to allocate pages",
area->nr_pages * PAGE_SIZE);
goto fail;
}

138 mm/vmscan.c
@ -35,7 +35,7 @@
#include <linux/cpuset.h>
#include <linux/compaction.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
@ -57,7 +57,6 @@
#include <linux/khugepaged.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <linux/srcu.h>

#include <asm/tlbflush.h>
#include <asm/div64.h>
@ -190,9 +189,7 @@ struct scan_control {
int vm_swappiness = 60;

LIST_HEAD(shrinker_list);
DEFINE_MUTEX(shrinker_mutex);
DEFINE_SRCU(shrinker_srcu);
static atomic_t shrinker_srcu_generation = ATOMIC_INIT(0);
DECLARE_RWSEM(shrinker_rwsem);

#ifdef CONFIG_MEMCG
static int shrinker_nr_max;
@ -211,21 +208,8 @@ static inline int shrinker_defer_size(int nr_items)
static struct shrinker_info *shrinker_info_protected(struct mem_cgroup *memcg,
int nid)
{
return srcu_dereference_check(memcg->nodeinfo[nid]->shrinker_info,
&shrinker_srcu,
lockdep_is_held(&shrinker_mutex));
}

static struct shrinker_info *shrinker_info_srcu(struct mem_cgroup *memcg,
int nid)
{
return srcu_dereference(memcg->nodeinfo[nid]->shrinker_info,
&shrinker_srcu);
}

static void free_shrinker_info_rcu(struct rcu_head *head)
{
kvfree(container_of(head, struct shrinker_info, rcu));
return rcu_dereference_protected(memcg->nodeinfo[nid]->shrinker_info,
lockdep_is_held(&shrinker_rwsem));
}

static int expand_one_shrinker_info(struct mem_cgroup *memcg,
@ -266,7 +250,7 @@ static int expand_one_shrinker_info(struct mem_cgroup *memcg,
defer_size - old_defer_size);

rcu_assign_pointer(pn->shrinker_info, new);
call_srcu(&shrinker_srcu, &old->rcu, free_shrinker_info_rcu);
kvfree_rcu(old, rcu);
}

return 0;
@ -292,7 +276,7 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
int nid, size, ret = 0;
int map_size, defer_size = 0;

mutex_lock(&shrinker_mutex);
down_write(&shrinker_rwsem);
map_size = shrinker_map_size(shrinker_nr_max);
defer_size = shrinker_defer_size(shrinker_nr_max);
size = map_size + defer_size;
@ -308,7 +292,7 @@ int alloc_shrinker_info(struct mem_cgroup *memcg)
info->map_nr_max = shrinker_nr_max;
rcu_assign_pointer(memcg->nodeinfo[nid]->shrinker_info, info);
}
mutex_unlock(&shrinker_mutex);
up_write(&shrinker_rwsem);

return ret;
}
@ -324,7 +308,7 @@ static int expand_shrinker_info(int new_id)
if (!root_mem_cgroup)
goto out;

lockdep_assert_held(&shrinker_mutex);
lockdep_assert_held(&shrinker_rwsem);

map_size = shrinker_map_size(new_nr_max);
defer_size = shrinker_defer_size(new_nr_max);
@ -352,16 +336,15 @@ void set_shrinker_bit(struct mem_cgroup *memcg, int nid, int shrinker_id)
{
if (shrinker_id >= 0 && memcg && !mem_cgroup_is_root(memcg)) {
struct shrinker_info *info;
int srcu_idx;

srcu_idx = srcu_read_lock(&shrinker_srcu);
info = shrinker_info_srcu(memcg, nid);
rcu_read_lock();
info = rcu_dereference(memcg->nodeinfo[nid]->shrinker_info);
if (!WARN_ON_ONCE(shrinker_id >= info->map_nr_max)) {
/* Pairs with smp mb in shrink_slab() */
smp_mb__before_atomic();
set_bit(shrinker_id, info->map);
}
srcu_read_unlock(&shrinker_srcu, srcu_idx);
rcu_read_unlock();
}
}

@ -374,7 +357,8 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
if (mem_cgroup_disabled())
return -ENOSYS;

mutex_lock(&shrinker_mutex);
down_write(&shrinker_rwsem);
/* This may call shrinker, so it must use down_read_trylock() */
id = idr_alloc(&shrinker_idr, shrinker, 0, 0, GFP_KERNEL);
if (id < 0)
goto unlock;
@ -388,7 +372,7 @@ static int prealloc_memcg_shrinker(struct shrinker *shrinker)
shrinker->id = id;
ret = 0;
unlock:
mutex_unlock(&shrinker_mutex);
up_write(&shrinker_rwsem);
return ret;
}

@ -398,7 +382,7 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)

BUG_ON(id < 0);

lockdep_assert_held(&shrinker_mutex);
lockdep_assert_held(&shrinker_rwsem);

idr_remove(&shrinker_idr, id);
}
@ -408,7 +392,7 @@ static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
{
struct shrinker_info *info;

info = shrinker_info_srcu(memcg, nid);
info = shrinker_info_protected(memcg, nid);
return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
}

@ -417,7 +401,7 @@ static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
{
struct shrinker_info *info;

info = shrinker_info_srcu(memcg, nid);
info = shrinker_info_protected(memcg, nid);
return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
}

@ -433,7 +417,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg)
parent = root_mem_cgroup;

/* Prevent from concurrent shrinker_info expand */
mutex_lock(&shrinker_mutex);
down_read(&shrinker_rwsem);
for_each_node(nid) {
child_info = shrinker_info_protected(memcg, nid);
parent_info = shrinker_info_protected(parent, nid);
@ -442,7 +426,7 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg)
atomic_long_add(nr, &parent_info->nr_deferred[i]);
}
}
mutex_unlock(&shrinker_mutex);
up_read(&shrinker_rwsem);
}

static bool cgroup_reclaim(struct scan_control *sc)
@ -743,9 +727,9 @@ void free_prealloced_shrinker(struct shrinker *shrinker)
shrinker->name = NULL;
#endif
if (shrinker->flags & SHRINKER_MEMCG_AWARE) {
mutex_lock(&shrinker_mutex);
down_write(&shrinker_rwsem);
unregister_memcg_shrinker(shrinker);
mutex_unlock(&shrinker_mutex);
up_write(&shrinker_rwsem);
return;
}

@ -755,11 +739,11 @@ void free_prealloced_shrinker(struct shrinker *shrinker)

void register_shrinker_prepared(struct shrinker *shrinker)
{
mutex_lock(&shrinker_mutex);
list_add_tail_rcu(&shrinker->list, &shrinker_list);
down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list);
shrinker->flags |= SHRINKER_REGISTERED;
shrinker_debugfs_add(shrinker);
mutex_unlock(&shrinker_mutex);
up_write(&shrinker_rwsem);
}

static int __register_shrinker(struct shrinker *shrinker)
@ -810,16 +794,13 @@ void unregister_shrinker(struct shrinker *shrinker)
if (!(shrinker->flags & SHRINKER_REGISTERED))
return;

mutex_lock(&shrinker_mutex);
list_del_rcu(&shrinker->list);
down_write(&shrinker_rwsem);
list_del(&shrinker->list);
shrinker->flags &= ~SHRINKER_REGISTERED;
if (shrinker->flags & SHRINKER_MEMCG_AWARE)
unregister_memcg_shrinker(shrinker);
debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
mutex_unlock(&shrinker_mutex);

atomic_inc(&shrinker_srcu_generation);
synchronize_srcu(&shrinker_srcu);
up_write(&shrinker_rwsem);

shrinker_debugfs_remove(debugfs_entry, debugfs_id);

@ -831,13 +812,15 @@ EXPORT_SYMBOL(unregister_shrinker);

/**
* synchronize_shrinkers - Wait for all running shrinkers to complete.
*
* This is useful to guarantee that all shrinker invocations have seen an
* update, before freeing memory.
* This is equivalent to calling unregister_shrink() and register_shrinker(),
* but atomically and with less overhead. This is useful to guarantee that all
* shrinker invocations have seen an update, before freeing memory, similar to
* rcu.
*/
void synchronize_shrinkers(void)
{
atomic_inc(&shrinker_srcu_generation);
synchronize_srcu(&shrinker_srcu);
down_write(&shrinker_rwsem);
up_write(&shrinker_rwsem);
}
EXPORT_SYMBOL(synchronize_shrinkers);

@ -946,20 +929,19 @@ static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid,
{
struct shrinker_info *info;
unsigned long ret, freed = 0;
int srcu_idx, generation;
int i = 0;
int i;

if (!mem_cgroup_online(memcg))
return 0;

again:
srcu_idx = srcu_read_lock(&shrinker_srcu);
info = shrinker_info_srcu(memcg, nid);
if (!down_read_trylock(&shrinker_rwsem))
return 0;

info = shrinker_info_protected(memcg, nid);
if (unlikely(!info))
goto unlock;

generation = atomic_read(&shrinker_srcu_generation);
for_each_set_bit_from(i, info->map, info->map_nr_max) {
for_each_set_bit(i, info->map, info->map_nr_max) {
struct shrink_control sc = {
.gfp_mask = gfp_mask,
.nid = nid,
@ -1005,14 +987,14 @@ again:
set_shrinker_bit(memcg, nid, i);
}
freed += ret;
if (atomic_read(&shrinker_srcu_generation) != generation) {
srcu_read_unlock(&shrinker_srcu, srcu_idx);
i++;
goto again;

if (rwsem_is_contended(&shrinker_rwsem)) {
freed = freed ? : 1;
break;
}
}
unlock:
srcu_read_unlock(&shrinker_srcu, srcu_idx);
up_read(&shrinker_rwsem);
return freed;
}
#else /* CONFIG_MEMCG */
@ -1049,7 +1031,6 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
{
unsigned long ret, freed = 0;
struct shrinker *shrinker;
int srcu_idx, generation;

/*
* The root memcg might be allocated even though memcg is disabled
@ -1061,11 +1042,10 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg))
return shrink_slab_memcg(gfp_mask, nid, memcg, priority);

srcu_idx = srcu_read_lock(&shrinker_srcu);
if (!down_read_trylock(&shrinker_rwsem))
goto out;

generation = atomic_read(&shrinker_srcu_generation);
list_for_each_entry_srcu(shrinker, &shrinker_list, list,
srcu_read_lock_held(&shrinker_srcu)) {
list_for_each_entry(shrinker, &shrinker_list, list) {
struct shrink_control sc = {
.gfp_mask = gfp_mask,
.nid = nid,
@ -1076,14 +1056,19 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid,
if (ret == SHRINK_EMPTY)
ret = 0;
freed += ret;

if (atomic_read(&shrinker_srcu_generation) != generation) {
/*
* Bail out if someone want to register a new shrinker to
* prevent the registration from being stalled for long periods
* by parallel ongoing shrinking.
*/
if (rwsem_is_contended(&shrinker_rwsem)) {
freed = freed ? : 1;
break;
}
}

srcu_read_unlock(&shrinker_srcu, srcu_idx);
up_read(&shrinker_rwsem);
out:
cond_resched();
return freed;
}
@ -4774,10 +4759,11 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
{
int seg;
int old, new;
unsigned long flags;
int bin = get_random_u32_below(MEMCG_NR_BINS);
struct pglist_data *pgdat = lruvec_pgdat(lruvec);

spin_lock(&pgdat->memcg_lru.lock);
spin_lock_irqsave(&pgdat->memcg_lru.lock, flags);

VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));

@ -4812,7 +4798,7 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op)
if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq))
WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);

spin_unlock(&pgdat->memcg_lru.lock);
spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags);
}

void lru_gen_online_memcg(struct mem_cgroup *memcg)
@ -4825,7 +4811,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)
struct pglist_data *pgdat = NODE_DATA(nid);
struct lruvec *lruvec = get_lruvec(memcg, nid);

spin_lock(&pgdat->memcg_lru.lock);
spin_lock_irq(&pgdat->memcg_lru.lock);

VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list));

@ -4836,7 +4822,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg)

lruvec->lrugen.gen = gen;

spin_unlock(&pgdat->memcg_lru.lock);
spin_unlock_irq(&pgdat->memcg_lru.lock);
}
}

@ -4860,7 +4846,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
struct pglist_data *pgdat = NODE_DATA(nid);
struct lruvec *lruvec = get_lruvec(memcg, nid);

spin_lock(&pgdat->memcg_lru.lock);
spin_lock_irq(&pgdat->memcg_lru.lock);

VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list));

@ -4872,7 +4858,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg)
if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq))
WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1);

spin_unlock(&pgdat->memcg_lru.lock);
spin_unlock_irq(&pgdat->memcg_lru.lock);
}
}

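Since this series moves shrinker registration and unregistration back under shrinker_rwsem, it helps to recall the registration API those paths serve. Below is a minimal, hedged skeleton of a shrinker; the callbacks are illustrative stubs and the two-argument register_shrinker() signature is assumed for this kernel series:

    #include <linux/module.h>
    #include <linux/shrinker.h>

    static unsigned long demo_count(struct shrinker *s, struct shrink_control *sc)
    {
        /* Report how many objects could be freed; 0 means "nothing to do". */
        return 0;
    }

    static unsigned long demo_scan(struct shrinker *s, struct shrink_control *sc)
    {
        /* Free up to sc->nr_to_scan objects and return how many went away;
         * SHRINK_STOP tells reclaim to leave this shrinker alone for now. */
        return SHRINK_STOP;
    }

    static struct shrinker demo_shrinker = {
        .count_objects = demo_count,
        .scan_objects  = demo_scan,
        .seeks         = DEFAULT_SEEKS,
    };

    static int __init demo_init(void)
    {
        /* The name is what shows up under the debugfs entries discussed above. */
        return register_shrinker(&demo_shrinker, "demo-shrinker");
    }

    static void __exit demo_exit(void)
    {
        unregister_shrinker(&demo_shrinker);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
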
11 mm/zswap.c
@ -1229,9 +1229,16 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
goto reject;
}

/*
* XXX: zswap reclaim does not work with cgroups yet. Without a
* cgroup-aware entry LRU, we will push out entries system-wide based on
* local cgroup limits.
*/
objcg = get_obj_cgroup_from_page(page);
if (objcg && !obj_cgroup_may_zswap(objcg))
goto shrink;
if (objcg && !obj_cgroup_may_zswap(objcg)) {
ret = -ENOMEM;
goto reject;
}

/* reclaim space if needed */
if (zswap_is_full()) {

@ -48,12 +48,12 @@ if IS_BUILTIN(CONFIG_COMMON_CLK):
LX_GDBPARSED(CLK_GET_RATE_NOCACHE)

/* linux/fs.h */
LX_VALUE(SB_RDONLY)
LX_VALUE(SB_SYNCHRONOUS)
LX_VALUE(SB_MANDLOCK)
LX_VALUE(SB_DIRSYNC)
LX_VALUE(SB_NOATIME)
LX_VALUE(SB_NODIRATIME)
LX_GDBPARSED(SB_RDONLY)
LX_GDBPARSED(SB_SYNCHRONOUS)
LX_GDBPARSED(SB_MANDLOCK)
LX_GDBPARSED(SB_DIRSYNC)
LX_GDBPARSED(SB_NOATIME)
LX_GDBPARSED(SB_NODIRATIME)

/* linux/htimer.h */
LX_GDBPARSED(hrtimer_resolution)

@ -63,11 +63,11 @@ fi

# Extract GFP flags from the kernel source
TMPFILE=`mktemp -t gfptranslate-XXXXXX` || exit 1
grep -q ___GFP $SOURCE/include/linux/gfp.h
grep -q ___GFP $SOURCE/include/linux/gfp_types.h
if [ $? -eq 0 ]; then
grep "^#define ___GFP" $SOURCE/include/linux/gfp.h | sed -e 's/u$//' | grep -v GFP_BITS > $TMPFILE
grep "^#define ___GFP" $SOURCE/include/linux/gfp_types.h | sed -e 's/u$//' | grep -v GFP_BITS > $TMPFILE
else
grep "^#define __GFP" $SOURCE/include/linux/gfp.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE
grep "^#define __GFP" $SOURCE/include/linux/gfp_types.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE
fi

# Parse the flags

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0

CFLAGS += -I. -I../../include -g -Og -Wall -D_LGPL_SOURCE -fsanitize=address \
-fsanitize=undefined
CFLAGS += -I. -I../../include -I../../../lib -g -Og -Wall \
-D_LGPL_SOURCE -fsanitize=address -fsanitize=undefined
LDFLAGS += -fsanitize=address -fsanitize=undefined
LDLIBS+= -lpthread -lurcu
TARGETS = main idr-test multiorder xarray maple
@ -49,6 +49,7 @@ $(OFILES): Makefile *.h */*.h generated/map-shift.h generated/bit-length.h \
../../../include/linux/xarray.h \
../../../include/linux/maple_tree.h \
../../../include/linux/radix-tree.h \
../../../lib/radix-tree.h \
../../../include/linux/idr.h

radix-tree.c: ../../../lib/radix-tree.c

@ -0,0 +1,24 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger trace action with dynamic string param (legacy stack)
# requires: set_event synthetic_events events/sched/sched_process_exec/hist "long[] stack' >> synthetic_events":README

fail() { #msg
echo $1
exit_fail
}

echo "Test create synthetic event with stack"

# Test the old stacktrace keyword (for backward compatibility)
echo 's:wake_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events
echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace if prev_state == 1||prev_state == 2' >> events/sched/sched_switch/trigger
echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(wake_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger
echo 1 > events/synthetic/wake_lat/enable
sleep 1

if ! grep -q "=>.*sched" trace; then
fail "Failed to create synthetic event with stack"
fi

exit 0

@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: event trigger - test inter-event histogram trigger trace action with dynamic string param
# requires: set_event synthetic_events events/sched/sched_process_exec/hist "long[]' >> synthetic_events":README
# requires: set_event synthetic_events events/sched/sched_process_exec/hist "can be any field, or the special string 'common_stacktrace'":README

fail() { #msg
echo $1
@ -10,9 +10,8 @@ fail() { #msg

echo "Test create synthetic event with stack"

echo 's:wake_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events
echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace if prev_state == 1||prev_state == 2' >> events/sched/sched_switch/trigger
echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=common_stacktrace if prev_state == 1||prev_state == 2' >> events/sched/sched_switch/trigger
echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(wake_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger
echo 1 > events/synthetic/wake_lat/enable
sleep 1

@ -5,12 +5,15 @@ LOCAL_HDRS += $(selfdir)/mm/local_config.h $(top_srcdir)/mm/gup_test.h

include local_config.mk

ifeq ($(ARCH),)

ifeq ($(CROSS_COMPILE),)
uname_M := $(shell uname -m 2>/dev/null || echo not)
else
uname_M := $(shell echo $(CROSS_COMPILE) | grep -o '^[a-z0-9]\+')
endif
MACHINE ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/ppc64/')
ARCH ?= $(shell echo $(uname_M) | sed -e 's/aarch64.*/arm64/' -e 's/ppc64.*/ppc64/')
endif

# Without this, failed build products remain, with up-to-date timestamps,
# thus tricking Make (and you!) into believing that All Is Well, in subsequent
@ -66,7 +69,7 @@ TEST_GEN_PROGS += ksm_tests
TEST_GEN_PROGS += ksm_functional_tests
TEST_GEN_PROGS += mdwe_test

ifeq ($(MACHINE),x86_64)
ifeq ($(ARCH),x86_64)
CAN_BUILD_I386 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_32bit_program.c -m32)
CAN_BUILD_X86_64 := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_64bit_program.c)
CAN_BUILD_WITH_NOPIE := $(shell ./../x86/check_cc.sh "$(CC)" ../x86/trivial_program.c -no-pie)
@ -88,13 +91,13 @@ TEST_GEN_PROGS += $(BINARIES_64)
endif
else

ifneq (,$(findstring $(MACHINE),ppc64))
ifneq (,$(findstring $(ARCH),ppc64))
TEST_GEN_PROGS += protection_keys
endif

endif

ifneq (,$(filter $(MACHINE),arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sparc64 x86_64))
ifneq (,$(filter $(ARCH),arm64 ia64 mips64 parisc64 ppc64 riscv64 s390x sparc64 x86_64))
TEST_GEN_PROGS += va_high_addr_switch
TEST_GEN_PROGS += virtual_address_range
TEST_GEN_PROGS += write_to_hugetlbfs
@ -113,7 +116,7 @@ $(TEST_GEN_PROGS): vm_util.c
$(OUTPUT)/uffd-stress: uffd-common.c
$(OUTPUT)/uffd-unit-tests: uffd-common.c

ifeq ($(MACHINE),x86_64)
ifeq ($(ARCH),x86_64)
BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32))
BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64))
