ring-buffer: Align meta-page to sub-buffers for improved TLB usage
Previously, the mapped ring-buffer layout caused misalignment between the meta-page and sub-buffers when the sub-buffer size was not a multiple of PAGE_SIZE. This prevented hardware with larger TLB entries from utilizing them effectively. Add padding with the zero-page between the meta-page and sub-buffers. Also update the ring-buffer map_test to verify that padding. Link: https://lore.kernel.org/20240628104611.1443542-1-vdonnefort@google.com Signed-off-by: Vincent Donnefort <vdonnefort@google.com> Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
This commit is contained in:
parent
d0f2d6e951
commit
eb2dcde9f9
@ -6852,10 +6852,10 @@ static void rb_setup_ids_meta_page(struct ring_buffer_per_cpu *cpu_buffer,
|
||||
/* install subbuf ID to kern VA translation */
|
||||
cpu_buffer->subbuf_ids = subbuf_ids;
|
||||
|
||||
meta->meta_page_size = PAGE_SIZE;
|
||||
meta->meta_struct_len = sizeof(*meta);
|
||||
meta->nr_subbufs = nr_subbufs;
|
||||
meta->subbuf_size = cpu_buffer->buffer->subbuf_size + BUF_PAGE_HDR_SIZE;
|
||||
meta->meta_page_size = meta->subbuf_size;
|
||||
|
||||
rb_update_meta_page(cpu_buffer);
|
||||
}
|
||||
@ -6949,6 +6949,12 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
|
||||
!(vma->vm_flags & VM_MAYSHARE))
|
||||
return -EPERM;
|
||||
|
||||
subbuf_order = cpu_buffer->buffer->subbuf_order;
|
||||
subbuf_pages = 1 << subbuf_order;
|
||||
|
||||
if (subbuf_order && pgoff % subbuf_pages)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Make sure the mapping cannot become writable later. Also tell the VM
|
||||
* to not touch these pages (VM_DONTCOPY | VM_DONTEXPAND).
|
||||
@ -6958,11 +6964,8 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
|
||||
|
||||
lockdep_assert_held(&cpu_buffer->mapping_lock);
|
||||
|
||||
subbuf_order = cpu_buffer->buffer->subbuf_order;
|
||||
subbuf_pages = 1 << subbuf_order;
|
||||
|
||||
nr_subbufs = cpu_buffer->nr_pages + 1; /* + reader-subbuf */
|
||||
nr_pages = ((nr_subbufs) << subbuf_order) - pgoff + 1; /* + meta-page */
|
||||
nr_pages = ((nr_subbufs + 1) << subbuf_order) - pgoff; /* + meta-page */
|
||||
|
||||
nr_vma_pages = vma_pages(vma);
|
||||
if (!nr_vma_pages || nr_vma_pages > nr_pages)
|
||||
@ -6975,20 +6978,24 @@ static int __rb_map_vma(struct ring_buffer_per_cpu *cpu_buffer,
|
||||
return -ENOMEM;
|
||||
|
||||
if (!pgoff) {
|
||||
unsigned long meta_page_padding;
|
||||
|
||||
pages[p++] = virt_to_page(cpu_buffer->meta_page);
|
||||
|
||||
/*
|
||||
* TODO: Align sub-buffers on their size, once
|
||||
* vm_insert_pages() supports the zero-page.
|
||||
* Pad with the zero-page to align the meta-page with the
|
||||
* sub-buffers.
|
||||
*/
|
||||
meta_page_padding = subbuf_pages - 1;
|
||||
while (meta_page_padding-- && p < nr_pages) {
|
||||
unsigned long __maybe_unused zero_addr =
|
||||
vma->vm_start + (PAGE_SIZE * p);
|
||||
|
||||
pages[p++] = ZERO_PAGE(zero_addr);
|
||||
}
|
||||
} else {
|
||||
/* Skip the meta-page */
|
||||
pgoff--;
|
||||
|
||||
if (pgoff % subbuf_pages) {
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
pgoff -= subbuf_pages;
|
||||
|
||||
s += pgoff / subbuf_pages;
|
||||
}
|
||||
|
@ -228,6 +228,20 @@ TEST_F(map, data_mmap)
|
||||
data = mmap(NULL, data_len, PROT_READ, MAP_SHARED,
|
||||
desc->cpu_fd, meta_len);
|
||||
ASSERT_EQ(data, MAP_FAILED);
|
||||
|
||||
/* Verify meta-page padding */
|
||||
if (desc->meta->meta_page_size > getpagesize()) {
|
||||
void *addr;
|
||||
|
||||
data_len = desc->meta->meta_page_size;
|
||||
data = mmap(NULL, data_len,
|
||||
PROT_READ, MAP_SHARED, desc->cpu_fd, 0);
|
||||
ASSERT_NE(data, MAP_FAILED);
|
||||
|
||||
addr = (void *)((unsigned long)data + getpagesize());
|
||||
ASSERT_EQ(*((int *)addr), 0);
|
||||
munmap(data, data_len);
|
||||
}
|
||||
}
|
||||
|
||||
FIXTURE(snapshot) {
|
||||
|
Loading…
Reference in New Issue
Block a user