drm/panthor: Fix firmware initialization on systems with a page size > 4k

The system and GPU MMU page size might differ, which becomes a
problem for FW sections that need to be mapped at explicit addresses
since our PAGE_SIZE alignment might cover a VA range that's
expected to be used for another section.

Make sure we never map more than we need.
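
To make the failure mode concrete, here is a sketch with hypothetical
numbers (64K kernel pages vs. 4K GPU pages; the section layout is made
up, not taken from a real FW image):

    /* FW section A expects VA [0x0, 0x1000), section B expects
     * [0x1000, 0x2000).
     */
    size_t a_size  = 0x1000;
    size_t old_map = ALIGN(a_size, 0x10000); /* PAGE_SIZE rounding: 64K */
    size_t new_map = ALIGN(a_size, 0x1000);  /* VM page rounding:    4K */

    /* old_map covers [0x0, 0x10000) and swallows the VA range reserved
     * for section B; new_map covers exactly [0x0, 0x1000).
     */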

Changes in v3:
- Add R-bs

Changes in v2:
- Plan for per-VM page sizes so the MCU VM and user VM can
  have different page sizes

Fixes: 2718d91816 ("drm/panthor: Add the FW logical block")
Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20241030150231.768949-1-boris.brezillon@collabora.com

diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -487,6 +487,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
 					  struct panthor_fw_binary_iter *iter,
 					  u32 ehdr)
 {
+	ssize_t vm_pgsz = panthor_vm_page_size(ptdev->fw->vm);
 	struct panthor_fw_binary_section_entry_hdr hdr;
 	struct panthor_fw_section *section;
 	u32 section_size;
@@ -515,8 +516,7 @@ static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
 		return -EINVAL;
 	}
 
-	if ((hdr.va.start & ~PAGE_MASK) != 0 ||
-	    (hdr.va.end & ~PAGE_MASK) != 0) {
+	if (!IS_ALIGNED(hdr.va.start, vm_pgsz) || !IS_ALIGNED(hdr.va.end, vm_pgsz)) {
 		drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n",
 			hdr.va.start, hdr.va.end);
 		return -EINVAL;

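The check above relies on the kernel's IS_ALIGNED() helper rather than
masking with ~PAGE_MASK: the latter can only test CPU PAGE_SIZE
alignment, while IS_ALIGNED() takes any power-of-two boundary, which is
what allows a per-VM page size. A quick sketch (values hypothetical):

    IS_ALIGNED(0x10000, 0x1000); /* true:  64K VA on a 4K-page VM  */
    IS_ALIGNED(0x10800, 0x1000); /* false: straddles a 4K boundary */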
diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c
--- a/drivers/gpu/drm/panthor/panthor_gem.c
+++ b/drivers/gpu/drm/panthor/panthor_gem.c
@@ -44,8 +44,7 @@ void panthor_kernel_bo_destroy(struct panthor_kernel_bo *bo)
 			 to_panthor_bo(bo->obj)->exclusive_vm_root_gem != panthor_vm_root_gem(vm)))
 		goto out_free_bo;
 
-	ret = panthor_vm_unmap_range(vm, bo->va_node.start,
-				     panthor_kernel_bo_size(bo));
+	ret = panthor_vm_unmap_range(vm, bo->va_node.start, bo->va_node.size);
 	if (ret)
 		goto out_free_bo;
 
@@ -95,10 +94,16 @@ panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
 	}
 
 	bo = to_panthor_bo(&obj->base);
-	size = obj->base.size;
 	kbo->obj = &obj->base;
 	bo->flags = bo_flags;
 
+	/* The system and GPU MMU page size might differ, which becomes a
+	 * problem for FW sections that need to be mapped at explicit address
+	 * since our PAGE_SIZE alignment might cover a VA range that's
+	 * expected to be used for another section.
+	 * Make sure we never map more than we need.
+	 */
+	size = ALIGN(size, panthor_vm_page_size(vm));
 	ret = panthor_vm_alloc_va(vm, gpu_va, size, &kbo->va_node);
 	if (ret)
 		goto err_put_obj;

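Note how the two hunks above stay consistent: the shmem backing object
is still rounded up to the CPU page size by the GEM layer, but the GPU
VA reservation now only covers the VM-page-aligned size, and the unmap
path uses bo->va_node.size so map and unmap agree. Roughly, assuming a
hypothetical 64K/4K split:

    size_t req = 0x1000;  /* caller-requested BO size */
    /* obj->base.size is still 0x10000 (CPU-page rounded by shmem),  */
    /* but the VA node is ALIGN(req, vm_pgsz) == 0x1000, and that is */
    /* exactly what gets mapped and later unmapped.                  */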
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -826,6 +826,14 @@ void panthor_vm_idle(struct panthor_vm *vm)
 	mutex_unlock(&ptdev->mmu->as.slots_lock);
 }
 
+u32 panthor_vm_page_size(struct panthor_vm *vm)
+{
+	const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
+	u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;
+
+	return 1u << pg_shift;
+}
+
 static void panthor_vm_stop(struct panthor_vm *vm)
 {
 	drm_sched_stop(&vm->sched, NULL);
@@ -1025,12 +1033,13 @@ int
 panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
 		    struct drm_mm_node *va_node)
 {
+	ssize_t vm_pgsz = panthor_vm_page_size(vm);
 	int ret;
 
-	if (!size || (size & ~PAGE_MASK))
+	if (!size || !IS_ALIGNED(size, vm_pgsz))
 		return -EINVAL;
 
-	if (va != PANTHOR_VM_KERNEL_AUTO_VA && (va & ~PAGE_MASK))
+	if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
 		return -EINVAL;
 
 	mutex_lock(&vm->mm_lock);
@@ -2366,11 +2375,12 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
 			       const struct drm_panthor_vm_bind_op *op,
 			       struct panthor_vm_op_ctx *op_ctx)
 {
+	ssize_t vm_pgsz = panthor_vm_page_size(vm);
 	struct drm_gem_object *gem;
 	int ret;
 
 	/* Aligned on page size. */
-	if ((op->va | op->size) & ~PAGE_MASK)
+	if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
 		return -EINVAL;
 
 	switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {

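For reference, the new panthor_vm_page_size() helper picks the smallest
page size supported by the VM's io-pgtable configuration: pgsize_bitmap
has one bit set per supported size, and ffs() returns the 1-based index
of the lowest set bit. A standalone userspace sketch (the bitmap value
is hypothetical):

    #include <stdio.h>
    #include <strings.h> /* ffs() */

    int main(void)
    {
            /* 4K | 2M | 1G, as an io-pgtable config might advertise */
            unsigned long pgsize_bitmap = 0x1000 | 0x200000 | 0x40000000;
            unsigned int pg_shift = ffs(pgsize_bitmap) - 1; /* 12 */

            printf("VM page size: 0x%x\n", 1u << pg_shift); /* 0x1000 */
            return 0;
    }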
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -30,6 +30,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);
 
 int panthor_vm_active(struct panthor_vm *vm);
 void panthor_vm_idle(struct panthor_vm *vm);
+u32 panthor_vm_page_size(struct panthor_vm *vm);
 int panthor_vm_as(struct panthor_vm *vm);
 int panthor_vm_flush_all(struct panthor_vm *vm);
 