Merge tag 'drm-fixes-2024-04-26' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Regular weekly merge request, mostly amdgpu and misc bits in
  xe/etnaviv/gma500 and some core changes. Nothing too outlandish, seems
  to be about normal for this time of release.

  atomic-helpers:
   - Fix memory leak in drm_format_conv_state_copy()

  fbdev:
   - fbdefio: Fix address calculation

  amdgpu:
   - Suspend/resume fix
   - Don't expose gpu_od directory if it's empty
   - SDMA 4.4.2 fix
   - VPE fix
   - BO eviction fix
   - UMSCH fix
   - SMU 13.0.6 reset fixes
   - GPUVM flush accounting fix
   - SDMA 5.2 fix
   - Fix possible UAF in mes code

  amdkfd:
   - Eviction fence handling fix
   - Fix memory leak when GPU memory allocation fails
   - Fix dma-buf validation
   - Fix rescheduling of restore worker
   - SVM fix

  gma500:
   - Fix crash during boot

  etnaviv:
   - fix GC7000 TX clock gating
   - revert NPU UAPI changes

  xe:
   - Fix error paths on managed allocations
   - Fix PF/VF relay messages"

* tag 'drm-fixes-2024-04-26' of https://gitlab.freedesktop.org/drm/kernel: (23 commits)
  Revert "drm/etnaviv: Expose a few more chipspecs to userspace"
  drm/etnaviv: fix tx clock gating on some GC7000 variants
  drm/xe/guc: Fix arguments passed to relay G2H handlers
  drm/xe: call free_gsc_pkt only once on action add failure
  drm/xe: Remove sysfs only once on action add failure
  fbdev: fix incorrect address computation in deferred IO
  drm/amdgpu/mes: fix use-after-free issue
  drm/amdgpu/sdma5.2: use legacy HDP flush for SDMA2/3
  drm/amdgpu: Fix the ring buffer size for queue VM flush
  drm/amdkfd: Add VRAM accounting for SVM migration
  drm/amd/pm: Restore config space after reset
  drm/amdgpu/umsch: don't execute umsch test when GPU is in reset/suspend
  drm/amdkfd: Fix rescheduling of restore worker
  drm/amdgpu: Update BO eviction priorities
  drm/amdgpu/vpe: fix vpe dpm setup failed
  drm/amdgpu: Assign correct bits for SDMA HDP flush
  drm/amdgpu/pm: Remove gpu_od if it's an empty directory
  drm/amdkfd: make sure VM is ready for updating operations
  drm/amdgpu: Fix leak when GPU memory allocation fails
  drm/amdkfd: Fix eviction fence handling
  ...
commit 61ef6208e0
Linus Torvalds, 2024-04-26 10:47:18 -07:00
32 changed files with 130 additions and 242 deletions

@@ -1854,6 +1854,7 @@ err_node_allow:
err_bo_create:
amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id);
err_reserve_limit:
amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
if (gobj)
drm_gem_object_put(gobj);
@@ -2900,13 +2901,12 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
amdgpu_sync_create(&sync_obj);
/* Validate BOs and map them to GPUVM (update VM page tables). */
/* Validate BOs managed by KFD */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
struct amdgpu_bo *bo = mem->bo;
uint32_t domain = mem->domain;
struct kfd_mem_attachment *attachment;
struct dma_resv_iter cursor;
struct dma_fence *fence;
@@ -2931,6 +2931,25 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
goto validate_map_fail;
}
}
}
if (failed_size)
pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
/* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
* validations above would invalidate DMABuf imports again.
*/
ret = process_validate_vms(process_info, &exec.ticket);
if (ret) {
pr_debug("Validating VMs failed, ret: %d\n", ret);
goto validate_map_fail;
}
/* Update mappings managed by KFD. */
list_for_each_entry(mem, &process_info->kfd_bo_list,
validate_list) {
struct kfd_mem_attachment *attachment;
list_for_each_entry(attachment, &mem->attachments, list) {
if (!attachment->is_mapped)
continue;
@@ -2947,18 +2966,6 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence __rcu *
}
}
if (failed_size)
pr_debug("0x%lx/0x%lx in system\n", failed_size, total_size);
/* Validate PDs, PTs and evicted DMABuf imports last. Otherwise BO
* validations above would invalidate DMABuf imports again.
*/
ret = process_validate_vms(process_info, &exec.ticket);
if (ret) {
pr_debug("Validating VMs failed, ret: %d\n", ret);
goto validate_map_fail;
}
/* Update mappings not managed by KFD */
list_for_each_entry(peer_vm, &process_info->vm_list_head,
vm_list_node) {

@@ -1132,6 +1132,7 @@ void amdgpu_mes_remove_ring(struct amdgpu_device *adev,
return;
amdgpu_mes_remove_hw_queue(adev, ring->hw_queue_id);
del_timer_sync(&ring->fence_drv.fallback_timer);
amdgpu_ring_fini(ring);
kfree(ring);
}

@@ -605,6 +605,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
else
amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
bo->tbo.priority = 2;
else if (!(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE))
bo->tbo.priority = 1;
if (!bp->destroy)

@@ -774,6 +774,9 @@ static int umsch_mm_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (amdgpu_in_reset(adev) || adev->in_s0ix || adev->in_suspend)
return 0;
return umsch_mm_test(adev);
}

@@ -205,7 +205,7 @@ disable_dpm:
dpm_ctl &= 0xfffffffe; /* Disable DPM */
WREG32(vpe_get_reg_offset(vpe, 0, vpe->regs.dpm_enable), dpm_ctl);
dev_dbg(adev->dev, "%s: disable vpe dpm\n", __func__);
return 0;
return -EINVAL;
}
int amdgpu_vpe_psp_update_sram(struct amdgpu_device *adev)

@@ -9186,7 +9186,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* VM_FLUSH */
4 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
4 + /* double SWITCH_BUFFER,
@@ -9276,7 +9276,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
7 + /* gfx_v10_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v10_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v10_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v10_0_ring_emit_ib_compute */
.emit_ib = gfx_v10_0_ring_emit_ib_compute,

@@ -6192,7 +6192,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
7 + /* PIPELINE_SYNC */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* VM_FLUSH */
4 + /* VM_FLUSH */
8 + /* FENCE for VM_FLUSH */
20 + /* GDS switch */
5 + /* COND_EXEC */
@@ -6278,7 +6278,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
7 + /* gfx_v11_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v11_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v11_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v11_0_ring_emit_ib_compute */
.emit_ib = gfx_v11_0_ring_emit_ib_compute,

@@ -6981,7 +6981,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8 + /* gfx_v9_0_ring_emit_fence x3 for user fence, vm fence */
7 + /* gfx_v9_0_emit_mem_sync */
5 + /* gfx_v9_0_emit_wave_limit for updating mmSPI_WCL_PIPE_PERCENT_GFX register */
@@ -7019,7 +7018,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_kiq = {
7 + /* gfx_v9_0_ring_emit_pipeline_sync */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 5 +
SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 7 +
2 + /* gfx_v9_0_ring_emit_vm_flush */
8 + 8 + 8, /* gfx_v9_0_ring_emit_fence_kiq x3 for user fence, vm fence */
.emit_ib_size = 7, /* gfx_v9_0_ring_emit_ib_compute */
.emit_fence = gfx_v9_0_ring_emit_fence_kiq,

@@ -368,7 +368,8 @@ static void sdma_v4_4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0
<< (ring->me % adev->sdma.num_inst_per_aid);
sdma_v4_4_2_wait_reg_mem(ring, 0, 1,
adev->nbio.funcs->get_hdp_flush_done_offset(adev),
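
The one-line change above matters on multi-AID parts: ring->me numbers SDMA instances globally across AIDs, while each AID's NBIO HDP-flush register presumably only carries ref_and_mask bits for its local instances, so the shift has to wrap at adev->sdma.num_inst_per_aid. A minimal user-space sketch of the arithmetic, with the bit position and per-AID instance count assumed purely for illustration:

#include <stdio.h>

/* Assumed values, for the demo only; not the real register layout. */
#define REF_AND_MASK_SDMA0 (1u << 10)  /* hypothetical bit for SDMA instance 0 */
#define NUM_INST_PER_AID   4           /* hypothetical SDMA instances per AID */

int main(void)
{
	for (unsigned int me = 0; me < 8; me++) {
		unsigned int old_mask = REF_AND_MASK_SDMA0 << me;
		unsigned int new_mask = REF_AND_MASK_SDMA0 << (me % NUM_INST_PER_AID);

		/* For me >= NUM_INST_PER_AID the old shift walks past the
		 * per-AID mask bits; the modulo keeps it in range. */
		printf("me=%u old=0x%08x new=0x%08x\n", me, old_mask, new_mask);
	}
	return 0;
}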

@@ -280,17 +280,21 @@ static void sdma_v5_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
u32 ref_and_mask = 0;
const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg;
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
if (ring->me > 1) {
amdgpu_asic_flush_hdp(adev, ring);
} else {
ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me;
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2);
amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2);
amdgpu_ring_write(ring, ref_and_mask); /* reference */
amdgpu_ring_write(ring, ref_and_mask); /* mask */
amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
}
/**

@@ -144,6 +144,12 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
WREG32(vpe_get_reg_offset(vpe, j, regVPEC_CNTL), ret);
}
/* setup collaborate mode */
vpe_v6_1_set_collaborate_mode(vpe, true);
/* setup DPM */
if (amdgpu_vpe_configure_dpm(vpe))
dev_warn(adev->dev, "VPE failed to enable DPM\n");
/*
* For VPE 6.1.1, still only need to add master's offset, and psp will apply it to slave as well.
* Here use instance 0 as master.
@@ -159,11 +165,7 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
adev->vpe.cmdbuf_cpu_addr[0] = f32_offset;
adev->vpe.cmdbuf_cpu_addr[1] = f32_cntl;
amdgpu_vpe_psp_update_sram(adev);
vpe_v6_1_set_collaborate_mode(vpe, true);
amdgpu_vpe_configure_dpm(vpe);
return 0;
return amdgpu_vpe_psp_update_sram(adev);
}
vpe_hdr = (const struct vpe_firmware_header_v1_0 *)adev->vpe.fw->data;
@@ -196,8 +198,6 @@ static int vpe_v6_1_load_microcode(struct amdgpu_vpe *vpe)
}
vpe_v6_1_halt(vpe, false);
vpe_v6_1_set_collaborate_mode(vpe, true);
amdgpu_vpe_configure_dpm(vpe);
return 0;
}

@@ -509,10 +509,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
start = start_mgr << PAGE_SHIFT;
end = (last_mgr + 1) << PAGE_SHIFT;
r = amdgpu_amdkfd_reserve_mem_limit(node->adev,
prange->npages * PAGE_SIZE,
KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
node->xcp ? node->xcp->id : 0);
if (r) {
dev_dbg(node->adev->dev, "failed to reserve VRAM, r: %ld\n", r);
return -ENOSPC;
}
r = svm_range_vram_node_new(node, prange, true);
if (r) {
dev_dbg(node->adev->dev, "fail %ld to alloc vram\n", r);
return r;
goto out;
}
ttm_res_offset = (start_mgr - prange->start + prange->offset) << PAGE_SHIFT;
@@ -545,6 +554,11 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
svm_range_vram_node_free(prange);
}
out:
amdgpu_amdkfd_unreserve_mem_limit(node->adev,
prange->npages * PAGE_SIZE,
KFD_IOC_ALLOC_MEM_FLAGS_VRAM,
node->xcp ? node->xcp->id : 0);
return r < 0 ? r : 0;
}

@@ -1922,6 +1922,8 @@ static int signal_eviction_fence(struct kfd_process *p)
rcu_read_lock();
ef = dma_fence_get_rcu_safe(&p->ef);
rcu_read_unlock();
if (!ef)
return -EINVAL;
ret = dma_fence_signal(ef);
dma_fence_put(ef);
@@ -1949,10 +1951,9 @@ static void evict_process_worker(struct work_struct *work)
* they are responsible for stopping the queues and scheduling
* the restore work.
*/
if (!signal_eviction_fence(p))
queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS));
else
if (signal_eviction_fence(p) ||
mod_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
pr_debug("Finished evicting pasid 0x%x\n", p->pasid);
@@ -2011,9 +2012,9 @@ static void restore_process_worker(struct work_struct *work)
if (ret) {
pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n",
p->pasid, PROCESS_BACK_OFF_TIME_MS);
ret = queue_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS));
WARN(!ret, "reschedule restore work failed\n");
if (mod_delayed_work(kfd_restore_wq, &p->restore_work,
msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)))
kfd_process_restore_queues(p);
}
}
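
The rescheduling fix above leans on mod_delayed_work()'s return value, which is false when the work item was idle and got queued, and true when the item was already pending and only its timer was modified. A stubbed user-space sketch of the resulting decision in evict_process_worker(); the *_stub helpers are stand-ins for illustration, not kernel API:

#include <stdbool.h>
#include <stdio.h>

static int signal_eviction_fence_stub(bool have_fence)
{
	return have_fence ? 0 : -1;	/* 0 on success, like the real helper */
}

static bool mod_delayed_work_stub(bool already_pending)
{
	return already_pending;		/* true: pending work, timer modified */
}

int main(void)
{
	bool have_fence = true;
	bool restore_already_pending = true;

	/* Fall back to restoring the queues directly if the fence could not
	 * be signaled, or if a restore was already pending and therefore was
	 * not queued fresh for this eviction. */
	if (signal_eviction_fence_stub(have_fence) ||
	    mod_delayed_work_stub(restore_already_pending))
		puts("kfd_process_restore_queues()");
	else
		puts("restore worker queued");
	return 0;
}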

@@ -3426,7 +3426,7 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
mm, KFD_MIGRATE_TRIGGER_PREFETCH);
*migrated = !r;
return r;
return 0;
}
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)

@@ -3029,6 +3029,7 @@ static int dm_resume(void *handle)
dc_stream_release(dm_new_crtc_state->stream);
dm_new_crtc_state->stream = NULL;
}
dm_new_crtc_state->base.color_mgmt_changed = true;
}
for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {

@@ -4261,6 +4261,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev)
}
}
/*
* If gpu_od is the only member in the list, that means gpu_od is an
* empty directory, so remove it.
*/
if (list_is_singular(&adev->pm.od_kobj_list))
goto err_out;
return 0;
err_out:
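
The check above relies on list_is_singular(), which is true for a non-empty list containing exactly one entry: here, gpu_od with no populated subdirectory entries behind it. A self-contained sketch re-implementing just enough of the kernel's circular list to show the predicate (the real helpers live in <linux/list.h>):

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Non-empty, and first == last: exactly one entry. */
static bool list_is_singular(const struct list_head *h)
{
	return h->next != h && h->next == h->prev;
}

int main(void)
{
	struct list_head head, gpu_od, child;

	list_init(&head);
	list_add_tail(&gpu_od, &head);	/* only gpu_od itself in the list */
	printf("singular: %d\n", list_is_singular(&head));	/* 1 -> remove dir */

	list_add_tail(&child, &head);	/* a populated subdirectory as well */
	printf("singular: %d\n", list_is_singular(&head));	/* 0 -> keep dir */
	return 0;
}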

@@ -2294,6 +2294,17 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
return sizeof(*gpu_metrics);
}
static void smu_v13_0_6_restore_pci_config(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int i;
for (i = 0; i < 16; i++)
pci_write_config_dword(adev->pdev, i * 4,
adev->pdev->saved_config_space[i]);
pci_restore_msi_state(adev->pdev);
}
static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
{
int ret = 0, index;
@@ -2315,6 +2326,20 @@ static int smu_v13_0_6_mode2_reset(struct smu_context *smu)
/* Restore the config space saved during init */
amdgpu_device_load_pci_state(adev->pdev);
/* Certain platforms have switches which assign virtual BAR values to
* devices. OS uses the virtual BAR values and device behind the switch
is assigned another BAR value. When device's config space registers
* are queried, switch returns the virtual BAR values. When mode-2 reset
* is performed, switch is unaware of it, and will continue to return
the same virtual values to the OS. This affects
* pci_restore_config_space() API as it doesn't write the value saved if
* the current value read from config space is the same as what is
* saved. As a workaround, make sure the config space is restored
* always.
*/
if (!(adev->flags & AMD_IS_APU))
smu_v13_0_6_restore_pci_config(smu);
dev_dbg(smu->adev->dev, "wait for reset ack\n");
do {
ret = smu_cmn_wait_for_response(smu);

@@ -224,8 +224,8 @@ __drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane,
__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
drm_format_conv_state_copy(&shadow_plane_state->fmtcnv_state,
&new_shadow_plane_state->fmtcnv_state);
drm_format_conv_state_copy(&new_shadow_plane_state->fmtcnv_state,
&shadow_plane_state->fmtcnv_state);
}
EXPORT_SYMBOL(__drm_gem_duplicate_shadow_plane_state);

@@ -164,26 +164,6 @@ int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
*value = gpu->identity.eco_id;
break;
case ETNAVIV_PARAM_GPU_NN_CORE_COUNT:
*value = gpu->identity.nn_core_count;
break;
case ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE:
*value = gpu->identity.nn_mad_per_core;
break;
case ETNAVIV_PARAM_GPU_TP_CORE_COUNT:
*value = gpu->identity.tp_core_count;
break;
case ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE:
*value = gpu->identity.on_chip_sram_size;
break;
case ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE:
*value = gpu->identity.axi_sram_size;
break;
default:
DBG("%s: invalid param: %u", dev_name(gpu->dev), param);
return -EINVAL;
@@ -663,8 +643,8 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
/* Disable TX clock gating on affected core revisions. */
if (etnaviv_is_model_rev(gpu, GC4000, 0x5222) ||
etnaviv_is_model_rev(gpu, GC2000, 0x5108) ||
etnaviv_is_model_rev(gpu, GC2000, 0x6202) ||
etnaviv_is_model_rev(gpu, GC2000, 0x6203))
etnaviv_is_model_rev(gpu, GC7000, 0x6202) ||
etnaviv_is_model_rev(gpu, GC7000, 0x6203))
pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX;
/* Disable SE and RA clock gating on affected core revisions. */

@@ -54,18 +54,6 @@ struct etnaviv_chip_identity {
/* Number of Neural Network cores. */
u32 nn_core_count;
/* Number of MAD units per Neural Network core. */
u32 nn_mad_per_core;
/* Number of Tensor Processing cores. */
u32 tp_core_count;
/* Size in bytes of the SRAM inside the NPU. */
u32 on_chip_sram_size;
/* Size in bytes of the SRAM across the AXI bus. */
u32 axi_sram_size;
/* Size of the vertex cache. */
u32 vertex_cache_size;

@@ -17,10 +17,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 128,
.shader_core_count = 1,
.nn_core_count = 0,
.nn_mad_per_core = 0,
.tp_core_count = 0,
.on_chip_sram_size = 0,
.axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -52,11 +48,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 0,
.nn_mad_per_core = 0,
.tp_core_count = 0,
.on_chip_sram_size = 0,
.axi_sram_size = 0,
.vertex_cache_size = 8,
.vertex_output_buffer_size = 512,
.pixel_pipes = 1,
@@ -89,10 +80,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
.nn_mad_per_core = 0,
.tp_core_count = 0,
.on_chip_sram_size = 0,
.axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -125,10 +112,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
.nn_mad_per_core = 0,
.tp_core_count = 0,
.on_chip_sram_size = 0,
.axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -160,11 +143,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.register_max = 64,
.thread_count = 512,
.shader_core_count = 2,
.nn_core_count = 0,
.nn_mad_per_core = 0,
.tp_core_count = 0,
.on_chip_sram_size = 0,
.axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -197,10 +175,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 1024,
.shader_core_count = 4,
.nn_core_count = 0,
.nn_mad_per_core = 0,
.tp_core_count = 0,
.on_chip_sram_size = 0,
.axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 2,
@@ -233,10 +207,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 8,
.nn_mad_per_core = 64,
.tp_core_count = 4,
.on_chip_sram_size = 524288,
.axi_sram_size = 1048576,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,
@@ -269,10 +239,6 @@ static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
.thread_count = 256,
.shader_core_count = 1,
.nn_core_count = 6,
.nn_mad_per_core = 64,
.tp_core_count = 3,
.on_chip_sram_size = 262144,
.axi_sram_size = 0,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 1,

@@ -34,7 +34,6 @@ gma500_gfx-y += \
psb_intel_lvds.o \
psb_intel_modes.o \
psb_intel_sdvo.o \
psb_lid.o \
psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o

@@ -73,8 +73,7 @@ static int psb_backlight_setup(struct drm_device *dev)
}
psb_intel_lvds_set_brightness(dev, PSB_MAX_BRIGHTNESS);
/* This must occur after the backlight is properly initialised */
psb_lid_timer_init(dev_priv);
return 0;
}
@@ -259,8 +258,6 @@ static int psb_chip_setup(struct drm_device *dev)
static void psb_chip_teardown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}

@@ -162,7 +162,6 @@
#define PSB_NUM_VBLANKS 2
#define PSB_WATCHDOG_DELAY (HZ * 2)
#define PSB_LID_DELAY (HZ / 10)
#define PSB_MAX_BRIGHTNESS 100
@@ -491,11 +490,7 @@ struct drm_psb_private {
/* Hotplug handling */
struct work_struct hotplug_work;
/* LID-Switch */
spinlock_t lid_lock;
struct timer_list lid_timer;
struct psb_intel_opregion opregion;
u32 lid_last_state;
/* Watchdog */
uint32_t apm_reg;
@@ -591,10 +586,6 @@ struct psb_ops {
int i2c_bus; /* I2C bus identifier for Moorestown */
};
/* psb_lid.c */
extern void psb_lid_timer_init(struct drm_psb_private *dev_priv);
extern void psb_lid_timer_takedown(struct drm_psb_private *dev_priv);
/* modesetting */
extern void psb_modeset_init(struct drm_device *dev);
extern void psb_modeset_cleanup(struct drm_device *dev);

@@ -1,80 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2007, Intel Corporation.
*
* Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
**************************************************************************/
#include <linux/spinlock.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
#include "psb_reg.h"
static void psb_lid_timer_func(struct timer_list *t)
{
struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
struct drm_device *dev = (struct drm_device *)&dev_priv->dev;
struct timer_list *lid_timer = &dev_priv->lid_timer;
unsigned long irq_flags;
u32 __iomem *lid_state = dev_priv->opregion.lid_state;
u32 pp_status;
if (readl(lid_state) == dev_priv->lid_last_state)
goto lid_timer_schedule;
if ((readl(lid_state)) & 0x01) {
/*lid state is open*/
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON);
do {
pp_status = REG_READ(PP_STATUS);
} while ((pp_status & PP_ON) == 0 &&
(pp_status & PP_SEQUENCE_MASK) != 0);
if (REG_READ(PP_STATUS) & PP_ON) {
/*FIXME: should be backlight level before*/
psb_intel_lvds_set_brightness(dev, 100);
} else {
DRM_DEBUG("LVDS panel never powered up");
return;
}
} else {
psb_intel_lvds_set_brightness(dev, 0);
REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) & ~POWER_TARGET_ON);
do {
pp_status = REG_READ(PP_STATUS);
} while ((pp_status & PP_ON) == 0);
}
dev_priv->lid_last_state = readl(lid_state);
lid_timer_schedule:
spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
if (!timer_pending(lid_timer)) {
lid_timer->expires = jiffies + PSB_LID_DELAY;
add_timer(lid_timer);
}
spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
}
void psb_lid_timer_init(struct drm_psb_private *dev_priv)
{
struct timer_list *lid_timer = &dev_priv->lid_timer;
unsigned long irq_flags;
spin_lock_init(&dev_priv->lid_lock);
spin_lock_irqsave(&dev_priv->lid_lock, irq_flags);
timer_setup(lid_timer, psb_lid_timer_func, 0);
lid_timer->expires = jiffies + PSB_LID_DELAY;
add_timer(lid_timer);
spin_unlock_irqrestore(&dev_priv->lid_lock, irq_flags);
}
void psb_lid_timer_takedown(struct drm_psb_private *dev_priv)
{
del_timer_sync(&dev_priv->lid_timer);
}

@@ -378,7 +378,9 @@ static int gt_fw_domain_init(struct xe_gt *gt)
err);
/* Initialize CCS mode sysfs after early initialization of HW engines */
xe_gt_ccs_mode_sysfs_init(gt);
err = xe_gt_ccs_mode_sysfs_init(gt);
if (err)
goto err_force_wake;
/*
* Stash hardware-reported version. Since this register does not exist

@@ -167,25 +167,20 @@ static void xe_gt_ccs_mode_sysfs_fini(struct drm_device *drm, void *arg)
* and it is expected that there are no open drm clients while doing so.
* The number of available compute slices is exposed to user through a per-gt
* 'num_cslices' sysfs interface.
*
* Returns: error value for failure and 0 for success.
*/
void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt)
{
struct xe_device *xe = gt_to_xe(gt);
int err;
if (!xe_gt_ccs_mode_enabled(gt))
return;
return 0;
err = sysfs_create_files(gt->sysfs, gt_ccs_mode_attrs);
if (err) {
drm_warn(&xe->drm, "Sysfs creation for ccs_mode failed err: %d\n", err);
return;
}
if (err)
return err;
err = drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
if (err) {
sysfs_remove_files(gt->sysfs, gt_ccs_mode_attrs);
drm_warn(&xe->drm, "%s: drmm_add_action_or_reset failed, err: %d\n",
__func__, err);
}
return drmm_add_action_or_reset(&xe->drm, xe_gt_ccs_mode_sysfs_fini, gt);
}

@@ -12,7 +12,7 @@
#include "xe_platform_types.h"
void xe_gt_apply_ccs_mode(struct xe_gt *gt);
void xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
int xe_gt_ccs_mode_sysfs_init(struct xe_gt *gt);
static inline bool xe_gt_ccs_mode_enabled(const struct xe_gt *gt)
{

@@ -1054,10 +1054,10 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len)
adj_len);
break;
case XE_GUC_ACTION_GUC2PF_RELAY_FROM_VF:
ret = xe_guc_relay_process_guc2pf(&guc->relay, payload, adj_len);
ret = xe_guc_relay_process_guc2pf(&guc->relay, hxg, hxg_len);
break;
case XE_GUC_ACTION_GUC2VF_RELAY_FROM_PF:
ret = xe_guc_relay_process_guc2vf(&guc->relay, payload, adj_len);
ret = xe_guc_relay_process_guc2vf(&guc->relay, hxg, hxg_len);
break;
default:
drm_err(&xe->drm, "unexpected action 0x%04x\n", action);

@@ -53,7 +53,6 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
struct xe_gt *gt = huc_to_gt(huc);
struct xe_device *xe = gt_to_xe(gt);
struct xe_bo *bo;
int err;
/* we use a single object for both input and output */
bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
@@ -66,13 +65,7 @@ static int huc_alloc_gsc_pkt(struct xe_huc *huc)
huc->gsc_pkt = bo;
err = drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
if (err) {
free_gsc_pkt(&xe->drm, huc);
return err;
}
return 0;
return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
}
int xe_huc_init(struct xe_huc *huc)

@@ -196,7 +196,7 @@ err_mutex_unlock:
*/
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
unsigned long offset = vmf->address - vmf->vma->vm_start;
unsigned long offset = vmf->pgoff << PAGE_SHIFT;
struct page *page = vmf->page;
file_update_time(vmf->vma->vm_file);
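
The fix above matters when user space mmaps the framebuffer at a nonzero file offset: vmf->address - vmf->vma->vm_start is the offset into the VMA, while deferred IO needs the offset into the framebuffer, which is what vmf->pgoff << PAGE_SHIFT yields (vmf->pgoff is the fault's page offset within the mapped file). A small sketch of the difference, with the addresses and offsets chosen arbitrarily:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

int main(void)
{
	/* Arbitrary example: the framebuffer is mapped starting at file
	 * offset 16 pages, and the fault hits the 4th page of the mapping. */
	unsigned long vm_start   = 0x7f0000000000UL;
	unsigned long vm_pgoff   = 16;	/* mmap() offset, in pages */
	unsigned long fault_addr = vm_start + 3 * PAGE_SIZE;

	/* vmf->pgoff == vma->vm_pgoff + ((address - vm_start) >> PAGE_SHIFT) */
	unsigned long pgoff = vm_pgoff + ((fault_addr - vm_start) >> PAGE_SHIFT);

	unsigned long old_off = fault_addr - vm_start;	/* page 3 of the fb: wrong */
	unsigned long new_off = pgoff << PAGE_SHIFT;	/* page 19 of the fb: right */

	printf("old=0x%lx new=0x%lx\n", old_off, new_off);
	return 0;
}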

@@ -77,11 +77,6 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_PRODUCT_ID 0x1c
#define ETNAVIV_PARAM_GPU_CUSTOMER_ID 0x1d
#define ETNAVIV_PARAM_GPU_ECO_ID 0x1e
#define ETNAVIV_PARAM_GPU_NN_CORE_COUNT 0x1f
#define ETNAVIV_PARAM_GPU_NN_MAD_PER_CORE 0x20
#define ETNAVIV_PARAM_GPU_TP_CORE_COUNT 0x21
#define ETNAVIV_PARAM_GPU_ON_CHIP_SRAM_SIZE 0x22
#define ETNAVIV_PARAM_GPU_AXI_SRAM_SIZE 0x23
#define ETNA_MAX_PIPES 4