drm fixes for 6.12-rc4
msm:
- Display:
  - Move CRTC resource assignment to atomic_check() to make consecutive calls to atomic_check() consistent
  - Fix rounding / sign-extension issues with pclk calculation in case of DSC
  - Cleanups to drop incorrect NULL checks in dpu snapshots
  - Use kvzalloc() in dpu snapshot to avoid allocation failures on heavily loaded systems
  - Fix to not program merge_3d block if dual LM is not used
  - Fix to not flush merge_3d block if it's not enabled, otherwise this leads to false timeouts
- GPU:
  - a7xx: add a fence wait before SMMU table update

xe:
- New workaround for Xe2 (Aradhya)
- Fix unbalanced rpm put (Matthew Auld)
- Remove fragile lock optimization (Matthew Brost)
- Fix job release, delegating it to the drm scheduler (Matthew Brost)
- Fix timestamp bit width for Xe2 (Lucas)
- Fix external BO's dma-resv usage (Matthew Brost)
- Fix returning success for timeout in wait_token (Nirmoy)
- Initialize fence to avoid it being detected as signaled (Matthew Auld)
- Improve cache flush for BMG (Matthew Auld)
- Don't allow hflip for tile4 framebuffers on Xe2 (Juha-Pekka)

amdgpu:
- SR-IOV fix
- CS chunk handling fix
- MES fixes
- SMU13 fixes

amdkfd:
- VRAM usage reporting fix

radeon:
- Fix possible_clones handling

i915:
- Two DP bandwidth-related MST fixes

ast:
- Clear EDID on unplugged connectors

host1x:
- Fix boot on Tegra186
- Set DMA parameters

mgag200:
- Revert VBLANK support

panel:
- himax-hx83102: Adjust power and gamma

qaic:
- Sgtable loop fixes

vmwgfx:
- Limit display layout allocation size
- Handle allocation errors in connector checks
- Clean up KMS code for 2D-only setup
- Report surface-check errors correctly
- Remove NULL test around kvfree()

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmcR9xwACgkQDHTzWXnE
hr5auQ/7B5TSpaPdG48RY27Hhze2AeEGdxArW0fCl5IL+qUhe7s6MRrDRv5ZqAyW
r6tfXjHAnhnVdjEHPMZi8dVLQ/Ge+pBh1g3vnBR/Ugj0/PbrnnLdk4NrnDc9FOok
N0v3SfAI+9wqBc/US7+jXxn0RnkrC2HvQgUPiSmgEl8QFizbyQw+BLl3vmz6hEKv
xuKtnI8Kc7CyxVvHEHcsd/KRYZW+0srBbpfeNnIYFPr2XiXToc/jwlT08j4kRx5g
Hpa6RJfuR3QQ9haxZQPBG3LnV71nL4USwAVX22MHPGqb9XAKGFl/oMffDsouLjHy
jlqrx4vrp/gfu/AcGC9l2XLCZ0yg6dBs1Od9nrwQEioJupGpRqm+dj81Mjd2MaeE
44fMWa7HF7FZ4u27sb4+9oDjouBf2j6hoMfsHLWazbLa+nXVnExcSj6zCY0osfFN
8+Ur+8Hzmj9b/Ugej0D4gVArzeb5WgRApc98PYAD+KFWt5HyGZmZTHjzPGyr6XuU
NRdg1ziQJs1NRwUGbTN7+oAZLKi5lvtrquKKLVKr0MfKpxeb8rUX83Hdk8IYFY/l
ULLMov+kBq9OCwntrTw3MyviNmUFmy0w1N8ArlvPmu3XmdY/IHQfMx8w7p3lV/0f
txKiOgh5tLLD2Dxxjgla0J3XJsVjz7cnBvCKP+PR2kp2oWq/cMo=
=MOo3
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2024-10-18' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"Weekly fixes, msm and xe are the two main ones, with a bunch of scattered fixes including a largish revert in mgag200, then amdgpu, vmwgfx and a scattering of other minor ones. All seems pretty regular."
* tag 'drm-fixes-2024-10-18' of https://gitlab.freedesktop.org/drm/kernel: (45 commits)
  drm/ast: vga: Clear EDID if no display is connected
  drm/ast: sil164: Clear EDID if no display is connected
  Revert "drm/mgag200: Add vblank support"
  drm/amdgpu/swsmu: default to fullscreen 3D profile for dGPUs
  drm/i915/display: Don't allow tile4 framebuffer to do hflip on display20 or greater
  drm/xe/bmg: improve cache flushing behaviour
  drm/xe/xe_sync: initialise ufence.signalled
  drm/xe/ufence: ufence can be signaled right after wait_woken
  drm/xe: Use bookkeep slots for external BO's in exec IOCTL
  drm/xe/query: Increase timestamp width
  drm/xe: Don't free job in TDR
  drm/xe: Take job list lock in xe_sched_add_pending_job
  drm/xe: fix unbalanced rpm put() with declare_wedged()
  drm/xe: fix unbalanced rpm put() with fence_fini()
  drm/xe/xe2lpg: Extend Wa_15016589081 for xe2lpg
  drm/i915/dp_mst: Don't require DSC hblank quirk for a non-DSC compatible mode
  drm/i915/dp_mst: Handle error during DSC BW overhead/slice calculation
  drm/msm/a6xx+: Insert a fence wait before SMMU table update
  drm/msm/dpu: don't always program merge_3d block
  drm/msm/dpu: Don't always set merge_3d pending flush
  ...
commit 5d97dde4d5
@@ -496,7 +496,7 @@ static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wr
 	nents = sgt->nents;
 	nents_dma = nents;
 	*size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans);
-	for_each_sgtable_sg(sgt, sg, i) {
+	for_each_sgtable_dma_sg(sgt, sg, i) {
 		*size -= sizeof(*asp);
 		/* Save 1K for possible follow-up transactions. */
 		if (*size < SZ_1K) {
@@ -184,7 +184,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl
 	nents = 0;

 	size = size ? size : PAGE_SIZE;
-	for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
+	for_each_sgtable_dma_sg(sgt_in, sg, j) {
 		len = sg_dma_len(sg);

 		if (!len)
@@ -221,7 +221,7 @@ static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_tabl

 	/* copy relevant sg node and fix page and length */
 	sgn = sgf;
-	for_each_sgtable_sg(sgt, sg, j) {
+	for_each_sgtable_dma_sg(sgt, sg, j) {
 		memcpy(sg, sgn, sizeof(*sg));
 		if (sgn == sgf) {
 			sg_dma_address(sg) += offf;
@@ -301,7 +301,7 @@ static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
 	 * fence.
 	 */
 	dev_addr = req->dev_addr;
-	for_each_sgtable_sg(slice->sgt, sg, i) {
+	for_each_sgtable_dma_sg(slice->sgt, sg, i) {
 		slice->reqs[i].cmd = cmd;
 		slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
 						      sg_dma_address(sg) : dev_addr);
@@ -265,7 +265,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,

 		/* Only a single BO list is allowed to simplify handling. */
 		if (p->bo_list)
-			ret = -EINVAL;
+			goto free_partial_kdata;

 		ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
 		if (ret)
@@ -1635,11 +1635,9 @@ int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)
 {
 	int r;

-	if (!amdgpu_sriov_vf(adev)) {
-		r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
-		if (r)
-			return r;
-	}
+	r = device_create_file(adev->dev, &dev_attr_enforce_isolation);
+	if (r)
+		return r;

 	r = device_create_file(adev->dev, &dev_attr_run_cleaner_shader);
 	if (r)
@@ -1650,8 +1648,7 @@ int amdgpu_gfx_sysfs_isolation_shader_init(struct amdgpu_device *adev)

 void amdgpu_gfx_sysfs_isolation_shader_fini(struct amdgpu_device *adev)
 {
-	if (!amdgpu_sriov_vf(adev))
-		device_remove_file(adev->dev, &dev_attr_enforce_isolation);
+	device_remove_file(adev->dev, &dev_attr_enforce_isolation);
 	device_remove_file(adev->dev, &dev_attr_run_cleaner_shader);
 }

@@ -1203,8 +1203,10 @@ int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id,

 	r = amdgpu_ring_init(adev, ring, 1024, NULL, 0,
 			     AMDGPU_RING_PRIO_DEFAULT, NULL);
-	if (r)
+	if (r) {
+		amdgpu_mes_unlock(&adev->mes);
 		goto clean_up_memory;
+	}

 	amdgpu_mes_ring_to_queue_props(adev, ring, &qprops);

@@ -1237,7 +1239,6 @@ clean_up_ring:
 	amdgpu_ring_fini(ring);
 clean_up_memory:
 	kfree(ring);
-	amdgpu_mes_unlock(&adev->mes);
 	return r;
 }

@@ -621,7 +621,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)

 	if (amdgpu_mes_log_enable) {
 		mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
-		mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr;
+		mes_set_hw_res_pkt.event_intr_history_gpu_mc_ptr = mes->event_log_gpu_addr + pipe * AMDGPU_MES_LOG_BUFFER_SIZE;
 	}

 	return mes_v12_0_submit_pkt_and_poll_completion(mes, pipe,
@@ -1336,7 +1336,7 @@ static int mes_v12_0_sw_init(void *handle)
 	adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
 	adev->mes.enable_legacy_queue_map = true;

-	adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
+	adev->mes.event_log_size = adev->enable_uni_mes ? (AMDGPU_MAX_MES_PIPES * AMDGPU_MES_LOG_BUFFER_SIZE) : AMDGPU_MES_LOG_BUFFER_SIZE;

 	r = amdgpu_mes_init(adev);
 	if (r)
@@ -1148,7 +1148,7 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,

 		if (flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM)
 			size >>= 1;
-		WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + PAGE_ALIGN(size));
+		atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
 	}

 	mutex_unlock(&p->mutex);
@@ -1219,7 +1219,7 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
 	kfd_process_device_remove_obj_handle(
 		pdd, GET_IDR_HANDLE(args->handle));

-	WRITE_ONCE(pdd->vram_usage, pdd->vram_usage - size);
+	atomic64_sub(size, &pdd->vram_usage);

 err_unlock:
 err_pdd:
@@ -2347,7 +2347,7 @@ static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
 	} else if (bo_bucket->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
 		bo_bucket->restored_offset = offset;
 		/* Update the VRAM usage count */
-		WRITE_ONCE(pdd->vram_usage, pdd->vram_usage + bo_bucket->size);
+		atomic64_add(bo_bucket->size, &pdd->vram_usage);
 	}
 	return 0;
 }
@@ -775,7 +775,7 @@ struct kfd_process_device {
 	enum kfd_pdd_bound bound;

 	/* VRAM usage */
-	uint64_t vram_usage;
+	atomic64_t vram_usage;
 	struct attribute attr_vram;
 	char vram_filename[MAX_SYSFS_FILENAME_LEN];

@@ -332,7 +332,7 @@ static ssize_t kfd_procfs_show(struct kobject *kobj, struct attribute *attr,
 	} else if (strncmp(attr->name, "vram_", 5) == 0) {
 		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
 							      attr_vram);
-		return snprintf(buffer, PAGE_SIZE, "%llu\n", READ_ONCE(pdd->vram_usage));
+		return snprintf(buffer, PAGE_SIZE, "%llu\n", atomic64_read(&pdd->vram_usage));
 	} else if (strncmp(attr->name, "sdma_", 5) == 0) {
 		struct kfd_process_device *pdd = container_of(attr, struct kfd_process_device,
 							      attr_sdma);
@@ -1625,7 +1625,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev,
 	pdd->bound = PDD_UNBOUND;
 	pdd->already_dequeued = false;
 	pdd->runtime_inuse = false;
-	pdd->vram_usage = 0;
+	atomic64_set(&pdd->vram_usage, 0);
 	pdd->sdma_past_activity_counter = 0;
 	pdd->user_gpu_id = dev->id;
 	atomic64_set(&pdd->evict_duration_counter, 0);
@@ -405,6 +405,27 @@ static void svm_range_bo_release(struct kref *kref)
 		spin_lock(&svm_bo->list_lock);
 	}
 	spin_unlock(&svm_bo->list_lock);

+	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
+		struct kfd_process_device *pdd;
+		struct kfd_process *p;
+		struct mm_struct *mm;
+
+		mm = svm_bo->eviction_fence->mm;
+		/*
+		 * The forked child process takes svm_bo device pages ref, svm_bo could be
+		 * released after parent process is gone.
+		 */
+		p = kfd_lookup_process_by_mm(mm);
+		if (p) {
+			pdd = kfd_get_process_device_data(svm_bo->node, p);
+			if (pdd)
+				atomic64_sub(amdgpu_bo_size(svm_bo->bo), &pdd->vram_usage);
+			kfd_unref_process(p);
+		}
+		mmput(mm);
+	}
+
 	if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
 		/* We're not in the eviction worker. Signal the fence. */
 		dma_fence_signal(&svm_bo->eviction_fence->base);
@@ -532,6 +553,7 @@ int
 svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
 			bool clear)
 {
+	struct kfd_process_device *pdd;
 	struct amdgpu_bo_param bp;
 	struct svm_range_bo *svm_bo;
 	struct amdgpu_bo_user *ubo;
@@ -623,6 +645,10 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange,
 	list_add(&prange->svm_bo_list, &svm_bo->range_list);
 	spin_unlock(&svm_bo->list_lock);

+	pdd = svm_range_get_pdd_by_node(prange, node);
+	if (pdd)
+		atomic64_add(amdgpu_bo_size(bo), &pdd->vram_usage);
+
 	return 0;

 reserve_bo_failed:
@@ -1264,7 +1264,11 @@ static int smu_sw_init(void *handle)
 	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
 	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
 	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
-	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+
+	if (smu->is_apu)
+		smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
+	else
+		smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D];

 	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
 	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
@@ -2226,7 +2230,7 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
 static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 					  enum amd_dpm_forced_level level,
 					  bool skip_display_settings,
-					  bool force_update)
+					  bool init)
 {
 	int ret = 0;
 	int index = 0;
@@ -2255,7 +2259,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 		}
 	}

-	if (force_update || smu_dpm_ctx->dpm_level != level) {
+	if (smu_dpm_ctx->dpm_level != level) {
 		ret = smu_asic_set_performance_level(smu, level);
 		if (ret) {
 			dev_err(smu->adev->dev, "Failed to set performance level!");
@@ -2272,7 +2276,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
 	index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
 	workload[0] = smu->workload_setting[index];

-	if (force_update || smu->power_profile_mode != workload[0])
+	if (init || smu->power_profile_mode != workload[0])
 		smu_bump_power_profile_mode(smu, workload, 0);
 }

@@ -2555,18 +2555,16 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
 	workload_mask = 1 << workload_type;

 	/* Add optimizations for SMU13.0.0/10. Reuse the power saving profile */
-	if (smu->power_profile_mode == PP_SMC_POWER_PROFILE_COMPUTE) {
-		if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
-		     ((smu->adev->pm.fw_version == 0x004e6601) ||
-		      (smu->adev->pm.fw_version >= 0x004e7300))) ||
-		    (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
-		     smu->adev->pm.fw_version >= 0x00504500)) {
-			workload_type = smu_cmn_to_asic_specific_index(smu,
-								       CMN2ASIC_MAPPING_WORKLOAD,
-								       PP_SMC_POWER_PROFILE_POWERSAVING);
-			if (workload_type >= 0)
-				workload_mask |= 1 << workload_type;
-		}
-	}
+	if ((amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 0) &&
+	     ((smu->adev->pm.fw_version == 0x004e6601) ||
+	      (smu->adev->pm.fw_version >= 0x004e7300))) ||
+	    (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 10) &&
+	     smu->adev->pm.fw_version >= 0x00504500)) {
+		workload_type = smu_cmn_to_asic_specific_index(smu,
+							       CMN2ASIC_MAPPING_WORKLOAD,
+							       PP_SMC_POWER_PROFILE_POWERSAVING);
+		if (workload_type >= 0)
+			workload_mask |= 1 << workload_type;
+	}

 	ret = smu_cmn_send_smc_msg_with_param(smu,
@@ -29,6 +29,8 @@ static int ast_sil164_connector_helper_get_modes(struct drm_connector *connector
 	if (ast_connector->physical_status == connector_status_connected) {
 		count = drm_connector_helper_get_modes(connector);
 	} else {
+		drm_edid_connector_update(connector, NULL);
+
 		/*
 		 * There's no EDID data without a connected monitor. Set BMC-
 		 * compatible modes in this case. The XGA default resolution
@@ -29,6 +29,8 @@ static int ast_vga_connector_helper_get_modes(struct drm_connector *connector)
 	if (ast_connector->physical_status == connector_status_connected) {
 		count = drm_connector_helper_get_modes(connector);
 	} else {
+		drm_edid_connector_update(connector, NULL);
+
 		/*
 		 * There's no EDID data without a connected monitor. Set BMC-
 		 * compatible modes in this case. The XGA default resolution
@@ -89,25 +89,19 @@ static int intel_dp_mst_max_dpt_bpp(const struct intel_crtc_state *crtc_state,

 static int intel_dp_mst_bw_overhead(const struct intel_crtc_state *crtc_state,
 				    const struct intel_connector *connector,
-				    bool ssc, bool dsc, int bpp_x16)
+				    bool ssc, int dsc_slice_count, int bpp_x16)
 {
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->hw.adjusted_mode;
 	unsigned long flags = DRM_DP_BW_OVERHEAD_MST;
-	int dsc_slice_count = 0;
 	int overhead;

 	flags |= intel_dp_is_uhbr(crtc_state) ? DRM_DP_BW_OVERHEAD_UHBR : 0;
 	flags |= ssc ? DRM_DP_BW_OVERHEAD_SSC_REF_CLK : 0;
 	flags |= crtc_state->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;

-	if (dsc) {
+	if (dsc_slice_count)
 		flags |= DRM_DP_BW_OVERHEAD_DSC;
-		dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
-							       adjusted_mode->clock,
-							       adjusted_mode->hdisplay,
-							       crtc_state->joiner_pipes);
-	}

 	overhead = drm_dp_bw_overhead(crtc_state->lane_count,
 				      adjusted_mode->hdisplay,
@@ -153,6 +147,19 @@ static int intel_dp_mst_calc_pbn(int pixel_clock, int bpp_x16, int bw_overhead)
 	return DIV_ROUND_UP(effective_data_rate * 64, 54 * 1000);
 }

+static int intel_dp_mst_dsc_get_slice_count(const struct intel_connector *connector,
+					    const struct intel_crtc_state *crtc_state)
+{
+	const struct drm_display_mode *adjusted_mode =
+		&crtc_state->hw.adjusted_mode;
+	int num_joined_pipes = crtc_state->joiner_pipes;
+
+	return intel_dp_dsc_get_slice_count(connector,
+					    adjusted_mode->clock,
+					    adjusted_mode->hdisplay,
+					    num_joined_pipes);
+}
+
 static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
 						struct intel_crtc_state *crtc_state,
 						int max_bpp,
@@ -172,6 +179,7 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
 	const struct drm_display_mode *adjusted_mode =
 		&crtc_state->hw.adjusted_mode;
 	int bpp, slots = -EINVAL;
+	int dsc_slice_count = 0;
 	int max_dpt_bpp;
 	int ret = 0;

@@ -203,6 +211,15 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
 	drm_dbg_kms(&i915->drm, "Looking for slots in range min bpp %d max bpp %d\n",
 		    min_bpp, max_bpp);

+	if (dsc) {
+		dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, crtc_state);
+		if (!dsc_slice_count) {
+			drm_dbg_kms(&i915->drm, "Can't get valid DSC slice count\n");
+
+			return -ENOSPC;
+		}
+	}
+
 	for (bpp = max_bpp; bpp >= min_bpp; bpp -= step) {
 		int local_bw_overhead;
 		int remote_bw_overhead;
@@ -216,9 +233,9 @@ static int intel_dp_mst_find_vcpi_slots_for_bpp(struct intel_encoder *encoder,
 					intel_dp_output_bpp(crtc_state->output_format, bpp));

 		local_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
-							     false, dsc, link_bpp_x16);
+							     false, dsc_slice_count, link_bpp_x16);
 		remote_bw_overhead = intel_dp_mst_bw_overhead(crtc_state, connector,
-							      true, dsc, link_bpp_x16);
+							      true, dsc_slice_count, link_bpp_x16);

 		intel_dp_mst_compute_m_n(crtc_state, connector,
 					 local_bw_overhead,
@@ -449,6 +466,9 @@ hblank_expansion_quirk_needs_dsc(const struct intel_connector *connector,
 	if (mode_hblank_period_ns(adjusted_mode) > hblank_limit)
 		return false;

+	if (!intel_dp_mst_dsc_get_slice_count(connector, crtc_state))
+		return false;
+
 	return true;
 }

@@ -438,6 +438,19 @@ bool intel_fb_needs_64k_phys(u64 modifier)
 				      INTEL_PLANE_CAP_NEED64K_PHYS);
 }

+/**
+ * intel_fb_is_tile4_modifier: Check if a modifier is a tile4 modifier type
+ * @modifier: Modifier to check
+ *
+ * Returns:
+ * Returns %true if @modifier is a tile4 modifier.
+ */
+bool intel_fb_is_tile4_modifier(u64 modifier)
+{
+	return plane_caps_contain_any(lookup_modifier(modifier)->plane_caps,
+				      INTEL_PLANE_CAP_TILING_4);
+}
+
 static bool check_modifier_display_ver_range(const struct intel_modifier_desc *md,
 					     u8 display_ver_from, u8 display_ver_until)
 {
@@ -35,6 +35,7 @@ bool intel_fb_is_ccs_modifier(u64 modifier);
 bool intel_fb_is_rc_ccs_cc_modifier(u64 modifier);
 bool intel_fb_is_mc_ccs_modifier(u64 modifier);
 bool intel_fb_needs_64k_phys(u64 modifier);
+bool intel_fb_is_tile4_modifier(u64 modifier);

 bool intel_fb_is_ccs_aux_plane(const struct drm_framebuffer *fb, int color_plane);
 int intel_fb_rc_ccs_cc_plane(const struct drm_framebuffer *fb);
@@ -1591,6 +1591,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
 		return -EINVAL;
 	}

+	/*
+	 * Display20 onward tile4 hflip is not supported
+	 */
+	if (rotation & DRM_MODE_REFLECT_X &&
+	    intel_fb_is_tile4_modifier(fb->modifier) &&
+	    DISPLAY_VER(dev_priv) >= 20) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "horizontal flip is not supported with tile4 surface formats\n");
+		return -EINVAL;
+	}
+
 	if (drm_rotation_90_or_270(rotation)) {
 		if (!intel_fb_supports_90_270_rotation(to_intel_framebuffer(fb))) {
 			drm_dbg_kms(&dev_priv->drm,
@@ -18,7 +18,6 @@
 #include <drm/drm_managed.h>
 #include <drm/drm_module.h>
 #include <drm/drm_pciids.h>
-#include <drm/drm_vblank.h>

 #include "mgag200_drv.h"

@@ -85,34 +84,6 @@ resource_size_t mgag200_probe_vram(void __iomem *mem, resource_size_t size)
 	return offset - 65536;
 }

-static irqreturn_t mgag200_irq_handler(int irq, void *arg)
-{
-	struct drm_device *dev = arg;
-	struct mga_device *mdev = to_mga_device(dev);
-	struct drm_crtc *crtc;
-	u32 status, ien;
-
-	status = RREG32(MGAREG_STATUS);
-
-	if (status & MGAREG_STATUS_VLINEPEN) {
-		ien = RREG32(MGAREG_IEN);
-		if (!(ien & MGAREG_IEN_VLINEIEN))
-			goto out;
-
-		crtc = drm_crtc_from_index(dev, 0);
-		if (WARN_ON_ONCE(!crtc))
-			goto out;
-		drm_crtc_handle_vblank(crtc);
-
-		WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
-
-		return IRQ_HANDLED;
-	}
-
-out:
-	return IRQ_NONE;
-}
-
 /*
  * DRM driver
  */
@@ -196,7 +167,6 @@ int mgag200_device_init(struct mga_device *mdev,
 			const struct mgag200_device_funcs *funcs)
 {
 	struct drm_device *dev = &mdev->base;
-	struct pci_dev *pdev = to_pci_dev(dev->dev);
 	u8 crtcext3, misc;
 	int ret;

@@ -223,14 +193,6 @@ int mgag200_device_init(struct mga_device *mdev,
 	mutex_unlock(&mdev->rmmio_lock);

-	WREG32(MGAREG_IEN, 0);
-	WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
-
-	ret = devm_request_irq(&pdev->dev, pdev->irq, mgag200_irq_handler, IRQF_SHARED,
-			       dev->driver->name, dev);
-	if (ret) {
-		drm_err(dev, "Failed to acquire interrupt, error %d\n", ret);
-		return ret;
-	}
-
 	return 0;
 }
@@ -391,24 +391,17 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st
 void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
 void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
 void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state);
-bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
-					      int *vpos, int *hpos,
-					      ktime_t *stime, ktime_t *etime,
-					      const struct drm_display_mode *mode);

 #define MGAG200_CRTC_HELPER_FUNCS \
 	.mode_valid = mgag200_crtc_helper_mode_valid, \
 	.atomic_check = mgag200_crtc_helper_atomic_check, \
 	.atomic_flush = mgag200_crtc_helper_atomic_flush, \
 	.atomic_enable = mgag200_crtc_helper_atomic_enable, \
-	.atomic_disable = mgag200_crtc_helper_atomic_disable, \
-	.get_scanout_position = mgag200_crtc_helper_get_scanout_position
+	.atomic_disable = mgag200_crtc_helper_atomic_disable

 void mgag200_crtc_reset(struct drm_crtc *crtc);
 struct drm_crtc_state *mgag200_crtc_atomic_duplicate_state(struct drm_crtc *crtc);
 void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_state *crtc_state);
-int mgag200_crtc_enable_vblank(struct drm_crtc *crtc);
-void mgag200_crtc_disable_vblank(struct drm_crtc *crtc);

 #define MGAG200_CRTC_FUNCS \
 	.reset = mgag200_crtc_reset, \
@@ -416,10 +409,7 @@ void mgag200_crtc_disable_vblank(struct drm_crtc *crtc);
 	.set_config = drm_atomic_helper_set_config, \
 	.page_flip = drm_atomic_helper_page_flip, \
 	.atomic_duplicate_state = mgag200_crtc_atomic_duplicate_state, \
-	.atomic_destroy_state = mgag200_crtc_atomic_destroy_state, \
-	.enable_vblank = mgag200_crtc_enable_vblank, \
-	.disable_vblank = mgag200_crtc_disable_vblank, \
-	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp
+	.atomic_destroy_state = mgag200_crtc_atomic_destroy_state

 void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mode *mode,
 			   bool set_vidrst);
@@ -8,7 +8,6 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>

 #include "mgag200_drv.h"

@@ -404,9 +403,5 @@ struct mga_device *mgag200_g200_device_create(struct pci_dev *pdev, const struct
 	drm_mode_config_reset(dev);
 	drm_kms_helper_poll_init(dev);

-	ret = drm_vblank_init(dev, 1);
-	if (ret)
-		return ERR_PTR(ret);
-
 	return mdev;
 }
@@ -8,7 +8,6 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>

 #include "mgag200_drv.h"

@@ -276,9 +275,5 @@ struct mga_device *mgag200_g200eh_device_create(struct pci_dev *pdev, const stru
 	drm_mode_config_reset(dev);
 	drm_kms_helper_poll_init(dev);

-	ret = drm_vblank_init(dev, 1);
-	if (ret)
-		return ERR_PTR(ret);
-
 	return mdev;
 }
@@ -7,7 +7,6 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>

 #include "mgag200_drv.h"

@@ -181,9 +180,5 @@ struct mga_device *mgag200_g200eh3_device_create(struct pci_dev *pdev,
 	drm_mode_config_reset(dev);
 	drm_kms_helper_poll_init(dev);

-	ret = drm_vblank_init(dev, 1);
-	if (ret)
-		return ERR_PTR(ret);
-
 	return mdev;
 }
@@ -8,7 +8,6 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>

 #include "mgag200_drv.h"

@@ -206,8 +205,6 @@ static void mgag200_g200er_crtc_helper_atomic_enable(struct drm_crtc *crtc,
 		mgag200_crtc_set_gamma_linear(mdev, format);

 	mgag200_enable_display(mdev);
-
-	drm_crtc_vblank_on(crtc);
 }

 static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
@@ -215,8 +212,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200er_crtc_helper_funcs = {
 	.atomic_check = mgag200_crtc_helper_atomic_check,
 	.atomic_flush = mgag200_crtc_helper_atomic_flush,
 	.atomic_enable = mgag200_g200er_crtc_helper_atomic_enable,
-	.atomic_disable = mgag200_crtc_helper_atomic_disable,
-	.get_scanout_position = mgag200_crtc_helper_get_scanout_position,
+	.atomic_disable = mgag200_crtc_helper_atomic_disable
 };

 static const struct drm_crtc_funcs mgag200_g200er_crtc_funcs = {
@@ -312,9 +308,5 @@ struct mga_device *mgag200_g200er_device_create(struct pci_dev *pdev, const stru
 	drm_mode_config_reset(dev);
 	drm_kms_helper_poll_init(dev);

-	ret = drm_vblank_init(dev, 1);
-	if (ret)
-		return ERR_PTR(ret);
-
 	return mdev;
 }
@@ -8,7 +8,6 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>

 #include "mgag200_drv.h"

@@ -207,8 +206,6 @@ static void mgag200_g200ev_crtc_helper_atomic_enable(struct drm_crtc *crtc,
 		mgag200_crtc_set_gamma_linear(mdev, format);

 	mgag200_enable_display(mdev);
-
-	drm_crtc_vblank_on(crtc);
 }

 static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
@@ -216,8 +213,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200ev_crtc_helper_funcs = {
 	.atomic_check = mgag200_crtc_helper_atomic_check,
 	.atomic_flush = mgag200_crtc_helper_atomic_flush,
 	.atomic_enable = mgag200_g200ev_crtc_helper_atomic_enable,
-	.atomic_disable = mgag200_crtc_helper_atomic_disable,
-	.get_scanout_position = mgag200_crtc_helper_get_scanout_position,
+	.atomic_disable = mgag200_crtc_helper_atomic_disable
 };

 static const struct drm_crtc_funcs mgag200_g200ev_crtc_funcs = {
@@ -317,9 +313,5 @@ struct mga_device *mgag200_g200ev_device_create(struct pci_dev *pdev, const stru
 	drm_mode_config_reset(dev);
 	drm_kms_helper_poll_init(dev);

-	ret = drm_vblank_init(dev, 1);
-	if (ret)
-		return ERR_PTR(ret);
-
 	return mdev;
 }
@@ -7,7 +7,6 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>

 #include "mgag200_drv.h"

@@ -199,9 +198,5 @@ struct mga_device *mgag200_g200ew3_device_create(struct pci_dev *pdev,
 	drm_mode_config_reset(dev);
 	drm_kms_helper_poll_init(dev);

-	ret = drm_vblank_init(dev, 1);
-	if (ret)
-		return ERR_PTR(ret);
-
 	return mdev;
 }
@@ -8,7 +8,6 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>

 #include "mgag200_drv.h"

@@ -338,8 +337,6 @@ static void mgag200_g200se_crtc_helper_atomic_enable(struct drm_crtc *crtc,
 		mgag200_crtc_set_gamma_linear(mdev, format);

 	mgag200_enable_display(mdev);
-
-	drm_crtc_vblank_on(crtc);
 }

 static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
@@ -347,8 +344,7 @@ static const struct drm_crtc_helper_funcs mgag200_g200se_crtc_helper_funcs = {
 	.atomic_check = mgag200_crtc_helper_atomic_check,
 	.atomic_flush = mgag200_crtc_helper_atomic_flush,
 	.atomic_enable = mgag200_g200se_crtc_helper_atomic_enable,
-	.atomic_disable = mgag200_crtc_helper_atomic_disable,
-	.get_scanout_position = mgag200_crtc_helper_get_scanout_position,
+	.atomic_disable = mgag200_crtc_helper_atomic_disable
 };

 static const struct drm_crtc_funcs mgag200_g200se_crtc_funcs = {
@@ -517,9 +513,5 @@ struct mga_device *mgag200_g200se_device_create(struct pci_dev *pdev, const stru
 	drm_mode_config_reset(dev);
 	drm_kms_helper_poll_init(dev);

-	ret = drm_vblank_init(dev, 1);
-	if (ret)
-		return ERR_PTR(ret);
-
 	return mdev;
 }
@@ -8,7 +8,6 @@
 #include <drm/drm_drv.h>
 #include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>

 #include "mgag200_drv.h"

@@ -323,9 +322,5 @@ struct mga_device *mgag200_g200wb_device_create(struct pci_dev *pdev, const stru
 	drm_mode_config_reset(dev);
 	drm_kms_helper_poll_init(dev);

-	ret = drm_vblank_init(dev, 1);
-	if (ret)
-		return ERR_PTR(ret);
-
 	return mdev;
 }
@@ -22,7 +22,6 @@
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_panic.h>
 #include <drm/drm_print.h>
-#include <drm/drm_vblank.h>

 #include "mgag200_ddc.h"
 #include "mgag200_drv.h"
@@ -227,14 +226,7 @@ void mgag200_set_mode_regs(struct mga_device *mdev, const struct drm_display_mod
 	vblkstr = mode->crtc_vblank_start;
 	vblkend = vtotal + 1;

-	/*
-	 * There's no VBLANK interrupt on Matrox chipsets, so we use
-	 * the VLINE interrupt instead. It triggers when the current
-	 * <linecomp> has been reached. For VBLANK, this is the first
-	 * non-visible line at the bottom of the screen. Therefore,
-	 * keep <linecomp> in sync with <vblkstr>.
-	 */
-	linecomp = vblkstr;
+	linecomp = vdispend;

 	misc = RREG8(MGA_MISC_IN);

@@ -645,8 +637,6 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
 	struct mgag200_crtc_state *mgag200_crtc_state = to_mgag200_crtc_state(crtc_state);
 	struct drm_device *dev = crtc->dev;
 	struct mga_device *mdev = to_mga_device(dev);
-	struct drm_pending_vblank_event *event;
-	unsigned long flags;

 	if (crtc_state->enable && crtc_state->color_mgmt_changed) {
 		const struct drm_format_info *format = mgag200_crtc_state->format;
@@ -656,18 +646,6 @@ void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_s
 		else
 			mgag200_crtc_set_gamma_linear(mdev, format);
 	}
-
-	event = crtc->state->event;
-	if (event) {
-		crtc->state->event = NULL;
-
-		spin_lock_irqsave(&dev->event_lock, flags);
-		if (drm_crtc_vblank_get(crtc) != 0)
-			drm_crtc_send_vblank_event(crtc, event);
-		else
-			drm_crtc_arm_vblank_event(crtc, event);
-		spin_unlock_irqrestore(&dev->event_lock, flags);
-	}
 }

 void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
@@ -692,44 +670,15 @@ void mgag200_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_
 		mgag200_crtc_set_gamma_linear(mdev, format);

 	mgag200_enable_display(mdev);
-
-	drm_crtc_vblank_on(crtc);
 }

 void mgag200_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
 {
 	struct mga_device *mdev = to_mga_device(crtc->dev);

-	drm_crtc_vblank_off(crtc);
-
 	mgag200_disable_display(mdev);
 }

-bool mgag200_crtc_helper_get_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
-					      int *vpos, int *hpos,
-					      ktime_t *stime, ktime_t *etime,
-					      const struct drm_display_mode *mode)
-{
-	struct mga_device *mdev = to_mga_device(crtc->dev);
-	u32 vcount;
-
-	if (stime)
-		*stime = ktime_get();
-
-	if (vpos) {
-		vcount = RREG32(MGAREG_VCOUNT);
-		*vpos = vcount & GENMASK(11, 0);
-	}
-
-	if (hpos)
-		*hpos = mode->htotal >> 1; // near middle of scanline on average
-
-	if (etime)
-		*etime = ktime_get();
-
-	return true;
-}
-
 void mgag200_crtc_reset(struct drm_crtc *crtc)
 {
 	struct mgag200_crtc_state *mgag200_crtc_state;
@@ -774,30 +723,6 @@ void mgag200_crtc_atomic_destroy_state(struct drm_crtc *crtc, struct drm_crtc_st
 	kfree(mgag200_crtc_state);
 }

-int mgag200_crtc_enable_vblank(struct drm_crtc *crtc)
-{
-	struct mga_device *mdev = to_mga_device(crtc->dev);
-	u32 ien;
-
-	WREG32(MGAREG_ICLEAR, MGAREG_ICLEAR_VLINEICLR);
-
-	ien = RREG32(MGAREG_IEN);
-	ien |= MGAREG_IEN_VLINEIEN;
-	WREG32(MGAREG_IEN, ien);
-
-	return 0;
-}
-
-void mgag200_crtc_disable_vblank(struct drm_crtc *crtc)
-{
-	struct mga_device *mdev = to_mga_device(crtc->dev);
-	u32 ien;
-
-	ien = RREG32(MGAREG_IEN);
-	ien &= ~(MGAREG_IEN_VLINEIEN);
-	WREG32(MGAREG_IEN, ien);
-}
-
 /*
  * Mode config
 */
@@ -101,9 +101,10 @@ static void get_stats_counter(struct msm_ringbuffer *ring, u32 counter,
 }

 static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
-		struct msm_ringbuffer *ring, struct msm_file_private *ctx)
+		struct msm_ringbuffer *ring, struct msm_gem_submit *submit)
 {
 	bool sysprof = refcount_read(&a6xx_gpu->base.base.sysprof_active) > 1;
+	struct msm_file_private *ctx = submit->queue->ctx;
 	struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
 	phys_addr_t ttbr;
 	u32 asid;
@@ -115,6 +116,15 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
 	if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
 		return;

+	if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) {
+		/* Wait for previous submit to complete before continuing: */
+		OUT_PKT7(ring, CP_WAIT_TIMESTAMP, 4);
+		OUT_RING(ring, 0);
+		OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+		OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+		OUT_RING(ring, submit->seqno - 1);
+	}
+
 	if (!sysprof) {
 		if (!adreno_is_a7xx(adreno_gpu)) {
 			/* Turn off protected mode to write to special registers */
@@ -193,7 +203,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	struct msm_ringbuffer *ring = submit->ring;
 	unsigned int i, ibs = 0;

-	a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+	a6xx_set_pagetable(a6xx_gpu, ring, submit);

 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP(0),
 		rbmemptr_stats(ring, index, cpcycles_start));
@@ -283,7 +293,7 @@ static void a7xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	OUT_PKT7(ring, CP_THREAD_CONTROL, 1);
 	OUT_RING(ring, CP_THREAD_CONTROL_0_SYNC_THREADS | CP_SET_THREAD_BR);

-	a6xx_set_pagetable(a6xx_gpu, ring, submit->queue->ctx);
+	a6xx_set_pagetable(a6xx_gpu, ring, submit);

 	get_stats_counter(ring, REG_A7XX_RBBM_PERFCTR_CP(0),
 		rbmemptr_stats(ring, index, cpcycles_start));
@@ -711,12 +711,13 @@ void dpu_crtc_complete_commit(struct drm_crtc *crtc)
 	_dpu_crtc_complete_flip(crtc);
 }

-static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+static int _dpu_crtc_check_and_setup_lm_bounds(struct drm_crtc *crtc,
 		struct drm_crtc_state *state)
 {
 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
 	struct drm_display_mode *adj_mode = &state->adjusted_mode;
 	u32 crtc_split_width = adj_mode->hdisplay / cstate->num_mixers;
+	struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
 	int i;

 	for (i = 0; i < cstate->num_mixers; i++) {
@@ -727,7 +728,12 @@ static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
 		r->y2 = adj_mode->vdisplay;

 		trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+
+		if (drm_rect_width(r) > dpu_kms->catalog->caps->max_mixer_width)
+			return -E2BIG;
 	}
+
+	return 0;
 }

 static void _dpu_crtc_get_pcc_coeff(struct drm_crtc_state *state,
@@ -803,7 +809,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,

 	DRM_DEBUG_ATOMIC("crtc%d\n", crtc->base.id);

-	_dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+	_dpu_crtc_check_and_setup_lm_bounds(crtc, crtc->state);

 	/* encoder will trigger pending mask now */
 	drm_for_each_encoder_mask(encoder, crtc->dev, crtc->state->encoder_mask)
@@ -1091,9 +1097,6 @@ static void dpu_crtc_disable(struct drm_crtc *crtc,

 	dpu_core_perf_crtc_update(crtc, 0);

-	memset(cstate->mixers, 0, sizeof(cstate->mixers));
-	cstate->num_mixers = 0;
-
 	/* disable clk & bw control until clk & bw properties are set */
 	cstate->bw_control = false;
 	cstate->bw_split_vote = false;
@@ -1192,8 +1195,11 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 	if (crtc_state->active_changed)
 		crtc_state->mode_changed = true;

-	if (cstate->num_mixers)
-		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
+	if (cstate->num_mixers) {
+		rc = _dpu_crtc_check_and_setup_lm_bounds(crtc, crtc_state);
+		if (rc)
+			return rc;
+	}

 	/* FIXME: move this to dpu_plane_atomic_check? */
 	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
@@ -624,6 +624,40 @@ static struct msm_display_topology dpu_encoder_get_topology(
 	return topology;
 }

+static void dpu_encoder_assign_crtc_resources(struct dpu_kms *dpu_kms,
+					      struct drm_encoder *drm_enc,
+					      struct dpu_global_state *global_state,
+					      struct drm_crtc_state *crtc_state)
+{
+	struct dpu_crtc_state *cstate;
+	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
+	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
+	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC];
+	int num_lm, num_ctl, num_dspp, i;
+
+	cstate = to_dpu_crtc_state(crtc_state);
+
+	memset(cstate->mixers, 0, sizeof(cstate->mixers));
+
+	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+	num_dspp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+		drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
+		ARRAY_SIZE(hw_dspp));
+
+	for (i = 0; i < num_lm; i++) {
+		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
+
+		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
+		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
+		cstate->mixers[i].hw_dspp = i < num_dspp ? to_dpu_hw_dspp(hw_dspp[i]) : NULL;
+	}
+
+	cstate->num_mixers = num_lm;
+}
+
 static int dpu_encoder_virt_atomic_check(
 		struct drm_encoder *drm_enc,
 		struct drm_crtc_state *crtc_state,
@@ -692,6 +726,9 @@ static int dpu_encoder_virt_atomic_check(
 		if (!crtc_state->active_changed || crtc_state->enable)
 			ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
 					drm_enc, crtc_state, topology);
+		if (!ret)
+			dpu_encoder_assign_crtc_resources(dpu_kms, drm_enc,
+							  global_state, crtc_state);
 	}

 	trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags);
@@ -1093,14 +1130,11 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 	struct dpu_encoder_virt *dpu_enc;
 	struct msm_drm_private *priv;
 	struct dpu_kms *dpu_kms;
-	struct dpu_crtc_state *cstate;
 	struct dpu_global_state *global_state;
 	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
-	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
-	struct dpu_hw_blk *hw_dspp[MAX_CHANNELS_PER_ENC] = { NULL };
 	struct dpu_hw_blk *hw_dsc[MAX_CHANNELS_PER_ENC];
-	int num_lm, num_ctl, num_pp, num_dsc;
+	int num_ctl, num_pp, num_dsc;
 	unsigned int dsc_mask = 0;
 	int i;

@@ -1129,11 +1163,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 						ARRAY_SIZE(hw_pp));
 	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
 		drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
-	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
-		drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
-	dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
-		drm_enc->base.id, DPU_HW_BLK_DSPP, hw_dspp,
-		ARRAY_SIZE(hw_dspp));

 	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
 		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
@@ -1159,36 +1188,23 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 		dpu_enc->cur_master->hw_cdm = hw_cdm ? to_dpu_hw_cdm(hw_cdm) : NULL;
 	}

-	cstate = to_dpu_crtc_state(crtc_state);
-
-	for (i = 0; i < num_lm; i++) {
-		int ctl_idx = (i < num_ctl) ? i : (num_ctl-1);
-
-		cstate->mixers[i].hw_lm = to_dpu_hw_mixer(hw_lm[i]);
-		cstate->mixers[i].lm_ctl = to_dpu_hw_ctl(hw_ctl[ctl_idx]);
-		cstate->mixers[i].hw_dspp = to_dpu_hw_dspp(hw_dspp[i]);
-	}
-
-	cstate->num_mixers = num_lm;
-
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];

-		if (!dpu_enc->hw_pp[i]) {
+		phys->hw_pp = dpu_enc->hw_pp[i];
+		if (!phys->hw_pp) {
 			DPU_ERROR_ENC(dpu_enc,
 				"no pp block assigned at idx: %d\n", i);
 			return;
 		}

-		if (!hw_ctl[i]) {
+		phys->hw_ctl = i < num_ctl ? to_dpu_hw_ctl(hw_ctl[i]) : NULL;
+		if (!phys->hw_ctl) {
 			DPU_ERROR_ENC(dpu_enc,
 				"no ctl block assigned at idx: %d\n", i);
 			return;
 		}

-		phys->hw_pp = dpu_enc->hw_pp[i];
-		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
-
 		phys->cached_mode = crtc_state->adjusted_mode;
 		if (phys->ops.atomic_mode_set)
 			phys->ops.atomic_mode_set(phys, crtc_state, conn_state);
@@ -302,7 +302,7 @@ static void dpu_encoder_phys_vid_setup_timing_engine(
 	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
 	intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
 	intf_cfg.dsc = dpu_encoder_helper_get_dsc(phys_enc);
-	if (phys_enc->hw_pp->merge_3d)
+	if (intf_cfg.mode_3d && phys_enc->hw_pp->merge_3d)
 		intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;

 	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
@@ -440,10 +440,12 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
 	struct dpu_hw_ctl *ctl;
 	const struct msm_format *fmt;
 	u32 fmt_fourcc;
+	u32 mode_3d;

 	ctl = phys_enc->hw_ctl;
 	fmt_fourcc = dpu_encoder_get_drm_fmt(phys_enc);
 	fmt = mdp_get_format(&phys_enc->dpu_kms->base, fmt_fourcc, 0);
+	mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);

 	DPU_DEBUG_VIDENC(phys_enc, "\n");

@@ -466,7 +468,8 @@ static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
 		goto skip_flush;

 	ctl->ops.update_pending_flush_intf(ctl, phys_enc->hw_intf->idx);
-	if (ctl->ops.update_pending_flush_merge_3d && phys_enc->hw_pp->merge_3d)
+	if (mode_3d && ctl->ops.update_pending_flush_merge_3d &&
+	    phys_enc->hw_pp->merge_3d)
 		ctl->ops.update_pending_flush_merge_3d(ctl, phys_enc->hw_pp->merge_3d->idx);

 	if (ctl->ops.update_pending_flush_cdm && phys_enc->hw_cdm)
@@ -275,6 +275,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
 	struct dpu_hw_pingpong *hw_pp;
 	struct dpu_hw_cdm *hw_cdm;
 	u32 pending_flush = 0;
+	u32 mode_3d;

 	if (!phys_enc)
 		return;
@@ -283,6 +284,7 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
 	hw_pp = phys_enc->hw_pp;
 	hw_ctl = phys_enc->hw_ctl;
 	hw_cdm = phys_enc->hw_cdm;
+	mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);

 	DPU_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);

@@ -294,7 +296,8 @@ static void _dpu_encoder_phys_wb_update_flush(struct dpu_encoder_phys *phys_enc)
 	if (hw_ctl->ops.update_pending_flush_wb)
 		hw_ctl->ops.update_pending_flush_wb(hw_ctl, hw_wb->idx);

-	if (hw_ctl->ops.update_pending_flush_merge_3d && hw_pp && hw_pp->merge_3d)
+	if (mode_3d && hw_ctl->ops.update_pending_flush_merge_3d &&
+	    hw_pp && hw_pp->merge_3d)
 		hw_ctl->ops.update_pending_flush_merge_3d(hw_ctl,
 							  hw_pp->merge_3d->idx);

@@ -26,7 +26,7 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
 	end_addr = base_addr + aligned_len;

 	if (!(*reg))
-		*reg = kzalloc(len_padded, GFP_KERNEL);
+		*reg = kvzalloc(len_padded, GFP_KERNEL);

 	if (*reg)
 		dump_addr = *reg;
@@ -48,20 +48,21 @@ static void msm_disp_state_dump_regs(u32 **reg, u32 aligned_len, void __iomem *b
 	}
 }

-static void msm_disp_state_print_regs(u32 **reg, u32 len, void __iomem *base_addr,
-		struct drm_printer *p)
+static void msm_disp_state_print_regs(const u32 *dump_addr, u32 len,
+		void __iomem *base_addr, struct drm_printer *p)
 {
 	int i;
-	u32 *dump_addr = NULL;
 	void __iomem *addr;
 	u32 num_rows;

+	if (!dump_addr) {
+		drm_printf(p, "Registers not stored\n");
+		return;
+	}
+
 	addr = base_addr;
 	num_rows = len / REG_DUMP_ALIGN;

-	if (*reg)
-		dump_addr = *reg;
-
 	for (i = 0; i < num_rows; i++) {
 		drm_printf(p, "0x%lx : %08x %08x %08x %08x\n",
 			   (unsigned long)(addr - base_addr),
@@ -89,7 +90,7 @@ void msm_disp_state_print(struct msm_disp_state *state, struct drm_printer *p)

 	list_for_each_entry_safe(block, tmp, &state->blocks, node) {
 		drm_printf(p, "====================%s================\n", block->name);
-		msm_disp_state_print_regs(&block->state, block->size, block->base_addr, p);
+		msm_disp_state_print_regs(block->state, block->size, block->base_addr, p);
 	}

 	drm_printf(p, "===================dpu drm state================\n");
@@ -161,7 +162,7 @@ void msm_disp_state_free(void *data)

 	list_for_each_entry_safe(block, tmp, &disp_state->blocks, node) {
 		list_del(&block->node);
-		kfree(block->state);
+		kvfree(block->state);
 		kfree(block);
 	}

@@ -542,7 +542,7 @@ static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mo

 	int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;

-	return new_htotal * mode->vtotal * drm_mode_vrefresh(mode);
+	return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal);
 }

 static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
@@ -550,7 +550,7 @@ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
 {
 	unsigned long pclk_rate;

-	pclk_rate = mode->clock * 1000;
+	pclk_rate = mode->clock * 1000u;

 	if (dsc)
 		pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc);
@@ -153,15 +153,6 @@ static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
 	return dividend - 1;
 }

-static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk)
-{
-	u64 fdata = ((u64)pll_cmp) * ref_clk * 10;
-
-	do_div(fdata, HDMI_PLL_CMP_CNT);
-
-	return fdata;
-}
-
 #define HDMI_REF_CLOCK_HZ ((u64)19200000)
 #define HDMI_MHZ_TO_HZ ((u64)1000000)
 static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk)
@@ -298,7 +298,7 @@ static int ivo_t109nw41_init(struct hx83102 *ctx)
 	msleep(60);

 	hx83102_enable_extended_cmds(&dsi_ctx, true);
-	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x0f, 0xcf, 0x42,
+	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETPOWER, 0x2c, 0xed, 0xed, 0x27, 0xe7, 0x52,
 				     0xf5, 0x39, 0x36, 0x36, 0x36, 0x36, 0x32, 0x8b, 0x11, 0x65, 0x00, 0x88,
 				     0xfa, 0xff, 0xff, 0x8f, 0xff, 0x08, 0xd6, 0x33);
 	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETDISP, 0x00, 0x47, 0xb0, 0x80, 0x00, 0x12,
@@ -343,11 +343,11 @@ static int ivo_t109nw41_init(struct hx83102 *ctx)
 				     0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 				     0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
 				     0x00, 0x00, 0x00, 0x00, 0x00, 0x00);
-	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05,
-				     0x12, 0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78,
-				     0x7a, 0x41, 0x50, 0x68, 0x73, 0x04, 0x04, 0x06, 0x0a, 0x0a, 0x05, 0x12,
-				     0x14, 0x17, 0x13, 0x2c, 0x33, 0x39, 0x4b, 0x4c, 0x56, 0x61, 0x78, 0x7a,
-				     0x41, 0x50, 0x68, 0x73);
+	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETGMA, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33,
+				     0x48, 0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b,
+				     0x9c, 0x4d, 0x56, 0x5d, 0x73, 0x00, 0x07, 0x10, 0x17, 0x1c, 0x33, 0x48,
+				     0x50, 0x57, 0x50, 0x68, 0x6e, 0x71, 0x7f, 0x81, 0x8a, 0x8e, 0x9b, 0x9c,
+				     0x4d, 0x56, 0x5d, 0x73);
 	mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83102_SETTP1, 0x07, 0x10, 0x10, 0x1a, 0x26, 0x9e,
 				     0x00, 0x4f, 0xa0, 0x14, 0x14, 0x00, 0x00, 0x00, 0x00, 0x12, 0x0a, 0x02,
 				     0x02, 0x00, 0x33, 0x02, 0x04, 0x18, 0x01);
@@ -43,7 +43,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct drm_encoder *clone_encoder;
-	uint32_t index_mask = 0;
+	uint32_t index_mask = drm_encoder_mask(encoder);
 	int count;

 	/* DIG routing gets problematic */
@@ -635,10 +635,8 @@ out:
 	kunmap_atomic(d.src_addr);
 	if (d.dst_addr)
 		kunmap_atomic(d.dst_addr);
-	if (src_pages)
-		kvfree(src_pages);
-	if (dst_pages)
-		kvfree(dst_pages);
+	kvfree(src_pages);
+	kvfree(dst_pages);
 
 	return ret;
 }
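kvfree(), like kfree() and userspace free(), is defined to be a no-op on NULL, so the guards were pure noise; this is the usual coccinelle-style cleanup. A trivial C analogue:

#include <stdlib.h>

/* free(NULL) is a guaranteed no-op in C, just as kvfree(NULL) is in
 * the kernel, so the "if (ptr)" guard adds nothing. */
int main(void)
{
	char *src_pages = NULL;		/* allocation may never have happened */
	char *dst_pages = malloc(64);	/* may legitimately be NULL too */

	free(src_pages);	/* safe: NULL */
	free(dst_pages);	/* safe either way */
	return 0;
}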
@@ -62,7 +62,7 @@
 #define VMWGFX_DRIVER_MINOR 20
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
-#define VMWGFX_MAX_DISPLAYS 16
+#define VMWGFX_NUM_DISPLAY_UNITS 8
 #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768
 
 #define VMWGFX_MIN_INITIAL_WIDTH 1280
@@ -82,7 +82,7 @@
 #define VMWGFX_NUM_GB_CONTEXT 256
 #define VMWGFX_NUM_GB_SHADER 20000
 #define VMWGFX_NUM_GB_SURFACE 32768
-#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS
+#define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_NUM_DISPLAY_UNITS
 #define VMWGFX_NUM_DXCONTEXT 256
 #define VMWGFX_NUM_DXQUERY 512
 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\
@@ -1283,7 +1283,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 {
 	struct drm_device *dev = &dev_priv->drm;
 	struct vmw_framebuffer_surface *vfbs;
-	enum SVGA3dSurfaceFormat format;
 	struct vmw_surface *surface;
 	int ret;
 
@@ -1320,34 +1319,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 		return -EINVAL;
 	}
 
-	switch (mode_cmd->pixel_format) {
-	case DRM_FORMAT_ARGB8888:
-		format = SVGA3D_A8R8G8B8;
-		break;
-	case DRM_FORMAT_XRGB8888:
-		format = SVGA3D_X8R8G8B8;
-		break;
-	case DRM_FORMAT_RGB565:
-		format = SVGA3D_R5G6B5;
-		break;
-	case DRM_FORMAT_XRGB1555:
-		format = SVGA3D_A1R5G5B5;
-		break;
-	default:
-		DRM_ERROR("Invalid pixel format: %p4cc\n",
-			  &mode_cmd->pixel_format);
-		return -EINVAL;
-	}
-
-	/*
-	 * For DX, surface format validation is done when surface->scanout
-	 * is set.
-	 */
-	if (!has_sm4_context(dev_priv) && format != surface->metadata.format) {
-		DRM_ERROR("Invalid surface format for requested mode.\n");
-		return -EINVAL;
-	}
-
 	vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
 	if (!vfbs) {
 		ret = -ENOMEM;
@@ -1539,6 +1510,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 		DRM_ERROR("Surface size cannot exceed %dx%d\n",
 			  dev_priv->texture_max_width,
 			  dev_priv->texture_max_height);
+		ret = -EINVAL;
 		goto err_out;
 	}
 
@@ -2225,7 +2197,7 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 	struct drm_mode_config *mode_config = &dev->mode_config;
 	struct drm_vmw_update_layout_arg *arg =
 		(struct drm_vmw_update_layout_arg *)data;
-	void __user *user_rects;
+	const void __user *user_rects;
 	struct drm_vmw_rect *rects;
 	struct drm_rect *drm_rects;
 	unsigned rects_size;
@@ -2237,6 +2209,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 					      VMWGFX_MIN_INITIAL_HEIGHT};
 		vmw_du_update_layout(dev_priv, 1, &def_rect);
 		return 0;
+	} else if (arg->num_outputs > VMWGFX_NUM_DISPLAY_UNITS) {
+		return -E2BIG;
 	}
 
 	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
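The -E2BIG check caps a user-controlled count before it sizes an allocation; without it, a hostile num_outputs from the ioctl can demand an absurd buffer. A minimal userspace sketch of the same validate-then-allocate pattern, with invented names and the cap mirroring VMWGFX_NUM_DISPLAY_UNITS:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NUM_DISPLAY_UNITS 8	/* illustrative cap */

struct rect { int32_t x, y, w, h; };

/* Validate the user-supplied count before sizing any allocation. */
static int update_layout(uint32_t num_outputs, struct rect **out)
{
	if (num_outputs > NUM_DISPLAY_UNITS)
		return -E2BIG;	/* reject, do not even try to allocate */

	*out = calloc(num_outputs, sizeof(**out));
	return *out ? 0 : -ENOMEM;
}

int main(void)
{
	struct rect *rects = NULL;

	printf("sane count: %d\n", update_layout(4, &rects));
	free(rects);
	printf("hostile count: %d\n", update_layout(1u << 30, &rects));
	return 0;
}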
@@ -199,9 +199,6 @@ struct vmw_kms_dirty {
 	s32 unit_y2;
 };
-
-#define VMWGFX_NUM_DISPLAY_UNITS 8
-
 
 #define vmw_framebuffer_to_vfb(x) \
 	container_of(x, struct vmw_framebuffer, base)
 #define vmw_framebuffer_to_vfbs(x) \
@@ -886,6 +886,10 @@ static int vmw_stdu_connector_atomic_check(struct drm_connector *conn,
 	struct drm_crtc_state *new_crtc_state;
 
 	conn_state = drm_atomic_get_connector_state(state, conn);
+
+	if (IS_ERR(conn_state))
+		return PTR_ERR(conn_state);
+
 	du = vmw_connector_to_stdu(conn);
 
 	if (!conn_state->crtc)
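drm_atomic_get_connector_state() returns an ERR_PTR() on failure rather than NULL, so the state has to be screened with IS_ERR() before the first dereference. A compact userspace model of the ERR_PTR convention, assuming the usual kernel encoding of errno values at the top of the address space:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

/* Userspace model of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(). */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }

static void *get_connector_state(int fail)
{
	static int state = 42;
	return fail ? ERR_PTR(-ENOMEM) : (void *)&state;
}

int main(void)
{
	void *conn_state = get_connector_state(1);

	if (IS_ERR(conn_state)) {	/* must check before dereferencing */
		printf("error: %ld\n", PTR_ERR(conn_state));
		return 1;
	}
	printf("state: %d\n", *(int *)conn_state);
	return 0;
}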
@@ -2276,9 +2276,12 @@ int vmw_dumb_create(struct drm_file *file_priv,
 	const struct SVGA3dSurfaceDesc *desc = vmw_surface_get_desc(format);
 	SVGA3dSurfaceAllFlags flags = SVGA3D_SURFACE_HINT_TEXTURE |
 				      SVGA3D_SURFACE_HINT_RENDERTARGET |
-				      SVGA3D_SURFACE_SCREENTARGET |
-				      SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
-				      SVGA3D_SURFACE_BIND_RENDER_TARGET;
+				      SVGA3D_SURFACE_SCREENTARGET;
+
+	if (vmw_surface_is_dx_screen_target_format(format)) {
+		flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE |
+			 SVGA3D_SURFACE_BIND_RENDER_TARGET;
+	}
 
 	/*
 	 * Without mob support we're just going to use raw memory buffer
@@ -393,9 +393,6 @@
 
 #define XE2_GLOBAL_INVAL			XE_REG(0xb404)
 
-#define SCRATCH1LPFC				XE_REG(0xb474)
-#define   EN_L3_RW_CCS_CACHE_FLUSH		REG_BIT(0)
-
 #define XE2LPM_L3SQCREG2			XE_REG_MCR(0xb604)
 
 #define XE2LPM_L3SQCREG3			XE_REG_MCR(0xb608)
@@ -980,13 +980,13 @@ void xe_device_declare_wedged(struct xe_device *xe)
 		return;
 	}
 
+	xe_pm_runtime_get_noresume(xe);
+
 	if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
 		drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n");
 		return;
 	}
 
-	xe_pm_runtime_get_noresume(xe);
-
 	if (!atomic_xchg(&xe->wedged.flag, 1)) {
 		xe->needs_flr_on_fini = true;
 		drm_err(&xe->drm,
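drmm_add_action_or_reset() runs the release action immediately when registration fails, so the matching get must already be in place or the failure path drops a reference that was never taken. A toy refcount model of that ordering rule, with invented names:

#include <stdio.h>

static int rpm_refs;	/* stand-in for the runtime-PM refcount */

static void wedged_fini(void) { rpm_refs--; }	/* release action: put */

/* Model of drmm_add_action_or_reset(): on failure the release action
 * fires right away. */
static int add_action_or_reset(int fail, void (*release)(void))
{
	if (fail) {
		release();
		return -1;
	}
	return 0;
}

int main(void)
{
	/* Buggy order: register first, get later. On failure the action
	 * puts a ref we never took and the count goes negative. */
	rpm_refs = 0;
	if (add_action_or_reset(1, wedged_fini) == 0)
		rpm_refs++;
	printf("register-then-get: refs = %d (unbalanced)\n", rpm_refs);

	/* Fixed order: get first, then register. */
	rpm_refs = 0;
	rpm_refs++;	/* xe_pm_runtime_get_noresume() */
	add_action_or_reset(1, wedged_fini);
	printf("get-then-register: refs = %d (balanced)\n", rpm_refs);
	return 0;
}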
@@ -41,11 +41,6 @@
  * user knows an exec writes to a BO and reads from the BO in the next exec, it
  * is the user's responsibility to pass in / out fence between the two execs).
  *
- * Implicit dependencies for external BOs are handled by using the dma-buf
- * implicit dependency uAPI (TODO: add link). To make this works each exec must
- * install the job's fence into the DMA_RESV_USAGE_WRITE slot of every external
- * BO mapped in the VM.
- *
  * We do not allow a user to trigger a bind at exec time rather we have a VM
  * bind IOCTL which uses the same in / out fence interface as exec. In that
  * sense, a VM bind is basically the same operation as an exec from the user
@@ -59,8 +54,8 @@
  * behind any pending kernel operations on any external BOs in VM or any BOs
  * private to the VM. This is accomplished by the rebinds waiting on BOs
  * DMA_RESV_USAGE_KERNEL slot (kernel ops) and kernel ops waiting on all BOs
- * slots (inflight execs are in the DMA_RESV_USAGE_BOOKING for private BOs and
- * in DMA_RESV_USAGE_WRITE for external BOs).
+ * slots (inflight execs are in the DMA_RESV_USAGE_BOOKKEEP for private BOs and
+ * for external BOs).
  *
  * Rebinds / dma-resv usage applies to non-compute mode VMs only as for compute
  * mode VMs we use preempt fences and a rebind worker (TODO: add link).
@@ -304,7 +299,8 @@ retry:
 	xe_sched_job_arm(job);
 	if (!xe_vm_in_lr_mode(vm))
 		drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, &job->drm.s_fence->finished,
-					 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_WRITE);
+					 DMA_RESV_USAGE_BOOKKEEP,
+					 DMA_RESV_USAGE_BOOKKEEP);
 
 	for (i = 0; i < num_syncs; i++) {
 		xe_sync_entry_signal(&syncs[i], &job->drm.s_fence->finished);
@@ -63,7 +63,9 @@ xe_sched_invalidate_job(struct xe_sched_job *job, int threshold)
 static inline void xe_sched_add_pending_job(struct xe_gpu_scheduler *sched,
 					    struct xe_sched_job *job)
 {
+	spin_lock(&sched->base.job_list_lock);
 	list_add(&job->drm.list, &sched->base.pending_list);
+	spin_unlock(&sched->base.job_list_lock);
 }
 
 static inline
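The pending list is also walked and pruned by the scheduler's free-job path, so an unlocked list_add() can interleave with a concurrent removal and corrupt the list. A userspace sketch of the same discipline using pthreads in place of job_list_lock; the job and list types are simplified stand-ins (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

struct job {
	struct job *next;
	int id;
};

static struct job *pending_list;	/* stand-in for sched->base.pending_list */
static pthread_mutex_t job_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the fixed xe_sched_add_pending_job(): every writer of the
 * list takes the same lock the free-job worker uses. */
static void add_pending_job(struct job *job)
{
	pthread_mutex_lock(&job_list_lock);
	job->next = pending_list;
	pending_list = job;
	pthread_mutex_unlock(&job_list_lock);
}

static void *worker(void *arg)
{
	add_pending_job(arg);
	return NULL;
}

int main(void)
{
	struct job a = { .id = 1 }, b = { .id = 2 };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, &a);
	pthread_create(&t2, NULL, worker, &b);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	for (struct job *j = pending_list; j; j = j->next)
		printf("job %d\n", j->id);
	return 0;
}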
@@ -108,7 +108,6 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
 		return;
 
 	if (!xe_gt_is_media_type(gt)) {
-		xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH);
 		reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL);
 		reg |= CG_DIS_CNTLBUS;
 		xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg);
@@ -37,6 +37,15 @@ static long tlb_timeout_jiffies(struct xe_gt *gt)
 	return hw_tlb_timeout + 2 * delay;
 }
 
+static void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
+{
+	if (WARN_ON_ONCE(!fence->gt))
+		return;
+
+	xe_pm_runtime_put(gt_to_xe(fence->gt));
+	fence->gt = NULL; /* fini() should be called once */
+}
+
 static void
 __invalidation_fence_signal(struct xe_device *xe, struct xe_gt_tlb_invalidation_fence *fence)
 {
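Clearing fence->gt and warning when it is already NULL makes fini() effectively single-shot: the PM reference is dropped exactly once no matter whether the signal path or an error path reaches it first. A small model of that teardown, with the GT pointer doubling as the "ref still held" flag:

#include <assert.h>
#include <stdio.h>

struct gt { int pm_refs; };

struct invalidation_fence {
	struct gt *gt;	/* non-NULL while the fence holds a PM ref */
};

/* Mirrors the new fence_fini(): drop the ref once, then disarm. */
static void fence_fini(struct invalidation_fence *fence)
{
	if (!fence->gt) {	/* kernel does WARN_ON_ONCE() here */
		fprintf(stderr, "fini() called twice\n");
		return;
	}
	fence->gt->pm_refs--;
	fence->gt = NULL;
}

int main(void)
{
	struct gt gt = { .pm_refs = 1 };
	struct invalidation_fence fence = { .gt = &gt };

	fence_fini(&fence);	/* signal path drops the ref ... */
	fence_fini(&fence);	/* ... a second call is now harmless */
	assert(gt.pm_refs == 0);
	printf("pm_refs = %d\n", gt.pm_refs);
	return 0;
}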
@@ -204,7 +213,7 @@ static int send_tlb_invalidation(struct xe_guc *guc,
 					 tlb_timeout_jiffies(gt));
 		}
 		spin_unlock_irq(&gt->tlb_invalidation.pending_lock);
-	} else if (ret < 0) {
+	} else {
 		__invalidation_fence_signal(xe, fence);
 	}
 	if (!ret) {
@@ -267,10 +276,8 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt)
 
 		xe_gt_tlb_invalidation_fence_init(gt, &fence, true);
 		ret = xe_gt_tlb_invalidation_guc(gt, &fence);
-		if (ret < 0) {
-			xe_gt_tlb_invalidation_fence_fini(&fence);
+		if (ret)
 			return ret;
-		}
 
 		xe_gt_tlb_invalidation_fence_wait(&fence);
 	} else if (xe_device_uc_enabled(xe) && !xe_device_wedged(xe)) {
@@ -496,7 +503,8 @@ static const struct dma_fence_ops invalidation_fence_ops = {
  * @stack: fence is stack variable
  *
  * Initialize TLB invalidation fence for use. xe_gt_tlb_invalidation_fence_fini
- * must be called if fence is not signaled.
+ * will be automatically called when fence is signalled (all fences must signal),
+ * even on error.
  */
 void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
 				       struct xe_gt_tlb_invalidation_fence *fence,
@@ -516,14 +524,3 @@ void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
 	dma_fence_get(&fence->base);
 	fence->gt = gt;
 }
-
-/**
- * xe_gt_tlb_invalidation_fence_fini - Finalize TLB invalidation fence
- * @fence: TLB invalidation fence to finalize
- *
- * Drop PM ref which fence took durinig init.
- */
-void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence)
-{
-	xe_pm_runtime_put(gt_to_xe(fence->gt));
-}
@@ -28,7 +28,6 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len);
 void xe_gt_tlb_invalidation_fence_init(struct xe_gt *gt,
 				       struct xe_gt_tlb_invalidation_fence *fence,
 				       bool stack);
-void xe_gt_tlb_invalidation_fence_fini(struct xe_gt_tlb_invalidation_fence *fence);
 
 static inline void
 xe_gt_tlb_invalidation_fence_wait(struct xe_gt_tlb_invalidation_fence *fence)
@@ -1030,10 +1030,13 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job)
 
 	/*
 	 * TDR has fired before free job worker. Common if exec queue
-	 * immediately closed after last fence signaled.
+	 * immediately closed after last fence signaled. Add back to pending
+	 * list so job can be freed and kick scheduler ensuring free job is not
+	 * lost.
 	 */
 	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) {
-		guc_exec_queue_free_job(drm_job);
+		xe_sched_add_pending_job(sched, job);
+		xe_sched_submission_start(sched);
 
 		return DRM_GPU_SCHED_STAT_NOMINAL;
 	}
@@ -161,7 +161,11 @@ query_engine_cycles(struct xe_device *xe,
 			cpu_clock);
 
 	xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL);
-	resp.width = 36;
+
+	if (GRAPHICS_VER(xe) >= 20)
+		resp.width = 64;
+	else
+		resp.width = 36;
 
 	/* Only write to the output fields of user query */
 	if (put_user(resp.cpu_timestamp, &query_ptr->cpu_timestamp))
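The width field tells userspace how many valid bits the engine timestamp carries, which decides when the counter wraps; advertising 36 bits on Xe2 hardware that ticks a 64-bit counter makes deltas go wrong past the 36-bit horizon. A quick userspace illustration of the masking arithmetic, with an assumed (illustrative) 19.2 MHz tick rate:

#include <stdio.h>
#include <stdint.h>

/* Mask a free-running counter down to 'width' valid bits, the way
 * userspace must before diffing two samples. */
static uint64_t mask_to_width(uint64_t ts, unsigned int width)
{
	return width == 64 ? ts : (ts & ((1ull << width) - 1));
}

int main(void)
{
	/* Hypothetical sample taken past the 36-bit horizon. */
	uint64_t ts = 1ull << 40;

	printf("raw:       %llu\n", (unsigned long long)ts);
	printf("as 36-bit: %llu (already wrapped)\n",
	       (unsigned long long)mask_to_width(ts, 36));
	printf("as 64-bit: %llu\n",
	       (unsigned long long)mask_to_width(ts, 64));

	/* At an assumed 19.2 MHz tick, 2^36 ticks is just under an hour: */
	printf("36-bit wrap after ~%llu s\n",
	       (unsigned long long)((1ull << 36) / 19200000ull));
	return 0;
}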
@@ -58,7 +58,7 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
 	if (!access_ok(ptr, sizeof(*ptr)))
 		return ERR_PTR(-EFAULT);
 
-	ufence = kmalloc(sizeof(*ufence), GFP_KERNEL);
+	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
 	if (!ufence)
 		return ERR_PTR(-ENOMEM);
 
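kzalloc() zero-fills the allocation, so the fence's signalled flag starts out clear instead of inheriting whatever the heap last held; with plain kmalloc(), a stale nonzero byte can make a brand-new fence look already signalled. A userspace analogue using malloc() versus calloc(); the struct layout is a simplified stand-in:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct user_fence {
	int signalled;	/* stand-in for the flag the waiter checks */
};

int main(void)
{
	/* Dirty the heap so a recycled block holds nonzero bytes. */
	char *scratch = malloc(sizeof(struct user_fence));
	memset(scratch, 0xff, sizeof(struct user_fence));
	free(scratch);

	/* kmalloc() analogue: contents indeterminate, often stale. */
	struct user_fence *bad = malloc(sizeof(*bad));
	printf("malloc'd fence may read as signalled: %d\n", bad->signalled);
	free(bad);

	/* kzalloc() analogue: guaranteed to start unsignalled. */
	struct user_fence *good = calloc(1, sizeof(*good));
	printf("calloc'd fence starts clear: %d\n", good->signalled);
	free(good);
	return 0;
}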
@@ -3199,10 +3199,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 
 			ret = xe_gt_tlb_invalidation_vma(tile->primary_gt,
 							 &fence[fence_id], vma);
-			if (ret < 0) {
-				xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]);
+			if (ret)
 				goto wait;
-			}
 			++fence_id;
 
 			if (!tile->media_gt)
@@ -3214,10 +3212,8 @@ int xe_vm_invalidate_vma(struct xe_vma *vma)
 
 			ret = xe_gt_tlb_invalidation_vma(tile->media_gt,
 							 &fence[fence_id], vma);
-			if (ret < 0) {
-				xe_gt_tlb_invalidation_fence_fini(&fence[fence_id]);
+			if (ret)
 				goto wait;
-			}
 			++fence_id;
 		}
 	}
@@ -710,6 +710,10 @@ static const struct xe_rtp_entry_sr lrc_was[] = {
 				     DIS_PARTIAL_AUTOSTRIP |
 				     DIS_AUTOSTRIP))
 	},
+	{ XE_RTP_NAME("15016589081"),
+	  XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
+	  XE_RTP_ACTIONS(SET(CHICKEN_RASTER_1, DIS_CLIP_NEGATIVE_BOUNDING_BOX))
+	},
 
 	/* Xe2_HPG */
 	{ XE_RTP_NAME("15010599737"),
@@ -169,9 +169,6 @@ int xe_wait_user_fence_ioctl(struct drm_device *dev, void *data,
 			args->timeout = 0;
 	}
 
-	if (!timeout && !(err < 0))
-		err = -ETIME;
-
 	if (q)
 		xe_exec_queue_put(q);
 
@@ -58,6 +58,7 @@ int host1x_memory_context_list_init(struct host1x *host1x)
 		ctx->dev.parent = host1x->dev;
 		ctx->dev.release = host1x_memory_context_release;
 
+		ctx->dev.dma_parms = &ctx->dma_parms;
 		dma_set_max_seg_size(&ctx->dev, UINT_MAX);
 
 		err = device_add(&ctx->dev);
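dma_set_max_seg_size() only stores the limit if the device already has dma_parms backing storage; without it the call fails with -EIO and the segment limit silently stays at the default. A userspace model of that contract, with simplified struct definitions standing in for the kernel's:

#include <stdio.h>
#include <limits.h>

/* Model of struct device_dma_parameters / dma_set_max_seg_size():
 * the setter only works if backing storage was wired up first. */
struct dma_parms { unsigned int max_seg_size; };
struct device { struct dma_parms *dma_parms; };

static int dma_set_max_seg_size_model(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms)
		return -1;	/* the kernel returns -EIO here */
	dev->dma_parms->max_seg_size = size;
	return 0;
}

int main(void)
{
	struct dma_parms parms;
	struct device dev = { 0 };

	printf("without storage: %d\n", dma_set_max_seg_size_model(&dev, UINT_MAX));
	dev.dma_parms = &parms;	/* what the host1x fix adds */
	printf("with storage:    %d\n", dma_set_max_seg_size_model(&dev, UINT_MAX));
	return 0;
}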
@@ -625,12 +625,6 @@ static int host1x_probe(struct platform_device *pdev)
 		goto free_contexts;
 	}
 
-	err = host1x_intr_init(host);
-	if (err) {
-		dev_err(&pdev->dev, "failed to initialize interrupts\n");
-		goto deinit_syncpt;
-	}
-
 	pm_runtime_enable(&pdev->dev);
 
 	err = devm_tegra_core_dev_init_opp_table_common(&pdev->dev);
@@ -642,6 +636,12 @@ static int host1x_probe(struct platform_device *pdev)
 	if (err)
 		goto pm_disable;
 
+	err = host1x_intr_init(host);
+	if (err) {
+		dev_err(&pdev->dev, "failed to initialize interrupts\n");
+		goto pm_put;
+	}
+
 	host1x_debug_init(host);
 
 	err = host1x_register(host);
@@ -658,13 +658,11 @@ unregister:
 	host1x_unregister(host);
 deinit_debugfs:
 	host1x_debug_deinit(host);
-
+	host1x_intr_deinit(host);
+pm_put:
 	pm_runtime_put_sync_suspend(&pdev->dev);
 pm_disable:
 	pm_runtime_disable(&pdev->dev);
-
-	host1x_intr_deinit(host);
 deinit_syncpt:
 	host1x_syncpt_deinit(host);
 free_contexts:
 	host1x_memory_context_list_free(&host->context_list);
@@ -466,6 +466,7 @@ struct host1x_memory_context {
 	refcount_t ref;
 	struct pid *owner;
 
+	struct device_dma_parameters dma_parms;
 	struct device dev;
 	u64 dma_mask;
 	u32 stream_id;