Merge tag 'drm-xe-fixes-2024-10-24-1' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

Driver Changes:
- Increase invalidation timeout to avoid errors in some hosts (Shuicheng)
- Flush worker on timeout (Badal)
- Better handling for force wake failure (Shuicheng)
- Improve argument check on user fence creation (Nirmoy)
- Don't restart parallel queues multiple times on GT reset (Nirmoy)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/trlkoiewtc4x2cyhsxmj3atayyq4zwto4iryea5pvya2ymc3yp@fdx5nhwmiyem
commit 4d95a12beb
@@ -890,7 +890,7 @@ void xe_device_l2_flush(struct xe_device *xe)
 	spin_lock(&gt->global_invl_lock);
 	xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
 
-	if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
+	if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true))
 		xe_gt_err_once(gt, "Global invalidation timeout\n");
 	spin_unlock(&gt->global_invl_lock);
 
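Note: the hunk above only raises the poll bound from 150us to 500us. For reference, xe_mmio_wait32() is a bounded busy-wait on a register; the sketch below is a minimal userspace model of that pattern under the stated assumption that it re-reads until a masked value matches or a deadline passes. All names here (wait_reg32, now_us, fake_reg) are hypothetical stand-ins, not the driver's code.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

/* Poll *reg until (*reg & mask) == want, or timeout_us elapses. */
static int wait_reg32(volatile uint32_t *reg, uint32_t mask, uint32_t want,
		      uint64_t timeout_us)
{
	uint64_t deadline = now_us() + timeout_us;

	for (;;) {
		if ((*reg & mask) == want)
			return 0;
		if (now_us() > deadline)
			return -1;	/* timed out, akin to -ETIMEDOUT */
	}
}

int main(void)
{
	uint32_t fake_reg = 0x1;	/* bit never clears in this demo */

	/* Mirrors the old 150us bound expiring on a slow host. */
	printf("%d\n", wait_reg32(&fake_reg, 0x1, 0x0, 150));
	return 0;
}

The only cost of raising the bound is a longer worst-case stall on hosts that really do time out; hosts that ack quickly are unaffected.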
@@ -115,9 +115,15 @@ static int __domain_wait(struct xe_gt *gt, struct xe_force_wake_domain *domain,
 			     XE_FORCE_WAKE_ACK_TIMEOUT_MS * USEC_PER_MSEC,
 			     &value, true);
 	if (ret)
-		xe_gt_notice(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
-			     domain->id, str_wake_sleep(wake), ERR_PTR(ret),
-			     domain->reg_ack.addr, value);
+		xe_gt_err(gt, "Force wake domain %d failed to ack %s (%pe) reg[%#x] = %#x\n",
+			  domain->id, str_wake_sleep(wake), ERR_PTR(ret),
+			  domain->reg_ack.addr, value);
+	if (value == ~0) {
+		xe_gt_err(gt,
+			  "Force wake domain %d: %s. MMIO unreliable (forcewake register returns 0xFFFFFFFF)!\n",
+			  domain->id, str_wake_sleep(wake));
+		ret = -EIO;
+	}
 
 	return ret;
 }
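Background on the new check: on PCI, reads from a device that has dropped off the bus typically return all-ones, so a register that should never legitimately read back as 0xFFFFFFFF doubles as a cheap health probe, which is why the patch escalates to -EIO. A standalone sketch of just that check, with hypothetical names:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int check_ack(uint32_t ack_value)
{
	if (ack_value == ~0u) {
		fprintf(stderr, "MMIO unreliable (register reads 0xFFFFFFFF)\n");
		return -EIO;	/* device likely gone; not recoverable here */
	}
	return 0;		/* register contents are at least plausible */
}

int main(void)
{
	printf("%d\n", check_ack(0xFFFFFFFFu));	/* -> -EIO (-5) */
	printf("%d\n", check_ack(0x00010001u));	/* -> 0 */
	return 0;
}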
@@ -897,6 +897,24 @@ retry_same_fence:
 
 	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
 
+	/*
+	 * Occasionally it is seen that the G2H worker starts running after a delay of more than
+	 * a second even after being queued and activated by the Linux workqueue subsystem. This
+	 * leads to G2H timeout error. The root cause of issue lies with scheduling latency of
+	 * Lunarlake Hybrid CPU. Issue disappears if we disable Lunarlake atom cores from BIOS
+	 * and this is beyond xe kmd.
+	 *
+	 * TODO: Drop this change once workqueue scheduling delay issue is fixed on LNL Hybrid CPU.
+	 */
+	if (!ret) {
+		flush_work(&ct->g2h_worker);
+		if (g2h_fence.done) {
+			xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
+				   g2h_fence.seqno, action[0]);
+			ret = 1;
+		}
+	}
+
 	/*
 	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
 	 * the stack, since we have no clue if it will fire after the timeout before we can erase
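The rescue pattern here: an expired wait_event_timeout() may mean the worker is merely late, not that the response is lost, so flush_work() synchronously waits for the queued worker and the completion flag is re-checked before a real timeout is declared. A loose userspace analogy using pthreads, where pthread_join() stands in for flush_work(); everything below is illustrative only:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool done;

static void *g2h_worker_fn(void *arg)
{
	(void)arg;
	sleep(2);		/* worker runs late, past the 1-second wait */
	atomic_store(&done, true);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, g2h_worker_fn, NULL);

	sleep(1);		/* stands in for wait_event_timeout(..., HZ) */
	if (!atomic_load(&done)) {
		/* Timed out: flush the worker, then re-check completion. */
		pthread_join(&worker, NULL);	/* analogue of flush_work() */
		if (atomic_load(&done))
			printf("late completion rescued, not a real timeout\n");
	} else {
		pthread_join(&worker, NULL);
	}
	return 0;
}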
@@ -1726,8 +1726,13 @@ void xe_guc_submit_stop(struct xe_guc *guc)
 
 	mutex_lock(&guc->submission_state.lock);
 
-	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+		/* Prevent redundant attempts to stop parallel queues */
+		if (q->guc->id != index)
+			continue;
+
 		guc_exec_queue_stop(guc, q);
+	}
 
 	mutex_unlock(&guc->submission_state.lock);
 
@@ -1765,8 +1770,13 @@ int xe_guc_submit_start(struct xe_guc *guc)
 
 	mutex_lock(&guc->submission_state.lock);
 	atomic_dec(&guc->submission_state.stopped);
-	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q)
+	xa_for_each(&guc->submission_state.exec_queue_lookup, index, q) {
+		/* Prevent redundant attempts to start parallel queues */
+		if (q->guc->id != index)
+			continue;
+
 		guc_exec_queue_start(q);
+	}
 	mutex_unlock(&guc->submission_state.lock);
 
 	wake_up_all(&guc->ct.wq);
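Why the guard in the two hunks above dedups: the q->guc->id != index check implies that a parallel exec queue is registered under several consecutive lookup indices, all pointing at the same queue object whose id records only the first index, so a plain xa_for_each would visit it once per slot. A minimal sketch of that invariant, assuming that layout; the array stands in for the xarray and all names are illustrative:

#include <stdio.h>

struct exec_queue {
	int id;		/* canonical (first) lookup index */
	int width;	/* number of consecutive slots this queue occupies */
};

int main(void)
{
	struct exec_queue q0 = { .id = 0, .width = 1 };
	struct exec_queue q1 = { .id = 1, .width = 3 };	/* parallel queue */
	struct exec_queue *lookup[] = { &q0, &q1, &q1, &q1 };

	for (int index = 0; index < 4; index++) {
		struct exec_queue *q = lookup[index];

		/* Skip duplicate slots so each queue is stopped only once. */
		if (q->id != index)
			continue;
		printf("stop queue %d (width %d)\n", q->id, q->width);
	}
	return 0;
}

Without the guard, q1 would be stopped (or restarted) three times per GT reset.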
@@ -54,8 +54,9 @@ static struct xe_user_fence *user_fence_create(struct xe_device *xe, u64 addr,
 {
 	struct xe_user_fence *ufence;
 	u64 __user *ptr = u64_to_user_ptr(addr);
+	u64 __maybe_unused prefetch_val;
 
-	if (!access_ok(ptr, sizeof(*ptr)))
+	if (get_user(prefetch_val, ptr))
 		return ERR_PTR(-EFAULT);
 
 	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
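The point of this swap: access_ok() only checks that the range lies within the user address space and never touches the memory, so an in-range but unmapped address passes and only faults later when the fence is signaled. get_user() actually performs the read and returns -EFAULT on a fault, rejecting bad addresses up front at creation time; prefetch_val is __maybe_unused because only the fault matters, not the value. A userspace analogy of "probe by actually copying": writing the bytes into a pipe forces the kernel to read from the address, so a bad pointer surfaces as EFAULT immediately. Illustrative only; probe_u64 is a made-up helper:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Returns 0 if addr holds a readable u64, -EFAULT otherwise. */
static int probe_u64(const uint64_t *addr)
{
	int fds[2];
	int ret = 0;

	if (pipe(fds))
		return -errno;
	/* The kernel must copy from addr to fill the pipe buffer. */
	if (write(fds[1], addr, sizeof(*addr)) != (ssize_t)sizeof(*addr))
		ret = -EFAULT;
	close(fds[0]);
	close(fds[1]);
	return ret;
}

int main(void)
{
	uint64_t ok = 42;

	printf("valid pointer: %d\n", probe_u64(&ok));		 /* -> 0 */
	printf("bogus pointer: %d\n", probe_u64((uint64_t *)8)); /* -> -14 */
	return 0;
}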