Merge tag 'drm-xe-fixes-2024-10-10' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

Driver Changes:
- Fix error checking with xa_store() (Matthew Auld)
- Fix missing freq restore on GSC load error (Vinay)
- Fix wedged_mode file permission (Matt Roper)
- Fix use-after-free in ct communication (Matthew Auld)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/jri65tmv3bjbhqhxs5smv45nazssxzhtwphojem4uufwtjuliy@gsdhlh6kzsdy
commit ac44ff7cec (Dave Airlie, 2024-10-11 13:54:05 +10:00)
4 changed files with 33 additions and 26 deletions

drivers/gpu/drm/xe/xe_debugfs.c

@@ -187,7 +187,7 @@ void xe_debugfs_register(struct xe_device *xe)
 	debugfs_create_file("forcewake_all", 0400, root, xe,
 			    &forcewake_all_fops);
 
-	debugfs_create_file("wedged_mode", 0400, root, xe,
+	debugfs_create_file("wedged_mode", 0600, root, xe,
 			    &wedged_mode_fops);
 
 	for (mem_type = XE_PL_VRAM0; mem_type <= XE_PL_VRAM1; ++mem_type) {
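
For reference, the mode argument of debugfs_create_file() is what userspace sees as the node's permission bits: wedged_mode has both a read and a write handler, so 0400 misadvertised a writable attribute as read-only. A minimal sketch of a read/write debugfs attribute, using hypothetical demo_* names rather than the driver's actual fops:

#include <linux/debugfs.h>

static u64 demo_val;

/* Trivial get/set callbacks backing the attribute. */
static int demo_get(void *data, u64 *val) { *val = *(u64 *)data; return 0; }
static int demo_set(void *data, u64 val) { *(u64 *)data = val; return 0; }
DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");

static void demo_register(struct dentry *root)
{
	/* 0600 advertises owner read/write, matching the fops above;
	 * 0400 would present a writable attribute as read-only. */
	debugfs_create_file("demo", 0600, root, &demo_val, &demo_fops);
}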

drivers/gpu/drm/xe/xe_gt.c

@@ -874,7 +874,9 @@ int xe_gt_sanitize_freq(struct xe_gt *gt)
 	int ret = 0;
 
 	if ((!xe_uc_fw_is_available(&gt->uc.gsc.fw) ||
-	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw)) && XE_WA(gt, 22019338487))
+	     xe_uc_fw_is_loaded(&gt->uc.gsc.fw) ||
+	     xe_uc_fw_is_in_error_state(&gt->uc.gsc.fw)) &&
+	    XE_WA(gt, 22019338487))
 		ret = xe_guc_pc_restore_stashed_freq(&gt->uc.guc.pc);
 
 	return ret;
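
The fixed condition treats the GSC firmware as settled once no further load activity can occur: it is absent, fully loaded, or stuck in an error state; only then may the frequency stashed for workaround 22019338487 be restored. Restated as a helper for clarity (gsc_fw_settled() is hypothetical, not driver code):

/* Hypothetical restatement of the fixed predicate. */
static bool gsc_fw_settled(struct xe_uc_fw *fw)
{
	return !xe_uc_fw_is_available(fw) ||	/* never going to load */
	       xe_uc_fw_is_loaded(fw) ||	/* load completed */
	       xe_uc_fw_is_in_error_state(fw);	/* load failed: the new case */
}

Before the fix, a failed GSC load left the condition false, so the stashed frequency was never restored.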

drivers/gpu/drm/xe/xe_guc_ct.c

@@ -667,16 +667,12 @@ static int __guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action,
 		num_g2h = 1;
 
 		if (g2h_fence_needs_alloc(g2h_fence)) {
-			void *ptr;
-
 			g2h_fence->seqno = next_ct_seqno(ct, true);
-			ptr = xa_store(&ct->fence_lookup,
-				       g2h_fence->seqno,
-				       g2h_fence, GFP_ATOMIC);
-			if (IS_ERR(ptr)) {
-				ret = PTR_ERR(ptr);
+			ret = xa_err(xa_store(&ct->fence_lookup,
+					      g2h_fence->seqno, g2h_fence,
+					      GFP_ATOMIC));
+			if (ret)
 				goto out;
-			}
 		}
 
 		seqno = g2h_fence->seqno;
@@ -879,14 +875,11 @@ retry:
 retry_same_fence:
 	ret = guc_ct_send(ct, action, len, 0, 0, &g2h_fence);
 	if (unlikely(ret == -ENOMEM)) {
-		void *ptr;
-
 		/* Retry allocation /w GFP_KERNEL */
-		ptr = xa_store(&ct->fence_lookup,
-			       g2h_fence.seqno,
-			       &g2h_fence, GFP_KERNEL);
-		if (IS_ERR(ptr))
-			return PTR_ERR(ptr);
+		ret = xa_err(xa_store(&ct->fence_lookup, g2h_fence.seqno,
+				      &g2h_fence, GFP_KERNEL));
+		if (ret)
+			return ret;
 
 		goto retry_same_fence;
 	} else if (unlikely(ret)) {
@@ -903,16 +896,26 @@ retry_same_fence:
 	}
 
 	ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
+
+	/*
+	 * Ensure we serialize with completion side to prevent UAF with fence going out of scope on
+	 * the stack, since we have no clue if it will fire after the timeout before we can erase
+	 * from the xa. Also we have some dependent loads and stores below for which we need the
+	 * correct ordering, and we lack the needed barriers.
+	 */
+	mutex_lock(&ct->lock);
 	if (!ret) {
-		xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x",
-			  g2h_fence.seqno, action[0]);
+		xe_gt_err(gt, "Timed out wait for G2H, fence %u, action %04x, done %s",
+			  g2h_fence.seqno, action[0], str_yes_no(g2h_fence.done));
 		xa_erase_irq(&ct->fence_lookup, g2h_fence.seqno);
+		mutex_unlock(&ct->lock);
 
 		return -ETIME;
 	}
 
 	if (g2h_fence.retry) {
 		xe_gt_dbg(gt, "H2G action %#x retrying: reason %#x\n",
 			  action[0], g2h_fence.reason);
+		mutex_unlock(&ct->lock);
 		goto retry;
 	}
 
 	if (g2h_fence.fail) {
@@ -921,7 +924,12 @@ retry_same_fence:
 		ret = -EIO;
 	}
 
-	return ret > 0 ? response_buffer ? g2h_fence.response_len : g2h_fence.response_data : ret;
+	if (ret > 0)
+		ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data;
+
+	mutex_unlock(&ct->lock);
+
+	return ret;
 }
 
 /**
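
The g2h_fence here lives on the sender's stack, so a G2H completion that fires after wait_event_timeout() returns but before the fence is erased from fence_lookup would write through a dangling pointer. Holding ct->lock across the erase, and across the lookup on the completion side, makes the two paths mutually exclusive; the lock's acquire/release semantics also order the reads of g2h_fence.retry and g2h_fence.fail after the wait. A hedged sketch of the pattern, with hypothetical function names:

/* Sketch only; not the driver's actual code. */
static void waiter_timed_out(struct xe_guc_ct *ct, u32 seqno)
{
	mutex_lock(&ct->lock);
	/* Erased under the lock: the handler can no longer find the fence. */
	xa_erase(&ct->fence_lookup, seqno);
	mutex_unlock(&ct->lock);
	/* Only now may the stack frame holding the fence safely unwind. */
}

static void g2h_completion(struct xe_guc_ct *ct, u32 seqno)
{
	struct g2h_fence *fence;

	mutex_lock(&ct->lock);
	fence = xa_load(&ct->fence_lookup, seqno);
	if (fence)
		fence->done = true;	/* fence still in scope: erase excluded */
	mutex_unlock(&ct->lock);
}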

drivers/gpu/drm/xe/xe_guc_submit.c

@@ -320,7 +320,6 @@ static void __release_guc_id(struct xe_guc *guc, struct xe_exec_queue *q, u32 xa
 static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
 {
 	int ret;
-	void *ptr;
 	int i;
 
 	/*
@@ -340,12 +339,10 @@ static int alloc_guc_id(struct xe_guc *guc, struct xe_exec_queue *q)
 	q->guc->id = ret;
 
 	for (i = 0; i < q->width; ++i) {
-		ptr = xa_store(&guc->submission_state.exec_queue_lookup,
-			       q->guc->id + i, q, GFP_NOWAIT);
-		if (IS_ERR(ptr)) {
-			ret = PTR_ERR(ptr);
+		ret = xa_err(xa_store(&guc->submission_state.exec_queue_lookup,
+				      q->guc->id + i, q, GFP_NOWAIT));
+		if (ret)
 			goto err_release;
-		}
 	}
 
 	return 0;
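
All three converted call sites fix the same misuse: on failure xa_store() returns an XA_ERROR()-encoded internal entry, not an ERR_PTR(), so IS_ERR()/PTR_ERR() decode it incorrectly; on success it returns whatever was previously stored at the index (often NULL), for which xa_err() yields 0. xa_err() is the XArray API's documented way to extract the errno. A minimal generic illustration (store_entry() is hypothetical, not part of the driver):

#include <linux/xarray.h>

/* Hypothetical wrapper showing the idiom. */
static int store_entry(struct xarray *xa, unsigned long index, void *entry)
{
	/*
	 * xa_err() returns the errno hidden in an XA_ERROR() entry
	 * (e.g. -ENOMEM) and 0 for any ordinary previous entry,
	 * including NULL when the slot was empty.
	 */
	return xa_err(xa_store(xa, index, entry, GFP_KERNEL));
}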