
drm/panthor: Call panthor_sched_post_reset() even if the reset failed

We need to undo what was done in panthor_sched_pre_reset() even if the
reset failed. When that happens, we simply flag all previously running
groups as terminated to unblock things.

Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
Reviewed-by: Steven Price <steven.price@arm.com>
Reviewed-by: Liviu Dudau <liviu.dudau@arm.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240502183813.1612017-5-boris.brezillon@collabora.com
Author: Boris Brezillon <boris.brezillon@collabora.com>
Date:   2024-05-02 20:38:12 +02:00
parent a257e81822
commit 3ce4322b1a
3 files changed, 16 insertions(+), 12 deletions(-)

drivers/gpu/drm/panthor/panthor_device.c

@@ -129,13 +129,8 @@ static void panthor_device_reset_work(struct work_struct *work)
 	panthor_gpu_l2_power_on(ptdev);
 	panthor_mmu_post_reset(ptdev);
 	ret = panthor_fw_post_reset(ptdev);
-	if (ret)
-		goto out_dev_exit;
-
 	atomic_set(&ptdev->reset.pending, 0);
-	panthor_sched_post_reset(ptdev);
-
-out_dev_exit:
+	panthor_sched_post_reset(ptdev, ret != 0);
 	drm_dev_exit(cookie);
 
 	if (ret) {

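The hunk above is the crux of the fix: the early goto out_dev_exit used to skip panthor_sched_post_reset() entirely when the firmware failed to come back, leaving reset.in_progress set and the stopped groups never restarted. A minimal, self-contained sketch of the control-flow change (hypothetical stand-in names, not the driver code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for panthor_fw_post_reset() and
 * panthor_sched_post_reset(); this sketches the shape of the fix,
 * not the actual driver code.
 */
static int fw_post_reset(void)
{
	return -1; /* pretend the firmware failed to come back */
}

static void sched_post_reset(bool reset_failed)
{
	/* Undo the pre-reset work; degrade gracefully on failure. */
	printf("post-reset cleanup, reset_failed=%d\n", reset_failed);
}

static void reset_work(void)
{
	int ret = fw_post_reset();

	/* Before the fix, the equivalent of
	 *	if (ret)
	 *		goto out_dev_exit;
	 * jumped past the cleanup call below. Now the cleanup always runs
	 * and is told whether the reset failed.
	 */
	sched_post_reset(ret != 0);
}

int main(void)
{
	reset_work();
	return 0;
}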
drivers/gpu/drm/panthor/panthor_sched.c

@@ -2733,15 +2733,22 @@ void panthor_sched_pre_reset(struct panthor_device *ptdev)
 	mutex_unlock(&sched->reset.lock);
 }
 
-void panthor_sched_post_reset(struct panthor_device *ptdev)
+void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed)
 {
 	struct panthor_scheduler *sched = ptdev->scheduler;
 	struct panthor_group *group, *group_tmp;
 
 	mutex_lock(&sched->reset.lock);
 
-	list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node)
+	list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) {
+		/* Consider all previously running group as terminated if the
+		 * reset failed.
+		 */
+		if (reset_failed)
+			group->state = PANTHOR_CS_GROUP_TERMINATED;
+
 		panthor_group_start(group);
+	}
 
 	/* We're done resetting the GPU, clear the reset.in_progress bit so we can
 	 * kick the scheduler.
@@ -2749,9 +2756,11 @@
 	atomic_set(&sched->reset.in_progress, false);
 	mutex_unlock(&sched->reset.lock);
 
-	sched_queue_delayed_work(sched, tick, 0);
-	sched_queue_work(sched, sync_upd);
+	/* No need to queue a tick and update syncs if the reset failed. */
+	if (!reset_failed) {
+		sched_queue_delayed_work(sched, tick, 0);
+		sched_queue_work(sched, sync_upd);
+	}
 }
 
 static void group_sync_upd_work(struct work_struct *work)

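The loop keeps its list_for_each_entry_safe() form even after growing braces: the _safe variant is the idiom for walking a list whose current entry may be unlinked or moved inside the body, which presumably happens here when panthor_group_start() puts the group back on a run queue via the same run_node (an inference from the shared field, not something this diff shows). A minimal sketch of the idiom (hypothetical item type, standard <linux/list.h> API):

#include <linux/list.h>

/* Hypothetical element type; `node` plays the role of `run_node`. */
struct item {
	struct list_head node;
};

static void restart_all(struct list_head *stopped, struct list_head *running)
{
	struct item *it, *tmp;

	/* list_for_each_entry_safe() caches the next entry in `tmp` before
	 * the loop body runs, so the body may unlink or move the current
	 * entry. A plain list_for_each_entry() would follow `it`'s updated
	 * pointers into the destination list after list_move_tail().
	 */
	list_for_each_entry_safe(it, tmp, stopped, node)
		list_move_tail(&it->node, running);
}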
drivers/gpu/drm/panthor/panthor_sched.h

@@ -40,7 +40,7 @@ void panthor_group_pool_destroy(struct panthor_file *pfile);
 int panthor_sched_init(struct panthor_device *ptdev);
 void panthor_sched_unplug(struct panthor_device *ptdev);
 void panthor_sched_pre_reset(struct panthor_device *ptdev);
-void panthor_sched_post_reset(struct panthor_device *ptdev);
+void panthor_sched_post_reset(struct panthor_device *ptdev, bool reset_failed);
 void panthor_sched_suspend(struct panthor_device *ptdev);
 void panthor_sched_resume(struct panthor_device *ptdev);
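
With the widened prototype, every caller has to state whether the reset succeeded, as the panthor_device.c hunk above now does:

	ret = panthor_fw_post_reset(ptdev);
	atomic_set(&ptdev->reset.pending, 0);
	panthor_sched_post_reset(ptdev, ret != 0);

Passing ret != 0 instead of adding a separate entry point keeps a single teardown path; the function itself decides how much of the normal post-reset work a failed reset still warrants.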