1

drm/xe: Build PM into GuC CT layer

Take PM ref when any G2H are outstanding, drop when none are
outstanding.

To safely ensure we have PM ref when in the GuC CT layer, a PM ref needs
to be held when scheduler messages are pending too.

v2:
 - Add outer PM protections to xe_file_close (CI)
v3:
 - Only take PM ref 0->1 and drop on 1->0 (Matthew Auld)
v4:
 - Add assert to G2H increment function
v5:
 - Rebase
v6:
 - Declare xe as local variable in xe_file_close (CI)

Fixes: dd08ebf6c3 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Nirmoy Das <nirmoy.das@intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Nirmoy Das <nirmoy.das@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240719172905.1527927-5-matthew.brost@intel.com
(cherry picked from commit d930c19fdf)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
This commit is contained in:
Matthew Brost 2024-07-19 10:29:05 -07:00 committed by Rodrigo Vivi
parent 64da63cd3f
commit 55ea73aacf
No known key found for this signature in database
GPG Key ID: FA625F640EEB13CA
3 changed files with 18 additions and 1 deletions

View File

@@ -138,11 +138,14 @@ void xe_file_put(struct xe_file *xef)
 static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 {
+	struct xe_device *xe = to_xe_device(dev);
 	struct xe_file *xef = file->driver_priv;
 	struct xe_vm *vm;
 	struct xe_exec_queue *q;
 	unsigned long idx;
 
+	xe_pm_runtime_get(xe);
+
 	/*
 	 * No need for exec_queue.lock here as there is no contention for it
 	 * when FD is closing as IOCTLs presumably can't be modifying the
@@ -159,6 +162,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 	mutex_unlock(&xef->vm.lock);
 
 	xe_file_put(xef);
+
+	xe_pm_runtime_put(xe);
 }
 
 static const struct drm_ioctl_desc xe_ioctls[] = {

View File

@@ -327,6 +327,8 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct,
 	xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding == 0 ||
 		     state == XE_GUC_CT_STATE_STOPPED);
 
+	if (ct->g2h_outstanding)
+		xe_pm_runtime_put(ct_to_xe(ct));
 	ct->g2h_outstanding = 0;
 	ct->state = state;
 
@@ -495,10 +497,15 @@ static void h2g_reserve_space(struct xe_guc_ct *ct, u32 cmd_len)
 static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h)
 {
 	xe_gt_assert(ct_to_gt(ct), g2h_len <= ct->ctbs.g2h.info.space);
+	xe_gt_assert(ct_to_gt(ct), (!g2h_len && !num_g2h) ||
+		     (g2h_len && num_g2h));
 
 	if (g2h_len) {
 		lockdep_assert_held(&ct->fast_lock);
 
+		if (!ct->g2h_outstanding)
+			xe_pm_runtime_get_noresume(ct_to_xe(ct));
+
 		ct->ctbs.g2h.info.space -= g2h_len;
 		ct->g2h_outstanding += num_g2h;
 	}
@@ -511,7 +518,8 @@ static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)
 		     ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space);
 
 	ct->ctbs.g2h.info.space += g2h_len;
-	--ct->g2h_outstanding;
+	if (!--ct->g2h_outstanding)
+		xe_pm_runtime_put(ct_to_xe(ct));
 }
 
 static void g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len)

View File

@@ -1393,6 +1393,8 @@ static void guc_exec_queue_process_msg(struct xe_sched_msg *msg)
 	default:
 		XE_WARN_ON("Unknown message type");
 	}
+
+	xe_pm_runtime_put(guc_to_xe(exec_queue_to_guc(msg->private_data)));
 }
 
 static const struct drm_sched_backend_ops drm_sched_ops = {
@@ -1482,6 +1484,8 @@ static void guc_exec_queue_kill(struct xe_exec_queue *q)
 static void guc_exec_queue_add_msg(struct xe_exec_queue *q, struct xe_sched_msg *msg,
 				   u32 opcode)
 {
+	xe_pm_runtime_get_noresume(guc_to_xe(exec_queue_to_guc(q)));
+
 	INIT_LIST_HEAD(&msg->link);
 	msg->opcode = opcode;
 	msg->private_data = q;