drm/xe: Clean up VM / exec queue file lock usage.
Both the VM and exec queue file locks protect only the lookup of, and reference get on, the object; nothing more. These locks are not intended to protect anything else done while they are held. XArrays have their own internal locking too, so there is no need to take the VM / exec queue file lock other than when doing a lookup and reference get. Add some kernel doc to make this clear and clean up a few typos too.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240921011712.2681510-1-matthew.brost@intel.com
(cherry picked from commit fe4f5d4b661666a45b48fe7f95443f8fefc09c8c)
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
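For context, the one pattern these locks are meant to serialize is a lookup followed by a reference get. A minimal sketch of that pattern, modeled on the driver's xe_exec_queue_lookup() helper (shown for illustration; not part of this patch):

	struct xe_exec_queue *xe_exec_queue_lookup(struct xe_file *xef, u32 id)
	{
		struct xe_exec_queue *q;

		/* The file lock covers only the lookup + reference get. */
		mutex_lock(&xef->exec_queue.lock);
		q = xa_load(&xef->exec_queue.xa, id);
		if (q)
			xe_exec_queue_get(q);
		mutex_unlock(&xef->exec_queue.lock);

		return q;
	}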
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -171,10 +171,8 @@ static void xe_file_close(struct drm_device *dev, struct drm_file *file)
 		xe_exec_queue_kill(q);
 		xe_exec_queue_put(q);
 	}
-	mutex_lock(&xef->vm.lock);
 	xa_for_each(&xef->vm.xa, idx, vm)
 		xe_vm_close_and_put(vm);
-	mutex_unlock(&xef->vm.lock);
 
 	xe_file_put(xef);
 
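The mutex around the teardown loop can go because xa_for_each() does its own locking: each iteration goes through xa_find()/xa_find_after(), which take the RCU read lock internally. A minimal sketch with hypothetical names:

	struct xarray xa;	/* hypothetical xarray */
	unsigned long index;
	void *entry;

	/* Safe without an external lock; each step is RCU-protected. */
	xa_for_each(&xa, index, entry)
		consume(entry);	/* hypothetical consumer */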
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -570,15 +570,23 @@ struct xe_file {
 	struct {
 		/** @vm.xe: xarray to store VMs */
 		struct xarray xa;
-		/** @vm.lock: protects file VM state */
+		/**
+		 * @vm.lock: Protects VM lookup + reference and removal
+		 * from the file xarray. Not intended to be an outer lock
+		 * which does anything else while held.
+		 */
 		struct mutex lock;
 	} vm;
 
 	/** @exec_queue: Submission exec queue state for file */
 	struct {
-		/** @exec_queue.xe: xarray to store engines */
+		/** @exec_queue.xa: xarray to store exec queues */
 		struct xarray xa;
-		/** @exec_queue.lock: protects file engine state */
+		/**
+		 * @exec_queue.lock: Protects exec queue lookup + reference
+		 * and removal from the file xarray. Not intended to be an
+		 * outer lock which does anything else while held.
+		 */
 		struct mutex lock;
 	} exec_queue;
 
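The new kernel doc also states what these locks are not: an outer lock to hold while doing real work. The anti-pattern it warns against is exactly what the show_run_ticks() hunk below removes (pre-patch code, shown for illustration):

	mutex_lock(&xef->exec_queue.lock);
	xa_for_each(&xef->exec_queue.xa, i, q)
		xe_exec_queue_update_run_ticks(q);	/* real work under the file lock */
	mutex_unlock(&xef->exec_queue.lock);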
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -283,8 +283,15 @@ static void show_run_ticks(struct drm_printer *p, struct drm_file *file)
 
 	/* Accumulate all the exec queues from this client */
 	mutex_lock(&xef->exec_queue.lock);
-	xa_for_each(&xef->exec_queue.xa, i, q)
+	xa_for_each(&xef->exec_queue.xa, i, q) {
+		xe_exec_queue_get(q);
+		mutex_unlock(&xef->exec_queue.lock);
+
 		xe_exec_queue_update_run_ticks(q);
+
+		mutex_lock(&xef->exec_queue.lock);
+		xe_exec_queue_put(q);
+	}
 	mutex_unlock(&xef->exec_queue.lock);
 
 	/* Get the total GPU cycles */
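The reworked loop takes a reference under the lock, drops the lock for the actual update, then retakes it to drop the reference and continue. The extra reference keeps q alive across the unlocked window, and resuming xa_for_each() after relocking is safe because the iterator restarts the walk from the stored index i.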
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -635,9 +635,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 		}
 	}
 
-	mutex_lock(&xef->exec_queue.lock);
 	err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL);
-	mutex_unlock(&xef->exec_queue.lock);
 	if (err)
 		goto kill_exec_queue;
 
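Dropping the mutex around xa_alloc() is safe because xa_alloc() already serializes itself with the xarray's internal spinlock (xa_lock). The same reasoning covers the xa_alloc() and xa_erase() calls in the two xe_vm_create_ioctl() hunks below.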
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -1765,9 +1765,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 	if (IS_ERR(vm))
 		return PTR_ERR(vm);
 
-	mutex_lock(&xef->vm.lock);
 	err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
-	mutex_unlock(&xef->vm.lock);
 	if (err)
 		goto err_close_and_put;
 
@@ -1799,9 +1797,7 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 	return 0;
 
 err_free_id:
-	mutex_lock(&xef->vm.lock);
 	xa_erase(&xef->vm.xa, id);
-	mutex_unlock(&xef->vm.lock);
 err_close_and_put:
 	xe_vm_close_and_put(vm);
 