author	Alex Deucher <[email protected]>	2025-04-16 20:59:08 +0000
committer	Alex Deucher <[email protected]>	2025-04-22 12:51:45 +0000
commit	ba324ffb25999e14b8488d085ae9916d6f4a6bcf (patch)
tree	ed6398499f24c6c55eb450ad78ba055f0d26f281 /drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
parent	drm/amd/display: Remove unused *vbios_smu_set_dprefclk (diff)
drm/amdgpu/userq: optimize enforce isolation and s/r
If user queues are disabled for all IPs in the case of suspend and resume and for gfx/compute in the case of enforce isolation, we can return early.

Reviewed-by: Prike Liang <[email protected]>
Signed-off-by: Alex Deucher <[email protected]>
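The suspend/resume guard in the hunks below is a plain bitmask test on the supported-IP mask. Here is a minimal standalone sketch of that pattern; `userq_suspend` and the `hw_ip` enum are hypothetical stand-ins, with `ip_mask` modeling the return value of the driver's `amdgpu_userqueue_get_supported_ip_mask()`:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's IP-type numbering
 * (AMDGPU_HW_IP_GFX is 0 and AMDGPU_HW_IP_COMPUTE is 1 in
 * include/uapi/drm/amdgpu_drm.h). */
enum hw_ip { HW_IP_GFX, HW_IP_COMPUTE, HW_IP_DMA };

/* ip_mask models amdgpu_userqueue_get_supported_ip_mask():
 * one bit set per IP type that exposes user queues. */
static int userq_suspend(uint32_t ip_mask)
{
	/* The optimization from the patch: if no IP supports user
	 * queues there are no queues to unmap, so return before
	 * taking the mutex and walking the manager/queue lists. */
	if (!ip_mask)
		return 0;

	/* ... mutex_lock(), iterate managers, unmap queues ... */
	return 0;
}

int main(void)
{
	printf("%d\n", userq_suspend(0));                /* early return */
	printf("%d\n", userq_suspend(1u << HW_IP_GFX));  /* full path */
	return 0;
}
```

The win is that on hardware with no user-queue support, suspend and resume no longer take `adev->userq_mutex` or traverse the (empty) manager list at all.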
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c	18
1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
index 59488acd89fa..17bf2d568ae2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userqueue.c
@@ -759,12 +759,16 @@ void amdgpu_userq_mgr_fini(struct amdgpu_userq_mgr *userq_mgr)
int amdgpu_userq_suspend(struct amdgpu_device *adev)
{
+ u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
const struct amdgpu_userq_funcs *userq_funcs;
struct amdgpu_usermode_queue *queue;
struct amdgpu_userq_mgr *uqm, *tmp;
int queue_id;
int ret = 0;
+ if (!ip_mask)
+ return 0;
+
mutex_lock(&adev->userq_mutex);
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
cancel_delayed_work_sync(&uqm->resume_work);
@@ -779,12 +783,16 @@ int amdgpu_userq_suspend(struct amdgpu_device *adev)
int amdgpu_userq_resume(struct amdgpu_device *adev)
{
+ u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
const struct amdgpu_userq_funcs *userq_funcs;
struct amdgpu_usermode_queue *queue;
struct amdgpu_userq_mgr *uqm, *tmp;
int queue_id;
int ret = 0;
+ if (!ip_mask)
+ return 0;
+
mutex_lock(&adev->userq_mutex);
list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) {
idr_for_each_entry(&uqm->userq_idr, queue, queue_id) {
@@ -799,12 +807,17 @@ int amdgpu_userq_resume(struct amdgpu_device *adev)
int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx)
{
+ u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
const struct amdgpu_userq_funcs *userq_funcs;
struct amdgpu_usermode_queue *queue;
struct amdgpu_userq_mgr *uqm, *tmp;
int queue_id;
int ret = 0;
+ /* only need to stop gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
mutex_lock(&adev->userq_mutex);
if (adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already stopped!\n");
@@ -827,12 +840,17 @@ int amdgpu_userq_stop_sched_for_enforce_isolation(struct amdgpu_device *adev,
int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev,
u32 idx)
{
+ u32 ip_mask = amdgpu_userqueue_get_supported_ip_mask(adev);
const struct amdgpu_userq_funcs *userq_funcs;
struct amdgpu_usermode_queue *queue;
struct amdgpu_userq_mgr *uqm, *tmp;
int queue_id;
int ret = 0;
+ /* only need to stop gfx/compute */
+ if (!(ip_mask & ((1 << AMDGPU_HW_IP_GFX) | (1 << AMDGPU_HW_IP_COMPUTE))))
+ return 0;
+
mutex_lock(&adev->userq_mutex);
if (!adev->userq_halt_for_enforce_isolation)
dev_warn(adev->dev, "userq scheduling already started!\n");
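The enforce-isolation guard is narrower than the suspend/resume one: isolation only gates gfx and compute queues, so the early return tests just those two bits of the mask. A hedged standalone sketch of that submask check, reusing the hypothetical `hw_ip` numbering from the earlier example:

```c
#include <stdint.h>
#include <stdio.h>

/* Same numbering as AMDGPU_HW_IP_GFX/AMDGPU_HW_IP_COMPUTE in
 * include/uapi/drm/amdgpu_drm.h; HW_IP_DMA stands in for any
 * non-gfx/compute IP type. */
enum hw_ip { HW_IP_GFX, HW_IP_COMPUTE, HW_IP_DMA };

/* The guard used by both enforce-isolation paths: only the gfx
 * and compute bits of the supported-IP mask matter here. */
static int isolation_applies(uint32_t ip_mask)
{
	return !!(ip_mask & ((1u << HW_IP_GFX) | (1u << HW_IP_COMPUTE)));
}

int main(void)
{
	/* A device whose user queues are DMA-only returns early. */
	printf("%d\n", isolation_applies(1u << HW_IP_DMA));      /* 0 */
	/* Any gfx or compute support takes the full stop/start path. */
	printf("%d\n", isolation_applies(1u << HW_IP_COMPUTE));  /* 1 */
	return 0;
}
```

In the patch itself the same expression guards both `amdgpu_userq_stop_sched_for_enforce_isolation()` and `amdgpu_userq_start_sched_for_enforce_isolation()`, so devices whose user-queue support excludes gfx/compute skip the mutex and the queue walk entirely.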