aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
diff options
context:
space:
mode:
authorAlex Deucher <[email protected]>2025-06-16 21:45:05 +0000
committerAlex Deucher <[email protected]>2025-06-30 15:58:22 +0000
commit38b20968f3d8a603a979ac50ff6cf3553e0b3daf (patch)
tree98ef3fcac37571f4ea54a32a324bb8c7ed9e9529 /drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
parentdrm/amdgpu: move guilty handling into ring resets (diff)
downloadkernel-38b20968f3d8a603a979ac50ff6cf3553e0b3daf.tar.gz
kernel-38b20968f3d8a603a979ac50ff6cf3553e0b3daf.zip
drm/amdgpu: move scheduler wqueue handling into callbacks
Move the scheduler wqueue stopping and starting into the ring reset callbacks. On some IPs we have to reset an engine which may have multiple queues. Move the wqueue handling into the backend so we can handle them as needed based on the type of reset available.

Reviewed-by: Christian König <[email protected]>
Signed-off-by: Alex Deucher <[email protected]>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c')
-rw-r--r-- drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 17
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index cf5733d5d26d..7e26a44dcc1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -554,22 +554,16 @@ int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id)
struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id];
struct amdgpu_ring *gfx_ring = &sdma_instance->ring;
struct amdgpu_ring *page_ring = &sdma_instance->page;
- bool gfx_sched_stopped = false, page_sched_stopped = false;
mutex_lock(&sdma_instance->engine_reset_mutex);
/* Stop the scheduler's work queue for the GFX and page rings if they are running.
* This ensures that no new tasks are submitted to the queues while
* the reset is in progress.
*/
- if (!amdgpu_ring_sched_ready(gfx_ring)) {
- drm_sched_wqueue_stop(&gfx_ring->sched);
- gfx_sched_stopped = true;
- }
+ drm_sched_wqueue_stop(&gfx_ring->sched);
- if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) {
+ if (adev->sdma.has_page_queue)
drm_sched_wqueue_stop(&page_ring->sched);
- page_sched_stopped = true;
- }
if (sdma_instance->funcs->stop_kernel_queue) {
sdma_instance->funcs->stop_kernel_queue(gfx_ring);
@@ -596,12 +590,9 @@ exit:
* to be submitted to the queues after the reset is complete.
*/
if (!ret) {
- if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) {
- drm_sched_wqueue_start(&gfx_ring->sched);
- }
- if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) {
+ drm_sched_wqueue_start(&gfx_ring->sched);
+ if (adev->sdma.has_page_queue)
drm_sched_wqueue_start(&page_ring->sched);
- }
}
mutex_unlock(&sdma_instance->engine_reset_mutex);