| author | Dave Airlie <[email protected]> | 2025-03-09 22:19:25 +0000 |
|---|---|---|
| committer | Dave Airlie <[email protected]> | 2025-03-09 23:04:52 +0000 |
| commit | 236f475d29f8e585a72fb6fac7f8bb4dc4b162b7 (patch) | |
| tree | a5b2d1e4bbb4a8f808aa351f386c432e6cb2102a /drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | |
| parent | Merge tag 'drm-misc-next-2025-03-06' of https://gitlab.freedesktop.org/drm/mi... (diff) | |
| parent | drm/amdkfd: Add support for more per-process flag (diff) | |
| download | kernel-236f475d29f8e585a72fb6fac7f8bb4dc4b162b7.tar.gz kernel-236f475d29f8e585a72fb6fac7f8bb4dc4b162b7.zip | |
Merge tag 'amd-drm-next-6.15-2025-03-07' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amdgpu:
- Fix spelling typos
- RAS updates
- VCN 5.0.1 updates
- SubVP fixes
- DCN 4.0.1 fixes
- MSO DPCD fixes
- DIO encoder refactor
- PCON fixes
- Misc cleanups
- DMCUB fixes
- USB4 DP fixes
- DM cleanups
- Backlight cleanups and fixes
- Support platform backlight curves
- Misc code cleanups
- SMU 14 fixes
- JPEG 4.0.3 reset updates
- SR-IOV fixes
- SVM fixes
- GC 12 DCC fixes
- DC DCE 6.x fix
- Hibernation fix
amdkfd:
- Fix possible NULL pointer in queue validation
- Remove unnecessary CP domain validation
- SDMA queue reset support
- Add per process flags
radeon:
- Fix spelling typos
- RS400 hyperZ fix
UAPI:
- Add KFD per process flags for setting precision
Proposed user space: https://github.com/ROCm/ROCR-Runtime/commit/2a64fa5e06e80e0af36df4ce0c76ae52eeec0a9d
Signed-off-by: Dave Airlie <[email protected]>
From: Alex Deucher <[email protected]>
Link: https://patchwork.freedesktop.org/patch/msgid/[email protected]
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 264 |
1 file changed, 153 insertions(+), 111 deletions(-)
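The vcn_v2_5.c changes below move state that used to live on the device (`adev->vcn.num_enc_rings`, `adev->vcn.cur_state`, `adev->vcn.pause_dpg_mode`, the shared `idle_work`) into the per-instance `adev->vcn.inst[i]` structures, and route powergating through a per-instance `set_pg_state` callback dispatched by a common `vcn_set_powergating_state` handler. A minimal sketch of that callback pattern follows; the types and names here (`struct vcn_inst`, `pg_state`, the helpers) are simplified stand-ins for illustration, not the real amdgpu structures:

```c
/* Illustrative only: simplified stand-ins, not the real amdgpu types. */
#include <stdio.h>

enum pg_state { PG_STATE_GATE, PG_STATE_UNGATE };

struct vcn_inst {
	int inst;                 /* instance index */
	int num_enc_rings;        /* now tracked per instance */
	enum pg_state cur_state;  /* now tracked per instance */
	int (*set_pg_state)(struct vcn_inst *vinst, enum pg_state state);
};

/* Per-IP-version handler: operates on one instance, not the whole device. */
static int vcn_v2_5_set_pg_state(struct vcn_inst *vinst, enum pg_state state)
{
	if (state == vinst->cur_state)
		return 0;
	printf("instance %d: %s\n", vinst->inst,
	       state == PG_STATE_GATE ? "stop" : "start");
	vinst->cur_state = state;
	return 0;
}

/* Generic entry point: dispatches to each instance's callback, so one
 * instance can be gated while another keeps running. */
static int vcn_set_powergating_state(struct vcn_inst *inst, int num_inst,
				     enum pg_state state)
{
	int i, r;

	for (i = 0; i < num_inst; i++) {
		r = inst[i].set_pg_state(&inst[i], state);
		if (r)
			return r;
	}
	return 0;
}

int main(void)
{
	struct vcn_inst inst[2] = {
		{ .inst = 0, .num_enc_rings = 2, .cur_state = PG_STATE_UNGATE,
		  .set_pg_state = vcn_v2_5_set_pg_state },
		{ .inst = 1, .num_enc_rings = 2, .cur_state = PG_STATE_UNGATE,
		  .set_pg_state = vcn_v2_5_set_pg_state },
	};

	return vcn_set_powergating_state(inst, 2, PG_STATE_GATE);
}
```

This is why the hw_fini, suspend, and resume paths in the diff now iterate over `num_vcn_inst` instead of making one device-wide call.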
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 105e59f6132b..dff1a8859036 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -95,10 +95,10 @@ static const struct amdgpu_hwip_reg_entry vcn_reg_list_2_5[] = {
 static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev);
 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev);
-static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
-		enum amd_powergating_state state);
-static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
-		int inst_idx, struct dpg_pause_state *new_state);
+static int vcn_v2_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+		enum amd_powergating_state state);
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+		struct dpg_pause_state *new_state);
 static int vcn_v2_5_sriov_start(struct amdgpu_device *adev);
 static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev);
 
@@ -118,11 +118,13 @@ static int amdgpu_ih_clientid_vcns[] = {
 static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_device *adev = ip_block->adev;
+	int i, r;
 
 	if (amdgpu_sriov_vf(adev)) {
 		adev->vcn.num_vcn_inst = 2;
 		adev->vcn.harvest_config = 0;
-		adev->vcn.num_enc_rings = 1;
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++)
+			adev->vcn.inst[i].num_enc_rings = 1;
 	} else {
 		u32 harvest;
 		int i;
@@ -131,13 +133,12 @@ static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
 			harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
 			if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
 				adev->vcn.harvest_config |= 1 << i;
+			adev->vcn.inst[i].num_enc_rings = 2;
 		}
 		if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
-					AMDGPU_VCN_HARVEST_VCN1))
+						 AMDGPU_VCN_HARVEST_VCN1))
 			/* both instances are harvested, disable the block */
 			return -ENOENT;
-
-		adev->vcn.num_enc_rings = 2;
 	}
 
 	vcn_v2_5_set_dec_ring_funcs(adev);
@@ -145,7 +146,15 @@ static int vcn_v2_5_early_init(struct amdgpu_ip_block *ip_block)
 	vcn_v2_5_set_irq_funcs(adev);
 	vcn_v2_5_set_ras_funcs(adev);
 
-	return amdgpu_vcn_early_init(adev);
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		adev->vcn.inst[i].set_pg_state = vcn_v2_5_set_pg_state;
+
+		r = amdgpu_vcn_early_init(adev, i);
+		if (r)
+			return r;
+	}
+
+	return 0;
 }
 
 /**
@@ -164,6 +173,8 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 
 	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
+		volatile struct amdgpu_fw_shared *fw_shared;
+
 		if (adev->vcn.harvest_config & (1 << j))
 			continue;
 		/* VCN DEC TRAP */
@@ -173,7 +184,7 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
 			return r;
 
 		/* VCN ENC TRAP */
-		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+		for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
 			r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
 				i + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[j].irq);
 			if (r)
@@ -185,39 +196,33 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
 			VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
 		if (r)
 			return r;
-	}
-
-	r = amdgpu_vcn_sw_init(adev);
-	if (r)
-		return r;
 
-	amdgpu_vcn_setup_ucode(adev);
+		r = amdgpu_vcn_sw_init(adev, j);
+		if (r)
+			return r;
 
-	r = amdgpu_vcn_resume(adev);
-	if (r)
-		return r;
+		amdgpu_vcn_setup_ucode(adev, j);
 
-	for (j = 0; j < adev->vcn.num_vcn_inst; j++) {
-		volatile struct amdgpu_fw_shared *fw_shared;
+		r = amdgpu_vcn_resume(adev, j);
+		if (r)
+			return r;
 
-		if (adev->vcn.harvest_config & (1 << j))
-			continue;
-		adev->vcn.internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
-		adev->vcn.internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
-		adev->vcn.internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
-		adev->vcn.internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
-		adev->vcn.internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
-		adev->vcn.internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
+		adev->vcn.inst[j].internal.context_id = mmUVD_CONTEXT_ID_INTERNAL_OFFSET;
+		adev->vcn.inst[j].internal.ib_vmid = mmUVD_LMI_RBC_IB_VMID_INTERNAL_OFFSET;
+		adev->vcn.inst[j].internal.ib_bar_low = mmUVD_LMI_RBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET;
+		adev->vcn.inst[j].internal.ib_bar_high = mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET;
+		adev->vcn.inst[j].internal.ib_size = mmUVD_RBC_IB_SIZE_INTERNAL_OFFSET;
+		adev->vcn.inst[j].internal.gp_scratch8 = mmUVD_GP_SCRATCH8_INTERNAL_OFFSET;
 
-		adev->vcn.internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
+		adev->vcn.inst[j].internal.scratch9 = mmUVD_SCRATCH9_INTERNAL_OFFSET;
 		adev->vcn.inst[j].external.scratch9 = SOC15_REG_OFFSET(VCN, j, mmUVD_SCRATCH9);
-		adev->vcn.internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
+		adev->vcn.inst[j].internal.data0 = mmUVD_GPCOM_VCPU_DATA0_INTERNAL_OFFSET;
 		adev->vcn.inst[j].external.data0 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA0);
-		adev->vcn.internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
+		adev->vcn.inst[j].internal.data1 = mmUVD_GPCOM_VCPU_DATA1_INTERNAL_OFFSET;
 		adev->vcn.inst[j].external.data1 = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_DATA1);
-		adev->vcn.internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
+		adev->vcn.inst[j].internal.cmd = mmUVD_GPCOM_VCPU_CMD_INTERNAL_OFFSET;
 		adev->vcn.inst[j].external.cmd = SOC15_REG_OFFSET(VCN, j, mmUVD_GPCOM_VCPU_CMD);
-		adev->vcn.internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
+		adev->vcn.inst[j].internal.nop = mmUVD_NO_OP_INTERNAL_OFFSET;
 		adev->vcn.inst[j].external.nop = SOC15_REG_OFFSET(VCN, j, mmUVD_NO_OP);
 
 		ring = &adev->vcn.inst[j].ring_dec;
@@ -237,7 +242,7 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
 		if (r)
 			return r;
 
-		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+		for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
 			enum amdgpu_ring_priority_level hw_prio =
 				amdgpu_vcn_get_enc_ring_prio(i);
 			ring = &adev->vcn.inst[j].ring_enc[i];
@@ -265,6 +270,9 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
 
 		if (amdgpu_vcnfw_log)
 			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
+
+		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
+			adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
 	}
 
 	if (amdgpu_sriov_vf(adev)) {
@@ -273,9 +281,6 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block)
 			return r;
 	}
 
-	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-		adev->vcn.pause_dpg_mode = vcn_v2_5_pause_dpg_mode;
-
 	r = amdgpu_vcn_ras_sw_init(adev);
 	if (r)
 		return r;
@@ -319,15 +324,18 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block)
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_free_mm_table(adev);
 
-	r = amdgpu_vcn_suspend(adev);
-	if (r)
-		return r;
-
-	r = amdgpu_vcn_sw_fini(adev);
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		r = amdgpu_vcn_suspend(adev, i);
+		if (r)
+			return r;
+		r = amdgpu_vcn_sw_fini(adev, i);
+		if (r)
+			return r;
+	}
 
 	kfree(adev->vcn.ip_dump);
 
-	return r;
+	return 0;
 }
 
 /**
@@ -366,7 +374,7 @@ static int vcn_v2_5_hw_init(struct amdgpu_ip_block *ip_block)
 		if (r)
 			return r;
 
-		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+		for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
 			ring = &adev->vcn.inst[j].ring_enc[i];
 			r = amdgpu_ring_test_helper(ring);
 			if (r)
@@ -390,19 +398,21 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
 	struct amdgpu_device *adev = ip_block->adev;
 	int i;
 
-	cancel_delayed_work_sync(&adev->vcn.idle_work);
-
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
 
+		cancel_delayed_work_sync(&vinst->idle_work);
+
 		if ((adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) ||
-		    (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
+		    (vinst->cur_state != AMD_PG_STATE_GATE &&
 		     RREG32_SOC15(VCN, i, mmUVD_STATUS)))
-			vcn_v2_5_set_powergating_state(ip_block, AMD_PG_STATE_GATE);
+			vinst->set_pg_state(vinst, AMD_PG_STATE_GATE);
 
 		if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
-			amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+			amdgpu_irq_put(adev, &vinst->ras_poison_irq, 0);
 	}
 
 	return 0;
@@ -417,15 +427,20 @@ static int vcn_v2_5_hw_fini(struct amdgpu_ip_block *ip_block)
  */
 static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
 {
-	int r;
+	struct amdgpu_device *adev = ip_block->adev;
+	int r, i;
 
 	r = vcn_v2_5_hw_fini(ip_block);
 	if (r)
 		return r;
 
-	r = amdgpu_vcn_suspend(ip_block->adev);
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		r = amdgpu_vcn_suspend(ip_block->adev, i);
+		if (r)
+			return r;
+	}
 
-	return r;
+	return 0;
 }
 
 /**
@@ -437,11 +452,14 @@ static int vcn_v2_5_suspend(struct amdgpu_ip_block *ip_block)
  */
 static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
 {
-	int r;
+	struct amdgpu_device *adev = ip_block->adev;
+	int r, i;
 
-	r = amdgpu_vcn_resume(ip_block->adev);
-	if (r)
-		return r;
+	for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+		r = amdgpu_vcn_resume(ip_block->adev, i);
+		if (r)
+			return r;
+	}
 
 	r = vcn_v2_5_hw_init(ip_block);
 
@@ -451,13 +469,14 @@ static int vcn_v2_5_resume(struct amdgpu_ip_block *ip_block)
 /**
  * vcn_v2_5_mc_resume - memory controller programming
  *
- * @adev: amdgpu_device pointer
- * @i: instance to resume
+ * @vinst: VCN instance
  *
  * Let the VCN memory controller know it's offsets
  */
-static void vcn_v2_5_mc_resume(struct amdgpu_device *adev, int i)
+static void vcn_v2_5_mc_resume(struct amdgpu_vcn_inst *vinst)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int i = vinst->inst;
 	uint32_t size;
 	uint32_t offset;
 
@@ -510,8 +529,11 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev, int i)
 			AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
 }
 
-static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_vcn_inst *vinst,
+					bool indirect)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int inst_idx = vinst->inst;
 	uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[inst_idx].fw->size + 4);
 	uint32_t offset;
 
@@ -609,13 +631,14 @@ static void vcn_v2_5_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
 /**
  * vcn_v2_5_disable_clock_gating - disable VCN clock gating
  *
- * @adev: amdgpu_device pointer
- * @i: instance to disable clockgating on
+ * @vinst: VCN instance
  *
  * Disable clock gating for VCN block
  */
-static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev, int i)
+static void vcn_v2_5_disable_clock_gating(struct amdgpu_vcn_inst *vinst)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int i = vinst->inst;
 	uint32_t data;
 
 	if (adev->vcn.harvest_config & (1 << i))
@@ -721,9 +744,11 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev, int i)
 	WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
 }
 
-static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
-		uint8_t sram_sel, int inst_idx, uint8_t indirect)
+static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_vcn_inst *vinst,
+		uint8_t sram_sel, uint8_t indirect)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int inst_idx = vinst->inst;
 	uint32_t reg_data = 0;
 
 	/* enable sw clock gating control */
@@ -772,13 +797,14 @@ static void vcn_v2_5_clock_gating_dpg_mode(struct amdgpu_device *adev,
 /**
  * vcn_v2_5_enable_clock_gating - enable VCN clock gating
  *
- * @adev: amdgpu_device pointer
- * @i: instance to enable clockgating on
+ * @vinst: VCN instance
  *
 * Enable clock gating for VCN block
  */
-static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev, int i)
+static void vcn_v2_5_enable_clock_gating(struct amdgpu_vcn_inst *vinst)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int i = vinst->inst;
 	uint32_t data = 0;
 
 	if (adev->vcn.harvest_config & (1 << i))
@@ -829,9 +855,11 @@ static void vcn_v2_5_enable_clock_gating(struct amdgpu_device *adev, int i)
 	WREG32_SOC15(VCN, i, mmUVD_SUVD_CGC_CTRL, data);
 }
 
-static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
+static void vcn_v2_6_enable_ras(struct amdgpu_vcn_inst *vinst,
 				bool indirect)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int inst_idx = vinst->inst;
 	uint32_t tmp;
 
 	if (amdgpu_ip_version(adev, UVD_HWIP, 0) != IP_VERSION(2, 6, 0))
@@ -856,8 +884,10 @@ static void vcn_v2_6_enable_ras(struct amdgpu_device *adev, int inst_idx,
 		tmp, 0, indirect);
 }
 
-static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
+static int vcn_v2_5_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int inst_idx = vinst->inst;
 	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
 	struct amdgpu_ring *ring;
 	uint32_t rb_bufsz, tmp;
@@ -875,7 +905,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 		adev->vcn.inst[inst_idx].dpg_sram_curr_addr = (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr;
 
 	/* enable clock gating */
-	vcn_v2_5_clock_gating_dpg_mode(adev, 0, inst_idx, indirect);
+	vcn_v2_5_clock_gating_dpg_mode(vinst, 0, indirect);
 
 	/* enable VCPU clock */
 	tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT);
@@ -924,7 +954,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect);
 
-	vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect);
+	vcn_v2_5_mc_resume_dpg_mode(vinst, indirect);
 
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
 		VCN, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect);
@@ -935,7 +965,7 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
 		VCN, 0, mmUVD_LMI_CTRL2), 0, 0, indirect);
 
-	vcn_v2_6_enable_ras(adev, inst_idx, indirect);
+	vcn_v2_6_enable_ras(vinst, indirect);
 
 	/* unblock VCPU register access */
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
@@ -1000,8 +1030,10 @@ static int vcn_v2_5_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, boo
 	return 0;
 }
 
-static int vcn_v2_5_start(struct amdgpu_device *adev, int i)
+static int vcn_v2_5_start(struct amdgpu_vcn_inst *vinst)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int i = vinst->inst;
 	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
 	struct amdgpu_ring *ring;
 
@@ -1015,7 +1047,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev, int i)
 		amdgpu_dpm_enable_vcn(adev, true, i);
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-		return vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram);
+		return vcn_v2_5_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram);
 
 	/* disable register anti-hang mechanism */
 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS), 0,
@@ -1029,7 +1061,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev, int i)
 		return 0;
 
 	/* SW clock gating */
-	vcn_v2_5_disable_clock_gating(adev, i);
+	vcn_v2_5_disable_clock_gating(vinst);
 
 	/* enable VCPU clock */
 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_VCPU_CNTL),
@@ -1074,7 +1106,7 @@ static int vcn_v2_5_start(struct amdgpu_device *adev, int i)
 		 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) |
 		 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)));
 
-	vcn_v2_5_mc_resume(adev, i);
+	vcn_v2_5_mc_resume(vinst);
 
 	/* VCN global tiling registers */
 	WREG32_SOC15(VCN, i, mmUVD_GFX8_ADDR_CONFIG,
@@ -1379,8 +1411,10 @@ static int vcn_v2_5_sriov_start(struct amdgpu_device *adev)
 	return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table);
 }
 
-static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
+static int vcn_v2_5_stop_dpg_mode(struct amdgpu_vcn_inst *vinst)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int inst_idx = vinst->inst;
 	uint32_t tmp;
 
 	/* Wait for power status to be 1 */
@@ -1407,20 +1441,25 @@ static int vcn_v2_5_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
 	return 0;
 }
 
-static int vcn_v2_5_stop(struct amdgpu_device *adev, int i)
+static int vcn_v2_5_stop(struct amdgpu_vcn_inst *vinst)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int i = vinst->inst;
 	uint32_t tmp;
 	int r;
 
 	if (adev->vcn.harvest_config & (1 << i))
 		return 0;
 
-	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)
-		return vcn_v2_5_stop_dpg_mode(adev, i);
+	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
+		r = vcn_v2_5_stop_dpg_mode(vinst);
+		goto done;
+	}
 
 	/* wait for vcn idle */
 	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_STATUS, UVD_STATUS__IDLE, 0x7);
 	if (r)
-		return r;
+		goto done;
 
 	tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK |
 		UVD_LMI_STATUS__READ_CLEAN_MASK |
@@ -1428,7 +1467,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev, int i)
 		UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK;
 	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
 	if (r)
-		return r;
+		goto done;
 
 	/* block LMI UMC channel */
 	tmp = RREG32_SOC15(VCN, i, mmUVD_LMI_CTRL2);
@@ -1439,7 +1478,7 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev, int i)
 		UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK;
 	r = SOC15_WAIT_ON_RREG(VCN, i, mmUVD_LMI_STATUS, tmp, tmp);
 	if (r)
-		return r;
+		goto done;
 
 	/* block VCPU register access */
 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_RB_ARB_CTRL),
@@ -1458,22 +1497,25 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev, int i)
 	/* clear status */
 	WREG32_SOC15(VCN, i, mmUVD_STATUS, 0);
 
-	vcn_v2_5_enable_clock_gating(adev, i);
+	vcn_v2_5_enable_clock_gating(vinst);
 
 	/* enable register anti-hang mechanism */
 	WREG32_P(SOC15_REG_OFFSET(VCN, i, mmUVD_POWER_STATUS),
 		 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK,
 		 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK);
 
+done:
 	if (adev->pm.dpm_enabled)
 		amdgpu_dpm_enable_vcn(adev, false, i);
 
-	return 0;
+	return r;
 }
 
-static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
-				   int inst_idx, struct dpg_pause_state *new_state)
+static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst,
+				   struct dpg_pause_state *new_state)
 {
+	struct amdgpu_device *adev = vinst->adev;
+	int inst_idx = vinst->inst;
 	struct amdgpu_ring *ring;
 	uint32_t reg_data = 0;
 	int ret_code = 0;
@@ -1743,16 +1785,16 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
 	for (j = 0; j < adev->vcn.num_vcn_inst; ++j) {
 		if (adev->vcn.harvest_config & (1 << j))
 			continue;
-		for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+		for (i = 0; i < adev->vcn.inst[j].num_enc_rings; ++i) {
 			adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
 			adev->vcn.inst[j].ring_enc[i].me = j;
 		}
 	}
 }
 
-static bool vcn_v2_5_is_idle(void *handle)
+static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block)
 {
-	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct amdgpu_device *adev = ip_block->adev;
 	int i, ret = 1;
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
@@ -1793,39 +1835,39 @@ static int vcn_v2_5_set_clockgating_state(struct amdgpu_ip_block *ip_block,
 		return 0;
 
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
+		struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i];
+
 		if (enable) {
-			if (!vcn_v2_5_is_idle(adev))
+			if (!vcn_v2_5_is_idle(ip_block))
 				return -EBUSY;
-			vcn_v2_5_enable_clock_gating(adev, i);
+			vcn_v2_5_enable_clock_gating(vinst);
 		} else {
-			vcn_v2_5_disable_clock_gating(adev, i);
+			vcn_v2_5_disable_clock_gating(vinst);
 		}
 	}
 
 	return 0;
 }
 
-static int vcn_v2_5_set_powergating_state(struct amdgpu_ip_block *ip_block,
-					  enum amd_powergating_state state)
+static int vcn_v2_5_set_pg_state(struct amdgpu_vcn_inst *vinst,
+				 enum amd_powergating_state state)
 {
-	struct amdgpu_device *adev = ip_block->adev;
-	int ret = 0, i;
+	struct amdgpu_device *adev = vinst->adev;
+	int ret;
 
 	if (amdgpu_sriov_vf(adev))
 		return 0;
 
-	if (state == adev->vcn.cur_state)
+	if (state == vinst->cur_state)
 		return 0;
 
-	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
-		if (state == AMD_PG_STATE_GATE)
-			ret |= vcn_v2_5_stop(adev, i);
-		else
-			ret |= vcn_v2_5_start(adev, i);
-	}
+	if (state == AMD_PG_STATE_GATE)
+		ret = vcn_v2_5_stop(vinst);
+	else
+		ret = vcn_v2_5_start(vinst);
 
 	if (!ret)
-		adev->vcn.cur_state = state;
+		vinst->cur_state = state;
 
 	return ret;
 }
@@ -1902,10 +1944,10 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
 	for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
 		if (adev->vcn.harvest_config & (1 << i))
 			continue;
-		adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
+		adev->vcn.inst[i].irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
 		adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;
 
-		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
+		adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.inst[i].num_enc_rings + 1;
 		adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
 	}
 }
@@ -1982,7 +2024,7 @@ static const struct amd_ip_funcs vcn_v2_5_ip_funcs = {
 	.is_idle = vcn_v2_5_is_idle,
 	.wait_for_idle = vcn_v2_5_wait_for_idle,
 	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
-	.set_powergating_state = vcn_v2_5_set_powergating_state,
+	.set_powergating_state = vcn_set_powergating_state,
 	.dump_ip_state = vcn_v2_5_dump_ip_state,
 	.print_ip_state = vcn_v2_5_print_ip_state,
 };
@@ -1999,7 +2041,7 @@ static const struct amd_ip_funcs vcn_v2_6_ip_funcs = {
 	.is_idle = vcn_v2_5_is_idle,
 	.wait_for_idle = vcn_v2_5_wait_for_idle,
 	.set_clockgating_state = vcn_v2_5_set_clockgating_state,
-	.set_powergating_state = vcn_v2_5_set_powergating_state,
+	.set_powergating_state = vcn_set_powergating_state,
 	.dump_ip_state = vcn_v2_5_dump_ip_state,
 	.print_ip_state = vcn_v2_5_print_ip_state,
 };
```
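One behavioral detail worth noting in the diff above: `vcn_v2_5_stop()` now funnels its early error returns through a `done:` label, so `amdgpu_dpm_enable_vcn(adev, false, i)` runs on every exit path rather than only on success. A minimal sketch of that single-exit cleanup shape, with hypothetical stand-in helpers in place of the hardware calls:

```c
/* Illustrative only: the shape of the single-exit error path used in
 * vcn_v2_5_stop(), with dummy helpers standing in for the hardware calls. */
#include <stdio.h>

static int wait_idle(void)     { return 0; } /* stand-in for SOC15_WAIT_ON_RREG */
static int drain_lmi(void)     { return 0; } /* stand-in for the LMI drain waits */
static void dpm_enable(int on) { printf("dpm %s\n", on ? "on" : "off"); }

static int vcn_stop(void)
{
	int r;

	r = wait_idle();
	if (r)
		goto done; /* was: return r — which skipped the dpm bookkeeping */

	r = drain_lmi();
	if (r)
		goto done;

	/* ... power-down sequence ... */

done:
	/* Runs on success and on every error path alike. */
	dpm_enable(0);
	return r;
}

int main(void) { return vcn_stop(); }
```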
