| author | Linus Torvalds <[email protected]> | 2022-03-24 23:19:43 +0000 |
|---|---|---|
| committer | Linus Torvalds <[email protected]> | 2022-03-24 23:19:43 +0000 |
| commit | b14ffae378aa1db993e62b01392e70d1e585fb23 (patch) | |
| tree | 0ac179d24e8a62ec4c2732ed18d90d83da4b82d7 /drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | |
| parent | Merge branch 'akpm' (patches from Andrew) (diff) | |
| parent | Merge tag 'amd-drm-next-5.18-2022-03-18' of https://gitlab.freedesktop.org/ag... (diff) | |
| download | kernel-b14ffae378aa1db993e62b01392e70d1e585fb23.tar.gz kernel-b14ffae378aa1db993e62b01392e70d1e585fb23.zip | |
Merge tag 'drm-next-2022-03-24' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
"Lots of work all over, Intel improving DG2 support, amdkfd CRIU
support, msm new hw support, and faster fbdev support.
dma-buf:
- rename dma-buf-map to iosys-map
core:
- move buddy allocator to core
- add pci/platform init macros
- improve EDID parser deep color handling
- EDID timing type 7 support
- add GPD Win Max quirk
- add yes/no helpers to string_helpers
- flatten syncobj chains
- add nomodeset support to lots of drivers
- improve fb-helper clipping support
- add default property value interface
fbdev:
- improve fbdev ops speed
ttm:
- add a backpointer from ttm bo to ttm resource
dp:
- move displayport headers
- add a dp helper module
bridge:
- anx7625 atomic support, HDCP support
panel:
- split out panel-lvds and lvds bindings
- find panels in OF subnodes
privacy:
- add chromeos privacy screen support
fb:
- hot unplug fw fb on forced removal
simpledrm:
- request region instead of marking ioresource busy
- add panel orientation property
udmabuf:
- fix oops with 0 pages
amdgpu:
- power management code cleanup
- Enable freesync video mode by default
- RAS code cleanup
- Improve VRAM access for debug using SDMA
- SR-IOV rework special register access and fixes
- profiling power state request ioctl
- expose IP discovery via sysfs
- Cyan Skillfish updates
- GC 10.3.7, SDMA 5.2.7, DCN 3.1.6 updates
- expose benchmark tests via debugfs
- add module param to disable XGMI for testing
- GPU reset debugfs register dumping support
amdkfd:
- CRIU support
- SDMA queue fixes
radeon:
- UVD suspend fix
- iMac backlight fix
i915:
- minimal parallel submission for execlists
- DG2-G12 subplatform added
- DG2 programming workarounds
- DG2 accelerated migration support
- flat CCS and CCS engine support for XeHP
- initial small BAR support
- drop fake LMEM support
- ADL-N PCH support
- bigjoiner updates
- introduce VMA resources and async unbinding
- register definitions cleanups
- multi-FBC refactoring
- DG1 OPROM over SPI support
- ADL-N platform enabling
- opregion mailbox #5 support
- DP MST ESI improvements
- drm device based logging
- async flip optimisation for DG2
- CPU arch abstraction fixes
- improve GuC ADS init to work on aarch64
- tweak TTM LRU priority hint
- GuC 69.0.3 support
- remove short term execbuf pins
nouveau:
- higher DP/eDP bitrates
- backlight fixes
msm:
- dpu + dp support for sc8180x
- dp support for sm8350
- dpu + dsi support for qcm2290
- 10nm dsi phy tuning support
- bridge support for dp encoder
- gpu support for additional 7c3 SKUs
ingenic:
- HDMI support for JZ4780
- aux channel EDID support
ast:
- AST2600 support
- add wide screen support
- create DP/DVI connectors
omapdrm:
- fix implicit dma_buf fencing
vc4:
- add CSC + full range support
- better display firmware handoff
panfrost:
- add initial dual-core GPU support
stm:
- new revision support
- fb handover support
mediatek:
- convert display binding document to YAML format
- add mt8195 display device binding
- allow commands to be sent during video mode
- add wait_for_event for CRTC disable by cmdq
tegra:
- YUV format support
rcar-du:
- LVDS support for M3-W+ (R8A77961)
exynos:
- BGR pixel format for FIMD device"
* tag 'drm-next-2022-03-24' of git://anongit.freedesktop.org/drm/drm: (1529 commits)
drm/i915/display: Do not re-enable PSR after it was marked as not reliable
drm/i915/display: Fix HPD short pulse handling for eDP
drm/amdgpu: Use drm_mode_copy()
drm/radeon: Use drm_mode_copy()
drm/amdgpu: Use ternary operator in `vcn_v1_0_start()`
drm/amdgpu: Remove pointless on stack mode copies
drm/amd/pm: fix indenting in __smu_cmn_reg_print_error()
drm/amdgpu/dc: fix typos in comments
drm/amdgpu: fix typos in comments
drm/amd/pm: fix typos in comments
drm/amdgpu: Add stolen reserved memory for MI25 SRIOV.
drm/amdgpu: Merge get_reserved_allocation to get_vbios_allocations.
drm/amdkfd: evict svm bo worker handle error
drm/amdgpu/vcn: fix vcn ring test failure in igt reload test
drm/amdgpu: only allow secure submission on rings which support that
drm/amdgpu: fixed the warnings reported by kernel test robot
drm/amd/display: 3.2.177
drm/amd/display: [FW Promotion] Release 0.0.108.0
drm/amd/display: Add save/restore PANEL_PWRSEQ_REF_DIV2
drm/amd/display: Wait for hubp read line for Pollock
...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c | 53 |
1 file changed, 32 insertions, 21 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index da11ceba0698..c87263ed20ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -25,6 +25,7 @@
 #include "amdgpu.h"
 #include "amdgpu_vcn.h"
 #include "amdgpu_pm.h"
+#include "amdgpu_cs.h"
 #include "soc15.h"
 #include "soc15d.h"
 #include "vcn_v2_0.h"
@@ -213,11 +214,14 @@ static int vcn_v3_0_sw_init(void *handle)
 				return r;
 		}
 
-		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
 		fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_SW_RING_FLAG) |
 					     cpu_to_le32(AMDGPU_VCN_MULTI_QUEUE_FLAG) |
 					     cpu_to_le32(AMDGPU_VCN_FW_SHARED_FLAG_0_RB);
 		fw_shared->sw_ring.is_enabled = cpu_to_le32(DEC_SW_RING_ENABLED);
+
+		if (amdgpu_vcnfw_log)
+			amdgpu_vcn_fwlog_init(&adev->vcn.inst[i]);
 	}
 
 	if (amdgpu_sriov_vf(adev)) {
@@ -249,7 +253,7 @@ static int vcn_v3_0_sw_fini(void *handle)
 			if (adev->vcn.harvest_config & (1 << i))
 				continue;
 
-			fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+			fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
 			fw_shared->present_flag_0 = 0;
 			fw_shared->sw_ring.is_enabled = false;
 		}
@@ -295,6 +299,7 @@ static int vcn_v3_0_hw_init(void *handle)
 			ring = &adev->vcn.inst[i].ring_dec;
 			if (amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, i)) {
 				ring->sched.ready = false;
+				ring->no_scheduler = true;
 				dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
 			} else {
 				ring->wptr = 0;
@@ -307,6 +312,7 @@ static int vcn_v3_0_hw_init(void *handle)
 				ring = &adev->vcn.inst[i].ring_enc[j];
 				if (amdgpu_vcn_is_disabled_vcn(adev, VCN_ENCODE_RING, i)) {
 					ring->sched.ready = false;
+					ring->no_scheduler = true;
 					dev_info(adev->dev, "ring %s is disabled by hypervisor\n", ring->name);
 				} else {
 					ring->wptr = 0;
@@ -469,9 +475,9 @@ static void vcn_v3_0_mc_resume(struct amdgpu_device *adev, int inst)
 
 	/* non-cache window */
 	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW,
-		lower_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr));
+		lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
 	WREG32_SOC15(VCN, inst, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH,
-		upper_32_bits(adev->vcn.inst[inst].fw_shared_gpu_addr));
+		upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr));
 	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_OFFSET0, 0);
 	WREG32_SOC15(VCN, inst, mmUVD_VCPU_NONCACHE_SIZE0,
 		AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_fw_shared)));
@@ -558,10 +564,10 @@ static void vcn_v3_0_mc_resume_dpg_mode(struct amdgpu_device *adev, int inst_idx
 	/* non-cache window */
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
 			VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW),
-		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
+		lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
 			VCN, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH),
-		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared_gpu_addr), 0, indirect);
+		upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect);
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
 			VCN, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect);
 	WREG32_SOC15_DPG_MODE(inst_idx, SOC15_DPG_MODE_OFFSET(
@@ -923,7 +929,7 @@ static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst)
 
 static int vcn_v3_0_start_dpg_mode(struct amdgpu_device *adev, int inst_idx, bool indirect)
 {
-	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+	volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
 	struct amdgpu_ring *ring;
 	uint32_t rb_bufsz, tmp;
 
@@ -1220,7 +1226,7 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
 		tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
 		WREG32_SOC15(VCN, i, mmUVD_RBC_RB_CNTL, tmp);
 
-		fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+		fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr;
 		fw_shared->multi_queue.decode_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
 
 		/* programm the RB_BASE for ring buffer */
@@ -1611,7 +1617,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
 
 			if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) {
 				/* Restore */
-				fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
+				fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr;
 				fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
 				ring = &adev->vcn.inst[inst_idx].ring_enc[0];
 				ring->wptr = 0;
@@ -1700,7 +1706,7 @@ static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring)
 
 	if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
 		/*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */
-		fw_shared = adev->vcn.inst[ring->me].fw_shared_cpu_addr;
+		fw_shared = adev->vcn.inst[ring->me].fw_shared.cpu_addr;
 		fw_shared->rb.wptr = lower_32_bits(ring->wptr);
 		WREG32_SOC15(VCN, ring->me, mmUVD_SCRATCH2,
 			lower_32_bits(ring->wptr));
@@ -1780,6 +1786,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_DEC,
 	.align_mask = 0x3f,
 	.nop = VCN_DEC_SW_CMD_NO_OP,
+	.secure_submission_supported = true,
 	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
 	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
@@ -1806,21 +1813,23 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = {
 	.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper,
 };
 
-static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p)
+static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p,
+				struct amdgpu_job *job)
 {
 	struct drm_gpu_scheduler **scheds;
 
 	/* The create msg must be in the first IB submitted */
-	if (atomic_read(&p->entity->fence_seq))
+	if (atomic_read(&job->base.entity->fence_seq))
 		return -EINVAL;
 
 	scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC]
 		[AMDGPU_RING_PRIO_DEFAULT].sched;
-	drm_sched_entity_modify_sched(p->entity, scheds, 1);
+	drm_sched_entity_modify_sched(job->base.entity, scheds, 1);
 	return 0;
 }
 
-static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
+static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job,
+			    uint64_t addr)
 {
 	struct ttm_operation_ctx ctx = { false, false };
 	struct amdgpu_bo_va_mapping *map;
@@ -1891,7 +1900,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr)
 		if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11)
 			continue;
 
-		r = vcn_v3_0_limit_sched(p);
+		r = vcn_v3_0_limit_sched(p, job);
 		if (r)
 			goto out;
 	}
@@ -1902,10 +1911,10 @@ out:
 }
 
 static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
-					   uint32_t ib_idx)
+					   struct amdgpu_job *job,
+					   struct amdgpu_ib *ib)
 {
-	struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
-	struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+	struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
 	uint32_t msg_lo = 0, msg_hi = 0;
 	unsigned i;
 	int r;
@@ -1915,8 +1924,8 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
 		return 0;
 
 	for (i = 0; i < ib->length_dw; i += 2) {
-		uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
-		uint32_t val = amdgpu_get_ib_value(p, ib_idx, i + 1);
+		uint32_t reg = amdgpu_ib_get_value(ib, i);
+		uint32_t val = amdgpu_ib_get_value(ib, i + 1);
 
 		if (reg == PACKET0(p->adev->vcn.internal.data0, 0)) {
 			msg_lo = val;
@@ -1924,7 +1933,8 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
 			msg_hi = val;
 		} else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) &&
 			   val == 0) {
-			r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo);
+			r = vcn_v3_0_dec_msg(p, job,
+					     ((u64)msg_hi) << 32 | msg_lo);
 			if (r)
 				return r;
 		}
@@ -1935,6 +1945,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
 static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
 	.type = AMDGPU_RING_TYPE_VCN_DEC,
 	.align_mask = 0xf,
+	.secure_submission_supported = true,
 	.vmhub = AMDGPU_MMHUB_0,
 	.get_rptr = vcn_v3_0_dec_ring_get_rptr,
 	.get_wptr = vcn_v3_0_dec_ring_get_wptr,
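The reworked vcn_v3_0_ring_patch_cs_in_place() above walks the IB as register/value pairs, latches the low and high halves of the decode-message address, and validates the combined 64-bit address when the command register is written with 0. A minimal standalone sketch of that scan loop follows; the names here (ib_words, scan_dec_msgs, handle_dec_msg, the REG_* constants) are hypothetical stand-ins for this example only, and the PACKET0 register encoding is simplified to bare offsets, so this is not the amdgpu API.

/* Build with: cc -Wall -o scan scan.c */
#include <stdint.h>
#include <stddef.h>

#define REG_DATA0 0x10 /* hypothetical offsets standing in for           */
#define REG_DATA1 0x11 /* adev->vcn.internal.data0 / data1 / cmd         */
#define REG_CMD   0x12

struct ib_words {
	const uint32_t *dw;   /* command stream seen as reg/value pairs */
	size_t length_dw;
};

/* Placeholder for the real message handler (vcn_v3_0_dec_msg). */
static int handle_dec_msg(uint64_t addr)
{
	(void)addr;
	return 0;
}

/*
 * Walk the IB two dwords at a time (register, value). Writes to the two
 * data registers record the message address halves; a zero write to the
 * command register triggers handling of the full 64-bit address, mirroring
 * the control flow of the patched loop in the diff.
 */
static int scan_dec_msgs(const struct ib_words *ib)
{
	uint32_t msg_lo = 0, msg_hi = 0;
	size_t i;
	int r;

	for (i = 0; i + 1 < ib->length_dw; i += 2) {
		uint32_t reg = ib->dw[i];
		uint32_t val = ib->dw[i + 1];

		if (reg == REG_DATA0) {
			msg_lo = val;
		} else if (reg == REG_DATA1) {
			msg_hi = val;
		} else if (reg == REG_CMD && val == 0) {
			r = handle_dec_msg(((uint64_t)msg_hi << 32) | msg_lo);
			if (r)
				return r;
		}
	}
	return 0;
}

int main(void)
{
	/* A tiny fake IB: write msg_lo, msg_hi, then the "decode" command. */
	const uint32_t words[] = {
		REG_DATA0, 0xdeadbeef,
		REG_DATA1, 0x00000001,
		REG_CMD,   0x00000000,
	};
	struct ib_words ib = { .dw = words, .length_dw = 6 };

	return scan_dec_msgs(&ib);
}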
