Diffstat (limited to 'drivers/gpu/drm')
846 files changed, 22970 insertions, 12599 deletions
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5050ac32bba2..4dafbdc8f86a 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -104,7 +104,11 @@ obj-$(CONFIG_DRM_PANEL_BACKLIGHT_QUIRKS) += drm_panel_backlight_quirks.o
 #
 obj-$(CONFIG_DRM_EXEC) += drm_exec.o
 obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
-obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm.o
+
+drm_gpusvm_helper-y := \
+	drm_gpusvm.o\
+	drm_pagemap.o
+obj-$(CONFIG_DRM_GPUSVM) += drm_gpusvm_helper.o
 
 obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 87080c06e5fc..930de203d533 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -66,7 +66,7 @@ amdgpu-y += amdgpu_device.o amdgpu_doorbell_mgr.o amdgpu_kms.o \
 	amdgpu_fw_attestation.o amdgpu_securedisplay.o \
 	amdgpu_eeprom.o amdgpu_mca.o amdgpu_psp_ta.o amdgpu_lsdma.o \
 	amdgpu_ring_mux.o amdgpu_xcp.o amdgpu_seq64.o amdgpu_aca.o amdgpu_dev_coredump.o \
-	amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o
+	amdgpu_cper.o amdgpu_userq_fence.o amdgpu_eviction_fence.o amdgpu_ip.o
 
 amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index e13fbd974141..9569dc16dd3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -71,18 +71,29 @@ aldebaran_get_reset_handler(struct amdgpu_reset_control *reset_ctl,
 	return NULL;
 }
 
+static inline uint32_t aldebaran_get_ip_block_mask(struct amdgpu_device *adev)
+{
+	uint32_t ip_block_mask = BIT(AMD_IP_BLOCK_TYPE_GFX) |
+				 BIT(AMD_IP_BLOCK_TYPE_SDMA);
+
+	if (adev->aid_mask)
+		ip_block_mask |= BIT(AMD_IP_BLOCK_TYPE_IH);
+
+	return ip_block_mask;
+}
+
 static int aldebaran_mode2_suspend_ip(struct amdgpu_device *adev)
 {
+	uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
+	uint32_t ip_block;
 	int r, i;
 
 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
-		if (!(adev->ip_blocks[i].version->type ==
-			      AMD_IP_BLOCK_TYPE_GFX ||
-		      adev->ip_blocks[i].version->type ==
-			      AMD_IP_BLOCK_TYPE_SDMA))
+		ip_block = BIT(adev->ip_blocks[i].version->type);
+		if (!(ip_block_mask & ip_block))
 			continue;
 
 		r = amdgpu_ip_block_suspend(&adev->ip_blocks[i]);
@@ -200,8 +211,10 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
 static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
 {
 	struct amdgpu_firmware_info *ucode_list[AMDGPU_UCODE_ID_MAXIMUM];
+	uint32_t ip_block_mask = aldebaran_get_ip_block_mask(adev);
 	struct amdgpu_firmware_info *ucode;
 	struct amdgpu_ip_block *cmn_block;
+	struct amdgpu_ip_block *ih_block;
 	int ucode_count = 0;
 	int i, r;
 
@@ -243,6 +256,18 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
+	if (ip_block_mask & BIT(AMD_IP_BLOCK_TYPE_IH)) {
+		ih_block = amdgpu_device_ip_get_ip_block(adev,
+							 AMD_IP_BLOCK_TYPE_IH);
+		if (unlikely(!ih_block)) {
+			dev_err(adev->dev, "Failed to get IH handle\n");
+			return -EINVAL;
+		}
+		r = amdgpu_ip_block_resume(ih_block);
+		if (r)
+			return r;
+	}
+
 	/* Reinit GFXHUB */
 	adev->gfxhub.funcs->init(adev);
 	r = adev->gfxhub.funcs->gart_enable(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a5ccd0ada16a..ef3af170dda4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -470,9 +470,6 @@ struct amdgpu_sa_manager {
 	void *cpu_ptr;
 };
 
-int amdgpu_fence_slab_init(void);
-void amdgpu_fence_slab_fini(void);
-
 /*
  * IRQS.
  */
@@ -886,6 +883,7 @@ struct amdgpu_mqd_prop {
 	uint64_t csa_addr;
 	uint64_t fence_address;
 	bool tmz_queue;
+	bool kernel_queue;
 };
 
 struct amdgpu_mqd {
@@ -1282,6 +1280,7 @@ struct amdgpu_device {
 	bool debug_exp_resets;
 	bool debug_disable_gpu_ring_reset;
 	bool debug_vm_userptr;
+	bool debug_disable_ce_logs;
 
 	/* Protection for the following isolation structure */
 	struct mutex enforce_isolation_mutex;
@@ -1336,6 +1335,11 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
 	return container_of(bdev, struct amdgpu_device, mman.bdev);
 }
 
+static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev)
+{
+	return !!adev->aid_mask;
+}
+
 int amdgpu_device_init(struct amdgpu_device *adev, uint32_t flags);
 void amdgpu_device_fini_hw(struct amdgpu_device *adev);
@@ -1387,7 +1391,8 @@ void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev,
 void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev,
 				       u64 reg_addr, u64 reg_data);
 u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev);
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type);
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+				       enum amd_asic_type asic_type);
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev);
 
 void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev);
@@ -1558,16 +1563,16 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev,
 int amdgpu_device_mode1_reset(struct amdgpu_device *adev);
 int amdgpu_device_link_reset(struct amdgpu_device *adev);
-bool amdgpu_device_supports_atpx(struct drm_device *dev);
-bool amdgpu_device_supports_px(struct drm_device *dev);
-bool amdgpu_device_supports_boco(struct drm_device *dev);
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev);
-int amdgpu_device_supports_baco(struct drm_device *dev);
+bool amdgpu_device_supports_atpx(struct amdgpu_device *adev);
+bool amdgpu_device_supports_px(struct amdgpu_device *adev);
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev);
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev);
+int amdgpu_device_supports_baco(struct amdgpu_device *adev);
 
 void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev);
 bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
 				      struct amdgpu_device *peer_adev);
-int amdgpu_device_baco_enter(struct drm_device *dev);
-int amdgpu_device_baco_exit(struct drm_device *dev);
+int amdgpu_device_baco_enter(struct amdgpu_device *adev);
+int amdgpu_device_baco_exit(struct amdgpu_device *adev);
 
 void amdgpu_device_flush_hdp(struct amdgpu_device *adev,
 		struct amdgpu_ring *ring);
@@ -1619,6 +1624,7 @@ void amdgpu_driver_release_kms(struct drm_device *dev);
 
 int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
 int amdgpu_device_prepare(struct drm_device *dev);
+void amdgpu_device_complete(struct drm_device *dev);
 int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
 int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
 u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
@@ -1669,7 +1675,8 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev,
 						u8 perf_req, bool advertise);
 int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
 				    u8 dev_state, bool drv_state);
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state);
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+				   enum amdgpu_ss ss_state);
 int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
 int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset,
 			     u64 *tmr_size);
@@ -1700,8 +1707,11 @@ static inline void amdgpu_acpi_release(void) { }
 static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
 static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
 						  u8 dev_state, bool drv_state) { return 0; }
-static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev,
-						 enum amdgpu_ss ss_state) { return 0; }
+static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+						 enum amdgpu_ss ss_state)
+{
+	return 0;
+}
 static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { }
 #endif
@@ -1714,7 +1724,7 @@ static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return
 #endif
 
 #if defined(CONFIG_DRM_AMD_ISP)
-int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN]);
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev);
 #endif
 
 void amdgpu_register_gpu_instance(struct amdgpu_device *adev);
@@ -1760,4 +1770,19 @@ extern const struct attribute_group amdgpu_flash_attr_group;
 
 void amdgpu_set_init_level(struct amdgpu_device *adev,
 			   enum amdgpu_init_lvl_id lvl);
+
+static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev)
+{
+	u32 status;
+	int r;
+
+	r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status);
+	if (r || PCI_POSSIBLE_ERROR(status)) {
+		dev_err(adev->dev, "device lost from bus!");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
index 3835f2592914..cbc40cad581b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_aca.c
@@ -115,6 +115,11 @@ static void aca_smu_bank_dump(struct amdgpu_device *adev, int idx, int total, st
 	u64 event_id = qctx ? qctx->evid.event_id : RAS_EVENT_INVALID_ID;
 	int i;
 
+	if (adev->debug_disable_ce_logs &&
+	    bank->smu_err_type == ACA_SMU_TYPE_CE &&
+	    !ACA_BANK_ERR_IS_DEFFERED(bank))
+		return;
+
 	RAS_EVENT_LOG(adev, event_id, HW_ERR "Accelerator Check Architecture events logged\n");
 	/* plus 1 for output format, e.g: ACA[08/08]: xxxx */
 	for (i = 0; i < ARRAY_SIZE(aca_regs); i++)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index f5466c592d94..6c62e27b9800 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -811,18 +811,18 @@ int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,
 /**
  * amdgpu_acpi_smart_shift_update - update dGPU device state to SBIOS
  *
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
  * @ss_state: current smart shift event
  *
  * returns 0 on success,
  * otherwise return error number.
  */
-int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state)
+int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev,
+				   enum amdgpu_ss ss_state)
 {
-	struct amdgpu_device *adev = drm_to_adev(dev);
 	int r;
 
-	if (!amdgpu_device_supports_smart_shift(dev))
+	if (!amdgpu_device_supports_smart_shift(adev))
 		return 0;
 
 	switch (ss_state) {
@@ -1545,7 +1545,7 @@ static int isp_match_acpi_device_ids(struct device *dev, const void *data)
 	return acpi_match_device(data, dev) ? 1 : 0;
 }
 
-int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
+int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev)
 {
 	struct device *pdev __free(put_device) = NULL;
 	struct acpi_device *acpi_pdev;
@@ -1559,7 +1559,7 @@ int amdgpu_acpi_get_isp4_dev_hid(u8 (*hid)[ACPI_ID_LEN])
 	if (!acpi_pdev)
 		return -ENODEV;
 
-	strscpy(*hid, acpi_device_hid(acpi_pdev));
+	*dev = acpi_pdev;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 55161e5cdc30..fbe7616555c8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -248,18 +248,34 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 		kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
 }
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc)
 {
 	if (adev->kfd.dev)
-		kgd2kfd_suspend(adev->kfd.dev, run_pm);
+		kgd2kfd_suspend(adev->kfd.dev, suspend_proc);
 }
 
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc)
 {
 	int r = 0;
 
 	if (adev->kfd.dev)
-		r = kgd2kfd_resume(adev->kfd.dev, run_pm);
+		r = kgd2kfd_resume(adev->kfd.dev, resume_proc);
+
+	return r;
+}
+
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev)
+{
+	if (adev->kfd.dev)
+		kgd2kfd_suspend_process(adev->kfd.dev);
+}
+
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev)
+{
+	int r = 0;
+
+	if (adev->kfd.dev)
+		r = kgd2kfd_resume_process(adev->kfd.dev);
 
 	return r;
 }
@@ -749,12 +765,12 @@ int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
 
 int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev)
 {
-	return kgd2kfd_check_and_lock_kfd();
+	return kgd2kfd_check_and_lock_kfd(adev->kfd.dev);
 }
 
 void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev)
 {
-	kgd2kfd_unlock_kfd();
+	kgd2kfd_unlock_kfd(adev->kfd.dev);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index b6ca41859b53..33eb4826b58b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -154,8 +154,10 @@ struct amdkfd_process_info {
 int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc);
+void amdgpu_amdkfd_suspend_process(struct amdgpu_device *adev);
+int amdgpu_amdkfd_resume_process(struct amdgpu_device *adev);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
 			const void *ih_ring_entry);
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -411,16 +413,18 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
 			 const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
-int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc);
+void kgd2kfd_suspend_process(struct kfd_dev *kfd);
+int kgd2kfd_resume_process(struct kfd_dev *kfd);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd,
 		      struct amdgpu_reset_context *reset_context);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
-int kgd2kfd_check_and_lock_kfd(void);
-void kgd2kfd_unlock_kfd(void);
+int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd);
+void kgd2kfd_unlock_kfd(struct kfd_dev *kfd);
 int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id);
 int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id);
 bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id);
@@ -454,11 +458,20 @@ static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
 }
 
-static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
+static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc)
 {
 }
 
-static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
+static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc)
+{
+	return 0;
+}
+
+static inline void kgd2kfd_suspend_process(struct kfd_dev *kfd)
+{
+}
+
+static inline int kgd2kfd_resume_process(struct kfd_dev *kfd)
 {
 	return 0;
 }
@@ -489,12 +502,12 @@ void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
 {
 }
 
-static inline int kgd2kfd_check_and_lock_kfd(void)
+static inline int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd)
 {
 	return 0;
 }
 
-static inline void kgd2kfd_unlock_kfd(void)
+static inline void kgd2kfd_unlock_kfd(struct kfd_dev *kfd)
 {
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index ffbaa8bc5eea..1105a09e55dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -320,7 +320,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
 	if (!down_read_trylock(&adev->reset_domain->sem))
 		return;
 
-	amdgpu_amdkfd_suspend(adev, false);
+	amdgpu_amdkfd_suspend(adev, true);
 
 	if (suspend_resume_compute_scheduler(adev, true))
 		goto out;
@@ -333,7 +333,7 @@ static void set_barrier_auto_waitcnt(struct amdgpu_device *adev, bool enable_wai
 out:
 	suspend_resume_compute_scheduler(adev, false);
 
-	amdgpu_amdkfd_resume(adev, false);
+	amdgpu_amdkfd_resume(adev, true);
 
 	up_read(&adev->reset_domain->sem);
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ca4a6b82817f..df77558e03ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -561,6 +561,13 @@ static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
 	return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
 }
 
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+					  int engine, int queue)
+
+{
+	return 0;
+}
+
 const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -578,4 +585,5 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
 	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+	.hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 0f3e2944edd7..e68c0fa8d751 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -582,6 +582,13 @@ static void set_vm_context_page_table_base(struct amdgpu_device *adev,
 			lower_32_bits(page_table_base));
 }
 
+static uint32_t kgd_hqd_sdma_get_doorbell(struct amdgpu_device *adev,
+					  int engine, int queue)
+
+{
+	return 0;
+}
+
 const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -599,4 +606,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
 			get_atc_vmid_pasid_mapping_info,
 	.set_scratch_backing_va = set_scratch_backing_va,
 	.set_vm_context_page_table_base = set_vm_context_page_table_base,
+	.hqd_sdma_get_doorbell = kgd_hqd_sdma_get_doorbell,
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
index 5a234eadae8b..25252231a68a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cper.c
@@ -212,7 +212,7 @@ int amdgpu_cper_entry_fill_bad_page_threshold_section(struct amdgpu_device *adev
 		       NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
 
 	amdgpu_cper_entry_fill_section_desc(adev, section_desc, true, false,
-					    CPER_SEV_NUM, RUNTIME, NONSTD_SEC_LEN,
+					    CPER_SEV_FATAL, RUNTIME, NONSTD_SEC_LEN,
 					    NONSTD_SEC_OFFSET(hdr->sec_cnt, idx));
 
 	section->hdr.valid_bits.err_info_cnt = 1;
@@ -326,7 +326,9 @@ int amdgpu_cper_generate_bp_threshold_record(struct amdgpu_device *adev)
 		return -ENOMEM;
 	}
 
-	amdgpu_cper_entry_fill_hdr(adev, bp_threshold, AMDGPU_CPER_TYPE_BP_THRESHOLD, CPER_SEV_NUM);
+	amdgpu_cper_entry_fill_hdr(adev, bp_threshold,
+				   AMDGPU_CPER_TYPE_BP_THRESHOLD,
+				   CPER_SEV_FATAL);
 	ret = amdgpu_cper_entry_fill_bad_page_threshold_section(adev, bp_threshold, 0);
 	if (ret)
 		return ret;
@@ -457,7 +459,7 @@ calc:
 
 void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
 {
-	u64 pos, wptr_old, rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
+	u64 pos, wptr_old, rptr;
 	int rec_cnt_dw = count >> 2;
 	u32 chunk, ent_sz;
 	u8 *s = (u8 *)src;
@@ -470,9 +472,11 @@ void amdgpu_cper_ring_write(struct amdgpu_ring *ring, void *src, int count)
 		return;
 	}
 
+	mutex_lock(&ring->adev->cper.ring_lock);
+
 	wptr_old = ring->wptr;
+	rptr = *ring->rptr_cpu_addr & ring->ptr_mask;
 
-	mutex_lock(&ring->adev->cper.ring_lock);
 	while (count) {
 		ent_sz = amdgpu_cper_ring_get_ent_sz(ring, ring->wptr);
 		chunk = umin(ent_sz, count);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a2adaacf6adb..d3f220be2ef9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1139,6 +1139,9 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		}
 	}
 
+	if (!amdgpu_vm_ready(vm))
+		return -EINVAL;
+
 	r = amdgpu_vm_clear_freed(adev, vm, NULL);
 	if (r)
 		return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 85567d0d9545..f5d5c45ddc0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -944,6 +944,7 @@ static void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 				drm_sched_entity_fini(entity);
 			}
 		}
+		kref_put(&ctx->refcount, amdgpu_ctx_fini);
 	}
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index dac4b926e7be..0e6e2e2acf5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -1902,7 +1902,7 @@ no_preempt:
 			continue;
 		}
 		job = to_amdgpu_job(s_job);
-		if (preempted && (&job->hw_fence) == fence)
+		if (preempted && (&job->hw_fence.base) == fence)
 			/* mark the job as preempted */
 			job->preemption_status |= AMDGPU_IB_PREEMPTED;
 	}
@@ -2131,6 +2131,55 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
 	return 0;
 }
 
+static int amdgpu_pt_info_read(struct seq_file *m, void *unused)
+{
+	struct drm_file *file;
+	struct amdgpu_fpriv *fpriv;
+	struct amdgpu_bo *root_bo;
+	int r;
+
+	file = m->private;
+	if (!file)
+		return -EINVAL;
+
+	fpriv = file->driver_priv;
+	if (!fpriv || !fpriv->vm.root.bo)
+		return -ENODEV;
+
+	root_bo = amdgpu_bo_ref(fpriv->vm.root.bo);
+	r = amdgpu_bo_reserve(root_bo, true);
+	if (r) {
+		amdgpu_bo_unref(&root_bo);
+		return -EINVAL;
+	}
+
+	seq_printf(m, "gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(fpriv->vm.root.bo));
+
+	amdgpu_bo_unreserve(root_bo);
+	amdgpu_bo_unref(&root_bo);
+
+	return 0;
+}
+
+static int amdgpu_pt_info_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, amdgpu_pt_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_pt_info_fops = {
+	.owner = THIS_MODULE,
+	.open = amdgpu_pt_info_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+	debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file,
+			    &amdgpu_pt_info_fops);
+}
+
 #else
 int amdgpu_debugfs_init(struct amdgpu_device *adev)
 {
@@ -2140,4 +2189,7 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
 {
 	return 0;
 }
+void amdgpu_debugfs_vm_init(struct drm_file *file)
+{
+}
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 0425432d8659..e7b3c38e5186 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -33,4 +33,5 @@ void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
 void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
+void amdgpu_debugfs_vm_init(struct drm_file *file);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a59f194e3360..01d234cf8156 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -232,7 +232,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
 {
 	int ret = 0;
 
-	if (!amdgpu_sriov_vf(adev))
+	if (amdgpu_nbio_is_replay_cnt_supported(adev))
 		ret = sysfs_create_file(&adev->dev->kobj,
 					&dev_attr_pcie_replay_count.attr);
 
@@ -241,7 +241,7 @@ static int amdgpu_device_attr_sysfs_init(struct amdgpu_device *adev)
 
 static void amdgpu_device_attr_sysfs_fini(struct amdgpu_device *adev)
 {
-	if (!amdgpu_sriov_vf(adev))
+	if (amdgpu_nbio_is_replay_cnt_supported(adev))
 		sysfs_remove_file(&adev->dev->kobj,
 				  &dev_attr_pcie_replay_count.attr);
 }
@@ -411,19 +411,16 @@ static const struct attribute_group amdgpu_board_attrs_group = {
 
 static void amdgpu_device_get_pcie_info(struct amdgpu_device *adev);
 
-
 /**
  * amdgpu_device_supports_px - Is the device a dGPU with ATPX power control
  *
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
  *
  * Returns true if the device is a dGPU with ATPX power control,
  * otherwise return false.
  */
-bool amdgpu_device_supports_px(struct drm_device *dev)
+bool amdgpu_device_supports_px(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = drm_to_adev(dev);
-
 	if ((adev->flags & AMD_IS_PX) && !amdgpu_is_atpx_hybrid())
 		return true;
 	return false;
@@ -432,15 +429,13 @@ bool amdgpu_device_supports_px(struct drm_device *dev)
 /**
  * amdgpu_device_supports_boco - Is the device a dGPU with ACPI power resources
  *
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
 *
 * Returns true if the device is a dGPU with ACPI power control,
 * otherwise return false.
 */
-bool amdgpu_device_supports_boco(struct drm_device *dev)
+bool amdgpu_device_supports_boco(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = drm_to_adev(dev);
-
 	if (!IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE))
 		return false;
 
@@ -453,29 +448,24 @@ bool amdgpu_device_supports_boco(struct drm_device *dev)
 /**
  * amdgpu_device_supports_baco - Does the device support BACO
  *
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
 *
 * Return:
 * 1 if the device supports BACO;
 * 3 if the device supports MACO (only works if BACO is supported)
 * otherwise return 0.
 */
-int amdgpu_device_supports_baco(struct drm_device *dev)
+int amdgpu_device_supports_baco(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = drm_to_adev(dev);
-
 	return amdgpu_asic_supports_baco(adev);
 }
 
 void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
 {
-	struct drm_device *dev;
 	int bamaco_support;
 
-	dev = adev_to_drm(adev);
-
 	adev->pm.rpm_mode = AMDGPU_RUNPM_NONE;
-	bamaco_support = amdgpu_device_supports_baco(dev);
+	bamaco_support = amdgpu_device_supports_baco(adev);
 
 	switch (amdgpu_runtime_pm) {
 	case 2:
@@ -495,10 +485,12 @@ void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev)
 		break;
 	case -1:
 	case -2:
-		if (amdgpu_device_supports_px(dev)) { /* enable PX as runtime mode */
+		if (amdgpu_device_supports_px(adev)) {
+			/* enable PX as runtime mode */
 			adev->pm.rpm_mode = AMDGPU_RUNPM_PX;
 			dev_info(adev->dev, "Using ATPX for runtime pm\n");
-		} else if (amdgpu_device_supports_boco(dev)) { /* enable boco as runtime mode */
+		} else if (amdgpu_device_supports_boco(adev)) {
+			/* enable boco as runtime mode */
 			adev->pm.rpm_mode = AMDGPU_RUNPM_BOCO;
 			dev_info(adev->dev, "Using BOCO for runtime pm\n");
 		} else {
@@ -547,14 +539,14 @@ no_runtime_pm:
 * amdgpu_device_supports_smart_shift - Is the device dGPU with
 * smart shift support
 *
- * @dev: drm_device pointer
+ * @adev: amdgpu device pointer
 *
 * Returns true if the device is a dGPU with Smart Shift support,
 * otherwise returns false.
 */
-bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
+bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev)
 {
-	return (amdgpu_device_supports_boco(dev) &&
+	return (amdgpu_device_supports_boco(adev) &&
 		amdgpu_acpi_is_power_shift_control_supported());
 }
@@ -1288,14 +1280,14 @@ u32 amdgpu_device_get_rev_id(struct amdgpu_device *adev)
 */
 static uint32_t amdgpu_invalid_rreg(struct amdgpu_device *adev, uint32_t reg)
 {
-	DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+	dev_err(adev->dev, "Invalid callback to read register 0x%04X\n", reg);
 	BUG();
 	return 0;
 }
 
 static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg)
 {
-	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+	dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
 	BUG();
 	return 0;
 }
@@ -1312,15 +1304,17 @@ static uint32_t amdgpu_invalid_rreg_ext(struct amdgpu_device *adev, uint64_t reg
 */
 static void amdgpu_invalid_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
 {
-	DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
-		  reg, v);
+	dev_err(adev->dev,
+		"Invalid callback to write register 0x%04X with 0x%08X\n", reg,
+		v);
 	BUG();
 }
 
 static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, uint32_t v)
 {
-	DRM_ERROR("Invalid callback to write register 0x%llX with 0x%08X\n",
-		  reg, v);
+	dev_err(adev->dev,
+		"Invalid callback to write register 0x%llX with 0x%08X\n", reg,
+		v);
 	BUG();
 }
@@ -1336,14 +1330,15 @@ static void amdgpu_invalid_wreg_ext(struct amdgpu_device *adev, uint64_t reg, ui
 */
 static uint64_t amdgpu_invalid_rreg64(struct amdgpu_device *adev, uint32_t reg)
 {
-	DRM_ERROR("Invalid callback to read 64 bit register 0x%04X\n", reg);
+	dev_err(adev->dev, "Invalid callback to read 64 bit register 0x%04X\n",
+		reg);
 	BUG();
 	return 0;
 }
 
 static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t reg)
 {
-	DRM_ERROR("Invalid callback to read register 0x%llX\n", reg);
+	dev_err(adev->dev, "Invalid callback to read register 0x%llX\n", reg);
 	BUG();
 	return 0;
 }
@@ -1360,15 +1355,17 @@ static uint64_t amdgpu_invalid_rreg64_ext(struct amdgpu_device *adev, uint64_t r
 */
 static void amdgpu_invalid_wreg64(struct amdgpu_device *adev, uint32_t reg, uint64_t v)
 {
-	DRM_ERROR("Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
-		  reg, v);
+	dev_err(adev->dev,
+		"Invalid callback to write 64 bit register 0x%04X with 0x%08llX\n",
+		reg, v);
 	BUG();
 }
 
 static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg, uint64_t v)
 {
-	DRM_ERROR("Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
-		  reg, v);
+	dev_err(adev->dev,
+		"Invalid callback to write 64 bit register 0x%llX with 0x%08llX\n",
+		reg, v);
 	BUG();
 }
@@ -1386,8 +1383,9 @@ static void amdgpu_invalid_wreg64_ext(struct amdgpu_device *adev, uint64_t reg,
 static uint32_t amdgpu_block_invalid_rreg(struct amdgpu_device *adev,
 					  uint32_t block, uint32_t reg)
 {
-	DRM_ERROR("Invalid callback to read register 0x%04X in block 0x%04X\n",
-		  reg, block);
+	dev_err(adev->dev,
+		"Invalid callback to read register 0x%04X in block 0x%04X\n",
+		reg, block);
 	BUG();
 	return 0;
 }
@@ -1407,8 +1405,9 @@ static void amdgpu_block_invalid_wreg(struct amdgpu_device *adev,
 				      uint32_t block,
 				      uint32_t reg, uint32_t v)
 {
-	DRM_ERROR("Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
-		  reg, block, v);
+	dev_err(adev->dev,
+		"Invalid block callback to write register 0x%04X in block 0x%04X with 0x%08X\n",
+		reg, block, v);
 	BUG();
 }
@@ -1694,7 +1693,9 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 
 	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
 	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
-		DRM_WARN("System can't access extended configuration space, please check!!\n");
+		dev_warn(
+			adev->dev,
+			"System can't access extended configuration space, please check!!\n");
 
 	/* skip if the bios has already enabled large BAR */
 	if (adev->gmc.real_vram_size &&
@@ -1734,9 +1735,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 
 	r = pci_resize_resource(adev->pdev, 0, rbar_size);
 	if (r == -ENOSPC)
-		DRM_INFO("Not enough PCI address space for a large BAR.");
+		dev_info(adev->dev,
+			 "Not enough PCI address space for a large BAR.");
 	else if (r && r != -ENOTSUPP)
-		DRM_ERROR("Problem resizing BAR0 (%d).", r);
+		dev_err(adev->dev, "Problem resizing BAR0 (%d).", r);
 
 	pci_assign_unassigned_bus_resources(adev->pdev->bus);
 
@@ -1838,8 +1840,8 @@ bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev)
 	case 0:
 		return false;
 	default:
-		DRM_ERROR("Invalid value for amdgpu.seamless: %d\n",
-			  amdgpu_seamless);
+		dev_err(adev->dev, "Invalid value for amdgpu.seamless: %d\n",
+			amdgpu_seamless);
 		return false;
 	}
 
@@ -2015,7 +2017,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
 		return;
 
 	if (!is_os_64) {
-		DRM_WARN("Not 64-bit OS, feature not supported\n");
+		dev_warn(adev->dev, "Not 64-bit OS, feature not supported\n");
 		goto def_value;
 	}
 	si_meminfo(&si);
@@ -2030,7 +2032,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
 		if (total_memory < dram_size_seven_GB)
 			goto def_value1;
 	} else {
-		DRM_WARN("Smu memory pool size not supported\n");
+		dev_warn(adev->dev, "Smu memory pool size not supported\n");
 		goto def_value;
 	}
 	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
@@ -2038,7 +2040,7 @@ static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
 	return;
 
 def_value1:
-	DRM_WARN("No enough system memory\n");
+	dev_warn(adev->dev, "No enough system memory\n");
 def_value:
 	adev->pm.smu_prv_buffer_size = 0;
 }
@@ -2190,7 +2192,8 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
 	struct drm_device *dev = pci_get_drvdata(pdev);
 	int r;
 
-	if (amdgpu_device_supports_px(dev) && state == VGA_SWITCHEROO_OFF)
+	if (amdgpu_device_supports_px(drm_to_adev(dev)) &&
+	    state == VGA_SWITCHEROO_OFF)
 		return;
 
 	if (state == VGA_SWITCHEROO_ON) {
@@ -2202,12 +2205,13 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
 		amdgpu_device_load_pci_state(pdev);
 		r = pci_enable_device(pdev);
 		if (r)
-			DRM_WARN("pci_enable_device failed (%d)\n", r);
+			dev_warn(&pdev->dev, "pci_enable_device failed (%d)\n",
+				 r);
 		amdgpu_device_resume(dev, true);
 
 		dev->switch_power_state = DRM_SWITCH_POWER_ON;
 	} else {
-		pr_info("switched off\n");
+		dev_info(&pdev->dev, "switched off\n");
 		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 		amdgpu_device_prepare(dev);
 		amdgpu_device_suspend(dev, true);
@@ -2274,8 +2278,9 @@ int amdgpu_device_ip_set_clockgating_state(void *dev,
 		r = adev->ip_blocks[i].version->funcs->set_clockgating_state(
 			&adev->ip_blocks[i], state);
 		if (r)
-			DRM_ERROR("set_clockgating_state of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+			dev_err(adev->dev,
+				"set_clockgating_state of IP block <%s> failed %d\n",
+				adev->ip_blocks[i].version->funcs->name, r);
 	}
 	return r;
 }
@@ -2308,8 +2313,9 @@ int amdgpu_device_ip_set_powergating_state(void *dev,
 		r = adev->ip_blocks[i].version->funcs->set_powergating_state(
 			&adev->ip_blocks[i], state);
 		if (r)
-			DRM_ERROR("set_powergating_state of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+			dev_err(adev->dev,
+				"set_powergating_state of IP block <%s> failed %d\n",
+				adev->ip_blocks[i].version->funcs->name, r);
 	}
 	return r;
 }
@@ -2525,9 +2531,11 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 		}
 	}
 
-	DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
-		 amdgpu_virtual_display, pci_address_name,
-		 adev->enable_virtual_display, adev->mode_info.num_crtc);
+	dev_info(
+		adev->dev,
+		"virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n",
+		amdgpu_virtual_display, pci_address_name,
+		adev->enable_virtual_display, adev->mode_info.num_crtc);
 
 	kfree(pciaddstr);
 }
@@ -2538,8 +2546,9 @@ void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
 		adev->mode_info.num_crtc = 1;
 		adev->enable_virtual_display = true;
-		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
-			 adev->enable_virtual_display, adev->mode_info.num_crtc);
+		dev_info(adev->dev, "virtual_display:%d, num_crtc:%d\n",
+			 adev->enable_virtual_display,
+			 adev->mode_info.num_crtc);
 	}
 }
 
@@ -2561,9 +2570,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 
 	adev->firmware.gpu_info_fw = NULL;
 
-	if (adev->mman.discovery_bin)
-		return 0;
-
 	switch (adev->asic_type) {
 	default:
 		return 0;
@@ -2585,6 +2591,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 		chip_name = "arcturus";
 		break;
 	case CHIP_NAVI12:
+		if (adev->mman.discovery_bin)
+			return 0;
 		chip_name = "navi12";
 		break;
 	}
@@ -2773,21 +2781,29 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	if (!amdgpu_device_pcie_dynamic_switching_supported(adev))
 		adev->pm.pp_feature &= ~PP_PCIE_DPM_MASK;
 
+	adev->virt.is_xgmi_node_migrate_enabled = false;
+	if (amdgpu_sriov_vf(adev)) {
+		adev->virt.is_xgmi_node_migrate_enabled =
+			amdgpu_ip_version((adev), GC_HWIP, 0) == IP_VERSION(9, 4, 4);
+	}
+
 	total = true;
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		ip_block = &adev->ip_blocks[i];
 
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
-			DRM_WARN("disabled ip block: %d <%s>\n",
-				 i, adev->ip_blocks[i].version->funcs->name);
+			dev_warn(adev->dev, "disabled ip block: %d <%s>\n", i,
+				 adev->ip_blocks[i].version->funcs->name);
 			adev->ip_blocks[i].status.valid = false;
 		} else if (ip_block->version->funcs->early_init) {
 			r = ip_block->version->funcs->early_init(ip_block);
 			if (r == -ENOENT) {
 				adev->ip_blocks[i].status.valid = false;
 			} else if (r) {
-				DRM_ERROR("early_init of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
+				dev_err(adev->dev,
+					"early_init of IP block <%s> failed %d\n",
+					adev->ip_blocks[i].version->funcs->name,
+					r);
 				total = false;
 			} else {
 				adev->ip_blocks[i].status.valid = true;
@@ -2868,8 +2884,10 @@ static int amdgpu_device_ip_hw_init_phase1(struct amdgpu_device *adev)
 		    adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_IH) {
 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 			if (r) {
-				DRM_ERROR("hw_init of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
+				dev_err(adev->dev,
+					"hw_init of IP block <%s> failed %d\n",
+					adev->ip_blocks[i].version->funcs->name,
+					r);
 				return r;
 			}
 			adev->ip_blocks[i].status.hw = true;
@@ -2893,8 +2911,9 @@ static int amdgpu_device_ip_hw_init_phase2(struct amdgpu_device *adev)
 			continue;
 		r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 		if (r) {
-			DRM_ERROR("hw_init of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+			dev_err(adev->dev,
+				"hw_init of IP block <%s> failed %d\n",
+				adev->ip_blocks[i].version->funcs->name, r);
 			return r;
 		}
 		adev->ip_blocks[i].status.hw = true;
@@ -2932,8 +2951,11 @@ static int amdgpu_device_fw_loading(struct amdgpu_device *adev)
 			} else {
 				r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 				if (r) {
-					DRM_ERROR("hw_init of IP block <%s> failed %d\n",
-							  adev->ip_blocks[i].version->funcs->name, r);
+					dev_err(adev->dev,
+						"hw_init of IP block <%s> failed %d\n",
+						adev->ip_blocks[i]
+							.version->funcs->name,
+						r);
 					return r;
 				}
 				adev->ip_blocks[i].status.hw = true;
@@ -2988,25 +3010,29 @@ static int amdgpu_device_init_schedulers(struct amdgpu_device *adev)
 
 		r = drm_sched_init(&ring->sched, &args);
 		if (r) {
-			DRM_ERROR("Failed to create scheduler on ring %s.\n",
-				  ring->name);
+			dev_err(adev->dev,
+				"Failed to create scheduler on ring %s.\n",
+				ring->name);
 			return r;
 		}
 		r = amdgpu_uvd_entity_init(adev, ring);
 		if (r) {
-			DRM_ERROR("Failed to create UVD scheduling entity on ring %s.\n",
-				  ring->name);
+			dev_err(adev->dev,
+				"Failed to create UVD scheduling entity on ring %s.\n",
+				ring->name);
 			return r;
 		}
 		r = amdgpu_vce_entity_init(adev, ring);
 		if (r) {
-			DRM_ERROR("Failed to create VCE scheduling entity on ring %s.\n",
-				  ring->name);
+			dev_err(adev->dev,
+				"Failed to create VCE scheduling entity on ring %s.\n",
+				ring->name);
 			return r;
 		}
 	}
 
-	amdgpu_xcp_update_partition_sched_list(adev);
+	if (adev->xcp_mgr)
+		amdgpu_xcp_update_partition_sched_list(adev);
 
 	return 0;
 }
@@ -3038,8 +3064,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 		if (adev->ip_blocks[i].version->funcs->sw_init) {
 			r = adev->ip_blocks[i].version->funcs->sw_init(&adev->ip_blocks[i]);
 			if (r) {
-				DRM_ERROR("sw_init of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
+				dev_err(adev->dev,
+					"sw_init of IP block <%s> failed %d\n",
+					adev->ip_blocks[i].version->funcs->name,
+					r);
 				goto init_failed;
 			}
 		}
@@ -3053,7 +3081,8 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 			/* need to do common hw init early so everything is set up for gmc */
 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 			if (r) {
-				DRM_ERROR("hw_init %d failed %d\n", i, r);
+				dev_err(adev->dev, "hw_init %d failed %d\n", i,
+					r);
 				goto init_failed;
 			}
 			adev->ip_blocks[i].status.hw = true;
@@ -3065,17 +3094,21 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 
 			r = amdgpu_device_mem_scratch_init(adev);
 			if (r) {
-				DRM_ERROR("amdgpu_mem_scratch_init failed %d\n", r);
+				dev_err(adev->dev,
+					"amdgpu_mem_scratch_init failed %d\n",
+					r);
 				goto init_failed;
 			}
 			r = adev->ip_blocks[i].version->funcs->hw_init(&adev->ip_blocks[i]);
 			if (r) {
-				DRM_ERROR("hw_init %d failed %d\n", i, r);
+				dev_err(adev->dev, "hw_init %d failed %d\n", i,
+					r);
 				goto init_failed;
 			}
 			r = amdgpu_device_wb_init(adev);
 			if (r) {
-				DRM_ERROR("amdgpu_device_wb_init failed %d\n", r);
+				dev_err(adev->dev,
+					"amdgpu_device_wb_init failed %d\n", r);
 				goto init_failed;
 			}
 			adev->ip_blocks[i].status.hw = true;
@@ -3087,14 +3120,16 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 							       AMDGPU_GEM_DOMAIN_GTT,
 							       AMDGPU_CSA_SIZE);
 				if (r) {
-					DRM_ERROR("allocate CSA failed %d\n", r);
+					dev_err(adev->dev,
+						"allocate CSA failed %d\n", r);
 					goto init_failed;
 				}
 			}
 
 			r = amdgpu_seq64_init(adev);
 			if (r) {
-				DRM_ERROR("allocate seq64 failed %d\n", r);
+				dev_err(adev->dev, "allocate seq64 failed %d\n",
+					r);
 				goto init_failed;
 			}
 		}
@@ -3235,6 +3270,7 @@ static bool amdgpu_device_check_vram_lost(struct amdgpu_device *adev)
	 * always assumed to be lost.
	 */
 	switch (amdgpu_asic_reset_method(adev)) {
+	case AMD_RESET_METHOD_LEGACY:
 	case AMD_RESET_METHOD_LINK:
 	case AMD_RESET_METHOD_BACO:
 	case AMD_RESET_METHOD_MODE1:
@@ -3284,8 +3320,10 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state(&adev->ip_blocks[i],
 										     state);
 			if (r) {
-				DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
+				dev_err(adev->dev,
+					"set_clockgating_state(gate) of IP block <%s> failed %d\n",
+					adev->ip_blocks[i].version->funcs->name,
+					r);
 				return r;
 			}
 		}
@@ -3321,8 +3359,10 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
 			r = adev->ip_blocks[i].version->funcs->set_powergating_state(&adev->ip_blocks[i],
 										     state);
 			if (r) {
-				DRM_ERROR("set_powergating_state(gate) of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
+				dev_err(adev->dev,
					"set_powergating_state(gate) of IP block <%s> failed %d\n",
+					adev->ip_blocks[i].version->funcs->name,
+					r);
 				return r;
 			}
 		}
@@ -3388,8 +3428,10 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 		if (adev->ip_blocks[i].version->funcs->late_init) {
 			r = adev->ip_blocks[i].version->funcs->late_init(&adev->ip_blocks[i]);
 			if (r) {
-				DRM_ERROR("late_init of IP block <%s> failed %d\n",
-					  adev->ip_blocks[i].version->funcs->name, r);
+				dev_err(adev->dev,
+					"late_init of IP block <%s> failed %d\n",
+					adev->ip_blocks[i].version->funcs->name,
+					r);
 				return r;
 			}
 		}
@@ -3398,7 +3440,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 
 	r = amdgpu_ras_late_init(adev);
 	if (r) {
-		DRM_ERROR("amdgpu_ras_late_init failed %d", r);
+		dev_err(adev->dev, "amdgpu_ras_late_init failed %d", r);
 		return r;
 	}
 
@@ -3412,7 +3454,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 
 	r = amdgpu_device_enable_mgpu_fan_boost();
 	if (r)
-		DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+		dev_err(adev->dev, "enable mgpu fan boost failed (%d).\n", r);
 
 	/* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */
 	if (amdgpu_passthrough(adev) &&
@@ -3445,7 +3487,9 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 				r = amdgpu_xgmi_set_pstate(gpu_instance->adev,
 							   AMDGPU_XGMI_PSTATE_MIN);
 				if (r) {
-					DRM_ERROR("pstate setting failed (%d).\n", r);
+					dev_err(adev->dev,
+						"pstate setting failed (%d).\n",
+						r);
 					break;
 				}
 			}
@@ -3459,17 +3503,19 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 
 static void amdgpu_ip_block_hw_fini(struct amdgpu_ip_block *ip_block)
 {
+	struct amdgpu_device *adev = ip_block->adev;
 	int r;
 
 	if (!ip_block->version->funcs->hw_fini) {
-		DRM_ERROR("hw_fini of IP block <%s> not defined\n",
-			  ip_block->version->funcs->name);
+		dev_err(adev->dev, "hw_fini of IP block <%s> not defined\n",
+			ip_block->version->funcs->name);
 	} else {
 		r = ip_block->version->funcs->hw_fini(ip_block);
 		/* XXX handle errors */
 		if (r) {
-			DRM_DEBUG("hw_fini of IP block <%s> failed %d\n",
-				  ip_block->version->funcs->name, r);
+			dev_dbg(adev->dev,
+				"hw_fini of IP block <%s> failed %d\n",
+				ip_block->version->funcs->name, r);
 		}
 	}
@@ -3510,15 +3556,16 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
 
 		r = adev->ip_blocks[i].version->funcs->early_fini(&adev->ip_blocks[i]);
 		if (r) {
-			DRM_DEBUG("early_fini of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+			dev_dbg(adev->dev,
+				"early_fini of IP block <%s> failed %d\n",
+				adev->ip_blocks[i].version->funcs->name, r);
 		}
 	}
 
 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
-	amdgpu_amdkfd_suspend(adev, false);
+	amdgpu_amdkfd_suspend(adev, true);
 	amdgpu_userq_suspend(adev);
 
 	/* Workaround for ASICs need to disable SMC first */
@@ -3533,7 +3580,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
 
 	if (amdgpu_sriov_vf(adev)) {
 		if (amdgpu_virt_release_full_gpu(adev, false))
-			DRM_ERROR("failed to release exclusive mode on fini\n");
+			dev_err(adev->dev,
+				"failed to release exclusive mode on fini\n");
 	}
 
 	return 0;
@@ -3581,8 +3629,10 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 		r = adev->ip_blocks[i].version->funcs->sw_fini(&adev->ip_blocks[i]);
 		/* XXX handle errors */
 		if (r) {
-			DRM_DEBUG("sw_fini of IP block <%s> failed %d\n",
-				  adev->ip_blocks[i].version->funcs->name, r);
+			dev_dbg(adev->dev,
+				"sw_fini of IP block <%s> failed %d\n",
+				adev->ip_blocks[i].version->funcs->name,
+				r);
 		}
 	}
 		adev->ip_blocks[i].status.sw = false;
@@ -3615,7 +3665,7 @@ static void amdgpu_device_delayed_init_work_handler(struct work_struct *work)
 
 	r = amdgpu_ib_ring_tests(adev);
 	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
+		dev_err(adev->dev, "ib ring test failed (%d).\n", r);
 }
 
 static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@ -3756,8 +3806,9 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
 		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
 			r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
 			if (r) {
-				DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
-					  adev->mp1_state, r);
+				dev_err(adev->dev,
+					"SMC failed to set mp1 state %d, %d\n",
+					adev->mp1_state, r);
 				return r;
 			}
 		}
@@ -4041,12 +4092,14 @@ static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev)
 /**
 * amdgpu_device_asic_has_dc_support - determine if DC supports the asic
 *
+ * @pdev : pci device context
 * @asic_type: AMD asic type
 *
 * Check if there is DC (new modesetting infrastructre) support for an asic.
 * returns true if DC has support, false if not.
 */
-bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
+bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev,
+				       enum amd_asic_type asic_type)
 {
 	switch (asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_SI
@@ -4089,7 +4142,9 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 #else
 	default:
 		if (amdgpu_dc > 0)
-			DRM_INFO_ONCE("Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
+			dev_info_once(
+				&pdev->dev,
+				"Display Core has been requested via kernel parameter but isn't supported by ASIC, ignoring\n");
 		return false;
 #endif
 	}
@@ -4108,7 +4163,7 @@ bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
 		return false;
 
-	return amdgpu_device_asic_has_dc_support(adev->asic_type);
+	return amdgpu_device_asic_has_dc_support(adev->pdev, adev->asic_type);
 }
 
 static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
@@ -4130,13 +4185,13 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 		if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
 
 			task_barrier_enter(&hive->tb);
-			adev->asic_reset_res = amdgpu_device_baco_enter(adev_to_drm(adev));
+			adev->asic_reset_res = amdgpu_device_baco_enter(adev);
 
 			if (adev->asic_reset_res)
 				goto fail;
 
 			task_barrier_exit(&hive->tb);
-			adev->asic_reset_res = amdgpu_device_baco_exit(adev_to_drm(adev));
+			adev->asic_reset_res = amdgpu_device_baco_exit(adev);
 
 			if (adev->asic_reset_res)
 				goto fail;
@@ -4150,7 +4205,8 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work)
 
 fail:
 	if (adev->asic_reset_res)
-		DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
+		dev_warn(adev->dev,
+			 "ASIC reset failed with error, %d for drm dev, %s",
 			 adev->asic_reset_res, adev_to_drm(adev)->unique);
 	amdgpu_put_xgmi_hive(hive);
 }
@@ -4164,18 +4220,10 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
 	int ret = 0;
 
	/*
-	 * By default timeout for non compute jobs is 10000
-	 * and 60000 for compute jobs.
-	 * In SR-IOV or passthrough mode, timeout for compute
-	 * jobs are 60000 by default.
+	 * By default timeout for jobs is 10 sec
	 */
-	adev->gfx_timeout = msecs_to_jiffies(10000);
+	adev->compute_timeout = adev->gfx_timeout = msecs_to_jiffies(10000);
 	adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout;
-	if (amdgpu_sriov_vf(adev))
-		adev->compute_timeout = amdgpu_sriov_is_pp_one_vf(adev) ?
-					msecs_to_jiffies(60000) : msecs_to_jiffies(10000);
-	else
-		adev->compute_timeout = msecs_to_jiffies(60000);
 
 	if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) {
 		while ((timeout_setting = strsep(&input, ",")) &&
@@ -4274,7 +4322,7 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
 		adev->gfx.mcbp = true;
 
 	if (adev->gfx.mcbp)
-		DRM_INFO("MCBP is enabled\n");
+		dev_info(adev->dev, "MCBP is enabled\n");
 }
 
 /**
@@ -4290,7 +4338,6 @@ static void amdgpu_device_set_mcbp(struct amdgpu_device *adev)
 int amdgpu_device_init(struct amdgpu_device *adev,
 		       uint32_t flags)
 {
-	struct drm_device *ddev = adev_to_drm(adev);
 	struct pci_dev *pdev = adev->pdev;
 	int r, i;
 	bool px = false;
@@ -4342,9 +4389,11 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
 
-	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
-		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
-		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
+	dev_info(
+		adev->dev,
+		"initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+		amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
+		pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
 
 	/* mutex initialization are all done here so we
	 * can recall function without having locking issues
@@ -4461,8 +4510,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (!adev->rmmio)
 		return -ENOMEM;
 
-	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
-	DRM_INFO("register mmio size: %u\n", (unsigned int)adev->rmmio_size);
+	dev_info(adev->dev, "register mmio base: 0x%08X\n",
+		 (uint32_t)adev->rmmio_base);
+	dev_info(adev->dev, "register mmio size: %u\n",
+		 (unsigned int)adev->rmmio_size);
 
	/*
	 * Reset domain needs to be present early, before XGMI hive discovered
@@ -4599,7 +4650,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 			r = -EINVAL;
 			goto failed;
 		}
-		DRM_INFO("GPU posting now...\n");
+		dev_info(adev->dev, "GPU posting now...\n");
 		r = amdgpu_device_asic_init(adev);
 		if (r) {
 			dev_err(adev->dev, "gpu post error!\n");
@@ -4709,12 +4760,12 @@ fence_driver_init:
 
 	r = amdgpu_pm_sysfs_init(adev);
 	if (r)
-		DRM_ERROR("registering pm sysfs failed (%d).\n", r);
+		dev_err(adev->dev, "registering pm sysfs failed (%d).\n", r);
 
 	r = amdgpu_ucode_sysfs_init(adev);
 	if (r) {
 		adev->ucode_sysfs_en = false;
-		DRM_ERROR("Creating firmware sysfs failed (%d).\n", r);
+		dev_err(adev->dev, "Creating firmware sysfs failed (%d).\n", r);
 	} else
 		adev->ucode_sysfs_en = true;
 
@@ -4747,7 +4798,7 @@ fence_driver_init:
 	if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
 		vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
 
-	px = amdgpu_device_supports_px(ddev);
+	px = amdgpu_device_supports_px(adev);
 
 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
 		   apple_gmux_detect(NULL, NULL)))
@@ -4913,7 +4964,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 	kfree(adev->xcp_mgr);
 	adev->xcp_mgr = NULL;
 
-	px = amdgpu_device_supports_px(adev_to_drm(adev));
+	px = amdgpu_device_supports_px(adev);
 
 	if (px || (!dev_is_removable(&adev->pdev->dev) &&
 		   apple_gmux_detect(NULL, NULL)))
@@ -4962,8 +5013,16 @@ static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
 		return 0;
 
 	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
-	if (ret)
-		DRM_WARN("evicting device resources failed\n");
+	if (ret) {
+		dev_warn(adev->dev, "evicting device resources failed\n");
+		return ret;
+	}
+
+	if (adev->in_s4) {
+		ret = ttm_device_prepare_hibernation(&adev->mman.bdev);
+		if (ret)
+			dev_err(adev->dev, "prepare hibernation failed, %d\n", ret);
+	}
 	return ret;
 }
 
@@ -5035,6 +5094,28 @@ int amdgpu_device_prepare(struct drm_device *dev)
 }
 
 /**
+ * amdgpu_device_complete - complete power state transition
+ *
+ * @dev: drm dev pointer
+ *
+ * Undo the changes from amdgpu_device_prepare. This will be
+ * called on all resume transitions, including those that failed.
+ */
+void amdgpu_device_complete(struct drm_device *dev)
+{
+	struct amdgpu_device *adev = drm_to_adev(dev);
+	int i;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_blocks[i].status.valid)
+			continue;
+		if (!adev->ip_blocks[i].version->funcs->complete)
+			continue;
+		adev->ip_blocks[i].version->funcs->complete(&adev->ip_blocks[i]);
+	}
+}
+
+/**
 * amdgpu_device_suspend - initiate device suspend
 *
 * @dev: drm dev pointer
@@ -5055,14 +5136,16 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
 	adev->in_suspend = true;
 
 	if (amdgpu_sriov_vf(adev)) {
+		if (!adev->in_s0ix && !adev->in_runpm)
+			amdgpu_amdkfd_suspend_process(adev);
 		amdgpu_virt_fini_data_exchange(adev);
 		r = amdgpu_virt_request_full_gpu(adev, false);
 		if (r)
 			return r;
 	}
 
-	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3))
-		DRM_WARN("smart shift update failed\n");
+	if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D3))
+		dev_warn(adev->dev, "smart shift update failed\n");
 
 	if (notify_clients)
 		drm_client_dev_suspend(adev_to_drm(adev), false);
@@ -5074,7 +5157,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
 	amdgpu_device_ip_suspend_phase1(adev);
 
 	if (!adev->in_s0ix) {
-		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
+		amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
 		amdgpu_userq_suspend(adev);
 	}
 
@@ -5098,6 +5181,32 @@ int amdgpu_device_suspend(struct drm_device *dev, bool notify_clients)
 	return 0;
 }
 
+static inline int amdgpu_virt_resume(struct amdgpu_device *adev)
+{
+	int r;
+	unsigned int prev_physical_node_id = adev->gmc.xgmi.physical_node_id;
+
+	/* During VM resume, QEMU programming of VF MSIX table (register GFXMSIX_VECT0_ADDR_LO)
+	 * may not work. The access could be blocked by nBIF protection as VF isn't in
+	 * exclusive access mode. Exclusive access is enabled now, disable/enable MSIX
+	 * so that QEMU reprograms MSIX table.
+	 */
+	amdgpu_restore_msix(adev);
+
+	r = adev->gfxhub.funcs->get_xgmi_info(adev);
+	if (r)
+		return r;
+
+	dev_info(adev->dev, "xgmi node, old id %d, new id %d\n",
+		 prev_physical_node_id, adev->gmc.xgmi.physical_node_id);
+
+	adev->vm_manager.vram_base_offset = adev->gfxhub.funcs->get_mc_fb_offset(adev);
+	adev->vm_manager.vram_base_offset +=
+		adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
+
+	return 0;
+}
+
 /**
 * amdgpu_device_resume - initiate device resume
 *
@@ -5119,6 +5228,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
 			return r;
 	}
 
+	if (amdgpu_virt_xgmi_migrate_enabled(adev)) {
+		r = amdgpu_virt_resume(adev);
+		if (r)
+			goto exit;
+	}
+
 	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 		return 0;
 
@@ -5140,7 +5255,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool notify_clients)
 	}
 
 	if (!adev->in_s0ix) {
-		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
+		r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm);
 		if (r)
 			goto exit;
 
@@ -5159,6 +5274,9 @@ exit:
 	if (amdgpu_sriov_vf(adev)) {
 		amdgpu_virt_init_data_exchange(adev);
 		amdgpu_virt_release_full_gpu(adev, true);
+
+		if (!adev->in_s0ix && !r && !adev->in_runpm)
+			r = amdgpu_amdkfd_resume_process(adev);
 	}
 
 	if (r)
@@ -5193,10 +5311,12 @@ exit:
 		dev->dev->power.disable_depth--;
 #endif
 	}
+
+	amdgpu_vram_mgr_clear_reset_blocks(adev);
 	adev->in_suspend = false;
 
-	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
-		DRM_WARN("smart shift update failed\n");
+	if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DEV_D0))
+		dev_warn(adev->dev, "smart shift update failed\n");
 
 	return 0;
 }
@@ -5727,7 +5847,9 @@ int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context)
 				amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
 
 				if (vram_lost) {
-					DRM_INFO("VRAM is lost due to GPU reset!\n");
+					dev_info(
+						tmp_adev->dev,
+						"VRAM is lost due to GPU reset!\n");
 					amdgpu_inc_vram_lost(tmp_adev);
 				}
 
@@ -6006,29 +6128,20 @@ static int amdgpu_device_health_check(struct list_head *device_list_handle)
 {
 	struct amdgpu_device *tmp_adev;
 	int ret = 0;
-	u32 status;
 
 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
-		pci_read_config_dword(tmp_adev->pdev, PCI_COMMAND, &status);
-		if (PCI_POSSIBLE_ERROR(status)) {
-			dev_err(tmp_adev->dev, "device lost from bus!");
-			ret = -ENODEV;
-		}
+		ret |= amdgpu_device_bus_status_check(tmp_adev);
 	}
 
 	return ret;
 }
 
-static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
-			      struct amdgpu_job *job,
-			      struct amdgpu_reset_context *reset_context,
-			      struct list_head *device_list,
-			      struct amdgpu_hive_info *hive,
-			      bool need_emergency_restart)
+static int amdgpu_device_recovery_prepare(struct amdgpu_device *adev,
+					  struct list_head *device_list,
+					  struct amdgpu_hive_info *hive)
 {
-	struct list_head *device_list_handle = NULL;
 	struct amdgpu_device *tmp_adev = NULL;
-	int i, r = 0;
+	int r;
 
	/*
	 * Build list of devices to reset.
@@ -6045,26 +6158,55 @@ static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
 		}
 		if (!list_is_first(&adev->reset_list, device_list))
 			list_rotate_to_front(&adev->reset_list, device_list);
-		device_list_handle = device_list;
 	} else {
 		list_add_tail(&adev->reset_list, device_list);
-		device_list_handle = device_list;
 	}
 
 	if (!amdgpu_sriov_vf(adev) && (!adev->pcie_reset_ctx.occurs_dpc)) {
-		r = amdgpu_device_health_check(device_list_handle);
+		r = amdgpu_device_health_check(device_list);
 		if (r)
 			return r;
 	}
 
-	/* We need to lock reset domain only once both for XGMI and single device */
-	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
-				    reset_list);
+	return 0;
+}
+
+static void amdgpu_device_recovery_get_reset_lock(struct amdgpu_device *adev,
+						  struct list_head *device_list)
+{
+	struct amdgpu_device *tmp_adev = NULL;
+
+	if (list_empty(device_list))
+		return;
+	tmp_adev =
+		list_first_entry(device_list, struct amdgpu_device, reset_list);
 	amdgpu_device_lock_reset_domain(tmp_adev->reset_domain);
+}
 
-	/* block all schedulers and reset given job's ring */
-	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+static void amdgpu_device_recovery_put_reset_lock(struct amdgpu_device *adev,
+						  struct list_head *device_list)
+{
+	struct amdgpu_device *tmp_adev = NULL;
+	if (list_empty(device_list))
+		return;
+	tmp_adev =
+		list_first_entry(device_list, struct amdgpu_device, reset_list);
+	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
+}
+
+static void amdgpu_device_halt_activities(struct amdgpu_device *adev,
+					  struct amdgpu_job *job,
+					  struct amdgpu_reset_context *reset_context,
+					  struct list_head *device_list,
+					  struct amdgpu_hive_info *hive,
+					  bool need_emergency_restart)
+{
+	struct amdgpu_device *tmp_adev = NULL;
+	int i;
+
+	/* block all schedulers and reset given job's ring */
+	list_for_each_entry(tmp_adev, device_list, reset_list) {
 		amdgpu_device_set_mp1_state(tmp_adev);
 
		/*
@@ -6113,8 +6255,6 @@ static int amdgpu_device_halt_activities(struct amdgpu_device *adev,
 		}
 		atomic_inc(&tmp_adev->gpu_reset_counter);
 	}
-
-	return r;
 }
 
 static int amdgpu_device_asic_reset(struct amdgpu_device *adev,
@@ -6219,8 +6359,10 @@ static int amdgpu_device_sched_resume(struct list_head *device_list,
 			amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
 		} else {
 			dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter));
-			if (amdgpu_acpi_smart_shift_update(adev_to_drm(tmp_adev), AMDGPU_SS_DEV_D0))
-				DRM_WARN("smart shift update failed\n");
+			if (amdgpu_acpi_smart_shift_update(tmp_adev,
+							   AMDGPU_SS_DEV_D0))
+				dev_warn(tmp_adev->dev,
+					 "smart shift update failed\n");
 		}
 	}
 
@@ -6252,11 +6394,6 @@ static void amdgpu_device_gpu_resume(struct amdgpu_device *adev,
 
 		amdgpu_ras_set_error_query_ready(tmp_adev, true);
 	}
-
-	tmp_adev = list_first_entry(device_list, struct amdgpu_device,
-				    reset_list);
-	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
-
 }
 
 
@@ -6306,7 +6443,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
	 */
 	if (need_emergency_restart && amdgpu_ras_get_context(adev) &&
 	    amdgpu_ras_get_context(adev)->reboot) {
-		DRM_WARN("Emergency reboot.");
+		dev_warn(adev->dev, "Emergency reboot.");
 
 		ksys_sync_helper();
 		emergency_restart();
@@ -6324,11 +6461,14 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	reset_context->hive = hive;
 	INIT_LIST_HEAD(&device_list);
 
-	r = amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
-					 hive, need_emergency_restart);
-	if (r)
+	if (amdgpu_device_recovery_prepare(adev, &device_list, hive))
 		goto end_reset;
 
+	/* We need to lock reset domain only once both for XGMI and single device */
+	amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+
+	amdgpu_device_halt_activities(adev, job, reset_context, &device_list,
+				      hive, need_emergency_restart);
+
 	if (need_emergency_restart)
 		goto skip_sched_resume;
	/*
@@ -6337,7 +6477,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
	 *
	 * job->base holds a reference to parent fence
	 */
-	if (job && dma_fence_is_signaled(&job->hw_fence)) {
+	if (job && dma_fence_is_signaled(&job->hw_fence.base)) {
 		job_signaled = true;
 		dev_info(adev->dev, "Guilty job already signaled, skipping HW reset");
 		goto skip_hw_reset;
 	}
@@ -6345,13 +6485,15 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	r = amdgpu_device_asic_reset(adev, &device_list, reset_context);
 	if (r)
-		goto end_reset;
+		goto reset_unlock;
 
 skip_hw_reset:
 	r = amdgpu_device_sched_resume(&device_list, reset_context, job_signaled);
 	if (r)
-		goto end_reset;
+		goto reset_unlock;
 skip_sched_resume:
 	amdgpu_device_gpu_resume(adev, &device_list, need_emergency_restart);
+reset_unlock:
+	amdgpu_device_recovery_put_reset_lock(adev, &device_list);
 end_reset:
 	if (hive) {
 		mutex_unlock(&hive->hive_lock);
@@ -6692,12 +6834,11 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
 #endif
 }
 
-int amdgpu_device_baco_enter(struct drm_device *dev)
+int amdgpu_device_baco_enter(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
-	if (!amdgpu_device_supports_baco(dev))
+	if (!amdgpu_device_supports_baco(adev))
 		return -ENOTSUPP;
 
 	if (ras && adev->ras_enabled &&
@@ -6707,13 +6848,12 @@ int amdgpu_device_baco_enter(struct drm_device *dev)
 	return amdgpu_dpm_baco_enter(adev);
 }
 
-int amdgpu_device_baco_exit(struct drm_device *dev)
+int amdgpu_device_baco_exit(struct amdgpu_device *adev)
 {
-	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 	int ret = 0;
 
-	if (!amdgpu_device_supports_baco(dev))
+	if (!amdgpu_device_supports_baco(adev))
 		return -ENOTSUPP;
 
 	ret = amdgpu_dpm_baco_exit(adev);
@@ -6747,7 +6887,6 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
 	struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
 	struct amdgpu_reset_context reset_context;
 	struct list_head device_list;
-	int r = 0;
 
 	dev_info(adev->dev, "PCI error: detected callback!!\n");
 
@@ -6772,14 +6911,14 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta
 		memset(&reset_context, 0, sizeof(reset_context));
 		INIT_LIST_HEAD(&device_list);
 
-		r = amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list,
-					 hive, false);
+		amdgpu_device_recovery_prepare(adev, &device_list, hive);
+		amdgpu_device_recovery_get_reset_lock(adev, &device_list);
+		amdgpu_device_halt_activities(adev, NULL, &reset_context,
+					      &device_list, hive, false);
 		if (hive) {
 			mutex_unlock(&hive->hive_lock);
 			amdgpu_put_xgmi_hive(hive);
 		}
-		if (r)
-			return PCI_ERS_RESULT_DISCONNECT;
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
 		/* Permanent error, prepare for device removal */
@@ -6889,8 +7028,8 @@ out:
 		if (hive) {
 			list_for_each_entry(tmp_adev, &device_list, reset_list)
 				amdgpu_device_unset_mp1_state(tmp_adev);
-			amdgpu_device_unlock_reset_domain(adev->reset_domain);
 		}
+		amdgpu_device_recovery_put_reset_lock(adev, &device_list);
 	}
 
 	if (hive) {
@@ -6936,6 +7075,7 @@ void
amdgpu_pci_resume(struct pci_dev *pdev) amdgpu_device_sched_resume(&device_list, NULL, NULL); amdgpu_device_gpu_resume(adev, &device_list, false); + amdgpu_device_recovery_put_reset_lock(adev, &device_list); adev->pcie_reset_ctx.occurs_dpc = false; if (hive) { @@ -6960,11 +7100,11 @@ bool amdgpu_device_cache_pci_state(struct pci_dev *pdev) adev->pci_state = pci_store_saved_state(pdev); if (!adev->pci_state) { - DRM_ERROR("Failed to store PCI saved state"); + dev_err(adev->dev, "Failed to store PCI saved state"); return false; } } else { - DRM_WARN("Failed to save PCI state, err:%d\n", r); + dev_warn(adev->dev, "Failed to save PCI state, err:%d\n", r); return false; } @@ -6985,7 +7125,7 @@ bool amdgpu_device_load_pci_state(struct pci_dev *pdev) if (!r) { pci_restore_state(pdev); } else { - DRM_WARN("Failed to load PCI state, err:%d\n", r); + dev_warn(adev->dev, "Failed to load PCI state, err:%d\n", r); return false; } @@ -7231,7 +7371,7 @@ struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev, dep = amdgpu_sync_peek_fence(&isolation->prev, ring); r = amdgpu_sync_fence(&isolation->active, &f->finished, GFP_NOWAIT); if (r) - DRM_WARN("OOM tracking isolation\n"); + dev_warn(adev->dev, "OOM tracking isolation\n"); out_grab_ref: dma_fence_get(dep); @@ -7299,9 +7439,11 @@ uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, tmp_ = RREG32(reg_addr); loop--; if (!loop) { - DRM_WARN("Register(%d) [%s] failed to reach value 0x%08x != 0x%08xn", - inst, reg_name, (uint32_t)expected_value, - (uint32_t)(tmp_ & (mask))); + dev_warn( + adev->dev, + "Register(%d) [%s] failed to reach value 0x%08x != 0x%08x\n", + inst, reg_name, (uint32_t)expected_value, + (uint32_t)(tmp_ & (mask))); ret = -ETIMEDOUT; break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index a0e9bf9b2710..efe0058b48ca 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -276,7 +276,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev, u32 msg; if (!amdgpu_sriov_vf(adev)) { - /* It can take up to a second for IFWI init to complete on some dGPUs, + /* It can take up to two seconds for IFWI init to complete on some dGPUs, * but generally it should be in the 60-100ms range. Normally this starts * as soon as the device gets power so by the time the OS loads this has long * completed. However, when a card is hotplugged via e.g., USB4, we need to * wait for this to complete. Once the C2PMSG is updated, we can * continue.
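* Each iteration of the poll below sleeps about 1 ms (msleep(1), outside this hunk's context), so raising the bound from 1000 to 2000 iterations stretches the worst-case wait to roughly two seconds.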
*/ - for (i = 0; i < 1000; i++) { + for (i = 0; i < 2000; i++) { msg = RREG32(mmMP0_SMN_C2PMSG_33); if (msg & 0x80000000) break; @@ -321,10 +321,12 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, const struct firmware *fw; int r; - r = request_firmware(&fw, fw_name, adev->dev); + r = firmware_request_nowarn(&fw, fw_name, adev->dev); if (r) { - dev_err(adev->dev, "can't load firmware \"%s\"\n", - fw_name); + if (amdgpu_discovery == 2) + dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name); + else + drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name); return r; } @@ -459,16 +461,12 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev) /* Read from file if it is the preferred option */ fw_name = amdgpu_discovery_get_fw_name(adev); if (fw_name != NULL) { - dev_info(adev->dev, "use ip discovery information from file"); + drm_dbg(&adev->ddev, "use ip discovery information from file"); r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name); - - if (r) { - dev_err(adev->dev, "failed to read ip discovery binary from file\n"); - r = -EINVAL; + if (r) goto out; - } - } else { + drm_dbg(&adev->ddev, "use ip discovery information from memory"); r = amdgpu_discovery_read_binary_from_mem( adev, adev->mman.discovery_bin); if (r) @@ -1338,10 +1336,8 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) int r; r = amdgpu_discovery_init(adev); - if (r) { - DRM_ERROR("amdgpu_discovery_init failed\n"); + if (r) return r; - } wafl_ver = 0; adev->gfx.xcc_mask = 0; @@ -2559,38 +2555,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_RAVEN: - case CHIP_VEGA20: - case CHIP_ARCTURUS: - case CHIP_ALDEBARAN: - /* this is not fatal. We have a fallback below - * if the new firmwares are not present. some of - * this will be overridden below to keep things - * consistent with the current behavior. + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. */ - r = amdgpu_discovery_reg_base_init(adev); - if (!r) { - amdgpu_discovery_harvest_ip(adev); - amdgpu_discovery_get_gfx_info(adev); - amdgpu_discovery_get_mall_info(adev); - amdgpu_discovery_get_vcn_info(adev); - } - break; - default: - r = amdgpu_discovery_reg_base_init(adev); - if (r) - return -EINVAL; - - amdgpu_discovery_harvest_ip(adev); - amdgpu_discovery_get_gfx_info(adev); - amdgpu_discovery_get_mall_info(adev); - amdgpu_discovery_get_vcn_info(adev); - break; - } - - switch (adev->asic_type) { - case CHIP_VEGA10: + amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 2; adev->gmc.num_umc = 4; @@ -2613,6 +2582,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0); break; case CHIP_VEGA12: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 2; adev->gmc.num_umc = 4; @@ -2635,6 +2609,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1); break; case CHIP_RAVEN: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. 
+ */ + amdgpu_discovery_init(adev); vega10_reg_base_init(adev); adev->sdma.num_instances = 1; adev->vcn.num_vcn_inst = 1; @@ -2676,6 +2655,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) } break; case CHIP_VEGA20: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); vega20_reg_base_init(adev); adev->sdma.num_instances = 2; adev->gmc.num_umc = 8; @@ -2699,6 +2683,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0); break; case CHIP_ARCTURUS: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); arct_reg_base_init(adev); adev->sdma.num_instances = 8; adev->vcn.num_vcn_inst = 2; @@ -2727,6 +2716,11 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0); break; case CHIP_ALDEBARAN: + /* This is not fatal. We only need the discovery + * binary for sysfs. We don't need it for a + * functional system. + */ + amdgpu_discovery_init(adev); aldebaran_reg_base_init(adev); adev->sdma.num_instances = 5; adev->vcn.num_vcn_inst = 2; @@ -2753,6 +2747,16 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0); break; default: + r = amdgpu_discovery_reg_base_init(adev); + if (r) { + drm_err(&adev->ddev, "discovery failed: %d\n", r); + return r; + } + + amdgpu_discovery_harvest_ip(adev); + amdgpu_discovery_get_gfx_info(adev); + amdgpu_discovery_get_mall_info(adev); + amdgpu_discovery_get_vcn_info(adev); break; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 35c778426a7c..51bab32fd8c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1196,13 +1196,14 @@ static int amdgpu_display_get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb static int amdgpu_display_gem_fb_verify_and_init(struct drm_device *dev, struct amdgpu_framebuffer *rfb, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; rfb->base.obj[0] = obj; - drm_helper_mode_fill_fb_struct(dev, &rfb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &rfb->base, info, mode_cmd); /* Verify that the modifier is supported. 
*/ if (!drm_any_plane_has_format(dev, mode_cmd->pixel_format, mode_cmd->modifier[0])) { @@ -1297,6 +1298,7 @@ static int amdgpu_display_framebuffer_init(struct drm_device *dev, struct drm_framebuffer * amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct amdgpu_framebuffer *amdgpu_fb; @@ -1317,7 +1319,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ bo = gem_to_amdgpu_bo(obj); domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags); - if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) { + if (drm_gem_is_imported(obj) && !(domains & AMDGPU_GEM_DOMAIN_GTT)) { drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n"); drm_gem_object_put(obj); return ERR_PTR(-EINVAL); @@ -1330,7 +1332,7 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, } ret = amdgpu_display_gem_fb_verify_and_init(dev, amdgpu_fb, file_priv, - mode_cmd, obj); + info, mode_cmd, obj); if (ret) { kfree(amdgpu_fb); drm_gem_object_put(obj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h index dfa0d642ac16..930c171473b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.h @@ -44,6 +44,7 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev, struct drm_framebuffer * amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd); const struct drm_format_info * amdgpu_lookup_format_info(u32 format, uint64_t modifier); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 44e120f9f764..ce27cb5bb05e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -285,6 +285,36 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, return ret; } +static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map) +{ + struct drm_gem_object *obj = dma_buf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + int ret; + + /* + * Pin to keep buffer in place while it's vmap'ed. The actual + * domain is not that important as long as it's mappable. Using + * GTT and VRAM should be compatible with most use cases.
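+ * Requesting GTT | VRAM also means the pin can succeed in whatever placement the BO already has, so no migration is forced just for the vmap.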
+ */ + ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM); + if (ret) + return ret; + ret = drm_gem_dmabuf_vmap(dma_buf, map); + if (ret) + amdgpu_bo_unpin(bo); + + return ret; +} + +static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) +{ + struct drm_gem_object *obj = dma_buf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + + drm_gem_dmabuf_vunmap(dma_buf, map); + amdgpu_bo_unpin(bo); +} + const struct dma_buf_ops amdgpu_dmabuf_ops = { .attach = amdgpu_dma_buf_attach, .pin = amdgpu_dma_buf_pin, @@ -294,8 +324,8 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = { .release = drm_gem_dmabuf_release, .begin_cpu_access = amdgpu_dma_buf_begin_cpu_access, .mmap = drm_gem_dmabuf_mmap, - .vmap = drm_gem_dmabuf_vmap, - .vunmap = drm_gem_dmabuf_vunmap, + .vmap = amdgpu_dma_buf_vmap, + .vunmap = amdgpu_dma_buf_vunmap, }; /** @@ -513,7 +543,7 @@ bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev, if (!adev) return false; - if (obj->import_attach) { + if (drm_gem_is_imported(obj)) { struct dma_buf *dma_buf = obj->import_attach->dmabuf; if (dma_buf->ops != &amdgpu_dmabuf_ops) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c index 3f3662e8b871..3040437d99c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_doorbell_mgr.c @@ -41,7 +41,8 @@ u32 amdgpu_mm_rdoorbell(struct amdgpu_device *adev, u32 index) if (index < adev->doorbell.num_kernel_doorbells) return readl(adev->doorbell.cpu_addr + index); - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); + dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n", + index); return 0; } @@ -63,7 +64,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v) if (index < adev->doorbell.num_kernel_doorbells) writel(v, adev->doorbell.cpu_addr + index); else - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); + dev_err(adev->dev, + "writing beyond doorbell aperture: 0x%08x!\n", index); } /** @@ -83,7 +85,8 @@ u64 amdgpu_mm_rdoorbell64(struct amdgpu_device *adev, u32 index) if (index < adev->doorbell.num_kernel_doorbells) return atomic64_read((atomic64_t *)(adev->doorbell.cpu_addr + index)); - DRM_ERROR("reading beyond doorbell aperture: 0x%08x!\n", index); + dev_err(adev->dev, "reading beyond doorbell aperture: 0x%08x!\n", + index); return 0; } @@ -105,7 +108,8 @@ void amdgpu_mm_wdoorbell64(struct amdgpu_device *adev, u32 index, u64 v) if (index < adev->doorbell.num_kernel_doorbells) atomic64_set((atomic64_t *)(adev->doorbell.cpu_addr + index), v); else - DRM_ERROR("writing beyond doorbell aperture: 0x%08x!\n", index); + dev_err(adev->dev, + "writing beyond doorbell aperture: 0x%08x!\n", index); } /** @@ -166,7 +170,8 @@ int amdgpu_doorbell_create_kernel_doorbells(struct amdgpu_device *adev) NULL, (void **)&adev->doorbell.cpu_addr); if (r) { - DRM_ERROR("Failed to allocate kernel doorbells, err=%d\n", r); + dev_err(adev->dev, + "Failed to allocate kernel doorbells, err=%d\n", r); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 4db92e0a60da..395c6be901ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -144,6 +144,7 @@ enum AMDGPU_DEBUG_MASK { AMDGPU_DEBUG_DISABLE_GPU_RING_RESET = BIT(6), AMDGPU_DEBUG_SMU_POOL = BIT(7), AMDGPU_DEBUG_VM_USERPTR = BIT(8), + AMDGPU_DEBUG_DISABLE_RAS_CE_LOG = BIT(9) }; unsigned int 
amdgpu_vram_limit = UINT_MAX; @@ -361,12 +362,12 @@ module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint * The second one is for Compute. The third and fourth ones are * for SDMA and Video. * - * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video) - * jobs is 10000. The timeout for compute is 60000. + * By default (with no lockup_timeout settings), the timeout for all jobs is 10000. */ -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; " - "for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " - "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); +MODULE_PARM_DESC(lockup_timeout, + "GPU lockup timeout in ms (default: 10000 for all jobs. " + "0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " + "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444); /** @@ -2278,6 +2279,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev) pr_info("debug: VM mode debug for userptr is enabled\n"); adev->debug_vm_userptr = true; } + + if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_RAS_CE_LOG) { + pr_info("debug: disable kernel logs of correctable errors\n"); + adev->debug_disable_ce_logs = true; + } } static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags) @@ -2321,7 +2327,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, amdgpu_aspm = 0; if (amdgpu_virtual_display || - amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) + amdgpu_device_asic_has_dc_support(pdev, flags & AMD_ASIC_MASK)) supports_atomic = true; if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) { @@ -2451,10 +2457,10 @@ retry_init: if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) { /* only need to skip on ATPX */ - if (amdgpu_device_supports_px(ddev)) + if (amdgpu_device_supports_px(adev)) dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); /* we want direct complete for BOCO */ - if (amdgpu_device_supports_boco(ddev)) + if (amdgpu_device_supports_boco(adev)) dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE | DPM_FLAG_SMART_SUSPEND | DPM_FLAG_MAY_SKIP_RESUME); @@ -2487,9 +2493,9 @@ retry_init: * into D0 state. Then there will be a PMFW-aware D-state * transition(D0->D3) on runpm suspend. */ - if (amdgpu_device_supports_baco(ddev) && + if (amdgpu_device_supports_baco(adev) && !(adev->flags & AMD_IS_APU) && - (adev->asic_type >= CHIP_NAVI10)) + adev->asic_type >= CHIP_NAVI10) amdgpu_get_secondary_funcs(adev); } @@ -2506,6 +2512,7 @@ amdgpu_pci_remove(struct pci_dev *pdev) struct drm_device *dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = drm_to_adev(dev); + amdgpu_ras_eeprom_check_and_recover(adev); amdgpu_xcp_dev_unplug(adev); amdgpu_gmc_prepare_nps_mode_change(adev); drm_dev_unplug(dev); @@ -2535,6 +2542,10 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) if (amdgpu_ras_intr_triggered()) return; + /* device may not be resumed here, return immediately in this case */ + if (adev->in_s4 && adev->in_suspend) + return; + /* if we are running in a VM, make sure the device * torn down properly on reboot/shutdown.
* unfortunately we can't detect certain @@ -2551,11 +2562,14 @@ static int amdgpu_pmops_prepare(struct device *dev) struct drm_device *drm_dev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(drm_dev); + /* device may not be resumed here, return immediately in this case */ + if (adev->in_s4 && adev->in_suspend) + return 0; + /* Return a positive number here so * DPM_FLAG_SMART_SUSPEND works properly */ - if (amdgpu_device_supports_boco(drm_dev) && - pm_runtime_suspended(dev)) + if (amdgpu_device_supports_boco(adev) && pm_runtime_suspended(dev)) return 1; /* if we will not support s3 or s2i for the device @@ -2570,7 +2584,7 @@ static int amdgpu_pmops_prepare(struct device *dev) static void amdgpu_pmops_complete(struct device *dev) { - /* nothing to do */ + amdgpu_device_complete(dev_get_drvdata(dev)); } static int amdgpu_pmops_suspend(struct device *dev) @@ -2650,12 +2664,21 @@ static int amdgpu_pmops_thaw(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + /* do not resume device if it's a normal hibernation */ + if (!pm_hibernate_is_recovering()) + return 0; + return amdgpu_device_resume(drm_dev, true); } static int amdgpu_pmops_poweroff(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + + /* device may not be resumed here, return immediately in this case */ + if (adev->in_s4 && adev->in_suspend) + return 0; return amdgpu_device_suspend(drm_dev, true); } @@ -2828,7 +2851,7 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev) /* nothing to do */ } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) { - amdgpu_device_baco_enter(drm_dev); + amdgpu_device_baco_enter(adev); } dev_dbg(&pdev->dev, "asic/device is runtime suspended\n"); @@ -2869,7 +2892,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev) pci_set_master(pdev); } else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) || (adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) { - amdgpu_device_baco_exit(drm_dev); + amdgpu_device_baco_exit(adev); } ret = amdgpu_device_resume(drm_dev, false); if (ret) { @@ -3107,10 +3130,6 @@ static int __init amdgpu_init(void) if (r) goto error_sync; - r = amdgpu_fence_slab_init(); - if (r) - goto error_fence; - r = amdgpu_userq_fence_slab_init(); if (r) goto error_fence; @@ -3145,7 +3164,6 @@ static void __exit amdgpu_exit(void) amdgpu_unregister_atpx_handler(); amdgpu_acpi_release(); amdgpu_sync_fini(); - amdgpu_fence_slab_fini(); amdgpu_userq_fence_slab_fini(); mmu_notifier_synchronize(); amdgpu_xcp_drv_release(); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 8cecf25996ed..9e7506965cab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -42,37 +42,6 @@ #include "amdgpu_reset.h" /* - * Fences mark an event in the GPUs pipeline and are used - * for GPU/CPU synchronization. When the fence is written, - * it is expected that all buffers associated with that fence - * are no longer in use by the associated ring on the GPU and - * that the relevant GPU caches have been flushed. - */ - -struct amdgpu_fence { - struct dma_fence base; - - /* RB, DMA, etc.
*/ - struct amdgpu_ring *ring; - ktime_t start_timestamp; -}; - -static struct kmem_cache *amdgpu_fence_slab; - -int amdgpu_fence_slab_init(void) -{ - amdgpu_fence_slab = KMEM_CACHE(amdgpu_fence, SLAB_HWCACHE_ALIGN); - if (!amdgpu_fence_slab) - return -ENOMEM; - return 0; -} - -void amdgpu_fence_slab_fini(void) -{ - rcu_barrier(); - kmem_cache_destroy(amdgpu_fence_slab); -} -/* * Cast helper */ static const struct dma_fence_ops amdgpu_fence_ops; @@ -130,14 +99,14 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring) * * @ring: ring the fence is associated with * @f: resulting fence object - * @job: job the fence is embedded in + * @af: amdgpu fence input * @flags: flags to pass into the subordinate .emit_fence() call * * Emits a fence command on the requested ring (all asics). * Returns 0 on success, -ENOMEM on failure. */ -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job, - unsigned int flags) +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, + struct amdgpu_fence *af, unsigned int flags) { struct amdgpu_device *adev = ring->adev; struct dma_fence *fence; @@ -146,40 +115,35 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amd uint32_t seq; int r; - if (job == NULL) { - /* create a sperate hw fence */ - am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC); - if (am_fence == NULL) + if (!af) { + /* create a separate hw fence */ + am_fence = kzalloc(sizeof(*am_fence), GFP_KERNEL); + if (!am_fence) return -ENOMEM; - fence = &am_fence->base; - am_fence->ring = ring; + am_fence->context = 0; } else { - /* take use of job-embedded fence */ - fence = &job->hw_fence; + am_fence = af; } + fence = &am_fence->base; + am_fence->ring = ring; seq = ++ring->fence_drv.sync_seq; - if (job && job->job_run_counter) { - /* reinit seq for resubmitted jobs */ - fence->seqno = seq; - /* TO be inline with external fence creation and other drivers */ + am_fence->seq = seq; + if (af) { + dma_fence_init(fence, &amdgpu_job_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, seq); + /* Against remove in amdgpu_job_{free, free_cb} */ dma_fence_get(fence); } else { - if (job) { - dma_fence_init(fence, &amdgpu_job_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, seq); - /* Against remove in amdgpu_job_{free, free_cb} */ - dma_fence_get(fence); - } else { - dma_fence_init(fence, &amdgpu_fence_ops, - &ring->fence_drv.lock, - adev->fence_context + ring->idx, seq); - } + dma_fence_init(fence, &amdgpu_fence_ops, + &ring->fence_drv.lock, + adev->fence_context + ring->idx, seq); } amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, seq, flags | AMDGPU_FENCE_FLAG_INT); + amdgpu_fence_save_wptr(fence); pm_runtime_get_noresume(adev_to_drm(adev)->dev); ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; if (unlikely(rcu_dereference_protected(*ptr, 1))) { @@ -292,6 +256,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) do { struct dma_fence *fence, **ptr; + struct amdgpu_fence *am_fence; ++last_seq; last_seq &= drv->num_fences_mask; @@ -304,6 +269,12 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring) if (!fence) continue; + /* Save the wptr in the fence driver so we know what the last processed + * wptr was. This is required for re-emitting the ring state for + * queues that are reset but are not guilty and thus have no guilty fence. 
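+ * As a result, signalled_wptr always holds the wptr of the newest signaled fence; amdgpu_ring_backup_unprocessed_commands() below uses it as the lower bound of its copy.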
+ */ + am_fence = container_of(fence, struct amdgpu_fence, base); + drv->signalled_wptr = am_fence->wptr; dma_fence_signal(fence); dma_fence_put(fence); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); @@ -326,7 +297,9 @@ static void amdgpu_fence_fallback(struct timer_list *t) fence_drv.fallback_timer); if (amdgpu_fence_process(ring)) - DRM_WARN("Fence fallback timer expired on ring %s\n", ring->name); + dev_warn(ring->adev->dev, + "Fence fallback timer expired on ring %s\n", + ring->name); } /** @@ -718,7 +691,7 @@ void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring) * it right here or we won't be able to track them in fence_drv * and they will remain unsignaled during sa_bo free. */ - job = container_of(old, struct amdgpu_job, hw_fence); + job = container_of(old, struct amdgpu_job, hw_fence.base); if (!job->base.s_fence && !dma_fence_is_signaled(old)) dma_fence_signal(old); RCU_INIT_POINTER(*ptr, NULL); @@ -764,6 +737,86 @@ void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring) amdgpu_fence_process(ring); } + +/** + * Kernel queue reset handling + * + * The driver can reset individual queues for most engines, but those queues + * may contain work from multiple contexts. Resetting the queue will + * lose all of that state. In order to minimize the collateral damage, the + * driver will save the ring contents which are not associated with the guilty + * context prior to resetting the queue. After resetting the queue the queue + * contents from the other contexts are re-emitted to the rings so that they can + * be processed by the engine. To handle this, we save the queue's write + * pointer (wptr) in the fences associated with each context. If we get a + * queue timeout, we can then use the wptrs from the fences to determine + * which data needs to be saved out of the queue's ring buffer. + */ + +/** + * amdgpu_fence_driver_guilty_force_completion - force signal of specified sequence + * + * @fence: fence of the ring to signal + * + */ +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence) +{ + dma_fence_set_error(&fence->base, -ETIME); + amdgpu_fence_write(fence->ring, fence->seq); + amdgpu_fence_process(fence->ring); +} + +void amdgpu_fence_save_wptr(struct dma_fence *fence) +{ + struct amdgpu_fence *am_fence = container_of(fence, struct amdgpu_fence, base); + + am_fence->wptr = am_fence->ring->wptr; +} + +static void amdgpu_ring_backup_unprocessed_command(struct amdgpu_ring *ring, + u64 start_wptr, u32 end_wptr) +{ + unsigned int first_idx = start_wptr & ring->buf_mask; + unsigned int last_idx = end_wptr & ring->buf_mask; + unsigned int i; + + /* Backup the contents of the ring buffer.
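+ * The copy below walks indices modulo buf_mask: with buf_mask 0x3ff, a span from start_wptr 1020 to end_wptr 4 saves slots 1020..1023 and then 0..3.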
*/ + for (i = first_idx; i != last_idx; ++i, i &= ring->buf_mask) + ring->ring_backup[ring->ring_backup_entries_to_copy++] = ring->ring[i]; +} + +void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence) +{ + struct dma_fence *unprocessed; + struct dma_fence __rcu **ptr; + struct amdgpu_fence *fence; + u64 wptr, i, seqno; + + seqno = amdgpu_fence_read(ring); + wptr = ring->fence_drv.signalled_wptr; + ring->ring_backup_entries_to_copy = 0; + + for (i = seqno + 1; i <= ring->fence_drv.sync_seq; ++i) { + ptr = &ring->fence_drv.fences[i & ring->fence_drv.num_fences_mask]; + rcu_read_lock(); + unprocessed = rcu_dereference(*ptr); + + if (unprocessed && !dma_fence_is_signaled(unprocessed)) { + fence = container_of(unprocessed, struct amdgpu_fence, base); + + /* save everything if the ring is not guilty, otherwise + * just save the content from other contexts. + */ + if (!guilty_fence || (fence->context != guilty_fence->context)) + amdgpu_ring_backup_unprocessed_command(ring, wptr, + fence->wptr); + wptr = fence->wptr; + } + rcu_read_unlock(); + } +} + /* * Common fence implementation */ @@ -780,7 +833,7 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f) static const char *amdgpu_job_fence_get_timeline_name(struct dma_fence *f) { - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence); + struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base); return (const char *)to_amdgpu_ring(job->base.sched)->name; } @@ -810,7 +863,7 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f) */ static bool amdgpu_job_fence_enable_signaling(struct dma_fence *f) { - struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence); + struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence.base); if (!timer_pending(&to_amdgpu_ring(job->base.sched)->fence_drv.fallback_timer)) amdgpu_fence_schedule_fallback(to_amdgpu_ring(job->base.sched)); @@ -830,7 +883,7 @@ static void amdgpu_fence_free(struct rcu_head *rcu) struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); /* free fence_slab if it's separated fence*/ - kmem_cache_free(amdgpu_fence_slab, to_amdgpu_fence(f)); + kfree(to_amdgpu_fence(f)); } /** @@ -845,7 +898,7 @@ static void amdgpu_job_fence_free(struct rcu_head *rcu) struct dma_fence *f = container_of(rcu, struct dma_fence, rcu); /* free job if fence has a parent job */ - kfree(container_of(f, struct amdgpu_job, hw_fence)); + kfree(container_of(f, struct amdgpu_job, hw_fence.base)); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 1ae88c459da5..b0082aa7f3c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -144,7 +144,8 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) /* If algo exists, it means that the i2c_adapter's initialized */ if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) { - DRM_WARN("Cannot access FRU, EEPROM accessor not initialized"); + dev_warn(adev->dev, + "Cannot access FRU, EEPROM accessor not initialized"); return -ENODEV; } @@ -152,19 +153,22 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, fru_addr, buf, sizeof(buf)); if (len != 8) { - DRM_ERROR("Couldn't read the IPMI Common Header: %d", len); + dev_err(adev->dev, "Couldn't read the IPMI Common Header: %d", + len); return len < 0 ? 
len : -EIO; } if (buf[0] != 1) { - DRM_ERROR("Bad IPMI Common Header version: 0x%02x", buf[0]); + dev_err(adev->dev, "Bad IPMI Common Header version: 0x%02x", + buf[0]); return -EIO; } for (csum = 0; len > 0; len--) csum += buf[len - 1]; if (csum) { - DRM_ERROR("Bad IPMI Common Header checksum: 0x%02x", csum); + dev_err(adev->dev, "Bad IPMI Common Header checksum: 0x%02x", + csum); return -EIO; } @@ -179,12 +183,14 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) /* Read the header of the PIA. */ len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, buf, 3); if (len != 3) { - DRM_ERROR("Couldn't read the Product Info Area header: %d", len); + dev_err(adev->dev, + "Couldn't read the Product Info Area header: %d", len); return len < 0 ? len : -EIO; } if (buf[0] != 1) { - DRM_ERROR("Bad IPMI Product Info Area version: 0x%02x", buf[0]); + dev_err(adev->dev, "Bad IPMI Product Info Area version: 0x%02x", + buf[0]); return -EIO; } @@ -197,14 +203,16 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) len = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addr, pia, size); if (len != size) { kfree(pia); - DRM_ERROR("Couldn't read the Product Info Area: %d", len); + dev_err(adev->dev, "Couldn't read the Product Info Area: %d", + len); return len < 0 ? len : -EIO; } for (csum = 0; size > 0; size--) csum += pia[size - 1]; if (csum) { - DRM_ERROR("Bad Product Info Area checksum: 0x%02x", csum); + dev_err(adev->dev, "Bad Product Info Area checksum: 0x%02x", + csum); kfree(pia); return -EIO; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index e5e33a68d935..d1ccbfcf21fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -317,7 +317,7 @@ static int amdgpu_gem_object_open(struct drm_gem_object *obj, */ if (!vm->is_compute_context || !vm->process_info) return 0; - if (!obj->import_attach || + if (!drm_gem_is_imported(obj) || !dma_buf_is_dynamic(obj->import_attach->dmabuf)) return 0; mutex_lock_nested(&vm->process_info->lock, 1); @@ -1024,7 +1024,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, break; } case AMDGPU_GEM_OP_SET_PLACEMENT: - if (robj->tbo.base.import_attach && + if (drm_gem_is_imported(&robj->tbo.base) && args->value & AMDGPU_GEM_DOMAIN_VRAM) { r = -EINVAL; amdgpu_bo_unreserve(robj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index c5646af055ab..c80c8f543532 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -149,7 +149,7 @@ static bool amdgpu_gfx_is_graphics_multipipe_capable(struct amdgpu_device *adev) static bool amdgpu_gfx_is_compute_multipipe_capable(struct amdgpu_device *adev) { if (amdgpu_compute_multipipe != -1) { - DRM_INFO("amdgpu: forcing compute pipe policy %d\n", + dev_info(adev->dev, "amdgpu: forcing compute pipe policy %d\n", amdgpu_compute_multipipe); return amdgpu_compute_multipipe == 1; } @@ -674,7 +674,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) * generation exposes more than 64 queues. 
If so, the * definition of queue_mask needs updating */ if (WARN_ON(i > (sizeof(queue_mask)*8))) { - DRM_ERROR("Invalid KCQ enabled: %d\n", i); + dev_err(adev->dev, "Invalid KCQ enabled: %d\n", i); break; } @@ -683,15 +683,15 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) amdgpu_device_flush_hdp(adev, NULL); - DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe, - kiq_ring->queue); + dev_info(adev->dev, "kiq ring mec %d pipe %d q %d\n", kiq_ring->me, + kiq_ring->pipe, kiq_ring->queue); spin_lock(&kiq->ring_lock); r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * adev->gfx.num_compute_rings + kiq->pmf->set_resources_size); if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); + dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r); spin_unlock(&kiq->ring_lock); return r; } @@ -712,7 +712,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) - DRM_ERROR("KCQ enable failed\n"); + dev_err(adev->dev, "KCQ enable failed\n"); return r; } @@ -734,7 +734,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_mes_map_legacy_queue(adev, &adev->gfx.gfx_ring[j]); if (r) { - DRM_ERROR("failed to map gfx queue\n"); + dev_err(adev->dev, "failed to map gfx queue\n"); return r; } } @@ -748,7 +748,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size * adev->gfx.num_gfx_rings); if (r) { - DRM_ERROR("Failed to lock KIQ (%d).\n", r); + dev_err(adev->dev, "Failed to lock KIQ (%d).\n", r); spin_unlock(&kiq->ring_lock); return r; } @@ -769,7 +769,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id) r = amdgpu_ring_test_helper(kiq_ring); spin_unlock(&kiq->ring_lock); if (r) - DRM_ERROR("KGQ enable failed\n"); + dev_err(adev->dev, "KGQ enable failed\n"); return r; } @@ -1030,7 +1030,7 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, ih_data.head = *ras_if; - DRM_ERROR("CP ECC ERROR IRQ\n"); + dev_err(adev->dev, "CP ECC ERROR IRQ\n"); amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 6b0fbbb91e57..97b562a79ea8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -38,6 +38,13 @@ #include <drm/drm_drv.h> #include <drm/ttm/ttm_tt.h> +static const u64 four_gb = 0x100000000ULL; + +bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev) +{ + return adev->gmc.xgmi.connected_to_cpu || amdgpu_virt_xgmi_migrate_enabled(adev); +} + /** * amdgpu_gmc_pdb0_alloc - allocate vram for pdb0 * @@ -251,10 +258,20 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc u64 hive_vram_end = mc->xgmi.node_segment_size * mc->xgmi.num_physical_nodes - 1; mc->vram_start = mc->xgmi.node_segment_size * mc->xgmi.physical_node_id; mc->vram_end = mc->vram_start + mc->xgmi.node_segment_size - 1; - mc->gart_start = hive_vram_end + 1; + /* node_segment_size may not be 4GB aligned on SRIOV, align up is needed. */ + mc->gart_start = ALIGN(hive_vram_end + 1, four_gb); mc->gart_end = mc->gart_start + mc->gart_size - 1; - mc->fb_start = hive_vram_start; - mc->fb_end = hive_vram_end; + if (amdgpu_virt_xgmi_migrate_enabled(adev)) { + /* set mc->vram_start to 0 to switch the returned GPU address of + * amdgpu_bo_create_reserved() from FB aperture to GART aperture.
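+ * (presumably so that kernel BO GPU addresses remain stable when a migration changes xgmi.physical_node_id; amdgpu_virt_resume() recomputes vram_base_offset on the same basis)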
+ */ + mc->vram_start = 0; + mc->vram_end = mc->vram_start + mc->mc_vram_size - 1; + mc->visible_vram_size = min(mc->visible_vram_size, mc->real_vram_size); + } else { + mc->fb_start = hive_vram_start; + mc->fb_end = hive_vram_end; + } dev_info(adev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n", mc->mc_vram_size >> 20, mc->vram_start, mc->vram_end, mc->real_vram_size >> 20); @@ -276,7 +293,6 @@ void amdgpu_gmc_sysvm_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc void amdgpu_gmc_gart_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc, enum amdgpu_gart_placement gart_placement) { - const uint64_t four_gb = 0x100000000ULL; u64 size_af, size_bf; /*To avoid the hole, limit the max mc address to AMDGPU_GMC_HOLE_START*/ u64 max_mc_address = min(adev->gmc.mc_mask, AMDGPU_GMC_HOLE_START - 1); @@ -1041,9 +1057,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev) */ u64 vram_size = adev->gmc.xgmi.node_segment_size * adev->gmc.xgmi.num_physical_nodes; u64 pde0_page_size = (1ULL<<adev->gmc.vmid0_page_table_block_size)<<21; - u64 vram_addr = adev->vm_manager.vram_base_offset - - adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; - u64 vram_end = vram_addr + vram_size; + u64 vram_addr, vram_end; u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo); int idx; @@ -1056,6 +1070,11 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev) flags |= AMDGPU_PTE_FRAG((adev->gmc.vmid0_page_table_block_size + 9*1)); flags |= AMDGPU_PDE_PTE_FLAG(adev); + vram_addr = adev->vm_manager.vram_base_offset; + if (!amdgpu_virt_xgmi_migrate_enabled(adev)) + vram_addr -= adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; + vram_end = vram_addr + vram_size; + /* The first n PDE0 entries are used as PTE, * pointing to vram */ @@ -1429,3 +1448,232 @@ bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev) return false; } + +enum amdgpu_memory_partition +amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev) +{ + switch (adev->gmc.num_mem_partitions) { + case 0: + return UNKNOWN_MEMORY_PARTITION_MODE; + case 1: + return AMDGPU_NPS1_PARTITION_MODE; + case 2: + return AMDGPU_NPS2_PARTITION_MODE; + case 4: + return AMDGPU_NPS4_PARTITION_MODE; + case 8: + return AMDGPU_NPS8_PARTITION_MODE; + default: + return AMDGPU_NPS1_PARTITION_MODE; + } +} + +enum amdgpu_memory_partition +amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) +{ + enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE; + + if (adev->nbio.funcs && + adev->nbio.funcs->get_memory_partition_mode) + mode = adev->nbio.funcs->get_memory_partition_mode(adev, + supp_modes); + else + dev_warn(adev->dev, "memory partition mode query is not supported\n"); + + return mode; +} + +enum amdgpu_memory_partition +amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) + return amdgpu_gmc_get_vf_memory_partition(adev); + else + return amdgpu_gmc_get_memory_partition(adev, NULL); +} + +static bool amdgpu_gmc_validate_partition_info(struct amdgpu_device *adev) +{ + enum amdgpu_memory_partition mode; + u32 supp_modes; + bool valid; + + mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes); + + /* Mode detected by hardware not present in supported modes */ + if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && + !(BIT(mode - 1) & supp_modes)) + return false; + + switch (mode) { + case UNKNOWN_MEMORY_PARTITION_MODE: + case AMDGPU_NPS1_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 1); + break; + case 
AMDGPU_NPS2_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 2); + break; + case AMDGPU_NPS4_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 3 || + adev->gmc.num_mem_partitions == 4); + break; + case AMDGPU_NPS8_PARTITION_MODE: + valid = (adev->gmc.num_mem_partitions == 8); + break; + default: + valid = false; + } + + return valid; +} + +static bool amdgpu_gmc_is_node_present(int *node_ids, int num_ids, int nid) +{ + int i; + + /* Check if node with id 'nid' is present in 'node_ids' array */ + for (i = 0; i < num_ids; ++i) + if (node_ids[i] == nid) + return true; + + return false; +} + +static void +amdgpu_gmc_init_acpi_mem_ranges(struct amdgpu_device *adev, + struct amdgpu_mem_partition_info *mem_ranges) +{ + struct amdgpu_numa_info numa_info; + int node_ids[AMDGPU_MAX_MEM_RANGES]; + int num_ranges = 0, ret; + int num_xcc, xcc_id; + uint32_t xcc_mask; + + num_xcc = NUM_XCC(adev->gfx.xcc_mask); + xcc_mask = (1U << num_xcc) - 1; + + for_each_inst(xcc_id, xcc_mask) { + ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info); + if (ret) + continue; + + if (numa_info.nid == NUMA_NO_NODE) { + mem_ranges[0].size = numa_info.size; + mem_ranges[0].numa.node = numa_info.nid; + num_ranges = 1; + break; + } + + if (amdgpu_gmc_is_node_present(node_ids, num_ranges, + numa_info.nid)) + continue; + + node_ids[num_ranges] = numa_info.nid; + mem_ranges[num_ranges].numa.node = numa_info.nid; + mem_ranges[num_ranges].size = numa_info.size; + ++num_ranges; + } + + adev->gmc.num_mem_partitions = num_ranges; +} + +void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev, + struct amdgpu_mem_partition_info *mem_ranges) +{ + enum amdgpu_memory_partition mode; + u32 start_addr = 0, size; + int i, r, l; + + mode = amdgpu_gmc_query_memory_partition(adev); + + switch (mode) { + case UNKNOWN_MEMORY_PARTITION_MODE: + adev->gmc.num_mem_partitions = 0; + break; + case AMDGPU_NPS1_PARTITION_MODE: + adev->gmc.num_mem_partitions = 1; + break; + case AMDGPU_NPS2_PARTITION_MODE: + adev->gmc.num_mem_partitions = 2; + break; + case AMDGPU_NPS4_PARTITION_MODE: + if (adev->flags & AMD_IS_APU) + adev->gmc.num_mem_partitions = 3; + else + adev->gmc.num_mem_partitions = 4; + break; + case AMDGPU_NPS8_PARTITION_MODE: + adev->gmc.num_mem_partitions = 8; + break; + default: + adev->gmc.num_mem_partitions = 1; + break; + } + + /* Use NPS range info, if populated */ + r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges, + &adev->gmc.num_mem_partitions); + if (!r) { + l = 0; + for (i = 1; i < adev->gmc.num_mem_partitions; ++i) { + if (mem_ranges[i].range.lpfn > + mem_ranges[i - 1].range.lpfn) + l = i; + } + + } else { + if (!adev->gmc.num_mem_partitions) { + dev_warn(adev->dev, + "Not able to detect NPS mode, fall back to NPS1\n"); + adev->gmc.num_mem_partitions = 1; + } + /* Fallback to sw based calculation */ + size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT; + size /= adev->gmc.num_mem_partitions; + + for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { + mem_ranges[i].range.fpfn = start_addr; + mem_ranges[i].size = + ((u64)size << AMDGPU_GPU_PAGE_SHIFT); + mem_ranges[i].range.lpfn = start_addr + size - 1; + start_addr += size; + } + + l = adev->gmc.num_mem_partitions - 1; + } + + /* Adjust the last one */ + mem_ranges[l].range.lpfn = + (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1; + mem_ranges[l].size = + adev->gmc.real_vram_size - + ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT); +} + +int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev) +{ + bool valid; + + 
adev->gmc.mem_partitions = kcalloc(AMDGPU_MAX_MEM_RANGES, + sizeof(struct amdgpu_mem_partition_info), + GFP_KERNEL); + if (!adev->gmc.mem_partitions) + return -ENOMEM; + + if (adev->gmc.is_app_apu) + amdgpu_gmc_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions); + else + amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); + + if (amdgpu_sriov_vf(adev)) + valid = true; + else + valid = amdgpu_gmc_validate_partition_info(adev); + if (!valid) { + /* TODO: handle invalid case */ + dev_warn(adev->dev, + "Mem ranges not matching with hardware config\n"); + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 80fa29c26e9e..397c6ccdb903 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -84,6 +84,8 @@ enum amdgpu_memory_partition { #define AMDGPU_GMC_INIT_RESET_NPS BIT(0) +#define AMDGPU_MAX_MEM_RANGES 8 + /* * GMC page fault information */ @@ -394,6 +396,7 @@ static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr) return addr; } +bool amdgpu_gmc_is_pdb0_enabled(struct amdgpu_device *adev); int amdgpu_gmc_pdb0_alloc(struct amdgpu_device *adev); void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, uint64_t *addr, uint64_t *flags); @@ -455,5 +458,13 @@ int amdgpu_gmc_request_memory_partition(struct amdgpu_device *adev, int nps_mode); void amdgpu_gmc_prepare_nps_mode_change(struct amdgpu_device *adev); bool amdgpu_gmc_need_reset_on_init(struct amdgpu_device *adev); - +enum amdgpu_memory_partition +amdgpu_gmc_get_vf_memory_partition(struct amdgpu_device *adev); +enum amdgpu_memory_partition +amdgpu_gmc_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes); +enum amdgpu_memory_partition +amdgpu_gmc_query_memory_partition(struct amdgpu_device *adev); +int amdgpu_gmc_init_mem_ranges(struct amdgpu_device *adev); +void amdgpu_gmc_init_sw_mem_ranges(struct amdgpu_device *adev, + struct amdgpu_mem_partition_info *mem_ranges); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 8179d0814db9..57101d24422f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -24,7 +24,6 @@ * Alex Deucher */ -#include <linux/export.h> #include <linux/pci.h> #include <drm/drm_edid.h> diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 802743efa3b3..7d9bcb72e8dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -128,6 +128,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; struct dma_fence *tmp = NULL; + struct amdgpu_fence *af; bool need_ctx_switch; struct amdgpu_vm *vm; uint64_t fence_ctx; @@ -138,7 +139,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, int vmid = AMDGPU_JOB_GET_VMID(job); bool need_pipe_sync = false; unsigned int cond_exec; - unsigned int i; int r = 0; @@ -154,6 +154,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, csa_va = job->csa_va; gds_va = job->gds_va; init_shadow = job->init_shadow; + af = &job->hw_fence; + /* Save the context of the job for reset handling. + * The driver needs this so it can skip the ring + * contents for guilty contexts. + */ + af->context = job->base.s_fence ? 
job->base.s_fence->finished.context : 0; } else { vm = NULL; fence_ctx = 0; @@ -161,6 +167,7 @@ csa_va = 0; gds_va = 0; init_shadow = false; + af = NULL; } if (!ring->sched.ready) { @@ -282,7 +289,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr); } - r = amdgpu_fence_emit(ring, f, job, fence_flags); + r = amdgpu_fence_emit(ring, f, af, fence_flags); if (r) { dev_err(adev->dev, "failed to emit fence (%d)\n", r); if (job && job->vmid) @@ -304,8 +311,17 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, ring->hw_prio == AMDGPU_GFX_PIPE_PRIO_HIGH) ring->funcs->emit_wave_limit(ring, false); + /* Save the wptr associated with this fence. + * This must be last for resets to work properly + * as we need to save the wptr associated with this + * fence so we know which ring contents to back up + * after we reset the queue. + */ + amdgpu_fence_save_wptr(*f); + amdgpu_ring_ib_end(ring); amdgpu_ring_commit(ring); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c index 30f16968b578..a6419246e9c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c @@ -218,7 +218,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih) restart_ih: count = AMDGPU_IH_MAX_NUM_IVS; - DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); + dev_dbg(adev->dev, "%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr); /* Order reading of wptr vs. reading of IH ring data */ rmb(); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c new file mode 100644 index 000000000000..99e1cf4fc955 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.c @@ -0,0 +1,96 @@ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_ip.h" +static int8_t amdgpu_logical_to_dev_inst(struct amdgpu_device *adev, enum amd_hw_ip_block_type block, int8_t inst) { int8_t dev_inst; switch (block) { case GC_HWIP: case SDMA0_HWIP: /* Both JPEG and VCN, as JPEG is only an alias of VCN */ case VCN_HWIP: dev_inst = adev->ip_map.dev_inst[block][inst]; break; default: /* For the rest of the IPs, no lookup required. * Assume 'logical instance == physical instance' for all configs.
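+ * Only GC, SDMA and VCN are remapped; amdgpu_ip_map_init() below builds their tables from gfx.xcc_mask, sdma.sdma_mask and vcn.inst_mask.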
*/ + dev_inst = inst; + break; + } + + return dev_inst; +} + +static uint32_t amdgpu_logical_to_dev_mask(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + uint32_t mask) +{ + uint32_t dev_mask = 0; + int8_t log_inst, dev_inst; + + while (mask) { + log_inst = ffs(mask) - 1; + dev_inst = amdgpu_logical_to_dev_inst(adev, block, log_inst); + dev_mask |= (1 << dev_inst); + mask &= ~(1 << log_inst); + } + + return dev_mask; +} + +static void amdgpu_populate_ip_map(struct amdgpu_device *adev, + enum amd_hw_ip_block_type ip_block, + uint32_t inst_mask) +{ + int l = 0, i; + + while (inst_mask) { + i = ffs(inst_mask) - 1; + adev->ip_map.dev_inst[ip_block][l++] = i; + inst_mask &= ~(1 << i); + } + for (; l < HWIP_MAX_INSTANCE; l++) + adev->ip_map.dev_inst[ip_block][l] = -1; +} + +void amdgpu_ip_map_init(struct amdgpu_device *adev) +{ + u32 ip_map[][2] = { + { GC_HWIP, adev->gfx.xcc_mask }, + { SDMA0_HWIP, adev->sdma.sdma_mask }, + { VCN_HWIP, adev->vcn.inst_mask }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(ip_map); ++i) + amdgpu_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]); + + adev->ip_map.logical_to_dev_inst = amdgpu_logical_to_dev_inst; + adev->ip_map.logical_to_dev_mask = amdgpu_logical_to_dev_mask; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h new file mode 100644 index 000000000000..2490fd322aec --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ip.h @@ -0,0 +1,29 @@ +/* + * Copyright 2025 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
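The ffs()-driven walk in amdgpu_logical_to_dev_mask() above translates each set bit of a logical instance mask through the per-IP lookup table. A self-contained userspace sketch of the same remapping, with an invented table standing in for adev->ip_map.dev_inst[block][]:

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int remap_mask(unsigned int mask, const signed char *map)
{
	unsigned int dev_mask = 0;

	while (mask) {
		int log_inst = ffs(mask) - 1;	/* lowest set logical bit */
		int dev_inst = map[log_inst];	/* table lookup */

		dev_mask |= 1u << dev_inst;
		mask &= ~(1u << log_inst);	/* clear it and continue */
	}
	return dev_mask;
}

int main(void)
{
	/* Invented mapping: logical 0->2, 1->0, 2->3, 3->1 */
	const signed char map[] = { 2, 0, 3, 1 };

	/* Logical mask 0x5 (instances 0 and 2) becomes device mask 0xc */
	printf("0x%x\n", remap_mask(0x5, map));
	return 0;
}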
+ * + */ + +#ifndef __AMDGPU_IP_H__ +#define __AMDGPU_IP_H__ + +void amdgpu_ip_map_init(struct amdgpu_device *adev); + +#endif /* __AMDGPU_IP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 13c60cac4261..8112ffc85995 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -142,8 +142,9 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev) r = src->funcs->set(adev, src, k, AMDGPU_IRQ_STATE_DISABLE); if (r) - DRM_ERROR("error disabling interrupt (%d)\n", - r); + dev_err(adev->dev, + "error disabling interrupt (%d)\n", + r); } } } @@ -242,7 +243,7 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev) return true; } -static void amdgpu_restore_msix(struct amdgpu_device *adev) +void amdgpu_restore_msix(struct amdgpu_device *adev) { u16 ctrl; @@ -315,7 +316,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev) adev->irq.irq = irq; adev_to_drm(adev)->max_vblank_count = 0x00ffffff; - DRM_DEBUG("amdgpu: irq initialized.\n"); + dev_dbg(adev->dev, "amdgpu: irq initialized.\n"); return 0; free_vectors: @@ -461,10 +462,10 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, src_id = entry.src_id; if (client_id >= AMDGPU_IRQ_CLIENTID_MAX) { - DRM_DEBUG("Invalid client_id in IV: %d\n", client_id); + dev_dbg(adev->dev, "Invalid client_id in IV: %d\n", client_id); } else if (src_id >= AMDGPU_MAX_IRQ_SRC_ID) { - DRM_DEBUG("Invalid src_id in IV: %d\n", src_id); + dev_dbg(adev->dev, "Invalid src_id in IV: %d\n", src_id); } else if (((client_id == AMDGPU_IRQ_CLIENTID_LEGACY) || (client_id == SOC15_IH_CLIENTID_ISP)) && @@ -472,18 +473,21 @@ void amdgpu_irq_dispatch(struct amdgpu_device *adev, generic_handle_domain_irq(adev->irq.domain, src_id); } else if (!adev->irq.client[client_id].sources) { - DRM_DEBUG("Unregistered interrupt client_id: %d src_id: %d\n", - client_id, src_id); + dev_dbg(adev->dev, + "Unregistered interrupt client_id: %d src_id: %d\n", + client_id, src_id); } else if ((src = adev->irq.client[client_id].sources[src_id])) { r = src->funcs->process(adev, src, &entry); if (r < 0) - DRM_ERROR("error processing interrupt (%d)\n", r); + dev_err(adev->dev, "error processing interrupt (%d)\n", + r); else if (r) handled = true; } else { - DRM_DEBUG("Unregistered interrupt src_id: %d of client_id:%d\n", + dev_dbg(adev->dev, + "Unregistered interrupt src_id: %d of client_id:%d\n", src_id, client_id); } @@ -620,7 +624,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned int type) { /* When the threshold is reached,the interrupt source may not be enabled.return -EINVAL */ - if (amdgpu_ras_is_rma(adev)) + if (amdgpu_ras_is_rma(adev) && !amdgpu_irq_enabled(adev, src, type)) return -EINVAL; if (!adev->irq.installed) @@ -732,7 +736,7 @@ int amdgpu_irq_add_domain(struct amdgpu_device *adev) adev->irq.domain = irq_domain_create_linear(NULL, AMDGPU_MAX_IRQ_SRC_ID, &amdgpu_hw_irqdomain_ops, adev); if (!adev->irq.domain) { - DRM_ERROR("GPU irq add domain failed\n"); + dev_err(adev->dev, "GPU irq add domain failed\n"); return -ENODEV; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h index 04c0b4fa17a4..9f0417456abd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h @@ -146,5 +146,6 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev); int amdgpu_irq_add_domain(struct amdgpu_device *adev); void amdgpu_irq_remove_domain(struct amdgpu_device *adev); unsigned 
amdgpu_irq_create_mapping(struct amdgpu_device *adev, unsigned src_id); +void amdgpu_restore_msix(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c index 43fc941dfa57..9cddbf50442a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.c @@ -33,6 +33,8 @@ #include "isp_v4_1_0.h" #include "isp_v4_1_1.h" +#define ISP_MC_ADDR_ALIGN (1024 * 32) + /** * isp_hw_init - start and test isp block * @@ -141,6 +143,179 @@ static int isp_set_powergating_state(struct amdgpu_ip_block *ip_block, return 0; } +static int is_valid_isp_device(struct device *isp_parent, struct device *amdgpu_dev) +{ + if (isp_parent != amdgpu_dev) + return -EINVAL; + + return 0; +} + +/** + * isp_user_buffer_alloc - create user buffer object (BO) for isp + * + * @dev: isp device handle + * @dmabuf: DMABUF handle for isp buffer allocated in system memory + * @buf_obj: GPU buffer object handle to initialize + * @buf_addr: GPU addr of the pinned BO to initialize + * + * Imports isp DMABUF to allocate and pin a user BO for isp internal use. It does + * GART alloc to generate GPU addr for BO to make it accessible through the + * GART aperture for ISP HW. + * + * This function is exported to allow the V4L2 isp device external to drm device + * to create and access the isp user BO. + * + * Returns: + * 0 on success, negative error code otherwise. + */ +int isp_user_buffer_alloc(struct device *dev, void *dmabuf, + void **buf_obj, u64 *buf_addr) +{ + struct platform_device *ispdev = to_platform_device(dev); + const struct isp_platform_data *isp_pdata; + struct amdgpu_device *adev; + struct mfd_cell *mfd_cell; + struct amdgpu_bo *bo; + u64 gpu_addr; + int ret; + + if (WARN_ON(!ispdev)) + return -ENODEV; + + if (WARN_ON(!buf_obj)) + return -EINVAL; + + if (WARN_ON(!buf_addr)) + return -EINVAL; + + mfd_cell = &ispdev->mfd_cell[0]; + if (!mfd_cell) + return -ENODEV; + + isp_pdata = mfd_cell->platform_data; + adev = isp_pdata->adev; + + ret = is_valid_isp_device(ispdev->dev.parent, adev->dev); + if (ret) + return ret; + + ret = amdgpu_bo_create_isp_user(adev, dmabuf, + AMDGPU_GEM_DOMAIN_GTT, &bo, &gpu_addr); + if (ret) { + drm_err(&adev->ddev, "failed to alloc gart user buffer (%d)", ret); + return ret; + } + + *buf_obj = (void *)bo; + *buf_addr = gpu_addr; + + return 0; +} +EXPORT_SYMBOL(isp_user_buffer_alloc); + +/** + * isp_user_buffer_free - free isp user buffer object (BO) + * + * @buf_obj: amdgpu isp user BO to free + * + * unpin and unref BO for isp internal use. + * + * This function is exported to allow the V4L2 isp device + * external to drm device to free the isp user BO. + */ +void isp_user_buffer_free(void *buf_obj) +{ + amdgpu_bo_free_isp_user(buf_obj); +} +EXPORT_SYMBOL(isp_user_buffer_free); + +/** + * isp_kernel_buffer_alloc - create kernel buffer object (BO) for isp + * + * @dev: isp device handle + * @size: size for the new BO + * @buf_obj: GPU BO handle to initialize + * @gpu_addr: GPU addr of the pinned BO + * @cpu_addr: CPU address mapping of BO + * + * Allocates and pins a kernel BO for internal isp firmware use. + * + * This function is exported to allow the V4L2 isp device + * external to drm device to create and access the kernel BO. + * + * Returns: + * 0 on success, negative error code otherwise. 
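A hedged usage sketch for the isp_user_buffer_alloc()/isp_user_buffer_free() pair exported above, as the out-of-DRM V4L2 isp driver might call it; the caller context (dev, dbuf) and the firmware-programming step are assumptions:

static int example_isp_map_dmabuf(struct device *dev, struct dma_buf *dbuf)
{
	void *bo;
	u64 gpu_addr;
	int ret;

	/* Import and pin the dma-buf so it is reachable through the
	 * GART aperture for the ISP hardware. */
	ret = isp_user_buffer_alloc(dev, dbuf, &bo, &gpu_addr);
	if (ret)
		return ret;

	/* ... hand gpu_addr to the ISP firmware and run the job ... */

	isp_user_buffer_free(bo);
	return 0;
}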
+ */ +int isp_kernel_buffer_alloc(struct device *dev, u64 size, + void **buf_obj, u64 *gpu_addr, void **cpu_addr) +{ + struct platform_device *ispdev = to_platform_device(dev); + struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj; + const struct isp_platform_data *isp_pdata; + struct amdgpu_device *adev; + struct mfd_cell *mfd_cell; + int ret; + + if (WARN_ON(!ispdev)) + return -ENODEV; + + if (WARN_ON(!buf_obj)) + return -EINVAL; + + if (WARN_ON(!gpu_addr)) + return -EINVAL; + + if (WARN_ON(!cpu_addr)) + return -EINVAL; + + mfd_cell = &ispdev->mfd_cell[0]; + if (!mfd_cell) + return -ENODEV; + + isp_pdata = mfd_cell->platform_data; + adev = isp_pdata->adev; + + ret = is_valid_isp_device(ispdev->dev.parent, adev->dev); + if (ret) + return ret; + + ret = amdgpu_bo_create_kernel(adev, + size, + ISP_MC_ADDR_ALIGN, + AMDGPU_GEM_DOMAIN_GTT, + bo, + gpu_addr, + cpu_addr); + if (!cpu_addr || ret) { + drm_err(&adev->ddev, "failed to alloc gart kernel buffer (%d)", ret); + return ret; + } + + return 0; +} +EXPORT_SYMBOL(isp_kernel_buffer_alloc); + +/** + * isp_kernel_buffer_free - free isp kernel buffer object (BO) + * + * @buf_obj: amdgpu isp user BO to free + * @gpu_addr: GPU addr of isp kernel BO + * @cpu_addr: CPU addr of isp kernel BO + * + * unmaps and unpin a isp kernel BO. + * + * This function is exported to allow the V4L2 isp device + * external to drm device to free the kernel BO. + */ +void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr) +{ + struct amdgpu_bo **bo = (struct amdgpu_bo **)buf_obj; + + amdgpu_bo_free_kernel(bo, gpu_addr, cpu_addr); +} +EXPORT_SYMBOL(isp_kernel_buffer_free); + static const struct amd_ip_funcs isp_ip_funcs = { .name = "isp_ip", .early_init = isp_early_init, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h index 4f3b7b5d9c1f..d6f4ffa4c97c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_isp.h @@ -28,16 +28,13 @@ #ifndef __AMDGPU_ISP_H__ #define __AMDGPU_ISP_H__ +#include <drm/amd/isp.h> +#include <linux/pm_domain.h> + #define ISP_REGS_OFFSET_END 0x629A4 struct amdgpu_isp; -struct isp_platform_data { - void *adev; - u32 asic_type; - resource_size_t base_rmmio_size; -}; - struct isp_funcs { int (*hw_init)(struct amdgpu_isp *isp); int (*hw_fini)(struct amdgpu_isp *isp); @@ -54,6 +51,7 @@ struct amdgpu_isp { struct isp_platform_data *isp_pdata; unsigned int harvest_config; const struct firmware *fw; + struct generic_pm_domain ispgpd; }; extern const struct amdgpu_ip_block_version isp_v4_1_0_ip_block; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 1e24590ae144..9b1c55115921 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -90,10 +90,9 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched); struct amdgpu_job *job = to_amdgpu_job(s_job); struct drm_wedge_task_info *info = NULL; - struct amdgpu_task_info *ti; + struct amdgpu_task_info *ti = NULL; struct amdgpu_device *adev = ring->adev; - int idx; - int r; + int idx, r; if (!drm_dev_enter(adev_to_drm(adev), &idx)) { dev_info(adev->dev, "%s - device unplugged skipping recovery on scheduler:%s", @@ -113,6 +112,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) amdgpu_job_core_dump(adev, job); if (amdgpu_gpu_recovery && + amdgpu_ring_is_reset_type_supported(ring, 
AMDGPU_RESET_TYPE_SOFT_RESET) && amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { dev_err(adev->dev, "ring %s timeout, but soft recovered\n", s_job->sched->name); @@ -132,47 +132,24 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) /* attempt a per ring reset */ if (unlikely(adev->debug_disable_gpu_ring_reset)) { dev_err(adev->dev, "Ring reset disabled by debug mask\n"); - } else if (amdgpu_gpu_recovery && ring->funcs->reset) { - bool is_guilty; - - dev_err(adev->dev, "Starting %s ring reset\n", s_job->sched->name); - /* stop the scheduler, but don't mess with the - * bad job yet because if ring reset fails - * we'll fall back to full GPU reset. - */ - drm_sched_wqueue_stop(&ring->sched); - - /* for engine resets, we need to reset the engine, - * but individual queues may be unaffected. - * check here to make sure the accounting is correct. - */ - if (ring->funcs->is_guilty) - is_guilty = ring->funcs->is_guilty(ring); - else - is_guilty = true; - - if (is_guilty) - dma_fence_set_error(&s_job->s_fence->finished, -ETIME); - - r = amdgpu_ring_reset(ring, job->vmid); + } else if (amdgpu_gpu_recovery && + amdgpu_ring_is_reset_type_supported(ring, AMDGPU_RESET_TYPE_PER_QUEUE) && + ring->funcs->reset) { + dev_err(adev->dev, "Starting %s ring reset\n", + s_job->sched->name); + r = amdgpu_ring_reset(ring, job->vmid, &job->hw_fence); if (!r) { - if (amdgpu_ring_sched_ready(ring)) - drm_sched_stop(&ring->sched, s_job); - if (is_guilty) { - atomic_inc(&ring->adev->gpu_reset_counter); - amdgpu_fence_driver_force_completion(ring); - } - if (amdgpu_ring_sched_ready(ring)) - drm_sched_start(&ring->sched, 0); - dev_err(adev->dev, "Ring %s reset succeeded\n", ring->sched.name); - drm_dev_wedged_event(adev_to_drm(adev), DRM_WEDGE_RECOVERY_NONE, info); + atomic_inc(&ring->adev->gpu_reset_counter); + dev_err(adev->dev, "Ring %s reset succeeded\n", + ring->sched.name); + drm_dev_wedged_event(adev_to_drm(adev), + DRM_WEDGE_RECOVERY_NONE, info); goto exit; } - dev_err(adev->dev, "Ring %s reset failure\n", ring->sched.name); + dev_err(adev->dev, "Ring %s reset failed\n", ring->sched.name); } - dma_fence_set_error(&s_job->s_fence->finished, -ETIME); - amdgpu_vm_put_task_info(ti); + dma_fence_set_error(&s_job->s_fence->finished, -ETIME); if (amdgpu_device_should_recover_gpu(ring->adev)) { struct amdgpu_reset_context reset_context; @@ -199,8 +176,9 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) } exit: + amdgpu_vm_put_task_info(ti); drm_dev_exit(idx); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, @@ -275,8 +253,8 @@ void amdgpu_job_free_resources(struct amdgpu_job *job) /* Check if any fences where initialized */ if (job->base.s_fence && job->base.s_fence->finished.ops) f = &job->base.s_fence->finished; - else if (job->hw_fence.ops) - f = &job->hw_fence; + else if (job->hw_fence.base.ops) + f = &job->hw_fence.base; else f = NULL; @@ -293,10 +271,10 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job) amdgpu_sync_free(&job->explicit_sync); /* only put the hw fence if has embedded fence */ - if (!job->hw_fence.ops) + if (!job->hw_fence.base.ops) kfree(job); else - dma_fence_put(&job->hw_fence); + dma_fence_put(&job->hw_fence.base); } void amdgpu_job_set_gang_leader(struct amdgpu_job *job, @@ -325,10 +303,10 @@ void amdgpu_job_free(struct amdgpu_job *job) if (job->gang_submit != &job->base.s_fence->scheduled) 
dma_fence_put(job->gang_submit); - if (!job->hw_fence.ops) + if (!job->hw_fence.base.ops) kfree(job); else - dma_fence_put(&job->hw_fence); + dma_fence_put(&job->hw_fence.base); } struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job) @@ -387,13 +365,6 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job, dev_err(ring->adev->dev, "Error getting VM ID (%d)\n", r); goto error; } - /* - * The VM structure might be released after the VMID is - * assigned, we had multiple problems with people trying to use - * the VM pointer so better set it to NULL. - */ - if (!fence) - job->vm = NULL; return fence; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 529045d60412..2f302266662b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -48,7 +48,7 @@ struct amdgpu_job { struct drm_sched_job base; struct amdgpu_vm *vm; struct amdgpu_sync explicit_sync; - struct dma_fence hw_fence; + struct amdgpu_fence hw_fence; struct dma_fence *gang_submit; uint32_t preamble_status; uint32_t preemption_status; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index dda29132dfb2..82d58ac7afb0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -463,7 +463,8 @@ int amdgpu_jpeg_reg_dump_init(struct amdgpu_device *adev, adev->jpeg.ip_dump = kcalloc(adev->jpeg.num_jpeg_inst * count, sizeof(uint32_t), GFP_KERNEL); if (!adev->jpeg.ip_dump) { - DRM_ERROR("Failed to allocate memory for JPEG IP Dump\n"); + dev_err(adev->dev, + "Failed to allocate memory for JPEG IP Dump\n"); return -ENOMEM; } adev->jpeg.reg_list = reg; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index d2ce7d86dbc8..8a76960803c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -91,7 +91,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) if (adev->rmmio == NULL) return; - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD)) + if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_UNLOAD)) DRM_WARN("smart shift update failed\n"); amdgpu_acpi_fini(adev); @@ -161,7 +161,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) if (acpi_status) dev_dbg(dev->dev, "Error during ACPI methods call\n"); - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD)) + if (amdgpu_acpi_smart_shift_update(adev, AMDGPU_SS_DRV_LOAD)) DRM_WARN("smart shift update failed\n"); out: @@ -399,6 +399,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, uint32_t ib_size_alignment = 0; enum amd_ip_block_type type; unsigned int num_rings = 0; + uint32_t num_slots = 0; unsigned int i, j; if (info->query_hw_ip.ip_instance >= AMDGPU_HW_IP_INSTANCE_MAX_COUNT) @@ -411,6 +412,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->gfx.gfx_ring[i].sched.ready && !adev->gfx.gfx_ring[i].no_user_submission) ++num_rings; + + if (!adev->gfx.disable_uq) { + for (i = 0; i < AMDGPU_MES_MAX_GFX_PIPES; i++) + num_slots += hweight32(adev->mes.gfx_hqd_mask[i]); + } + ib_start_alignment = 32; ib_size_alignment = 32; break; @@ -420,6 +427,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->gfx.compute_ring[i].sched.ready && !adev->gfx.compute_ring[i].no_user_submission) ++num_rings; + + if (!adev->sdma.disable_uq) { + for (i = 0; i < AMDGPU_MES_MAX_COMPUTE_PIPES; i++) + num_slots += 
hweight32(adev->mes.compute_hqd_mask[i]); + } + ib_start_alignment = 32; ib_size_alignment = 32; break; @@ -429,6 +442,12 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, if (adev->sdma.instance[i].ring.sched.ready && !adev->sdma.instance[i].ring.no_user_submission) ++num_rings; + + if (!adev->gfx.disable_uq) { + for (i = 0; i < AMDGPU_MES_MAX_SDMA_PIPES; i++) + num_slots += hweight32(adev->mes.sdma_hqd_mask[i]); + } + ib_start_alignment = 256; ib_size_alignment = 4; break; @@ -570,6 +589,7 @@ static int amdgpu_hw_ip_info(struct amdgpu_device *adev, } result->capabilities_flags = 0; result->available_rings = (1 << num_rings) - 1; + result->userq_num_slots = num_slots; result->ib_start_alignment = ib_start_alignment; result->ib_size_alignment = ib_size_alignment; return 0; @@ -1395,6 +1415,8 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) if (r) goto error_pasid; + amdgpu_debugfs_vm_init(file_priv); + r = amdgpu_vm_init(adev, &fpriv->vm, fpriv->xcp_id); if (r) goto error_pasid; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 6fa9fa11c8f3..135598502c8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -47,7 +47,7 @@ static int amdgpu_mes_doorbell_init(struct amdgpu_device *adev) /* Bitmap for dynamic allocation of kernel doorbells */ mes->doorbell_bitmap = bitmap_zalloc(PAGE_SIZE / sizeof(u32), GFP_KERNEL); if (!mes->doorbell_bitmap) { - DRM_ERROR("Failed to allocate MES doorbell bitmap\n"); + dev_err(adev->dev, "Failed to allocate MES doorbell bitmap\n"); return -ENOMEM; } @@ -256,7 +256,7 @@ int amdgpu_mes_suspend(struct amdgpu_device *adev) r = adev->mes.funcs->suspend_gang(&adev->mes, &input); amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to suspend all gangs"); + dev_err(adev->dev, "failed to suspend all gangs"); return r; } @@ -280,7 +280,7 @@ int amdgpu_mes_resume(struct amdgpu_device *adev) r = adev->mes.funcs->resume_gang(&adev->mes, &input); amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to resume all gangs"); + dev_err(adev->dev, "failed to resume all gangs"); return r; } @@ -304,7 +304,7 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev, r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input); amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to map legacy queue\n"); + dev_err(adev->dev, "failed to map legacy queue\n"); return r; } @@ -329,7 +329,7 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev, r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input); amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to unmap legacy queue\n"); + dev_err(adev->dev, "failed to unmap legacy queue\n"); return r; } @@ -361,7 +361,7 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input); amdgpu_mes_unlock(&adev->mes); if (r) - DRM_ERROR("failed to reset legacy queue\n"); + dev_err(adev->dev, "failed to reset legacy queue\n"); return r; } @@ -469,7 +469,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, int r; if (!adev->mes.funcs->misc_op) { - DRM_ERROR("mes set shader debugger is not supported!\n"); + dev_err(adev->dev, + "mes set shader debugger is not supported!\n"); return -EINVAL; } @@ -493,7 +494,7 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) - DRM_ERROR("failed to set_shader_debugger\n"); + 
dev_err(adev->dev, "failed to set_shader_debugger\n"); amdgpu_mes_unlock(&adev->mes); @@ -507,7 +508,8 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev, int r; if (!adev->mes.funcs->misc_op) { - DRM_ERROR("mes flush shader debugger is not supported!\n"); + dev_err(adev->dev, + "mes flush shader debugger is not supported!\n"); return -EINVAL; } @@ -519,7 +521,7 @@ int amdgpu_mes_flush_shader_debugger(struct amdgpu_device *adev, r = adev->mes.funcs->misc_op(&adev->mes, &op_input); if (r) - DRM_ERROR("failed to set_shader_debugger\n"); + dev_err(adev->dev, "failed to set_shader_debugger\n"); amdgpu_mes_unlock(&adev->mes); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index d085687a47ea..a974265837f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -53,6 +53,16 @@ u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev) return 0; } +bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev) || !adev->asic_funcs || + !adev->asic_funcs->get_pcie_replay_count || + (!adev->nbio.funcs || !adev->nbio.funcs->get_pcie_replay_count)) + return false; + + return true; +} + int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 79c2f807b9fe..b528de6a01f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -119,4 +119,6 @@ int amdgpu_nbio_ras_sw_init(struct amdgpu_device *adev); int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); u64 amdgpu_nbio_get_pcie_replay_count(struct amdgpu_device *adev); +bool amdgpu_nbio_is_replay_cnt_supported(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 73403744331a..122a88294883 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -32,6 +32,7 @@ #include <linux/list.h> #include <linux/slab.h> #include <linux/dma-buf.h> +#include <linux/export.h> #include <drm/drm_drv.h> #include <drm/amdgpu_drm.h> @@ -62,7 +63,7 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo) amdgpu_bo_kunmap(bo); - if (bo->tbo.base.import_attach) + if (drm_gem_is_imported(&bo->tbo.base)) drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg); drm_gem_object_release(&bo->tbo.base); amdgpu_bo_unref(&bo->parent); @@ -351,7 +352,6 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, return 0; } -EXPORT_SYMBOL(amdgpu_bo_create_kernel); /** * amdgpu_bo_create_isp_user - create user BO for isp @@ -420,7 +420,6 @@ error_unreserve: return r; } -EXPORT_SYMBOL(amdgpu_bo_create_isp_user); /** * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location @@ -524,7 +523,6 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, if (cpu_addr) *cpu_addr = NULL; } -EXPORT_SYMBOL(amdgpu_bo_free_kernel); /** * amdgpu_bo_free_isp_user - free BO for isp use @@ -547,7 +545,6 @@ void amdgpu_bo_free_isp_user(struct amdgpu_bo *bo) } amdgpu_bo_unref(&bo); } -EXPORT_SYMBOL(amdgpu_bo_free_isp_user); /* Validate bo size is bit bigger than the request domain */ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev, @@ -939,7 +936,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) domain = bo->preferred_domains & domain; /* A 
shared bo cannot be migrated to VRAM */ - if (bo->tbo.base.import_attach) { + if (drm_gem_is_imported(&bo->tbo.base)) { if (domain & AMDGPU_GEM_DOMAIN_GTT) domain = AMDGPU_GEM_DOMAIN_GTT; else @@ -967,7 +964,7 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain) */ domain = amdgpu_bo_get_preferred_domain(adev, domain); - if (bo->tbo.base.import_attach) + if (drm_gem_is_imported(&bo->tbo.base)) dma_buf_pin(bo->tbo.base.import_attach); /* force to pin into visible video ram */ @@ -1018,7 +1015,7 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo) if (bo->tbo.pin_count) return; - if (bo->tbo.base.import_attach) + if (drm_gem_is_imported(&bo->tbo.base)) dma_buf_unpin(bo->tbo.base.import_attach); if (bo->tbo.resource->mem_type == TTM_PL_VRAM) { @@ -1263,7 +1260,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, amdgpu_bo_kunmap(abo); - if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach && + if (abo->tbo.base.dma_buf && !drm_gem_is_imported(&abo->tbo.base) && old_mem && old_mem->mem_type != TTM_PL_SYSTEM) dma_buf_move_notify(abo->tbo.base.dma_buf); @@ -1473,6 +1470,26 @@ u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo) } /** + * amdgpu_bo_fb_aper_addr - return FB aperture GPU offset of the VRAM bo + * @bo: amdgpu VRAM buffer object for which we query the offset + * + * Returns: + * current FB aperture GPU offset of the object. + */ +u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo) +{ + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + uint64_t offset, fb_base; + + WARN_ON_ONCE(bo->tbo.resource->mem_type != TTM_PL_VRAM); + + fb_base = adev->gmc.fb_start; + fb_base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; + offset = (bo->tbo.resource->start << PAGE_SHIFT) + fb_base; + return amdgpu_gmc_sign_extend(offset); +} + +/** * amdgpu_bo_gpu_offset_no_check - return GPU offset of bo * @bo: amdgpu object for which we query the offset * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 375448627f7b..c316920f3450 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -304,6 +304,7 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv, bool intr); int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr); u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo); +u64 amdgpu_bo_fb_aper_addr(struct amdgpu_bo *bo); u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo); uint32_t amdgpu_bo_mem_stats_placement(struct amdgpu_bo *bo); uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index e6f0b035e20b..23484317a5fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -252,6 +252,7 @@ static int psp_early_init(struct amdgpu_ip_block *ip_block) break; case IP_VERSION(14, 0, 2): case IP_VERSION(14, 0, 3): + adev->psp.sup_ifwi_up = !amdgpu_sriov_vf(adev); psp_v14_0_set_psp_funcs(psp); break; case IP_VERSION(14, 0, 5): @@ -574,9 +575,11 @@ static int psp_sw_fini(struct amdgpu_ip_block *ip_block) return 0; } -int psp_wait_for(struct psp_context *psp, uint32_t reg_index, - uint32_t reg_val, uint32_t mask, bool check_changed) +int psp_wait_for(struct psp_context *psp, uint32_t reg_index, uint32_t reg_val, + uint32_t mask, uint32_t flags) { + bool check_changed = flags & PSP_WAITREG_CHANGED; + bool verbose = !(flags & PSP_WAITREG_NOVERBOSE); uint32_t val; int i; struct 
amdgpu_device *adev = psp->adev; @@ -596,6 +599,11 @@ int psp_wait_for(struct psp_context *psp, uint32_t reg_index, udelay(1); } + if (verbose) + dev_err(adev->dev, + "psp reg (0x%x) wait timed out, mask: %x, read: %x exp: %x", + reg_index, mask, val, reg_val); + return -ETIME; } @@ -654,6 +662,10 @@ static const char *psp_gfx_cmd_name(enum psp_gfx_cmd_id cmd_id) return "BOOT_CFG"; case GFX_CMD_ID_CONFIG_SQ_PERFMON: return "CONFIG_SQ_PERFMON"; + case GFX_CMD_ID_FB_FW_RESERV_ADDR: + return "FB_FW_RESERV_ADDR"; + case GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR: + return "FB_FW_RESERV_EXT_ADDR"; default: return "UNKNOWN CMD"; } @@ -871,6 +883,8 @@ static int psp_tmr_init(struct psp_context *psp) &psp->tmr_bo, &psp->tmr_mc_addr, pptr); } + if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && psp->tmr_bo) + psp->tmr_mc_addr = amdgpu_bo_fb_aper_addr(psp->tmr_bo); return ret; } @@ -984,6 +998,106 @@ int psp_get_fw_attestation_records_addr(struct psp_context *psp, return ret; } +static int psp_get_fw_reservation_info(struct psp_context *psp, + uint32_t cmd_id, + uint64_t *addr, + uint32_t *size) +{ + int ret; + uint32_t status; + struct psp_gfx_cmd_resp *cmd; + + cmd = acquire_psp_cmd_buf(psp); + + cmd->cmd_id = cmd_id; + + ret = psp_cmd_submit_buf(psp, NULL, cmd, + psp->fence_buf_mc_addr); + if (ret) { + release_psp_cmd_buf(psp); + return ret; + } + + status = cmd->resp.status; + if (status == PSP_ERR_UNKNOWN_COMMAND) { + release_psp_cmd_buf(psp); + *addr = 0; + *size = 0; + return 0; + } + + *addr = (uint64_t)cmd->resp.uresp.fw_reserve_info.reserve_base_address_hi << 32 | + cmd->resp.uresp.fw_reserve_info.reserve_base_address_lo; + *size = cmd->resp.uresp.fw_reserve_info.reserve_size; + + release_psp_cmd_buf(psp); + + return 0; +} + +int psp_update_fw_reservation(struct psp_context *psp) +{ + int ret; + uint64_t reserv_addr, reserv_addr_ext; + uint32_t reserv_size, reserv_size_ext, mp0_ip_ver; + struct amdgpu_device *adev = psp->adev; + + mp0_ip_ver = amdgpu_ip_version(adev, MP0_HWIP, 0); + + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + switch (mp0_ip_ver) { + case IP_VERSION(14, 0, 2): + if (adev->psp.sos.fw_version < 0x3b0e0d) + return 0; + break; + + case IP_VERSION(14, 0, 3): + if (adev->psp.sos.fw_version < 0x3a0e14) + return 0; + break; + + default: + return 0; + } + + ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_ADDR, &reserv_addr, &reserv_size); + if (ret) + return ret; + ret = psp_get_fw_reservation_info(psp, GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR, &reserv_addr_ext, &reserv_size_ext); + if (ret) + return ret; + + if (reserv_addr != adev->gmc.real_vram_size - reserv_size) { + dev_warn(adev->dev, "reserve fw region is not valid!\n"); + return 0; + } + + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); + + reserv_size = roundup(reserv_size, SZ_1M); + + ret = amdgpu_bo_create_kernel_at(adev, reserv_addr, reserv_size, &adev->mman.fw_reserved_memory, NULL); + if (ret) { + dev_err(adev->dev, "reserve fw region failed(%d)!\n", ret); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); + return ret; + } + + reserv_size_ext = roundup(reserv_size_ext, SZ_1M); + + ret = amdgpu_bo_create_kernel_at(adev, reserv_addr_ext, reserv_size_ext, + &adev->mman.fw_reserved_memory_extend, NULL); + if (ret) { + dev_err(adev->dev, "reserve extend fw region failed(%d)!\n", ret); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, NULL); + return ret; + } + + return 0; +} + static int psp_boot_config_get(struct amdgpu_device *adev, uint32_t *boot_cfg) { struct 
psp_context *psp = &adev->psp; @@ -1270,6 +1384,11 @@ int psp_ta_load(struct psp_context *psp, struct ta_context *context) psp_copy_fw(psp, context->bin_desc.start_addr, context->bin_desc.size_bytes); + if (amdgpu_virt_xgmi_migrate_enabled(psp->adev) && + context->mem_context.shared_bo) + context->mem_context.shared_mc_addr = + amdgpu_bo_fb_aper_addr(context->mem_context.shared_bo); + psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context); ret = psp_cmd_submit_buf(psp, NULL, cmd, @@ -2337,11 +2456,27 @@ bool amdgpu_psp_tos_reload_needed(struct amdgpu_device *adev) return false; } +static void psp_update_gpu_addresses(struct amdgpu_device *adev) +{ + struct psp_context *psp = &adev->psp; + + if (psp->cmd_buf_bo && psp->cmd_buf_mem) { + psp->fw_pri_mc_addr = amdgpu_bo_fb_aper_addr(psp->fw_pri_bo); + psp->fence_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->fence_buf_bo); + psp->cmd_buf_mc_addr = amdgpu_bo_fb_aper_addr(psp->cmd_buf_bo); + } + if (adev->firmware.rbuf && psp->km_ring.ring_mem) + psp->km_ring.ring_mem_mc_addr = amdgpu_bo_fb_aper_addr(adev->firmware.rbuf); +} + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; int ret; + if (amdgpu_virt_xgmi_migrate_enabled(adev)) + psp_update_gpu_addresses(adev); + if (!amdgpu_sriov_vf(adev)) { if ((is_psp_fw_valid(psp->kdb)) && (psp->funcs->bootloader_load_kdb != NULL)) { @@ -2440,6 +2575,14 @@ static int psp_hw_start(struct psp_context *psp) return ret; } + if (!amdgpu_in_reset(adev) && !adev->in_suspend) { + ret = psp_update_fw_reservation(psp); + if (ret) { + dev_err(adev->dev, "update fw reservation failed!\n"); + return ret; + } + } + if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) goto skip_pin_bo; @@ -3522,8 +3665,12 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) uint8_t *ucode_array_start_addr; int err = 0; - err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_sos.bin", chip_name); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_sos_kicker.bin", chip_name); + else + err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_sos.bin", chip_name); if (err) goto out; @@ -3799,8 +3946,12 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) struct amdgpu_device *adev = psp->adev; int err; - err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_ta.bin", chip_name); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_ta_kicker.bin", chip_name); + else + err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_ta.bin", chip_name); if (err) return err; @@ -4117,8 +4268,8 @@ rel_buf: static const struct bin_attribute psp_vbflash_bin_attr = { .attr = {.name = "psp_vbflash", .mode = 0660}, .size = 0, - .write_new = amdgpu_psp_vbflash_write, - .read_new = amdgpu_psp_vbflash_read, + .write = amdgpu_psp_vbflash_write, + .read = amdgpu_psp_vbflash_read, }; /** @@ -4181,7 +4332,7 @@ static umode_t amdgpu_bin_flash_attr_is_visible(struct kobject *kobj, const struct attribute_group amdgpu_flash_attr_group = { .attrs = flash_attrs, - .bin_attrs_new = bin_flash_attrs, + .bin_attrs = bin_flash_attrs, .is_bin_visible = amdgpu_bin_flash_attr_is_visible, .is_visible = amdgpu_flash_attr_is_visible, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h 
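A hedged sketch of a caller on the reworked psp_wait_for() interface; the PSP_WAITREG_* flags and MBOX_TOS_* values are defined in the amdgpu_psp.h hunk that follows, and passing flags == 0 keeps the old check_changed == false, verbose-on-timeout behaviour:

static int example_wait_tos_ready(struct psp_context *psp, u32 reg_index)
{
	/* Poll until the TOS signals readiness; suppress the timeout
	 * error print for callers that expect to retry. */
	return psp_wait_for(psp, reg_index, MBOX_TOS_READY_FLAG,
			    MBOX_TOS_READY_MASK, PSP_WAITREG_NOVERBOSE);
}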
index 428adc7f741d..237b624aa51c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -51,6 +51,17 @@ #define C2PMSG_CMD_SPI_GET_ROM_IMAGE_ADDR_HI 0x10 #define C2PMSG_CMD_SPI_GET_FLASH_IMAGE 0x11 +/* Command register bit 31 set to indicate readiness */ +#define MBOX_TOS_READY_FLAG (GFX_FLAG_RESPONSE) +#define MBOX_TOS_READY_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK) + +/* Values to check for a successful GFX_CMD response wait. Check against + * both status bits and response state - helps to detect a command failure + * or other unexpected cases like a device drop reading all 0xFFs + */ +#define MBOX_TOS_RESP_FLAG (GFX_FLAG_RESPONSE) +#define MBOX_TOS_RESP_MASK (GFX_CMD_RESPONSE_MASK | GFX_CMD_STATUS_MASK) + extern const struct attribute_group amdgpu_flash_attr_group; enum psp_shared_mem_size { @@ -123,6 +134,9 @@ enum psp_reg_prog_id { PSP_REG_LAST }; +#define PSP_WAITREG_CHANGED BIT(0) /* check if the value has changed */ +#define PSP_WAITREG_NOVERBOSE BIT(1) /* No error verbose */ + struct psp_funcs { int (*init_microcode)(struct psp_context *psp); int (*wait_for_bootloader)(struct psp_context *psp); @@ -521,8 +535,8 @@ extern const struct amdgpu_ip_block_version psp_v13_0_ip_block; extern const struct amdgpu_ip_block_version psp_v13_0_4_ip_block; extern const struct amdgpu_ip_block_version psp_v14_0_ip_block; -extern int psp_wait_for(struct psp_context *psp, uint32_t reg_index, - uint32_t field_val, uint32_t mask, bool check_changed); +int psp_wait_for(struct psp_context *psp, uint32_t reg_index, + uint32_t field_val, uint32_t mask, uint32_t flags); extern int psp_wait_for_spirom_update(struct psp_context *psp, uint32_t reg_index, uint32_t field_val, uint32_t mask, uint32_t msec_timeout); @@ -588,7 +602,7 @@ int psp_init_cap_microcode(struct psp_context *psp, const char *chip_name); int psp_get_fw_attestation_records_addr(struct psp_context *psp, uint64_t *output_ptr); - +int psp_update_fw_reservation(struct psp_context *psp); int psp_load_fw_list(struct psp_context *psp, struct amdgpu_firmware_info **ucode_list, int ucode_count); void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index de0944947eaf..540817e296da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1107,6 +1107,9 @@ static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, err_info->de_count, blk_name); } } else { + if (adev->debug_disable_ce_logs) + return; + for_each_ras_error(err_node, err_data) { err_info = &err_node->err_info; mcm_info = &err_info->mcm_info; @@ -2124,7 +2127,7 @@ static int amdgpu_ras_fs_init(struct amdgpu_device *adev) con->badpages_attr = bin_attr_gpu_vram_bad_pages; sysfs_bin_attr_init(&con->badpages_attr); bin_attrs[0] = &con->badpages_attr; - group.bin_attrs_new = bin_attrs; + group.bin_attrs = bin_attrs; } r = sysfs_create_group(&adev->dev->kobj, &group); @@ -2854,6 +2857,13 @@ static int __amdgpu_ras_convert_rec_array_from_rom(struct amdgpu_device *adev, if (amdgpu_umc_pages_in_a_row(adev, err_data, bps[0].retired_page << AMDGPU_GPU_PAGE_SHIFT)) return -EINVAL; + for (i = 0; i < adev->umc.retire_unit; i++) { + err_data->err_addr[i].address = bps[0].address; + err_data->err_addr[i].mem_channel = bps[0].mem_channel; + err_data->err_addr[i].bank = bps[0].bank; + err_data->err_addr[i].err_type = bps[0].err_type; + err_data->err_addr[i].mcumc_id = 
bps[0].mcumc_id; + } } else { if (amdgpu_ras_mca2pa_by_idx(adev, &bps[0], err_data)) return -EINVAL; @@ -2885,6 +2895,7 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev, struct eeprom_table_record *bps, struct ras_err_data *err_data, enum amdgpu_memory_partition nps) { + int i = 0; enum amdgpu_memory_partition save_nps; save_nps = (bps->retired_page >> UMC_NPS_SHIFT) & UMC_NPS_MASK; @@ -2894,6 +2905,13 @@ static int __amdgpu_ras_convert_rec_from_rom(struct amdgpu_device *adev, if (amdgpu_umc_pages_in_a_row(adev, err_data, bps->retired_page << AMDGPU_GPU_PAGE_SHIFT)) return -EINVAL; + for (i = 0; i < adev->umc.retire_unit; i++) { + err_data->err_addr[i].address = bps->address; + err_data->err_addr[i].mem_channel = bps->mem_channel; + err_data->err_addr[i].bank = bps->bank; + err_data->err_addr[i].err_type = bps->err_type; + err_data->err_addr[i].mcumc_id = bps->mcumc_id; + } } else { if (bps->address) { if (amdgpu_ras_mca2pa_by_idx(adev, bps, err_data)) @@ -3003,6 +3021,15 @@ int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, return 0; } + if (!con->eeprom_control.is_eeprom_valid) { + dev_warn(adev->dev, + "Failed to save EEPROM table data because of EEPROM data corruption!"); + if (new_cnt) + *new_cnt = 0; + + return 0; + } + mutex_lock(&con->recovery_lock); control = &con->eeprom_control; data = con->eh_data; @@ -3294,7 +3321,6 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev, uint64_t de_queried_count; uint32_t new_detect_count, total_detect_count; uint32_t need_query_count = poison_creation_count; - bool query_data_timeout = false; enum ras_event_type type = RAS_EVENT_TYPE_POISON_CREATION; memset(&info, 0, sizeof(info)); @@ -3323,21 +3349,13 @@ static int amdgpu_ras_poison_creation_handler(struct amdgpu_device *adev, timeout = MAX_UMC_POISON_POLLING_TIME_ASYNC; if (timeout) { - if (!--timeout) { - query_data_timeout = true; + if (!--timeout) break; - } msleep(1); } } } while (total_detect_count < need_query_count); - if (query_data_timeout) { - dev_warn(adev->dev, "Can't find deferred error! 
count: %u\n", - (need_query_count - total_detect_count)); - return -ENOENT; - } - if (total_detect_count) schedule_delayed_work(&ras->page_retirement_dwork, 0); @@ -3488,8 +3506,7 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) control = &con->eeprom_control; ret = amdgpu_ras_eeprom_init(control); - if (ret) - return ret; + control->is_eeprom_valid = !ret; if (!adev->umc.ras || !adev->umc.ras->convert_ras_err_addr) control->ras_num_pa_recs = control->ras_num_recs; @@ -3498,10 +3515,12 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) adev->umc.ras->get_retire_flip_bits) adev->umc.ras->get_retire_flip_bits(adev); - if (control->ras_num_recs) { + if (control->ras_num_recs && control->is_eeprom_valid) { ret = amdgpu_ras_load_bad_pages(adev); - if (ret) - return ret; + if (ret) { + control->is_eeprom_valid = false; + return 0; + } amdgpu_dpm_send_hbm_bad_pages_num( adev, control->ras_num_bad_pages); @@ -3520,7 +3539,7 @@ int amdgpu_ras_init_badpage_info(struct amdgpu_device *adev) dev_warn(adev->dev, "Failed to format RAS EEPROM data in V3 version!\n"); } - return ret; + return 0; } int amdgpu_ras_recovery_init(struct amdgpu_device *adev, bool init_bp_info) @@ -4414,8 +4433,10 @@ void amdgpu_ras_clear_err_state(struct amdgpu_device *adev) struct amdgpu_ras *ras; ras = amdgpu_ras_get_context(adev); - if (ras) + if (ras) { ras->ras_err_state = 0; + ras->gpu_reset_flags = 0; + } } void amdgpu_ras_set_err_poison(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 2c58e09e56f9..9bda9ad13f88 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -277,10 +277,11 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control) up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("Failed to write EEPROM table header:%d", res); + dev_err(adev->dev, "Failed to write EEPROM table header:%d", + res); } else if (res < RAS_TABLE_HEADER_SIZE) { - DRM_ERROR("Short write:%d out of %d\n", - res, RAS_TABLE_HEADER_SIZE); + dev_err(adev->dev, "Short write:%d out of %d\n", res, + RAS_TABLE_HEADER_SIZE); res = -EIO; } else { res = 0; @@ -323,7 +324,8 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control) buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); if (!buf) { - DRM_ERROR("Failed to alloc buf to write table ras info\n"); + dev_err(adev->dev, + "Failed to alloc buf to write table ras info\n"); return -ENOMEM; } @@ -338,10 +340,11 @@ static int __write_table_ras_info(struct amdgpu_ras_eeprom_control *control) up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("Failed to write EEPROM table ras info:%d", res); + dev_err(adev->dev, "Failed to write EEPROM table ras info:%d", + res); } else if (res < RAS_TABLE_V2_1_INFO_SIZE) { - DRM_ERROR("Short write:%d out of %d\n", - res, RAS_TABLE_V2_1_INFO_SIZE); + dev_err(adev->dev, "Short write:%d out of %d\n", res, + RAS_TABLE_V2_1_INFO_SIZE); res = -EIO; } else { res = 0; @@ -476,6 +479,8 @@ int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) control->ras_num_recs = 0; control->ras_num_bad_pages = 0; + control->ras_num_mca_recs = 0; + control->ras_num_pa_recs = 0; control->ras_fri = 0; amdgpu_dpm_send_hbm_bad_pages_num(adev, control->ras_num_bad_pages); @@ -607,13 +612,13 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control, buf, buf_size); up_read(&adev->reset_domain->sem); if (res 
< 0) { - DRM_ERROR("Writing %d EEPROM table records error:%d", - num, res); + dev_err(adev->dev, "Writing %d EEPROM table records error:%d", + num, res); } else if (res < buf_size) { /* Short write, return error. */ - DRM_ERROR("Wrote %d records out of %d", - res / RAS_TABLE_RECORD_SIZE, num); + dev_err(adev->dev, "Wrote %d records out of %d", + res / RAS_TABLE_RECORD_SIZE, num); res = -EIO; } else { res = 0; @@ -761,18 +766,17 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) dev_warn(adev->dev, "Saved bad pages %d reaches threshold value %d\n", control->ras_num_bad_pages, ras->bad_page_cnt_threshold); - control->tbl_hdr.header = RAS_TABLE_HDR_BAD; - if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) { - control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD; - control->tbl_rai.health_percent = 0; - } - if ((amdgpu_bad_page_threshold != -1) && - (amdgpu_bad_page_threshold != -2)) + (amdgpu_bad_page_threshold != -2)) { + control->tbl_hdr.header = RAS_TABLE_HDR_BAD; + if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) { + control->tbl_rai.rma_status = GPU_RETIRED__ECC_REACH_THRESHOLD; + control->tbl_rai.health_percent = 0; + } ras->is_rma = true; - - /* ignore the -ENOTSUPP return value */ - amdgpu_dpm_send_rma_reason(adev); + /* ignore the -ENOTSUPP return value */ + amdgpu_dpm_send_rma_reason(adev); + } } if (control->tbl_hdr.version >= RAS_TABLE_VER_V2_1) @@ -787,8 +791,9 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) buf_size = control->ras_num_recs * RAS_TABLE_RECORD_SIZE; buf = kcalloc(control->ras_num_recs, RAS_TABLE_RECORD_SIZE, GFP_KERNEL); if (!buf) { - DRM_ERROR("allocating memory for table of size %d bytes failed\n", - control->tbl_hdr.tbl_size); + dev_err(adev->dev, + "allocating memory for table of size %d bytes failed\n", + control->tbl_hdr.tbl_size); res = -ENOMEM; goto Out; } @@ -800,12 +805,11 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) buf, buf_size); up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("EEPROM failed reading records:%d\n", - res); + dev_err(adev->dev, "EEPROM failed reading records:%d\n", res); goto Out; } else if (res < buf_size) { - DRM_ERROR("EEPROM read %d out of %d bytes\n", - res, buf_size); + dev_err(adev->dev, "EEPROM read %d out of %d bytes\n", res, + buf_size); res = -EIO; goto Out; } @@ -866,11 +870,12 @@ int amdgpu_ras_eeprom_append(struct amdgpu_ras_eeprom_control *control, return 0; if (num == 0) { - DRM_ERROR("will not append 0 records\n"); + dev_err(adev->dev, "will not append 0 records\n"); return -EINVAL; } else if (num > control->ras_max_record_count) { - DRM_ERROR("cannot append %d records than the size of table %d\n", - num, control->ras_max_record_count); + dev_err(adev->dev, + "cannot append %d records than the size of table %d\n", + num, control->ras_max_record_count); return -EINVAL; } @@ -924,13 +929,13 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, buf, buf_size); up_read(&adev->reset_domain->sem); if (res < 0) { - DRM_ERROR("Reading %d EEPROM table records error:%d", - num, res); + dev_err(adev->dev, "Reading %d EEPROM table records error:%d", + num, res); } else if (res < buf_size) { /* Short read, return error. 
*/ - DRM_ERROR("Read %d records out of %d", - res / RAS_TABLE_RECORD_SIZE, num); + dev_err(adev->dev, "Read %d records out of %d", + res / RAS_TABLE_RECORD_SIZE, num); res = -EIO; } else { res = 0; @@ -964,11 +969,11 @@ int amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, return 0; if (num == 0) { - DRM_ERROR("will not read 0 records\n"); + dev_err(adev->dev, "will not read 0 records\n"); return -EINVAL; } else if (num > control->ras_num_recs) { - DRM_ERROR("too many records to read:%d available:%d\n", - num, control->ras_num_recs); + dev_err(adev->dev, "too many records to read:%d available:%d\n", + num, control->ras_num_recs); return -EINVAL; } @@ -1300,7 +1305,8 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control buf = kzalloc(buf_size, GFP_KERNEL); if (!buf) { - DRM_ERROR("Out of memory checking RAS table checksum.\n"); + dev_err(adev->dev, + "Out of memory checking RAS table checksum.\n"); return -ENOMEM; } @@ -1309,7 +1315,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control control->ras_header_offset, buf, buf_size); if (res < buf_size) { - DRM_ERROR("Partial read for checksum, res:%d\n", res); + dev_err(adev->dev, "Partial read for checksum, res:%d\n", res); /* On partial reads, return -EIO. */ if (res >= 0) @@ -1334,7 +1340,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control) buf = kzalloc(RAS_TABLE_V2_1_INFO_SIZE, GFP_KERNEL); if (!buf) { - DRM_ERROR("Failed to alloc buf to read EEPROM table ras info\n"); + dev_err(adev->dev, + "Failed to alloc buf to read EEPROM table ras info\n"); return -ENOMEM; } @@ -1346,7 +1353,8 @@ static int __read_table_ras_info(struct amdgpu_ras_eeprom_control *control) control->i2c_address + control->ras_info_offset, buf, RAS_TABLE_V2_1_INFO_SIZE); if (res < RAS_TABLE_V2_1_INFO_SIZE) { - DRM_ERROR("Failed to read EEPROM table ras info, res:%d", res); + dev_err(adev->dev, + "Failed to read EEPROM table ras info, res:%d", res); res = res >= 0 ? -EIO : res; goto Out; } @@ -1387,7 +1395,8 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) control->i2c_address + control->ras_header_offset, buf, RAS_TABLE_HEADER_SIZE); if (res < RAS_TABLE_HEADER_SIZE) { - DRM_ERROR("Failed to read EEPROM table header, res:%d", res); + dev_err(adev->dev, "Failed to read EEPROM table header, res:%d", + res); return res >= 0 ? -EIO : res; } @@ -1452,8 +1461,9 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) control->ras_num_mca_recs * adev->umc.retire_unit; if (hdr->header == RAS_TABLE_HDR_VAL) { - DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records", - control->ras_num_bad_pages); + dev_dbg(adev->dev, + "Found existing EEPROM table with %d records", + control->ras_num_bad_pages); if (hdr->version >= RAS_TABLE_VER_V2_1) { res = __read_table_ras_info(control); @@ -1521,3 +1531,31 @@ int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control) return res < 0 ? 
res : 0; } + +void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + struct amdgpu_ras_eeprom_control *control; + int res; + + if (!__is_ras_eeprom_supported(adev) || !ras) + return; + control = &ras->eeprom_control; + if (!control->is_eeprom_valid) + return; + res = __verify_ras_table_checksum(control); + if (res) { + dev_warn(adev->dev, + "RAS table checksum mismatch or error:%d, trying to recover\n", + res); + if (!amdgpu_ras_eeprom_reset_table(control)) + if (!amdgpu_ras_save_bad_pages(adev, NULL)) + if (!__verify_ras_table_checksum(control)) { + dev_info(adev->dev, "RAS table recovery succeeded\n"); + return; + } + dev_err(adev->dev, "RAS table recovery failed\n"); + control->is_eeprom_valid = false; + } +}
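The hunk above does not show where the new hook is invoked, so the placement below is an assumption; a plausible caller validates the table after a reset or resume, before trusting its bad-page records:

static void example_ras_post_reset(struct amdgpu_device *adev)
{
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

	/* Rewrites the table from in-memory records when the checksum
	 * no longer matches; flags the control block invalid when even
	 * that fails, so later saves are skipped. */
	amdgpu_ras_eeprom_check_and_recover(adev);

	if (ras && !ras->eeprom_control.is_eeprom_valid)
		dev_warn(adev->dev, "RAS EEPROM table unusable after failed recovery\n");
}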
\ No newline at end of file diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index ec6d7ea37ad0..ebfca4cb5688 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -114,6 +114,8 @@ struct amdgpu_ras_eeprom_control { /* Record channel info which occurred bad pages */ u32 bad_channel_bitmap; + + bool is_eeprom_valid; }; /* @@ -159,6 +161,8 @@ void amdgpu_ras_debugfs_set_ret_size(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_check(struct amdgpu_ras_eeprom_control *control); +void amdgpu_ras_eeprom_check_and_recover(struct amdgpu_device *adev); + extern const struct file_operations amdgpu_ras_debugfs_eeprom_size_ops; extern const struct file_operations amdgpu_ras_debugfs_eeprom_table_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 426834806fbf..6379bb25bf5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -99,6 +99,29 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw) return 0; } +/** + * amdgpu_ring_alloc_reemit - allocate space on the ring buffer for reemit + * + * @ring: amdgpu_ring structure holding ring information + * @ndw: number of dwords to allocate in the ring buffer + * + * Allocate @ndw dwords in the ring buffer (all asics). + * doesn't check the max_dw limit as we may be reemitting + * several submissions. + */ +static void amdgpu_ring_alloc_reemit(struct amdgpu_ring *ring, unsigned int ndw) +{ + /* Align requested size with padding so unlock_commit can + * pad safely */ + ndw = (ndw + ring->funcs->align_mask) & ~ring->funcs->align_mask; + + ring->count_dw = ndw; + ring->wptr_old = ring->wptr; + + if (ring->funcs->begin_use) + ring->funcs->begin_use(ring); +} + /** amdgpu_ring_insert_nop - insert NOP packets * * @ring: amdgpu_ring structure holding ring information @@ -333,6 +356,12 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, /* Initialize cached_rptr to 0 */ ring->cached_rptr = 0; + if (!ring->ring_backup) { + ring->ring_backup = kvzalloc(ring->ring_size, GFP_KERNEL); + if (!ring->ring_backup) + return -ENOMEM; + } + /* Allocate ring buffer */ if (ring->ring_obj == NULL) { r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE, @@ -342,6 +371,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, (void **)&ring->ring); if (r) { dev_err(adev->dev, "(%d) ring create failed\n", r); + kvfree(ring->ring_backup); return r; } amdgpu_ring_clear_ring(ring); @@ -385,6 +415,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) amdgpu_bo_free_kernel(&ring->ring_obj, &ring->gpu_addr, (void **)&ring->ring); + kvfree(ring->ring_backup); + ring->ring_backup = NULL; dma_fence_put(ring->vmid_wait); ring->vmid_wait = NULL; @@ -427,6 +459,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, { unsigned long flags; ktime_t deadline; + bool ret; if (unlikely(ring->adev->debug_disable_soft_recovery)) return false; @@ -441,12 +474,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, dma_fence_set_error(fence, -ENODATA); spin_unlock_irqrestore(fence->lock, flags); - atomic_inc(&ring->adev->gpu_reset_counter); while (!dma_fence_is_signaled(fence) && ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) ring->funcs->soft_recovery(ring, vmid); - return dma_fence_is_signaled(fence); + ret = 
dma_fence_is_signaled(fence); + /* increment the counter only if soft reset worked */ + if (ret) + atomic_inc(&ring->adev->gpu_reset_counter); + + return ret; } /* @@ -682,6 +719,7 @@ static void amdgpu_ring_to_mqd_prop(struct amdgpu_ring *ring, prop->eop_gpu_addr = ring->eop_gpu_addr; prop->use_doorbell = ring->use_doorbell; prop->doorbell_index = ring->doorbell_index; + prop->kernel_queue = true; /* map_queues packet doesn't need to activate the queue, * so only kiq needs to set this field. @@ -753,3 +791,69 @@ bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring) return true; } + +void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence) +{ + /* Stop the scheduler to prevent anybody else from touching the ring buffer. */ + drm_sched_wqueue_stop(&ring->sched); + /* back up the non-guilty commands */ + amdgpu_ring_backup_unprocessed_commands(ring, guilty_fence); +} + +int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence) +{ + unsigned int i; + int r; + + /* verify that the ring is functional */ + r = amdgpu_ring_test_ring(ring); + if (r) + return r; + + /* signal the fence of the bad job */ + if (guilty_fence) + amdgpu_fence_driver_guilty_force_completion(guilty_fence); + /* Re-emit the non-guilty commands */ + if (ring->ring_backup_entries_to_copy) { + amdgpu_ring_alloc_reemit(ring, ring->ring_backup_entries_to_copy); + for (i = 0; i < ring->ring_backup_entries_to_copy; i++) + amdgpu_ring_write(ring, ring->ring_backup[i]); + amdgpu_ring_commit(ring); + } + /* Start the scheduler again */ + drm_sched_wqueue_start(&ring->sched); + return 0; +} + +bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring, + u32 reset_type) +{ + switch (ring->funcs->type) { + case AMDGPU_RING_TYPE_GFX: + if (ring->adev->gfx.gfx_supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_COMPUTE: + if (ring->adev->gfx.compute_supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_SDMA: + if (ring->adev->sdma.supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_VCN_DEC: + case AMDGPU_RING_TYPE_VCN_ENC: + if (ring->adev->vcn.supported_reset & reset_type) + return true; + break; + case AMDGPU_RING_TYPE_VCN_JPEG: + if (ring->adev->jpeg.supported_reset & reset_type) + return true; + break; + default: + break; + } + return false; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h index b95b47110769..7670f5d82b9e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h @@ -118,6 +118,7 @@ struct amdgpu_fence_driver { /* sync_seq is protected by ring emission lock */ uint32_t sync_seq; atomic_t last_seq; + u64 signalled_wptr; bool initialized; struct amdgpu_irq_src *irq_src; unsigned irq_type; @@ -127,11 +128,35 @@ struct amdgpu_fence_driver { struct dma_fence **fences; }; +/* + * Fences mark an event in the GPU's pipeline and are used + * for GPU/CPU synchronization. When the fence is written, + * it is expected that all buffers associated with that fence + * are no longer in use by the associated ring on the GPU and + * that the relevant GPU caches have been flushed. + */ + +struct amdgpu_fence { + struct dma_fence base; + + /* RB, DMA, etc. 
*/ + struct amdgpu_ring *ring; + ktime_t start_timestamp; + + /* wptr for the fence for resets */ + u64 wptr; + /* fence context for resets */ + u64 context; + uint32_t seq; +}; + extern const struct drm_sched_backend_ops amdgpu_sched_ops; void amdgpu_fence_driver_clear_job_fences(struct amdgpu_ring *ring); void amdgpu_fence_driver_set_error(struct amdgpu_ring *ring, int error); void amdgpu_fence_driver_force_completion(struct amdgpu_ring *ring); +void amdgpu_fence_driver_guilty_force_completion(struct amdgpu_fence *fence); +void amdgpu_fence_save_wptr(struct dma_fence *fence); int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring); int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring, @@ -141,8 +166,8 @@ void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev); void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev); int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev); void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev); -int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **fence, struct amdgpu_job *job, - unsigned flags); +int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, + struct amdgpu_fence *af, unsigned int flags); int amdgpu_fence_emit_polling(struct amdgpu_ring *ring, uint32_t *s, uint32_t timeout); bool amdgpu_fence_process(struct amdgpu_ring *ring); @@ -252,9 +277,9 @@ struct amdgpu_ring_funcs { void (*patch_cntl)(struct amdgpu_ring *ring, unsigned offset); void (*patch_ce)(struct amdgpu_ring *ring, unsigned offset); void (*patch_de)(struct amdgpu_ring *ring, unsigned offset); - int (*reset)(struct amdgpu_ring *ring, unsigned int vmid); + int (*reset)(struct amdgpu_ring *ring, unsigned int vmid, + struct amdgpu_fence *timedout_fence); void (*emit_cleaner_shader)(struct amdgpu_ring *ring); - bool (*is_guilty)(struct amdgpu_ring *ring); }; /** @@ -268,6 +293,9 @@ struct amdgpu_ring { struct amdgpu_bo *ring_obj; uint32_t *ring; + /* backups for resets */ + uint32_t *ring_backup; + unsigned int ring_backup_entries_to_copy; unsigned rptr_offs; u64 rptr_gpu_addr; volatile u32 *rptr_cpu_addr; @@ -409,7 +437,7 @@ struct amdgpu_ring { #define amdgpu_ring_patch_cntl(r, o) ((r)->funcs->patch_cntl((r), (o))) #define amdgpu_ring_patch_ce(r, o) ((r)->funcs->patch_ce((r), (o))) #define amdgpu_ring_patch_de(r, o) ((r)->funcs->patch_de((r), (o))) -#define amdgpu_ring_reset(r, v) (r)->funcs->reset((r), (v)) +#define amdgpu_ring_reset(r, v, f) (r)->funcs->reset((r), (v), (f)) unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type); int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw); @@ -534,4 +562,12 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); bool amdgpu_ring_sched_ready(struct amdgpu_ring *ring); +void amdgpu_ring_backup_unprocessed_commands(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence); +void amdgpu_ring_reset_helper_begin(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence); +int amdgpu_ring_reset_helper_end(struct amdgpu_ring *ring, + struct amdgpu_fence *guilty_fence); +bool amdgpu_ring_is_reset_type_supported(struct amdgpu_ring *ring, + u32 reset_type); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 6716ac281c49..8b8a04138711 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -534,91 +534,75 @@ bool amdgpu_sdma_is_shared_inv_eng(struct 
amdgpu_device *adev, struct amdgpu_rin static int amdgpu_sdma_soft_reset(struct amdgpu_device *adev, u32 instance_id) { struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id]; - int r = -EOPNOTSUPP; - switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { - case IP_VERSION(4, 4, 2): - case IP_VERSION(4, 4, 4): - case IP_VERSION(4, 4, 5): - /* For SDMA 4.x, use the existing DPM interface for backward compatibility */ - r = amdgpu_dpm_reset_sdma(adev, 1 << instance_id); - break; - case IP_VERSION(5, 0, 0): - case IP_VERSION(5, 0, 1): - case IP_VERSION(5, 0, 2): - case IP_VERSION(5, 0, 5): - case IP_VERSION(5, 2, 0): - case IP_VERSION(5, 2, 2): - case IP_VERSION(5, 2, 4): - case IP_VERSION(5, 2, 5): - case IP_VERSION(5, 2, 6): - case IP_VERSION(5, 2, 3): - case IP_VERSION(5, 2, 1): - case IP_VERSION(5, 2, 7): - if (sdma_instance->funcs->soft_reset_kernel_queue) - r = sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id); - break; - default: - break; - } + if (sdma_instance->funcs->soft_reset_kernel_queue) + return sdma_instance->funcs->soft_reset_kernel_queue(adev, instance_id); - return r; + return -EOPNOTSUPP; } /** * amdgpu_sdma_reset_engine - Reset a specific SDMA engine * @adev: Pointer to the AMDGPU device - * @instance_id: ID of the SDMA engine instance to reset + * @instance_id: Logical ID of the SDMA engine instance to reset + * @caller_handles_kernel_queues: Skip kernel queue processing. Caller + * will handle it. * * Returns: 0 on success, or a negative error code on failure. */ -int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id) +int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id, + bool caller_handles_kernel_queues) { int ret = 0; struct amdgpu_sdma_instance *sdma_instance = &adev->sdma.instance[instance_id]; struct amdgpu_ring *gfx_ring = &sdma_instance->ring; struct amdgpu_ring *page_ring = &sdma_instance->page; - bool gfx_sched_stopped = false, page_sched_stopped = false; mutex_lock(&sdma_instance->engine_reset_mutex); - /* Stop the scheduler's work queue for the GFX and page rings if they are running. - * This ensures that no new tasks are submitted to the queues while - * the reset is in progress. - */ - if (!amdgpu_ring_sched_ready(gfx_ring)) { + + if (!caller_handles_kernel_queues) { + /* Stop the scheduler's work queue for the GFX and page rings if they are running. + * This ensures that no new tasks are submitted to the queues while + * the reset is in progress. 
+ */ drm_sched_wqueue_stop(&gfx_ring->sched); - gfx_sched_stopped = true; - } - if (adev->sdma.has_page_queue && !amdgpu_ring_sched_ready(page_ring)) { - drm_sched_wqueue_stop(&page_ring->sched); - page_sched_stopped = true; + if (adev->sdma.has_page_queue) + drm_sched_wqueue_stop(&page_ring->sched); } - if (sdma_instance->funcs->stop_kernel_queue) + if (sdma_instance->funcs->stop_kernel_queue) { sdma_instance->funcs->stop_kernel_queue(gfx_ring); + if (adev->sdma.has_page_queue) + sdma_instance->funcs->stop_kernel_queue(page_ring); + } /* Perform the SDMA reset for the specified instance */ ret = amdgpu_sdma_soft_reset(adev, instance_id); if (ret) { - dev_err(adev->dev, "Failed to reset SDMA instance %u\n", instance_id); + dev_err(adev->dev, "Failed to reset SDMA logical instance %u\n", instance_id); goto exit; } - if (sdma_instance->funcs->start_kernel_queue) + if (sdma_instance->funcs->start_kernel_queue) { sdma_instance->funcs->start_kernel_queue(gfx_ring); + if (adev->sdma.has_page_queue) + sdma_instance->funcs->start_kernel_queue(page_ring); + } exit: - /* Restart the scheduler's work queue for the GFX and page rings - * if they were stopped by this function. This allows new tasks - * to be submitted to the queues after the reset is complete. - */ - if (!ret) { - if (gfx_sched_stopped && amdgpu_ring_sched_ready(gfx_ring)) { + if (!caller_handles_kernel_queues) { + /* Restart the scheduler's work queue for the GFX and page rings + * if they were stopped by this function. This allows new tasks + * to be submitted to the queues after the reset is complete. + */ + if (!ret) { + amdgpu_fence_driver_force_completion(gfx_ring); drm_sched_wqueue_start(&gfx_ring->sched); - } - if (page_sched_stopped && amdgpu_ring_sched_ready(page_ring)) { - drm_sched_wqueue_start(&page_ring->sched); + if (adev->sdma.has_page_queue) { + amdgpu_fence_driver_force_completion(page_ring); + drm_sched_wqueue_start(&page_ring->sched); + } } } mutex_unlock(&sdma_instance->engine_reset_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index e5f8951bbb6f..34311f32be4c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -172,7 +172,8 @@ struct amdgpu_buffer_funcs { uint32_t byte_count); }; -int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id); +int amdgpu_sdma_reset_engine(struct amdgpu_device *adev, uint32_t instance_id, + bool caller_handles_kernel_queues); #define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t)) #define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 9c5df35f05b7..27ab4e754b2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -299,7 +299,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, struct amdgpu_bo *abo_src, *abo_dst; if (!adev->mman.buffer_funcs_enabled) { - DRM_ERROR("Trying to move memory with ring turned off.\n"); + dev_err(adev->dev, + "Trying to move memory with ring turned off.\n"); return -EINVAL; } @@ -934,7 +935,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, if (gtt->userptr) { r = amdgpu_ttm_tt_pin_userptr(bdev, ttm); if (r) { - DRM_ERROR("failed to pin userptr\n"); + dev_err(adev->dev, "failed to pin userptr\n"); return r; } } else if 
(ttm->page_flags & TTM_TT_FLAG_EXTERNAL) { @@ -1060,7 +1061,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, /* if the pages have userptr pinning then clear that first */ if (gtt->userptr) { amdgpu_ttm_tt_unpin_userptr(bdev, ttm); - } else if (ttm->sg && gtt->gobj->import_attach) { + } else if (ttm->sg && drm_gem_is_imported(gtt->gobj)) { struct dma_buf_attachment *attach; attach = gtt->gobj->import_attach; @@ -1781,7 +1782,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) &ctx->c2p_bo, NULL); if (ret) { - DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); + dev_err(adev->dev, "alloc c2p_bo failed(%d)!\n", ret); amdgpu_ttm_training_reserve_vram_fini(adev); return ret; } @@ -1793,7 +1794,7 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) adev, adev->gmc.real_vram_size - reserve_size, reserve_size, &adev->mman.fw_reserved_memory, NULL); if (ret) { - DRM_ERROR("alloc tmr failed(%d)!\n", ret); + dev_err(adev->dev, "alloc tmr failed(%d)!\n", ret); amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); return ret; @@ -1864,13 +1865,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) adev->need_swiotlb, dma_addressing_limited(adev->dev)); if (r) { - DRM_ERROR("failed initializing buffer object driver(%d).\n", r); + dev_err(adev->dev, + "failed initializing buffer object driver(%d).\n", r); return r; } r = amdgpu_ttm_pools_init(adev); if (r) { - DRM_ERROR("failed to init ttm pools(%d).\n", r); + dev_err(adev->dev, "failed to init ttm pools(%d).\n", r); return r; } adev->mman.initialized = true; @@ -1878,7 +1880,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Initialize VRAM pool with all of VRAM divided into pages */ r = amdgpu_vram_mgr_init(adev); if (r) { - DRM_ERROR("Failed initializing VRAM heap.\n"); + dev_err(adev->dev, "Failed initializing VRAM heap.\n"); return r; } @@ -1958,7 +1960,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_DEBUG_DRIVER("Skipped stolen memory reservation\n"); } - DRM_INFO("amdgpu: %uM of VRAM memory ready\n", + dev_info(adev->dev, "amdgpu: %uM of VRAM memory ready\n", (unsigned int)(adev->gmc.real_vram_size / (1024 * 1024))); /* Compute GTT size, either based on TTM limit @@ -1981,10 +1983,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Initialize GTT memory pool */ r = amdgpu_gtt_mgr_init(adev, gtt_size); if (r) { - DRM_ERROR("Failed initializing GTT heap.\n"); + dev_err(adev->dev, "Failed initializing GTT heap.\n"); return r; } - DRM_INFO("amdgpu: %uM of GTT memory ready.\n", + dev_info(adev->dev, "amdgpu: %uM of GTT memory ready.\n", (unsigned int)(gtt_size / (1024 * 1024))); if (adev->flags & AMD_IS_APU) { @@ -1995,40 +1997,40 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) /* Initialize doorbell pool on PCI BAR */ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_DOORBELL, adev->doorbell.size / PAGE_SIZE); if (r) { - DRM_ERROR("Failed initializing doorbell heap.\n"); + dev_err(adev->dev, "Failed initializing doorbell heap.\n"); return r; } /* Create a doorbell page for kernel usage */ r = amdgpu_doorbell_create_kernel_doorbells(adev); if (r) { - DRM_ERROR("Failed to initialize kernel doorbells.\n"); + dev_err(adev->dev, "Failed to initialize kernel doorbells.\n"); return r; } /* Initialize preemptible memory pool */ r = amdgpu_preempt_mgr_init(adev); if (r) { - DRM_ERROR("Failed initializing PREEMPT heap.\n"); + dev_err(adev->dev, "Failed initializing PREEMPT heap.\n"); return r; } /* Initialize various on-chip memory pools */ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, 
adev->gds.gds_size); if (r) { - DRM_ERROR("Failed initializing GDS heap.\n"); + dev_err(adev->dev, "Failed initializing GDS heap.\n"); return r; } r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GWS, adev->gds.gws_size); if (r) { - DRM_ERROR("Failed initializing gws heap.\n"); + dev_err(adev->dev, "Failed initializing GWS heap.\n"); return r; } r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_OA, adev->gds.oa_size); if (r) { - DRM_ERROR("Failed initializing oa heap.\n"); + dev_err(adev->dev, "Failed initializing OA heap.\n"); return r; } if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, @@ -2060,6 +2062,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) /* return the FW reserved memory back to VRAM */ amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.fw_reserved_memory_extend, NULL, + NULL); if (adev->mman.stolen_reserved_size) amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory, NULL, NULL); @@ -2089,7 +2093,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL); ttm_device_fini(&adev->mman.bdev); adev->mman.initialized = false; - DRM_INFO("amdgpu: ttm finalized\n"); + dev_info(adev->dev, "amdgpu: ttm finalized\n"); } /** @@ -2121,8 +2125,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) DRM_SCHED_PRIORITY_KERNEL, &sched, 1, NULL); if (r) { - DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", - r); + dev_err(adev->dev, + "Failed setting up TTM BO move entity (%d)\n", + r); return; } @@ -2130,8 +2135,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) DRM_SCHED_PRIORITY_NORMAL, &sched, 1, NULL); if (r) { - DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", - r); + dev_err(adev->dev, + "Failed setting up TTM BO move entity (%d)\n", + r); goto error_free_entity; } } else { @@ -2202,7 +2208,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, int r; if (!direct_submit && !ring->sched.ready) { - DRM_ERROR("Trying to move memory with ring turned off.\n"); + dev_err(adev->dev, + "Trying to move memory with ring turned off.\n"); return -EINVAL; } @@ -2237,7 +2244,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, error_free: amdgpu_job_free(job); - DRM_ERROR("Error scheduling IBs (%d)\n", r); + dev_err(adev->dev, "Error scheduling IBs (%d)\n", r); return r; } @@ -2356,7 +2363,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, int r; if (!adev->mman.buffer_funcs_enabled) { - DRM_ERROR("Trying to clear memory with ring turned off.\n"); + dev_err(adev->dev, + "Trying to clear memory with ring turned off.\n"); return -EINVAL; } @@ -2416,7 +2424,7 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) man = ttm_manager_type(&adev->mman.bdev, mem_type); break; default: - DRM_ERROR("Trying to evict invalid memory type\n"); + dev_err(adev->dev, "Trying to evict invalid memory type\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 208b7d1d8a27..2309df3f68a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -86,6 +86,7 @@ struct amdgpu_mman { uint32_t discovery_tmr_size; /* fw reserved memory */ struct amdgpu_bo *fw_reserved_memory; + struct amdgpu_bo *fw_reserved_memory_extend; /* firmware VRAM reservation */ u64 fw_vram_usage_start_offset; @@ -154,6 +155,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, 
uint64_t start, uint64_t size); int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start); +void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev); bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, struct ttm_resource *res); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 2505c46a9c3d..e96f24e9ad57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -30,6 +30,11 @@ #define AMDGPU_UCODE_NAME_MAX (128) +static const struct kicker_device kicker_device_list[] = { + {0x744B, 0x00}, + {0x7551, 0xC8} +}; + static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr) { DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes)); @@ -1155,6 +1160,9 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev) adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM; } + if (amdgpu_virt_xgmi_migrate_enabled(adev) && adev->firmware.fw_buf) + adev->firmware.fw_buf_mc = amdgpu_bo_fb_aper_addr(adev->firmware.fw_buf); + for (i = 0; i < adev->firmware.max_ucodes; i++) { ucode = &adev->firmware.ucode[i]; if (ucode->fw) { @@ -1387,6 +1395,19 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl return NULL; } +bool amdgpu_is_kicker_fw(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(kicker_device_list); i++) { + if (adev->pdev->device == kicker_device_list[i].device && + adev->pdev->revision == kicker_device_list[i].revision) + return true; + } + + return false; +} + void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len) { int maj, min, rev; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 9e89c3487be5..6349aad6da35 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -605,6 +605,11 @@ struct amdgpu_firmware { uint32_t pldm_version; }; +struct kicker_device { + unsigned short device; + u8 revision; +}; + void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr); void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr); void amdgpu_ucode_print_imu_hdr(const struct common_firmware_header *hdr); @@ -632,5 +637,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type); const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id); void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len); +bool amdgpu_is_kicker_fw(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c index 295e7186e156..8190c24a649a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c @@ -318,6 +318,10 @@ amdgpu_userq_destroy(struct drm_file *filp, int queue_id) amdgpu_bo_unreserve(queue->db_obj.obj); } amdgpu_bo_unref(&queue->db_obj.obj); + +#if defined(CONFIG_DEBUG_FS) + debugfs_remove_recursive(queue->debugfs_queue); +#endif r = amdgpu_userq_unmap_helper(uq_mgr, queue); amdgpu_userq_cleanup(uq_mgr, queue, queue_id); mutex_unlock(&uq_mgr->userq_mutex); @@ -343,6 +347,46 @@ static int amdgpu_userq_priority_permit(struct drm_file *filp, return -EACCES; } +#if defined(CONFIG_DEBUG_FS) +static int amdgpu_mqd_info_read(struct seq_file *m, void *unused) +{ + struct amdgpu_usermode_queue *queue = m->private; + struct amdgpu_bo *bo; + int r; + + if 
(!queue || !queue->mqd.obj) + return -EINVAL; + + bo = amdgpu_bo_ref(queue->mqd.obj); + r = amdgpu_bo_reserve(bo, true); + if (r) { + amdgpu_bo_unref(&bo); + return -EINVAL; + } + + seq_printf(m, "queue_type %d\n", queue->queue_type); + seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj)); + + amdgpu_bo_unreserve(bo); + amdgpu_bo_unref(&bo); + + return 0; +} + +static int amdgpu_mqd_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, amdgpu_mqd_info_read, inode->i_private); +} + +static const struct file_operations amdgpu_mqd_info_fops = { + .owner = THIS_MODULE, + .open = amdgpu_mqd_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + static int amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) { @@ -352,6 +396,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) const struct amdgpu_userq_funcs *uq_funcs; struct amdgpu_usermode_queue *queue; struct amdgpu_db_info db_info; + char *queue_name; bool skip_map_queue; uint64_t index; int qid, r = 0; @@ -426,6 +471,7 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) if (index == (uint64_t)-EINVAL) { drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n"); kfree(queue); + r = -EINVAL; goto unlock; } @@ -475,6 +521,18 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args) } } + queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid); + if (!queue_name) { + r = -ENOMEM; + goto unlock; + } + +#if defined(CONFIG_DEBUG_FS) + /* Queue dentry per client to hold MQD information */ + queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client); + debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops); +#endif + kfree(queue_name); args->out.queue_id = qid; @@ -664,7 +722,7 @@ static void amdgpu_userq_restore_worker(struct work_struct *work) struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); int ret; - flush_work(&fpriv->evf_mgr.suspend_work.work); + flush_delayed_work(&fpriv->evf_mgr.suspend_work); mutex_lock(&uq_mgr->userq_mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h index ec040c2fd6c9..b1ca91b7cda4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h @@ -65,6 +65,7 @@ struct amdgpu_usermode_queue { struct dma_fence *last_fence; u32 xcp_id; int priority; + struct dentry *debugfs_queue; }; struct amdgpu_userq_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index c8885c3d54b3..f1f67521c29c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -134,6 +134,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i) mutex_init(&adev->vcn.inst[i].vcn1_jpeg1_workaround); mutex_init(&adev->vcn.inst[i].vcn_pg_lock); + mutex_init(&adev->vcn.inst[i].engine_reset_mutex); atomic_set(&adev->vcn.inst[i].total_submission_cnt, 0); INIT_DELAYED_WORK(&adev->vcn.inst[i].idle_work, amdgpu_vcn_idle_work_handler); atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0); @@ -1451,3 +1452,78 @@ int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block, return ret; } + +/** + * amdgpu_vcn_reset_engine - Reset a specific VCN engine + * @adev: Pointer to the AMDGPU device + * @instance_id: VCN engine instance to reset + * + * Returns: 0 on success, or a negative error code on failure. 
+ */ +static int amdgpu_vcn_reset_engine(struct amdgpu_device *adev, + uint32_t instance_id) +{ + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[instance_id]; + int r, i; + + mutex_lock(&vinst->engine_reset_mutex); + /* Stop the scheduler's work queue for the dec and enc rings if they are running. + * This ensures that no new tasks are submitted to the queues while + * the reset is in progress. + */ + drm_sched_wqueue_stop(&vinst->ring_dec.sched); + for (i = 0; i < vinst->num_enc_rings; i++) + drm_sched_wqueue_stop(&vinst->ring_enc[i].sched); + + /* Perform the VCN reset for the specified instance */ + r = vinst->reset(vinst); + if (r) + goto unlock; + r = amdgpu_ring_test_ring(&vinst->ring_dec); + if (r) + goto unlock; + for (i = 0; i < vinst->num_enc_rings; i++) { + r = amdgpu_ring_test_ring(&vinst->ring_enc[i]); + if (r) + goto unlock; + } + amdgpu_fence_driver_force_completion(&vinst->ring_dec); + for (i = 0; i < vinst->num_enc_rings; i++) + amdgpu_fence_driver_force_completion(&vinst->ring_enc[i]); + + /* Restart the scheduler's work queue for the dec and enc rings + * if they were stopped by this function. This allows new tasks + * to be submitted to the queues after the reset is complete. + */ + drm_sched_wqueue_start(&vinst->ring_dec.sched); + for (i = 0; i < vinst->num_enc_rings; i++) + drm_sched_wqueue_start(&vinst->ring_enc[i].sched); + +unlock: + mutex_unlock(&vinst->engine_reset_mutex); + + return r; +} + +/** + * amdgpu_vcn_ring_reset - Reset a VCN ring + * @ring: ring to reset + * @vmid: vmid of guilty job + * @timedout_fence: fence of timed out job + * + * This helper is for VCN blocks without unified queues because + * resetting the engine resets all queues in that case. With + * unified queues we have one queue per engine. + * Returns: 0 on success, or a negative error code on failure. 
+ */ +int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + struct amdgpu_device *adev = ring->adev; + + if (adev->vcn.inst[ring->me].using_unified_queue) + return -EINVAL; + + return amdgpu_vcn_reset_engine(adev, ring->me); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h index 83adf81defc7..0bc0a94d7cf0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h @@ -330,7 +330,9 @@ struct amdgpu_vcn_inst { struct dpg_pause_state *new_state); int (*set_pg_state)(struct amdgpu_vcn_inst *vinst, enum amd_powergating_state state); + int (*reset)(struct amdgpu_vcn_inst *vinst); bool using_unified_queue; + struct mutex engine_reset_mutex; }; struct amdgpu_vcn_ras { @@ -552,5 +554,7 @@ void amdgpu_debugfs_vcn_sched_mask_init(struct amdgpu_device *adev); int vcn_set_powergating_state(struct amdgpu_ip_block *ip_block, enum amd_powergating_state state); - +int amdgpu_vcn_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 577c6194db78..3da3ebb1d9a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -152,8 +152,10 @@ enum AMDGIM_REG_ACCESS_FLAG { AMDGIM_FEATURE_MMHUB_REG_RLC_EN = (1 << 1), /* Use RLC to program GC regs */ AMDGIM_FEATURE_GC_REG_RLC_EN = (1 << 2), - /* Use PSP to program L1_TLB_CNTL*/ + /* Use PSP to program L1_TLB_CNTL */ AMDGIM_FEATURE_L1_TLB_CNTL_PSP_EN = (1 << 3), + /* Use RLCG to program SQ_CONFIG1 */ + AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG = (1 << 4), }; struct amdgim_pf2vf_info_v1 { @@ -301,6 +303,9 @@ struct amdgpu_virt { union amd_sriov_ras_caps ras_telemetry_en_caps; struct amdgpu_virt_ras ras; struct amd_sriov_ras_telemetry_error_count count_cache; + + /* hibernate and resume with different VF features for xgmi enabled systems */ + bool is_xgmi_node_migrate_enabled; }; struct amdgpu_video_codec_info; @@ -343,6 +348,10 @@ struct amdgpu_video_codec_info; #define amdgpu_sriov_rlcg_error_report_enabled(adev) \ (amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev)) +#define amdgpu_sriov_reg_access_sq_config(adev) \ +(amdgpu_sriov_vf((adev)) && \ + ((adev)->virt.reg_access & (AMDGIM_FEATURE_REG_ACCESS_SQ_CONFIG))) + #define amdgpu_passthrough(adev) \ ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) @@ -386,6 +395,10 @@ static inline bool is_virtual_machine(void) ((adev)->virt.gim_feature & AMDGIM_FEATURE_VCN_RB_DECOUPLE) #define amdgpu_sriov_is_mes_info_enable(adev) \ ((adev)->virt.gim_feature & AMDGIM_FEATURE_MES_INFO_ENABLE) + +#define amdgpu_virt_xgmi_migrate_enabled(adev) \ + ((adev)->virt.is_xgmi_node_migrate_enabled && (adev)->gmc.xgmi.node_segment_size != 0) + bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 0ff95a56c2ce..c39bb06ebda1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -654,11 +654,10 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm, * Check if all VM PDs/PTs are ready for updates * * Returns: - * True if VM is not evicting. 
+ * True if the VM is not evicting and no VM entities are stopped. */ bool amdgpu_vm_ready(struct amdgpu_vm *vm) { - bool empty; bool ret; amdgpu_vm_eviction_lock(vm); @@ -666,10 +665,18 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) amdgpu_vm_eviction_unlock(vm); spin_lock(&vm->status_lock); - empty = list_empty(&vm->evicted); + ret &= list_empty(&vm->evicted); spin_unlock(&vm->status_lock); - return ret && empty; + spin_lock(&vm->immediate.lock); + ret &= !vm->immediate.stopped; + spin_unlock(&vm->immediate.lock); + + spin_lock(&vm->delayed.lock); + ret &= !vm->delayed.stopped; + spin_unlock(&vm->delayed.lock); + + return ret; } /** @@ -765,6 +772,7 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool cleaner_shader_needed = false; bool pasid_mapping_needed = false; struct dma_fence *fence = NULL; + struct amdgpu_fence *af; unsigned int patch; int r; @@ -830,6 +838,9 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, r = amdgpu_fence_emit(ring, &fence, NULL, 0); if (r) return r; + /* this is part of the job's context */ + af = container_of(fence, struct amdgpu_fence, base); + af->context = job->base.s_fence ? job->base.s_fence->finished.context : 0; } if (vm_flush_needed) { @@ -1271,7 +1282,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, } else { struct drm_gem_object *obj = &bo->tbo.base; - if (obj->import_attach && bo_va->is_xgmi) { + if (drm_gem_is_imported(obj) && bo_va->is_xgmi) { struct dma_buf *dma_buf = obj->import_attach->dmabuf; struct drm_gem_object *gobj = dma_buf->priv; struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj); @@ -1631,7 +1642,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, * validation */ if (vm->is_compute_context && - bo_va->base.bo->tbo.base.import_attach && + drm_gem_is_imported(&bo_va->base.bo->tbo.base) && (!bo_va->base.bo->tbo.resource || bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM)) amdgpu_vm_bo_evicted_user(&bo_va->base); @@ -2395,10 +2406,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, else adev->vm_manager.fragment_size = amdgpu_vm_fragment_size; - DRM_INFO("vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", - vm_size, adev->vm_manager.num_level + 1, - adev->vm_manager.block_size, - adev->vm_manager.fragment_size); + dev_info( + adev->dev, + "vm size is %u GB, %u levels, block size is %u-bit, fragment size is %u-bit\n", + vm_size, adev->vm_manager.num_level + 1, + adev->vm_manager.block_size, adev->vm_manager.fragment_size); } /** @@ -2409,13 +2421,11 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, */ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout) { - timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, - DMA_RESV_USAGE_BOOKKEEP, - true, timeout); + timeout = drm_sched_entity_flush(&vm->immediate, timeout); if (timeout <= 0) return timeout; - return dma_fence_wait_timeout(vm->last_unlocked, true, timeout); + return drm_sched_entity_flush(&vm->delayed, timeout); } static void amdgpu_vm_destroy_task_info(struct kref *kref) @@ -2565,8 +2575,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX); - DRM_DEBUG_DRIVER("VM update mode is %s\n", - vm->use_cpu_for_update ? "CPU" : "SDMA"); + dev_dbg(adev->dev, "VM update mode is %s\n", + vm->use_cpu_for_update ? 
"CPU" : "SDMA"); WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); @@ -2608,7 +2618,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, r = amdgpu_vm_create_task_info(vm); if (r) - DRM_DEBUG("Failed to create task info for VM\n"); + dev_dbg(adev->dev, "Failed to create task info for VM\n"); amdgpu_bo_unreserve(vm->root.bo); amdgpu_bo_unref(&root_bo); @@ -2659,8 +2669,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) /* Update VM state */ vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_COMPUTE); - DRM_DEBUG_DRIVER("VM update mode is %s\n", - vm->use_cpu_for_update ? "CPU" : "SDMA"); + dev_dbg(adev->dev, "VM update mode is %s\n", + vm->use_cpu_for_update ? "CPU" : "SDMA"); WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); @@ -2983,7 +2993,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, error_unlock: amdgpu_bo_unreserve(root); if (r < 0) - DRM_ERROR("Can't handle page fault (%d)\n", r); + dev_err(adev->dev, "Can't handle page fault (%d)\n", r); error_unref: amdgpu_bo_unref(&root); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index abdc52b0895a..78f9e86ccc09 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -648,9 +648,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man, list_for_each_entry(block, &vres->blocks, link) vis_usage += amdgpu_vram_mgr_vis_size(adev, block); - amdgpu_vram_mgr_do_reserve(man); - drm_buddy_free_list(mm, &vres->blocks, vres->flags); + amdgpu_vram_mgr_do_reserve(man); mutex_unlock(&mgr->lock); atomic64_sub(vis_usage, &mgr->vis_usage); @@ -783,6 +782,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr) } /** + * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks + * + * @adev: amdgpu device pointer + * + * Reset the cleared drm buddy blocks. 
+ */ +void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev) +{ + struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; + struct drm_buddy *mm = &mgr->mm; + + mutex_lock(&mgr->lock); + drm_buddy_reset_clear(mm, false); + mutex_unlock(&mgr->lock); +} + +/** * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection * * @man: TTM memory type manager diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h index b256cbc2bc27..2c88d5fd87da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h @@ -66,7 +66,10 @@ to_amdgpu_vram_mgr_resource(struct ttm_resource *res) static inline void amdgpu_vram_mgr_set_cleared(struct ttm_resource *res) { - to_amdgpu_vram_mgr_resource(res)->flags |= DRM_BUDDY_CLEARED; + struct amdgpu_vram_mgr_resource *ares = to_amdgpu_vram_mgr_resource(res); + + WARN_ON(ares->flags & DRM_BUDDY_CLEARED); + ares->flags |= DRM_BUDDY_CLEARED; } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c index 322816805bfb..c417f8689220 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c @@ -218,15 +218,27 @@ int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode); } -int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +static bool __amdgpu_xcp_is_cached_mode_valid(struct amdgpu_xcp_mgr *xcp_mgr) { - int mode; + if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode) + return true; if (!amdgpu_sriov_vf(xcp_mgr->adev) && xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) - return xcp_mgr->mode; + return true; - if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode) + if (xcp_mgr->mode != AMDGPU_XCP_MODE_NONE && + xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS) + return true; + + return false; +} + +int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + int mode; + + if (__amdgpu_xcp_is_cached_mode_valid(xcp_mgr)) return xcp_mgr->mode; if (!(flags & AMDGPU_XCP_FL_LOCKED)) @@ -445,6 +457,222 @@ void amdgpu_xcp_release_sched(struct amdgpu_device *adev, } } +int amdgpu_xcp_select_scheds(struct amdgpu_device *adev, + u32 hw_ip, u32 hw_prio, + struct amdgpu_fpriv *fpriv, + unsigned int *num_scheds, + struct drm_gpu_scheduler ***scheds) +{ + u32 sel_xcp_id; + int i; + struct amdgpu_xcp_mgr *xcp_mgr = adev->xcp_mgr; + + if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) { + u32 least_ref_cnt = ~0; + + fpriv->xcp_id = 0; + for (i = 0; i < xcp_mgr->num_xcps; i++) { + u32 total_ref_cnt; + + total_ref_cnt = atomic_read(&xcp_mgr->xcp[i].ref_cnt); + if (total_ref_cnt < least_ref_cnt) { + fpriv->xcp_id = i; + least_ref_cnt = total_ref_cnt; + } + } + } + sel_xcp_id = fpriv->xcp_id; + + if (xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) { + *num_scheds = + xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds; + *scheds = + xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched; + atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt); + dev_dbg(adev->dev, "Selected partition #%d", sel_xcp_id); + } else { + dev_err(adev->dev, "Failed to schedule partition #%d.", sel_xcp_id); + return -ENOENT; + } + + return 0; +} + +static void amdgpu_set_xcp_id(struct amdgpu_device *adev, + uint32_t inst_idx, + struct amdgpu_ring *ring) +{ + int xcp_id; + enum AMDGPU_XCP_IP_BLOCK ip_blk; + uint32_t inst_mask; + + ring->xcp_id = 
AMDGPU_XCP_NO_PARTITION; + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) + adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id; + if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) || + (ring->funcs->type == AMDGPU_RING_TYPE_CPER)) + return; + + inst_mask = 1 << inst_idx; + + switch (ring->funcs->type) { + case AMDGPU_HW_IP_GFX: + case AMDGPU_RING_TYPE_COMPUTE: + case AMDGPU_RING_TYPE_KIQ: + ip_blk = AMDGPU_XCP_GFX; + break; + case AMDGPU_RING_TYPE_SDMA: + ip_blk = AMDGPU_XCP_SDMA; + break; + case AMDGPU_RING_TYPE_VCN_ENC: + case AMDGPU_RING_TYPE_VCN_JPEG: + ip_blk = AMDGPU_XCP_VCN; + break; + default: + dev_err(adev->dev, "Unsupported ring type %d!", ring->funcs->type); + return; + } + + for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) { + if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) { + ring->xcp_id = xcp_id; + dev_dbg(adev->dev, "ring:%s xcp_id:%u", ring->name, + ring->xcp_id); + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) + adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id; + break; + } + } +} + +static void amdgpu_xcp_gpu_sched_update(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + unsigned int sel_xcp_id) +{ + unsigned int *num_gpu_sched; + + num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id] + .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds; + adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio] + .sched[(*num_gpu_sched)++] = &ring->sched; + dev_dbg(adev->dev, "%s :[%d] gpu_sched[%d][%d] = %d", + ring->name, sel_xcp_id, ring->funcs->type, + ring->hw_prio, *num_gpu_sched); +} + +static int amdgpu_xcp_sched_list_update(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring; + int i; + + for (i = 0; i < MAX_XCP; i++) { + atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0); + memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched)); + } + + if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) + return 0; + + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + ring = adev->rings[i]; + if (!ring || !ring->sched.ready || ring->no_scheduler) + continue; + + amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id); + + /* VCN may be shared by two partitions under CPX MODE in certain + * configs. 
+ */ + if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC || + ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && + (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst)) + amdgpu_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1); + } + + return 0; +} + +int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->num_rings; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + + if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE || + ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + amdgpu_set_xcp_id(adev, ring->xcc_id, ring); + else + amdgpu_set_xcp_id(adev, ring->me, ring); + } + + return amdgpu_xcp_sched_list_update(adev); +} + +void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr) +{ + struct amdgpu_device *adev = xcp_mgr->adev; + + xcp_mgr->supp_xcp_modes = 0; + + switch (NUM_XCC(adev->gfx.xcc_mask)) { + case 8: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_DPX_PARTITION_MODE) | + BIT(AMDGPU_QPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 6: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_TPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 4: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_DPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 2: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + case 1: + xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | + BIT(AMDGPU_CPX_PARTITION_MODE); + break; + + default: + break; + } +} + +int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + /* TODO: + * Stop user queues and threads, and make sure GPU is empty of work. 
+ */ + + if (flags & AMDGPU_XCP_OPS_KFD) + amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev); + + return 0; +} + +int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) +{ + int ret = 0; + + if (flags & AMDGPU_XCP_OPS_KFD) { + amdgpu_amdkfd_device_probe(xcp_mgr->adev); + amdgpu_amdkfd_device_init(xcp_mgr->adev); + /* If KFD init failed, return failure */ + if (!xcp_mgr->adev->kfd.init_complete) + ret = -EIO; + } + + return ret; +} + /*====================== xcp sysfs - configuration ======================*/ #define XCP_CFG_SYSFS_RES_ATTR_SHOW(_name) \ static ssize_t amdgpu_xcp_res_sysfs_##_name##_show( \ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 454b33f889fb..70a0f8400b57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -39,6 +39,8 @@ #define AMDGPU_XCP_NO_PARTITION (~0) +#define AMDGPU_XCP_OPS_KFD (1 << 0) + struct amdgpu_fpriv; enum AMDGPU_XCP_IP_BLOCK { @@ -144,10 +146,6 @@ struct amdgpu_xcp_mgr_funcs { int (*suspend)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*prepare_resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); int (*resume)(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); - int (*select_scheds)(struct amdgpu_device *adev, - u32 hw_ip, u32 hw_prio, struct amdgpu_fpriv *fpriv, - unsigned int *num_scheds, struct drm_gpu_scheduler ***scheds); - int (*update_partition_sched_list)(struct amdgpu_device *adev); }; int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id); @@ -176,19 +174,18 @@ int amdgpu_xcp_open_device(struct amdgpu_device *adev, struct drm_file *file_priv); void amdgpu_xcp_release_sched(struct amdgpu_device *adev, struct amdgpu_ctx_entity *entity); - +int amdgpu_xcp_select_scheds(struct amdgpu_device *adev, + u32 hw_ip, u32 hw_prio, + struct amdgpu_fpriv *fpriv, + unsigned int *num_scheds, + struct drm_gpu_scheduler ***scheds); +void amdgpu_xcp_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr); +int amdgpu_xcp_update_partition_sched_list(struct amdgpu_device *adev); +int amdgpu_xcp_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags); +int amdgpu_xcp_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags); void amdgpu_xcp_sysfs_init(struct amdgpu_device *adev); void amdgpu_xcp_sysfs_fini(struct amdgpu_device *adev); -#define amdgpu_xcp_select_scheds(adev, e, c, d, x, y) \ - ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \ - (adev)->xcp_mgr->funcs->select_scheds ? \ - (adev)->xcp_mgr->funcs->select_scheds((adev), (e), (c), (d), (x), (y)) : -ENOENT) -#define amdgpu_xcp_update_partition_sched_list(adev) \ - ((adev)->xcp_mgr && (adev)->xcp_mgr->funcs && \ - (adev)->xcp_mgr->funcs->update_partition_sched_list ? 
\ - (adev)->xcp_mgr->funcs->update_partition_sched_list(adev) : 0) - static inline int amdgpu_xcp_get_num_xcp(struct amdgpu_xcp_mgr *xcp_mgr) { if (!xcp_mgr) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index d9ad37711c3e..1ede308a7c67 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -1771,16 +1771,25 @@ void amdgpu_xgmi_early_init(struct amdgpu_device *adev) case IP_VERSION(9, 4, 0): case IP_VERSION(9, 4, 1): case IP_VERSION(9, 4, 2): - adev->gmc.xgmi.max_speed = XGMI_SPEED_25GT; + /* 25 GT/s */ + adev->gmc.xgmi.max_speed = 25; adev->gmc.xgmi.max_width = 16; break; case IP_VERSION(9, 4, 3): case IP_VERSION(9, 4, 4): case IP_VERSION(9, 5, 0): - adev->gmc.xgmi.max_speed = XGMI_SPEED_32GT; + /* 32 GT/s */ + adev->gmc.xgmi.max_speed = 32; adev->gmc.xgmi.max_width = 16; break; default: break; } } + +void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev, + uint16_t max_speed, uint8_t max_width) +{ + adev->gmc.xgmi.max_speed = max_speed; + adev->gmc.xgmi.max_width = max_width; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index f994be985f42..bba0b26fee8f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -25,12 +25,6 @@ #include <drm/task_barrier.h> #include "amdgpu_ras.h" -enum amdgpu_xgmi_link_speed { - XGMI_SPEED_16GT = 16, - XGMI_SPEED_25GT = 25, - XGMI_SPEED_32GT = 32 -}; - struct amdgpu_hive_info { struct kobject kobj; uint64_t hive_id; @@ -97,7 +91,7 @@ struct amdgpu_xgmi { struct ras_common_if *ras_if; bool connected_to_cpu; struct amdgpu_xgmi_ras *ras; - enum amdgpu_xgmi_link_speed max_speed; + uint16_t max_speed; uint8_t max_width; }; @@ -130,4 +124,6 @@ int amdgpu_xgmi_get_ext_link(struct amdgpu_device *adev, int link_num); void amdgpu_xgmi_early_init(struct amdgpu_device *adev); uint32_t amdgpu_xgmi_get_max_bandwidth(struct amdgpu_device *adev); +void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev, + uint16_t max_speed, uint8_t max_width); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 92ca13097aaa..33edad1f9dcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -113,7 +113,8 @@ union amd_sriov_reg_access_flags { uint32_t vf_reg_access_mmhub : 1; uint32_t vf_reg_access_gc : 1; uint32_t vf_reg_access_l1_tlb_cntl : 1; - uint32_t reserved : 28; + uint32_t vf_reg_access_sq_config : 1; + uint32_t reserved : 27; } flags; uint32_t all; }; diff --git a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c index 1c083304ae77..811124ff88a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c +++ b/drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c @@ -29,12 +29,11 @@ #include "gfx_v9_4_3.h" #include "gfxhub_v1_2.h" #include "sdma_v4_4_2.h" +#include "amdgpu_ip.h" #define XCP_INST_MASK(num_inst, xcp_id) \ (num_inst ? 
GENMASK(num_inst - 1, 0) << (xcp_id * num_inst) : 0) -#define AMDGPU_XCP_OPS_KFD (1 << 0) - void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) { int i; @@ -62,234 +61,6 @@ void aqua_vanjaram_doorbell_index_init(struct amdgpu_device *adev) adev->doorbell_index.max_assignment = AMDGPU_DOORBELL_LAYOUT1_MAX_ASSIGNMENT << 1; } -static bool aqua_vanjaram_xcp_vcn_shared(struct amdgpu_device *adev) -{ - return (adev->xcp_mgr->num_xcps > adev->vcn.num_vcn_inst); -} - -static void aqua_vanjaram_set_xcp_id(struct amdgpu_device *adev, - uint32_t inst_idx, struct amdgpu_ring *ring) -{ - int xcp_id; - enum AMDGPU_XCP_IP_BLOCK ip_blk; - uint32_t inst_mask; - - ring->xcp_id = AMDGPU_XCP_NO_PARTITION; - if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) - adev->gfx.enforce_isolation[0].xcp_id = ring->xcp_id; - if ((adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) || - (ring->funcs->type == AMDGPU_RING_TYPE_CPER)) - return; - - inst_mask = 1 << inst_idx; - - switch (ring->funcs->type) { - case AMDGPU_HW_IP_GFX: - case AMDGPU_RING_TYPE_COMPUTE: - case AMDGPU_RING_TYPE_KIQ: - ip_blk = AMDGPU_XCP_GFX; - break; - case AMDGPU_RING_TYPE_SDMA: - ip_blk = AMDGPU_XCP_SDMA; - break; - case AMDGPU_RING_TYPE_VCN_ENC: - case AMDGPU_RING_TYPE_VCN_JPEG: - ip_blk = AMDGPU_XCP_VCN; - break; - default: - DRM_ERROR("Not support ring type %d!", ring->funcs->type); - return; - } - - for (xcp_id = 0; xcp_id < adev->xcp_mgr->num_xcps; xcp_id++) { - if (adev->xcp_mgr->xcp[xcp_id].ip[ip_blk].inst_mask & inst_mask) { - ring->xcp_id = xcp_id; - dev_dbg(adev->dev, "ring:%s xcp_id :%u", ring->name, - ring->xcp_id); - if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) - adev->gfx.enforce_isolation[xcp_id].xcp_id = xcp_id; - break; - } - } -} - -static void aqua_vanjaram_xcp_gpu_sched_update( - struct amdgpu_device *adev, - struct amdgpu_ring *ring, - unsigned int sel_xcp_id) -{ - unsigned int *num_gpu_sched; - - num_gpu_sched = &adev->xcp_mgr->xcp[sel_xcp_id] - .gpu_sched[ring->funcs->type][ring->hw_prio].num_scheds; - adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[ring->funcs->type][ring->hw_prio] - .sched[(*num_gpu_sched)++] = &ring->sched; - DRM_DEBUG("%s :[%d] gpu_sched[%d][%d] = %d", ring->name, - sel_xcp_id, ring->funcs->type, - ring->hw_prio, *num_gpu_sched); -} - -static int aqua_vanjaram_xcp_sched_list_update( - struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring; - int i; - - for (i = 0; i < MAX_XCP; i++) { - atomic_set(&adev->xcp_mgr->xcp[i].ref_cnt, 0); - memset(adev->xcp_mgr->xcp[i].gpu_sched, 0, sizeof(adev->xcp_mgr->xcp->gpu_sched)); - } - - if (adev->xcp_mgr->mode == AMDGPU_XCP_MODE_NONE) - return 0; - - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - ring = adev->rings[i]; - if (!ring || !ring->sched.ready || ring->no_scheduler) - continue; - - aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id); - - /* VCN may be shared by two partitions under CPX MODE in certain - * configs. 
- */ - if ((ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC || - ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) && - aqua_vanjaram_xcp_vcn_shared(adev)) - aqua_vanjaram_xcp_gpu_sched_update(adev, ring, ring->xcp_id + 1); - } - - return 0; -} - -static int aqua_vanjaram_update_partition_sched_list(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->num_rings; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - - if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE || - ring->funcs->type == AMDGPU_RING_TYPE_KIQ) - aqua_vanjaram_set_xcp_id(adev, ring->xcc_id, ring); - else - aqua_vanjaram_set_xcp_id(adev, ring->me, ring); - } - - return aqua_vanjaram_xcp_sched_list_update(adev); -} - -static int aqua_vanjaram_select_scheds( - struct amdgpu_device *adev, - u32 hw_ip, - u32 hw_prio, - struct amdgpu_fpriv *fpriv, - unsigned int *num_scheds, - struct drm_gpu_scheduler ***scheds) -{ - u32 sel_xcp_id; - int i; - - if (fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION) { - u32 least_ref_cnt = ~0; - - fpriv->xcp_id = 0; - for (i = 0; i < adev->xcp_mgr->num_xcps; i++) { - u32 total_ref_cnt; - - total_ref_cnt = atomic_read(&adev->xcp_mgr->xcp[i].ref_cnt); - if (total_ref_cnt < least_ref_cnt) { - fpriv->xcp_id = i; - least_ref_cnt = total_ref_cnt; - } - } - } - sel_xcp_id = fpriv->xcp_id; - - if (adev->xcp_mgr->xcp[sel_xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds) { - *num_scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].num_scheds; - *scheds = adev->xcp_mgr->xcp[fpriv->xcp_id].gpu_sched[hw_ip][hw_prio].sched; - atomic_inc(&adev->xcp_mgr->xcp[sel_xcp_id].ref_cnt); - DRM_DEBUG("Selected partition #%d", sel_xcp_id); - } else { - DRM_ERROR("Failed to schedule partition #%d.", sel_xcp_id); - return -ENOENT; - } - - return 0; -} - -static int8_t aqua_vanjaram_logical_to_dev_inst(struct amdgpu_device *adev, - enum amd_hw_ip_block_type block, - int8_t inst) -{ - int8_t dev_inst; - - switch (block) { - case GC_HWIP: - case SDMA0_HWIP: - /* Both JPEG and VCN as JPEG is only alias of VCN */ - case VCN_HWIP: - dev_inst = adev->ip_map.dev_inst[block][inst]; - break; - default: - /* For rest of the IPs, no look up required. - * Assume 'logical instance == physical instance' for all configs. 
*/ - dev_inst = inst; - break; - } - - return dev_inst; -} - -static uint32_t aqua_vanjaram_logical_to_dev_mask(struct amdgpu_device *adev, - enum amd_hw_ip_block_type block, - uint32_t mask) -{ - uint32_t dev_mask = 0; - int8_t log_inst, dev_inst; - - while (mask) { - log_inst = ffs(mask) - 1; - dev_inst = aqua_vanjaram_logical_to_dev_inst(adev, block, log_inst); - dev_mask |= (1 << dev_inst); - mask &= ~(1 << log_inst); - } - - return dev_mask; -} - -static void aqua_vanjaram_populate_ip_map(struct amdgpu_device *adev, - enum amd_hw_ip_block_type ip_block, - uint32_t inst_mask) -{ - int l = 0, i; - - while (inst_mask) { - i = ffs(inst_mask) - 1; - adev->ip_map.dev_inst[ip_block][l++] = i; - inst_mask &= ~(1 << i); - } - for (; l < HWIP_MAX_INSTANCE; l++) - adev->ip_map.dev_inst[ip_block][l] = -1; -} - -void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev) -{ - u32 ip_map[][2] = { - { GC_HWIP, adev->gfx.xcc_mask }, - { SDMA0_HWIP, adev->sdma.sdma_mask }, - { VCN_HWIP, adev->vcn.inst_mask }, - }; - int i; - - for (i = 0; i < ARRAY_SIZE(ip_map); ++i) - aqua_vanjaram_populate_ip_map(adev, ip_map[i][0], ip_map[i][1]); - - adev->ip_map.logical_to_dev_inst = aqua_vanjaram_logical_to_dev_inst; - adev->ip_map.logical_to_dev_mask = aqua_vanjaram_logical_to_dev_mask; -} - /* Fixed pattern for smn addressing on different AIDs: * bit[34]: indicate cross AID access * bit[33:32]: indicate target AID id @@ -353,11 +124,14 @@ static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) if (adev->nbio.funcs->get_compute_partition_mode) { mode = adev->nbio.funcs->get_compute_partition_mode(adev); - if (mode != derv_mode) + if (mode != derv_mode) { dev_warn( adev->dev, "Mismatch in compute partition mode - reported : %d derived : %d", mode, derv_mode); + if (derv_mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE) + amdgpu_device_bus_status_check(adev); + } } return mode; @@ -453,6 +227,7 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr, uint16_t *nps_modes) { struct amdgpu_device *adev = xcp_mgr->adev; + uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0); if (!num_xcp || !nps_modes || !(xcp_mgr->supp_xcp_modes & BIT(px_mode))) return -EINVAL; @@ -476,12 +251,14 @@ static int __aqua_vanjaram_get_px_mode_info(struct amdgpu_xcp_mgr *xcp_mgr, *num_xcp = 4; *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE); + if (gc_ver == IP_VERSION(9, 5, 0)) + *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE); break; case AMDGPU_CPX_PARTITION_MODE: *num_xcp = NUM_XCC(adev->gfx.xcc_mask); *nps_modes = BIT(AMDGPU_NPS1_PARTITION_MODE) | BIT(AMDGPU_NPS4_PARTITION_MODE); - if (amdgpu_sriov_vf(adev)) + if (gc_ver == IP_VERSION(9, 5, 0)) *nps_modes |= BIT(AMDGPU_NPS2_PARTITION_MODE); break; default: @@ -593,72 +370,6 @@ static bool __aqua_vanjaram_is_valid_mode(struct amdgpu_xcp_mgr *xcp_mgr, return false; } -static int __aqua_vanjaram_pre_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) -{ - /* TODO: - * Stop user queues and threads, and make sure GPU is empty of work. 
- */ - - if (flags & AMDGPU_XCP_OPS_KFD) - amdgpu_amdkfd_device_fini_sw(xcp_mgr->adev); - - return 0; -} - -static int __aqua_vanjaram_post_partition_switch(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags) -{ - int ret = 0; - - if (flags & AMDGPU_XCP_OPS_KFD) { - amdgpu_amdkfd_device_probe(xcp_mgr->adev); - amdgpu_amdkfd_device_init(xcp_mgr->adev); - /* If KFD init failed, return failure */ - if (!xcp_mgr->adev->kfd.init_complete) - ret = -EIO; - } - - return ret; -} - -static void -__aqua_vanjaram_update_supported_modes(struct amdgpu_xcp_mgr *xcp_mgr) -{ - struct amdgpu_device *adev = xcp_mgr->adev; - - xcp_mgr->supp_xcp_modes = 0; - - switch (NUM_XCC(adev->gfx.xcc_mask)) { - case 8: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_DPX_PARTITION_MODE) | - BIT(AMDGPU_QPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - case 6: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_TPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - case 4: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_DPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - /* this seems only existing in emulation phase */ - case 2: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - case 1: - xcp_mgr->supp_xcp_modes = BIT(AMDGPU_SPX_PARTITION_MODE) | - BIT(AMDGPU_CPX_PARTITION_MODE); - break; - - default: - break; - } -} - static void __aqua_vanjaram_update_available_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr) { int mode; @@ -705,7 +416,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, goto out; } - ret = __aqua_vanjaram_pre_partition_switch(xcp_mgr, flags); + ret = amdgpu_xcp_pre_partition_switch(xcp_mgr, flags); if (ret) goto unlock; @@ -718,7 +429,7 @@ static int aqua_vanjaram_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, *num_xcps = num_xcc / num_xcc_per_xcp; amdgpu_xcp_init(xcp_mgr, *num_xcps, mode); - ret = __aqua_vanjaram_post_partition_switch(xcp_mgr, flags); + ret = amdgpu_xcp_post_partition_switch(xcp_mgr, flags); if (!ret) __aqua_vanjaram_update_available_partition_mode(xcp_mgr); unlock: @@ -801,9 +512,6 @@ struct amdgpu_xcp_mgr_funcs aqua_vanjaram_xcp_funcs = { .get_ip_details = &aqua_vanjaram_get_xcp_ip_details, .get_xcp_res_info = &aqua_vanjaram_get_xcp_res_info, .get_xcp_mem_id = &aqua_vanjaram_get_xcp_mem_id, - .select_scheds = &aqua_vanjaram_select_scheds, - .update_partition_sched_list = - &aqua_vanjaram_update_partition_sched_list }; static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) @@ -818,7 +526,7 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev) if (ret) return ret; - __aqua_vanjaram_update_supported_modes(adev->xcp_mgr); + amdgpu_xcp_update_supported_modes(adev->xcp_mgr); /* TODO: Default memory node affinity init */ return ret; @@ -858,7 +566,7 @@ int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev) if (ret) return ret; - aqua_vanjaram_ip_map_init(adev); + amdgpu_ip_map_init(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 75ea071744eb..7bd506f06eb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4952,11 +4952,15 @@ static int gfx_v10_0_sw_init(struct amdgpu_ip_block *ip_block) } } } - /* TODO: Add queue reset mask when FW fully supports it */ + adev->gfx.gfx_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); 
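/*
 * [editor's sketch, not part of the upstream diff] The pattern used at
 * this point in gfx_v10_0_sw_init(): a ring's baseline reset mask
 * (soft recovery / full adapter reset) is derived via
 * amdgpu_get_soft_full_reset_mask(), and AMDGPU_RESET_TYPE_PER_QUEUE is
 * OR'ed in only on bare metal, mirroring the !amdgpu_sriov_vf() gating
 * added throughout this series. The helper name below is hypothetical;
 * every call and flag it uses is taken from the surrounding hunk.
 */
static void gfx_example_init_reset_masks(struct amdgpu_device *adev)
{
        adev->gfx.gfx_supported_reset =
                amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]);
        adev->gfx.compute_supported_reset =
                amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]);

        /* per-queue reset is not advertised to SR-IOV virtual functions */
        if (!amdgpu_sriov_vf(adev)) {
                adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
                adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
        }
}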
adev->gfx.compute_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + if (!amdgpu_sriov_vf(adev)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + } r = amdgpu_gfx_kiq_init(adev, GFX10_MEC_HPD_SIZE, 0); if (r) { @@ -9046,21 +9050,6 @@ static void gfx_v10_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask); } -static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring, - unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t value = 0; - - value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); - value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); - value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); - value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); - amdgpu_gfx_rlc_enter_safe_mode(adev, 0); - WREG32_SOC15(GC, 0, mmSQ_CMD, value); - amdgpu_gfx_rlc_exit_safe_mode(adev, 0); -} - static void gfx_v10_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -9522,7 +9511,9 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_insert_nop(ring, num_nop - 1); } -static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -9532,15 +9523,14 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) u64 addr; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + spin_lock_irqsave(&kiq->ring_lock, flags); - if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) { + if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7)) { spin_unlock_irqrestore(&kiq->ring_lock, flags); return -ENOMEM; } @@ -9560,12 +9550,9 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) 0, 1, 0x20); gfx_v10_0_ring_emit_reg_wait(kiq_ring, SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff); - kiq->pmf->kiq_map_queues(kiq_ring, ring); amdgpu_ring_commit(kiq_ring); - - spin_unlock_irqrestore(&kiq->ring_lock, flags); - r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) return r; @@ -9575,11 +9562,25 @@ static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + spin_lock_irqsave(&kiq->ring_lock, flags); + + if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) { + spin_unlock_irqrestore(&kiq->ring_lock, flags); + return -ENOMEM; + } + kiq->pmf->kiq_map_queues(kiq_ring, ring); + amdgpu_ring_commit(kiq_ring); + r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, - unsigned int vmid) + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -9587,12 +9588,11 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int i, r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + 
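/*
 * [editor's sketch, not part of the upstream diff] The shape every
 * .reset callback is converted to in this series: take the timed-out
 * fence, bracket the engine-specific recovery with
 * amdgpu_ring_reset_helper_begin()/_end(), and drop both the open-coded
 * amdgpu_sriov_vf() bail-out and the trailing amdgpu_ring_test_ring().
 * example_engine_stop()/_start() are hypothetical stand-ins for the
 * engine-specific work (compare jpeg_v2_0_ring_reset() later in this
 * diff, which stops and restarts the JPEG block between the helpers).
 */
static int example_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
                              struct amdgpu_fence *timedout_fence)
{
        int r;

        /* common pre-reset bookkeeping provided by the amdgpu core */
        amdgpu_ring_reset_helper_begin(ring, timedout_fence);

        r = example_engine_stop(ring->adev);    /* hypothetical hook */
        if (r)
                return r;
        r = example_engine_start(ring->adev);   /* hypothetical hook */
        if (r)
                return r;

        /* common post-reset path: retest the ring and settle the fence */
        return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}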
spin_lock_irqsave(&kiq->ring_lock, flags); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { @@ -9603,9 +9603,8 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES, 0, 0); amdgpu_ring_commit(kiq_ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) return r; @@ -9641,13 +9640,12 @@ static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring, } kiq->pmf->kiq_map_queues(kiq_ring, ring); amdgpu_ring_commit(kiq_ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) return r; - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static void gfx_v10_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) @@ -9882,7 +9880,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = { .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, .reset = gfx_v10_0_reset_kgq, .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader, @@ -9923,7 +9920,6 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = { .emit_wreg = gfx_v10_0_ring_emit_wreg, .emit_reg_wait = gfx_v10_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v10_0_ring_soft_recovery, .emit_mem_sync = gfx_v10_0_emit_mem_sync, .reset = gfx_v10_0_reset_kcq, .emit_cleaner_shader = gfx_v10_0_ring_emit_cleaner_shader, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index afd6d59164bf..c85de8c8f6f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -85,6 +85,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin"); @@ -759,6 +760,10 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, AMDGPU_UCODE_REQUIRED, "amdgpu/gc_11_0_0_rlc_1.bin"); + else if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_rlc_kicker.bin", ucode_prefix); else err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, AMDGPU_UCODE_REQUIRED, @@ -1607,9 +1612,9 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): if (!adev->gfx.disable_uq && - adev->gfx.me_fw_version >= 2390 && - adev->gfx.pfp_fw_version >= 2530 && - adev->gfx.mec_fw_version >= 2600 && + adev->gfx.me_fw_version >= 2420 && + adev->gfx.pfp_fw_version >= 2580 && + adev->gfx.mec_fw_version >= 2650 && adev->mes.fw_version[0] >= 120) { adev->userq_funcs[AMDGPU_HW_IP_GFX] = &userq_mes_funcs; adev->userq_funcs[AMDGPU_HW_IP_COMPUTE] = &userq_mes_funcs; @@ -1801,12 +1806,17 @@ static int gfx_v11_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(11, 0, 2): case IP_VERSION(11, 0, 3): if ((adev->gfx.me_fw_version >= 2280) && - 
(adev->gfx.mec_fw_version >= 2410)) { - adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; - adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + (adev->gfx.mec_fw_version >= 2410) && + !amdgpu_sriov_vf(adev)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } break; default: + if (!amdgpu_sriov_vf(adev)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + } break; } @@ -4119,6 +4129,8 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, #endif if (prop->tmz_queue) tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1); + if (!prop->kernel_queue) + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1); mqd->cp_gfx_hqd_cntl = tmp; /* set up cp_doorbell_control */ @@ -4271,8 +4283,10 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m, tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, prop->allow_tunneling); - tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); - tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + if (prop->kernel_queue) { + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + } if (prop->tmz_queue) tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1); mqd->cp_hqd_pq_control = tmp; @@ -6278,21 +6292,6 @@ static void gfx_v11_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask, 0x20); } -static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring, - unsigned vmid) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t value = 0; - - value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); - value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); - value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); - value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); - amdgpu_gfx_rlc_enter_safe_mode(adev, 0); - WREG32_SOC15(GC, 0, regSQ_CMD, value); - amdgpu_gfx_rlc_exit_safe_mode(adev, 0); -} - static void gfx_v11_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -6806,13 +6805,14 @@ static int gfx_v11_reset_gfx_pipe(struct amdgpu_ring *ring) return 0; } -static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); if (r) { @@ -6835,7 +6835,7 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring) @@ -6968,13 +6968,14 @@ static int gfx_v11_0_reset_compute_pipe(struct amdgpu_ring *ring) return 0; } -static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; int r = 0; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, 
true); if (r) { @@ -6995,7 +6996,7 @@ static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static void gfx_v11_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) @@ -7231,7 +7232,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = { .emit_wreg = gfx_v11_0_ring_emit_wreg, .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v11_0_ring_soft_recovery, .emit_mem_sync = gfx_v11_0_emit_mem_sync, .reset = gfx_v11_0_reset_kgq, .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, @@ -7273,7 +7273,6 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = { .emit_wreg = gfx_v11_0_ring_emit_wreg, .emit_reg_wait = gfx_v11_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v11_0_ring_soft_recovery, .emit_mem_sync = gfx_v11_0_emit_mem_sync, .reset = gfx_v11_0_reset_kcq, .emit_cleaner_shader = gfx_v11_0_ring_emit_cleaner_shader, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index 1234c8d64e20..fd44d5503e28 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -79,6 +79,7 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_1_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_me.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_12_0_1_rlc_kicker.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_toc.bin"); static const struct amdgpu_hwip_reg_entry gc_reg_list_12_0[] = { @@ -586,7 +587,7 @@ out: static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) { - char ucode_prefix[15]; + char ucode_prefix[30]; int err; const struct rlc_firmware_header_v2_0 *rlc_hdr; uint16_t version_major; @@ -613,9 +614,14 @@ static int gfx_v12_0_init_microcode(struct amdgpu_device *adev) amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK); if (!amdgpu_sriov_vf(adev)) { - err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, - AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_rlc.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_rlc_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_rlc.bin", ucode_prefix); if (err) goto out; rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; @@ -1542,10 +1548,14 @@ static int gfx_v12_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(12, 0, 0): case IP_VERSION(12, 0, 1): if ((adev->gfx.me_fw_version >= 2660) && - (adev->gfx.mec_fw_version >= 2920)) { - adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; - adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + (adev->gfx.mec_fw_version >= 2920) && + !amdgpu_sriov_vf(adev)) { + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + adev->gfx.gfx_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; } + break; + default: + break; } if (!adev->enable_mes_kiq) { @@ -3016,6 +3026,8 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m, #endif if (prop->tmz_queue) tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, TMZ_MATCH, 1); + if (!prop->kernel_queue) + tmp = REG_SET_FIELD(tmp, CP_GFX_HQD_CNTL, RB_NON_PRIV, 1); mqd->cp_gfx_hqd_cntl = 
tmp; /* set up cp_doorbell_control */ @@ -3165,8 +3177,10 @@ static int gfx_v12_0_compute_mqd_init(struct amdgpu_device *adev, void *m, (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1)); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1); tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH, 0); - tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); - tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + if (prop->kernel_queue) { + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + } if (prop->tmz_queue) tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TMZ, 1); mqd->cp_hqd_pq_control = tmp; @@ -4690,21 +4704,6 @@ static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring, ref, mask, 0x20); } -static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring, - unsigned vmid) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t value = 0; - - value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03); - value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01); - value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1); - value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid); - amdgpu_gfx_rlc_enter_safe_mode(adev, 0); - WREG32_SOC15(GC, 0, regSQ_CMD, value); - amdgpu_gfx_rlc_exit_safe_mode(adev, 0); -} - static void gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, uint32_t me, uint32_t pipe, @@ -5307,13 +5306,14 @@ static int gfx_v12_reset_gfx_pipe(struct amdgpu_ring *ring) return 0; } -static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); if (r) { @@ -5335,7 +5335,7 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring) @@ -5421,13 +5421,14 @@ static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring) return 0; } -static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) +static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true); if (r) { @@ -5448,7 +5449,7 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid) return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring) @@ -5526,7 +5527,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = { .emit_wreg = gfx_v12_0_ring_emit_wreg, .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, .reset = gfx_v12_0_reset_kgq, .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, @@ -5565,7 +5565,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = { .emit_wreg = 
gfx_v12_0_ring_emit_wreg, .emit_reg_wait = gfx_v12_0_ring_emit_reg_wait, .emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait, - .soft_recovery = gfx_v12_0_ring_soft_recovery, .emit_mem_sync = gfx_v12_0_emit_mem_sync, .reset = gfx_v12_0_reset_kcq, .emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index da0534ff1271..2aa323dab34e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -4884,76 +4884,6 @@ static void gfx_v7_0_emit_mem_sync_compute(struct amdgpu_ring *ring) amdgpu_ring_write(ring, 0x0000000A); /* poll interval */ } -static void gfx_v7_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, - int mem_space, int opt, uint32_t addr0, - uint32_t addr1, uint32_t ref, uint32_t mask, - uint32_t inv) -{ - amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, - /* memory (1) or register (0) */ - (WAIT_REG_MEM_MEM_SPACE(mem_space) | - WAIT_REG_MEM_OPERATION(opt) | /* wait */ - WAIT_REG_MEM_FUNCTION(3) | /* equal */ - WAIT_REG_MEM_ENGINE(eng_sel))); - - if (mem_space) - BUG_ON(addr0 & 0x3); /* Dword align */ - amdgpu_ring_write(ring, addr0); - amdgpu_ring_write(ring, addr1); - amdgpu_ring_write(ring, ref); - amdgpu_ring_write(ring, mask); - amdgpu_ring_write(ring, inv); /* poll interval */ -} - -static void gfx_v7_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, - uint32_t val, uint32_t mask) -{ - gfx_v7_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); -} - -static int gfx_v7_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &kiq->ring; - unsigned long flags; - u32 tmp; - int r; - - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) - return -EINVAL; - - spin_lock_irqsave(&kiq->ring_lock, flags); - - if (amdgpu_ring_alloc(kiq_ring, 5)) { - spin_unlock_irqrestore(&kiq->ring_lock, flags); - return -ENOMEM; - } - - tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); - gfx_v7_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp); - amdgpu_ring_commit(kiq_ring); - - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_ring_test_ring(kiq_ring); - if (r) - return r; - - if (amdgpu_ring_alloc(ring, 7 + 12 + 5)) - return -ENOMEM; - gfx_v7_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr, - ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC); - gfx_v7_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff); - gfx_v7_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0); - - return amdgpu_ring_test_ring(ring); -} - static const struct amd_ip_funcs gfx_v7_0_ip_funcs = { .name = "gfx_v7_0", .early_init = gfx_v7_0_early_init, @@ -5003,7 +4933,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = { .emit_wreg = gfx_v7_0_ring_emit_wreg, .soft_recovery = gfx_v7_0_ring_soft_recovery, .emit_mem_sync = gfx_v7_0_emit_mem_sync, - .reset = gfx_v7_0_reset_kgq, }; static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5ee2237d8ee8..367449d8061b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4640,6 +4640,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); /* 
reset ring buffer */ ring->wptr = 0; + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); } return 0; @@ -6339,34 +6340,6 @@ static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, amdgpu_ring_write(ring, val); } -static void gfx_v8_0_wait_reg_mem(struct amdgpu_ring *ring, int eng_sel, - int mem_space, int opt, uint32_t addr0, - uint32_t addr1, uint32_t ref, uint32_t mask, - uint32_t inv) -{ - amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); - amdgpu_ring_write(ring, - /* memory (1) or register (0) */ - (WAIT_REG_MEM_MEM_SPACE(mem_space) | - WAIT_REG_MEM_OPERATION(opt) | /* wait */ - WAIT_REG_MEM_FUNCTION(3) | /* equal */ - WAIT_REG_MEM_ENGINE(eng_sel))); - - if (mem_space) - BUG_ON(addr0 & 0x3); /* Dword align */ - amdgpu_ring_write(ring, addr0); - amdgpu_ring_write(ring, addr1); - amdgpu_ring_write(ring, ref); - amdgpu_ring_write(ring, mask); - amdgpu_ring_write(ring, inv); /* poll interval */ -} - -static void gfx_v8_0_ring_emit_reg_wait(struct amdgpu_ring *ring, uint32_t reg, - uint32_t val, uint32_t mask) -{ - gfx_v8_0_wait_reg_mem(ring, 0, 0, 0, reg, 0, val, mask, 0x20); -} - static void gfx_v8_0_ring_soft_recovery(struct amdgpu_ring *ring, unsigned vmid) { struct amdgpu_device *adev = ring->adev; @@ -6843,48 +6816,6 @@ static void gfx_v8_0_emit_wave_limit(struct amdgpu_ring *ring, bool enable) } -static int gfx_v8_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &kiq->ring; - unsigned long flags; - u32 tmp; - int r; - - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) - return -EINVAL; - - spin_lock_irqsave(&kiq->ring_lock, flags); - - if (amdgpu_ring_alloc(kiq_ring, 5)) { - spin_unlock_irqrestore(&kiq->ring_lock, flags); - return -ENOMEM; - } - - tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); - gfx_v8_0_ring_emit_wreg(kiq_ring, mmCP_VMID_RESET, tmp); - amdgpu_ring_commit(kiq_ring); - - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_ring_test_ring(kiq_ring); - if (r) - return r; - - if (amdgpu_ring_alloc(ring, 7 + 12 + 5)) - return -ENOMEM; - gfx_v8_0_ring_emit_fence_gfx(ring, ring->fence_drv.gpu_addr, - ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC); - gfx_v8_0_ring_emit_reg_wait(ring, mmCP_VMID_RESET, 0, 0xffff); - gfx_v8_0_ring_emit_wreg(ring, mmCP_VMID_RESET, 0); - - return amdgpu_ring_test_ring(ring); -} - static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .name = "gfx_v8_0", .early_init = gfx_v8_0_early_init, @@ -6950,7 +6881,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { .emit_wreg = gfx_v8_0_ring_emit_wreg, .soft_recovery = gfx_v8_0_ring_soft_recovery, .emit_mem_sync = gfx_v8_0_emit_mem_sync, - .reset = gfx_v8_0_reset_kgq, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d377a7c57d5e..20b30f4b3c7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2235,6 +2235,25 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block) } switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { + case IP_VERSION(9, 0, 1): + case IP_VERSION(9, 2, 1): + case IP_VERSION(9, 4, 0): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 3, 0): + adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex; + 
adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex); + if (adev->gfx.me_fw_version >= 167 && + adev->gfx.pfp_fw_version >= 196 && + adev->gfx.mec_fw_version >= 474) { + adev->gfx.enable_cleaner_shader = true; + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); + if (r) { + adev->gfx.enable_cleaner_shader = false; + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); + } + } + break; case IP_VERSION(9, 4, 2): adev->gfx.cleaner_shader_ptr = gfx_9_4_2_cleaner_shader_hex; adev->gfx.cleaner_shader_size = sizeof(gfx_9_4_2_cleaner_shader_hex); @@ -2391,6 +2410,8 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block) amdgpu_get_soft_full_reset_mask(&adev->gfx.gfx_ring[0]); adev->gfx.compute_supported_reset = amdgpu_get_soft_full_reset_mask(&adev->gfx.compute_ring[0]); + if (!amdgpu_sriov_vf(adev)) + adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_gfx_kiq_init(adev, GFX9_MEC_HPD_SIZE, 0); if (r) { @@ -7152,53 +7173,9 @@ static void gfx_v9_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop) amdgpu_ring_insert_nop(ring, num_nop - 1); } -static int gfx_v9_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid) -{ - struct amdgpu_device *adev = ring->adev; - struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; - struct amdgpu_ring *kiq_ring = &kiq->ring; - unsigned long flags; - u32 tmp; - int r; - - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) - return -EINVAL; - - spin_lock_irqsave(&kiq->ring_lock, flags); - - if (amdgpu_ring_alloc(kiq_ring, 5)) { - spin_unlock_irqrestore(&kiq->ring_lock, flags); - return -ENOMEM; - } - - tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid); - gfx_v9_0_ring_emit_wreg(kiq_ring, - SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp); - amdgpu_ring_commit(kiq_ring); - - spin_unlock_irqrestore(&kiq->ring_lock, flags); - - r = amdgpu_ring_test_ring(kiq_ring); - if (r) - return r; - - if (amdgpu_ring_alloc(ring, 7 + 7 + 5)) - return -ENOMEM; - gfx_v9_0_ring_emit_fence(ring, ring->fence_drv.gpu_addr, - ring->fence_drv.sync_seq, AMDGPU_FENCE_FLAG_EXEC); - gfx_v9_0_ring_emit_reg_wait(ring, - SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffff); - gfx_v9_0_ring_emit_wreg(ring, - SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0); - - return amdgpu_ring_test_ring(ring); -} - static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, - unsigned int vmid) + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq[0]; @@ -7206,12 +7183,11 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int i, r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + spin_lock_irqsave(&kiq->ring_lock, flags); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { @@ -7261,13 +7237,13 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring, } kiq->pmf->kiq_map_queues(kiq_ring, ring); amdgpu_ring_commit(kiq_ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) { DRM_ERROR("fail to remap queue\n"); return r; } - return amdgpu_ring_test_ring(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static void gfx_v9_ip_print(struct amdgpu_ip_block *ip_block, struct drm_printer *p) @@ -7477,7 +7453,6 @@ static const struct 
amdgpu_ring_funcs gfx_v9_0_ring_funcs_gfx = { .emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait, .soft_recovery = gfx_v9_0_ring_soft_recovery, .emit_mem_sync = gfx_v9_0_emit_mem_sync, - .reset = gfx_v9_0_reset_kgq, .emit_cleaner_shader = gfx_v9_0_ring_emit_cleaner_shader, .begin_use = amdgpu_gfx_enforce_isolation_ring_begin_use, .end_use = amdgpu_gfx_enforce_isolation_ring_end_use, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c233edf60569..51babf5c78c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -1148,13 +1148,15 @@ static int gfx_v9_4_3_sw_init(struct amdgpu_ip_block *ip_block) switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { case IP_VERSION(9, 4, 3): case IP_VERSION(9, 4, 4): - if (adev->gfx.mec_fw_version >= 155) { + if ((adev->gfx.mec_fw_version >= 155) && + !amdgpu_sriov_vf(adev)) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; } break; case IP_VERSION(9, 5, 0): - if (adev->gfx.mec_fw_version >= 21) { + if ((adev->gfx.mec_fw_version >= 21) && + !amdgpu_sriov_vf(adev)) { adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; adev->gfx.compute_supported_reset |= AMDGPU_RESET_TYPE_PER_PIPE; } @@ -1349,7 +1351,9 @@ static void gfx_v9_4_3_constants_init(struct amdgpu_device *adev) switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { /* ToDo: GC 9.4.4 */ case IP_VERSION(9, 4, 3): - if (adev->gfx.mec_fw_version >= 184) + if (adev->gfx.mec_fw_version >= 184 && + (amdgpu_sriov_reg_access_sq_config(adev) || + !amdgpu_sriov_vf(adev))) adev->gmc.xnack_flags |= AMDGPU_GMC_XNACK_FLAG_CHAIN; break; case IP_VERSION(9, 5, 0): @@ -3552,7 +3556,8 @@ static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring) } static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, - unsigned int vmid) + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id]; @@ -3560,12 +3565,11 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, unsigned long flags; int r; - if (amdgpu_sriov_vf(adev)) - return -EINVAL; - if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues) return -EINVAL; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + spin_lock_irqsave(&kiq->ring_lock, flags); if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) { @@ -3591,7 +3595,9 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring, dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n"); pipe_reset: - if(r) { + if (r) { + if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_PIPE)) + return -EOPNOTSUPP; r = gfx_v9_4_3_reset_hw_pipe(ring); dev_info(adev->dev, "ring: %s pipe reset :%s\n", ring->name, r ? 
"failed" : "successfully"); @@ -3612,14 +3618,14 @@ pipe_reset: } kiq->pmf->kiq_map_queues(kiq_ring, ring); amdgpu_ring_commit(kiq_ring); - spin_unlock_irqrestore(&kiq->ring_lock, flags); - r = amdgpu_ring_test_ring(kiq_ring); + spin_unlock_irqrestore(&kiq->ring_lock, flags); if (r) { dev_err(adev->dev, "fail to remap queue\n"); return r; } - return amdgpu_ring_test_ring(ring); + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } enum amdgpu_gfx_cp_ras_mem_id { diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c index cb25f7f0dfc1..6c03bf9f1ae8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_2.c @@ -74,6 +74,8 @@ static void gfxhub_v1_2_setup_vm_pt_regs(struct amdgpu_device *adev, static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev, uint32_t xcc_mask) { + uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ? + adev->gmc.vram_start : adev->gmc.fb_start; uint64_t pt_base; int i; @@ -91,10 +93,10 @@ static void gfxhub_v1_2_xcc_init_gart_aperture_regs(struct amdgpu_device *adev, if (adev->gmc.pdb0_bo) { WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->gmc.fb_start >> 12)); + (u32)(gart_start >> 12)); WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->gmc.fb_start >> 44)); + (u32)(gart_start >> 44)); WREG32_SOC15(GC, GET_INST(GC, i), regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, @@ -180,7 +182,7 @@ gfxhub_v1_2_xcc_init_system_aperture_regs(struct amdgpu_device *adev, /* In the case squeezing vram into GART aperture, we don't use * FB aperture and AGP aperture. Disable them. */ - if (adev->gmc.pdb0_bo) { + if (adev->gmc.pdb0_bo && adev->gmc.xgmi.connected_to_cpu) { WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_TOP, 0); WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_FB_LOCATION_BASE, 0x00FFFFFF); WREG32_SOC15(GC, GET_INST(GC, i), regMC_VM_AGP_TOP, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 78f65aea03f8..c4d69cf4e06c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -78,8 +78,6 @@ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2 0x05ea #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_DCN2_BASE_IDX 2 -#define MAX_MEM_RANGES 8 - static const char * const gfxhub_client_ids[] = { "CB", "DB", @@ -411,11 +409,6 @@ static const uint32_t ecc_umc_mcumc_ctrl_mask_addrs[] = { (0x001d43e0 + 0x00001800), }; -static inline bool gmc_v9_0_is_multi_chiplet(struct amdgpu_device *adev) -{ - return !!adev->aid_mask; -} - static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned int type, @@ -649,7 +642,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, addr, entry->client_id, soc15_ih_clientid_name[entry->client_id]); - if (gmc_v9_0_is_multi_chiplet(adev)) + if (amdgpu_is_multi_aid(adev)) dev_err(adev->dev, " cookie node_id %d fault from die %s%d%s\n", node_id, node_id % 4 == 3 ? "RSV" : "AID", node_id / 4, node_id % 4 == 1 ? ".XCD0" : node_id % 4 == 2 ? 
".XCD1" : ""); @@ -798,7 +791,7 @@ static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev, uint32_t vmhub) { if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || - gmc_v9_0_is_multi_chiplet(adev)) + amdgpu_is_multi_aid(adev)) return false; return ((vmhub == AMDGPU_MMHUB0(0) || @@ -1128,8 +1121,8 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, } static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, + struct amdgpu_vm *vm, struct amdgpu_bo *bo, - struct amdgpu_bo_va_mapping *mapping, uint64_t *flags) { struct amdgpu_device *bo_adev = amdgpu_ttm_adev(bo->tbo.bdev); @@ -1139,7 +1132,6 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, AMDGPU_GEM_CREATE_EXT_COHERENT); bool ext_coherent = bo->flags & AMDGPU_GEM_CREATE_EXT_COHERENT; bool uncached = bo->flags & AMDGPU_GEM_CREATE_UNCACHED; - struct amdgpu_vm *vm = mapping->bo_va->base.vm; unsigned int mtype_local, mtype; uint32_t gc_ip_version = amdgpu_ip_version(adev, GC_HWIP, 0); bool snoop = false; @@ -1169,7 +1161,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, mtype = MTYPE_UC; else mtype = MTYPE_NC; - if (mapping->bo_va->is_xgmi) + if (amdgpu_xgmi_same_hive(adev, bo_adev)) snoop = true; } } else { @@ -1261,7 +1253,8 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, } if ((*flags & AMDGPU_PTE_VALID) && bo) - gmc_v9_0_get_coherence_flags(adev, bo, mapping, flags); + gmc_v9_0_get_coherence_flags(adev, mapping->bo_va->base.vm, bo, + flags); } static void gmc_v9_0_override_vm_pte_flags(struct amdgpu_device *adev, @@ -1382,46 +1375,6 @@ static unsigned int gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) return size; } -static enum amdgpu_memory_partition -gmc_v9_0_get_memory_partition(struct amdgpu_device *adev, u32 *supp_modes) -{ - enum amdgpu_memory_partition mode = UNKNOWN_MEMORY_PARTITION_MODE; - - if (adev->nbio.funcs->get_memory_partition_mode) - mode = adev->nbio.funcs->get_memory_partition_mode(adev, - supp_modes); - - return mode; -} - -static enum amdgpu_memory_partition -gmc_v9_0_query_vf_memory_partition(struct amdgpu_device *adev) -{ - switch (adev->gmc.num_mem_partitions) { - case 0: - return UNKNOWN_MEMORY_PARTITION_MODE; - case 1: - return AMDGPU_NPS1_PARTITION_MODE; - case 2: - return AMDGPU_NPS2_PARTITION_MODE; - case 4: - return AMDGPU_NPS4_PARTITION_MODE; - default: - return AMDGPU_NPS1_PARTITION_MODE; - } - - return AMDGPU_NPS1_PARTITION_MODE; -} - -static enum amdgpu_memory_partition -gmc_v9_0_query_memory_partition(struct amdgpu_device *adev) -{ - if (amdgpu_sriov_vf(adev)) - return gmc_v9_0_query_vf_memory_partition(adev); - - return gmc_v9_0_get_memory_partition(adev, NULL); -} - static bool gmc_v9_0_need_reset_on_init(struct amdgpu_device *adev) { if (adev->nbio.funcs && adev->nbio.funcs->is_nps_switch_requested && @@ -1443,7 +1396,7 @@ static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .get_vm_pte = gmc_v9_0_get_vm_pte, .override_vm_pte_flags = gmc_v9_0_override_vm_pte_flags, .get_vbios_fb_size = gmc_v9_0_get_vbios_fb_size, - .query_mem_partition_mode = &gmc_v9_0_query_memory_partition, + .query_mem_partition_mode = &amdgpu_gmc_query_memory_partition, .request_mem_partition_mode = &amdgpu_gmc_request_memory_partition, .need_reset_on_init = &gmc_v9_0_need_reset_on_init, }; @@ -1550,7 +1503,7 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) { - if (gmc_v9_0_is_multi_chiplet(adev)) + if 
(amdgpu_is_multi_aid(adev)) adev->gfxhub.funcs = &gfxhub_v1_2_funcs; else adev->gfxhub.funcs = &gfxhub_v1_0_funcs; @@ -1596,7 +1549,7 @@ static void gmc_v9_0_init_nps_details(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev) || (adev->flags & AMD_IS_APU)) return; - mode = gmc_v9_0_get_memory_partition(adev, &supp_modes); + mode = amdgpu_gmc_get_memory_partition(adev, &supp_modes); /* Mode detected by hardware and supported modes available */ if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && supp_modes) { @@ -1632,7 +1585,7 @@ static int gmc_v9_0_early_init(struct amdgpu_ip_block *ip_block) */ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 0) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || - gmc_v9_0_is_multi_chiplet(adev)) + amdgpu_is_multi_aid(adev)) adev->gmc.xgmi.supported = true; if (amdgpu_ip_version(adev, XGMI_HWIP, 0) == IP_VERSION(6, 1, 0)) { @@ -1719,7 +1672,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, /* add the xgmi offset of the physical node */ base += adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size; - if (adev->gmc.xgmi.connected_to_cpu) { + if (amdgpu_gmc_is_pdb0_enabled(adev)) { amdgpu_gmc_sysvm_location(adev, mc); } else { amdgpu_gmc_vram_location(adev, mc, base); @@ -1834,7 +1787,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) return 0; } - if (adev->gmc.xgmi.connected_to_cpu) { + if (amdgpu_gmc_is_pdb0_enabled(adev)) { adev->gmc.vmid0_page_table_depth = 1; adev->gmc.vmid0_page_table_block_size = 12; } else { @@ -1860,7 +1813,7 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev) if (r) return r; - if (adev->gmc.xgmi.connected_to_cpu) + if (amdgpu_gmc_is_pdb0_enabled(adev)) r = amdgpu_gmc_pdb0_alloc(adev); } @@ -1882,188 +1835,6 @@ static void gmc_v9_0_save_registers(struct amdgpu_device *adev) adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0); } -static bool gmc_v9_0_validate_partition_info(struct amdgpu_device *adev) -{ - enum amdgpu_memory_partition mode; - u32 supp_modes; - bool valid; - - mode = gmc_v9_0_get_memory_partition(adev, &supp_modes); - - /* Mode detected by hardware not present in supported modes */ - if ((mode != UNKNOWN_MEMORY_PARTITION_MODE) && - !(BIT(mode - 1) & supp_modes)) - return false; - - switch (mode) { - case UNKNOWN_MEMORY_PARTITION_MODE: - case AMDGPU_NPS1_PARTITION_MODE: - valid = (adev->gmc.num_mem_partitions == 1); - break; - case AMDGPU_NPS2_PARTITION_MODE: - valid = (adev->gmc.num_mem_partitions == 2); - break; - case AMDGPU_NPS4_PARTITION_MODE: - valid = (adev->gmc.num_mem_partitions == 3 || - adev->gmc.num_mem_partitions == 4); - break; - default: - valid = false; - } - - return valid; -} - -static bool gmc_v9_0_is_node_present(int *node_ids, int num_ids, int nid) -{ - int i; - - /* Check if node with id 'nid' is present in 'node_ids' array */ - for (i = 0; i < num_ids; ++i) - if (node_ids[i] == nid) - return true; - - return false; -} - -static void -gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev, - struct amdgpu_mem_partition_info *mem_ranges) -{ - struct amdgpu_numa_info numa_info; - int node_ids[MAX_MEM_RANGES]; - int num_ranges = 0, ret; - int num_xcc, xcc_id; - uint32_t xcc_mask; - - num_xcc = NUM_XCC(adev->gfx.xcc_mask); - xcc_mask = (1U << num_xcc) - 1; - - for_each_inst(xcc_id, xcc_mask) { - ret = amdgpu_acpi_get_mem_info(adev, xcc_id, &numa_info); - if (ret) - continue; - - if (numa_info.nid == NUMA_NO_NODE) { - mem_ranges[0].size = numa_info.size; - mem_ranges[0].numa.node = 
numa_info.nid; - num_ranges = 1; - break; - } - - if (gmc_v9_0_is_node_present(node_ids, num_ranges, - numa_info.nid)) - continue; - - node_ids[num_ranges] = numa_info.nid; - mem_ranges[num_ranges].numa.node = numa_info.nid; - mem_ranges[num_ranges].size = numa_info.size; - ++num_ranges; - } - - adev->gmc.num_mem_partitions = num_ranges; -} - -static void -gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, - struct amdgpu_mem_partition_info *mem_ranges) -{ - enum amdgpu_memory_partition mode; - u32 start_addr = 0, size; - int i, r, l; - - mode = gmc_v9_0_query_memory_partition(adev); - - switch (mode) { - case UNKNOWN_MEMORY_PARTITION_MODE: - adev->gmc.num_mem_partitions = 0; - break; - case AMDGPU_NPS1_PARTITION_MODE: - adev->gmc.num_mem_partitions = 1; - break; - case AMDGPU_NPS2_PARTITION_MODE: - adev->gmc.num_mem_partitions = 2; - break; - case AMDGPU_NPS4_PARTITION_MODE: - if (adev->flags & AMD_IS_APU) - adev->gmc.num_mem_partitions = 3; - else - adev->gmc.num_mem_partitions = 4; - break; - default: - adev->gmc.num_mem_partitions = 1; - break; - } - - /* Use NPS range info, if populated */ - r = amdgpu_gmc_get_nps_memranges(adev, mem_ranges, - &adev->gmc.num_mem_partitions); - if (!r) { - l = 0; - for (i = 1; i < adev->gmc.num_mem_partitions; ++i) { - if (mem_ranges[i].range.lpfn > - mem_ranges[i - 1].range.lpfn) - l = i; - } - - } else { - if (!adev->gmc.num_mem_partitions) { - dev_err(adev->dev, - "Not able to detect NPS mode, fall back to NPS1"); - adev->gmc.num_mem_partitions = 1; - } - /* Fallback to sw based calculation */ - size = (adev->gmc.real_vram_size + SZ_16M) >> AMDGPU_GPU_PAGE_SHIFT; - size /= adev->gmc.num_mem_partitions; - - for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { - mem_ranges[i].range.fpfn = start_addr; - mem_ranges[i].size = - ((u64)size << AMDGPU_GPU_PAGE_SHIFT); - mem_ranges[i].range.lpfn = start_addr + size - 1; - start_addr += size; - } - - l = adev->gmc.num_mem_partitions - 1; - } - - /* Adjust the last one */ - mem_ranges[l].range.lpfn = - (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) - 1; - mem_ranges[l].size = - adev->gmc.real_vram_size - - ((u64)mem_ranges[l].range.fpfn << AMDGPU_GPU_PAGE_SHIFT); -} - -static int gmc_v9_0_init_mem_ranges(struct amdgpu_device *adev) -{ - bool valid; - - adev->gmc.mem_partitions = kcalloc(MAX_MEM_RANGES, - sizeof(struct amdgpu_mem_partition_info), - GFP_KERNEL); - if (!adev->gmc.mem_partitions) - return -ENOMEM; - - /* TODO : Get the range from PSP/Discovery for dGPU */ - if (adev->gmc.is_app_apu) - gmc_v9_0_init_acpi_mem_ranges(adev, adev->gmc.mem_partitions); - else - gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); - - if (amdgpu_sriov_vf(adev)) - valid = true; - else - valid = gmc_v9_0_validate_partition_info(adev); - if (!valid) { - /* TODO: handle invalid case */ - dev_WARN(adev->dev, - "Mem ranges not matching with hardware config"); - } - - return 0; -} - static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) { adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; @@ -2085,7 +1856,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) spin_lock_init(&adev->gmc.invalidate_lock); - if (gmc_v9_0_is_multi_chiplet(adev)) { + if (amdgpu_is_multi_aid(adev)) { gmc_v9_4_3_init_vram_info(adev); } else if (!adev->bios) { if (adev->flags & AMD_IS_APU) { @@ -2235,8 +2006,8 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) amdgpu_gmc_get_vbios_allocations(adev); - if (gmc_v9_0_is_multi_chiplet(adev)) { - r = gmc_v9_0_init_mem_ranges(adev); + if 
(amdgpu_is_multi_aid(adev)) { + r = amdgpu_gmc_init_mem_ranges(adev); if (r) return r; } @@ -2264,7 +2035,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) adev->vm_manager.first_kfd_vmid = (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 1) || amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 2) || - gmc_v9_0_is_multi_chiplet(adev)) ? + amdgpu_is_multi_aid(adev)) ? 3 : 8; @@ -2276,7 +2047,7 @@ static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - if (gmc_v9_0_is_multi_chiplet(adev)) + if (amdgpu_is_multi_aid(adev)) amdgpu_gmc_sysfs_init(adev); return 0; @@ -2286,7 +2057,7 @@ static int gmc_v9_0_sw_fini(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; - if (gmc_v9_0_is_multi_chiplet(adev)) + if (amdgpu_is_multi_aid(adev)) amdgpu_gmc_sysfs_fini(adev); amdgpu_gmc_ras_fini(adev); @@ -2360,7 +2131,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) { int r; - if (adev->gmc.xgmi.connected_to_cpu) + if (amdgpu_gmc_is_pdb0_enabled(adev)) amdgpu_gmc_init_pdb0(adev); if (adev->gart.bo == NULL) { @@ -2518,7 +2289,7 @@ static int gmc_v9_0_resume(struct amdgpu_ip_block *ip_block) * information again. */ if (adev->gmc.reset_flags & AMDGPU_GMC_INIT_RESET_NPS) { - gmc_v9_0_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); + amdgpu_gmc_init_sw_mem_ranges(adev, adev->gmc.mem_partitions); adev->gmc.reset_flags &= ~AMDGPU_GMC_INIT_RESET_NPS; } diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c index cfa91d709d49..cc626036ed9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c @@ -32,6 +32,7 @@ #include "gc/gc_11_0_0_sh_mask.h" MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu_kicker.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin"); @@ -51,8 +52,12 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_imu.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_imu_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_imu.bin", ucode_prefix); if (err) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c index df898dbb746e..58cd87db8061 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v12_0.c @@ -34,12 +34,13 @@ MODULE_FIRMWARE("amdgpu/gc_12_0_0_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu.bin"); +MODULE_FIRMWARE("amdgpu/gc_12_0_1_imu_kicker.bin"); #define TRANSFER_RAM_MASK 0x001c0000 static int imu_v12_0_init_microcode(struct amdgpu_device *adev) { - char ucode_prefix[15]; + char ucode_prefix[30]; int err; const struct imu_firmware_header_v1_0 *imu_hdr; struct amdgpu_firmware_info *info = NULL; @@ -47,8 +48,12 @@ static int imu_v12_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s_imu.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = 
amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_imu_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_imu.bin", ucode_prefix); if (err) goto out; @@ -362,7 +367,7 @@ static void program_imu_rlc_ram(struct amdgpu_device *adev, static void imu_v12_0_program_rlc_ram(struct amdgpu_device *adev) { u32 reg_data, size = 0; - const u32 *data; + const u32 *data = NULL; int r = -EINVAL; WREG32_SOC15(GC, 0, regGFX_IMU_RLC_RAM_INDEX, 0x2); diff --git a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c index 574880d67009..a887df520414 100644 --- a/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c +++ b/drivers/gpu/drm/amd/amdgpu/isp_v4_1_1.c @@ -29,6 +29,12 @@ #include "amdgpu.h" #include "isp_v4_1_1.h" +#define ISP_PERFORMANCE_STATE_LOW 0 +#define ISP_PERFORMANCE_STATE_HIGH 1 + +#define ISP_HIGH_PERFORMANC_XCLK 788 +#define ISP_HIGH_PERFORMANC_ICLK 788 + static const unsigned int isp_4_1_1_int_srcid[MAX_ISP411_INT_SRC] = { ISP_4_1__SRCID__ISP_RINGBUFFER_WPT9, ISP_4_1__SRCID__ISP_RINGBUFFER_WPT10, @@ -56,17 +62,137 @@ static struct gpiod_lookup_table isp_sensor_gpio_table = { }, }; +static int isp_poweroff(struct generic_pm_domain *genpd) +{ + struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + + return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, true, 0); +} + +static int isp_poweron(struct generic_pm_domain *genpd) +{ + struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + + return amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_ISP, false, 0); +} + +static int isp_set_performance_state(struct generic_pm_domain *genpd, + unsigned int state) +{ + struct amdgpu_isp *isp = container_of(genpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + u32 iclk, xclk; + int ret; + + switch (state) { + case ISP_PERFORMANCE_STATE_HIGH: + xclk = ISP_HIGH_PERFORMANC_XCLK; + iclk = ISP_HIGH_PERFORMANC_ICLK; + break; + case ISP_PERFORMANCE_STATE_LOW: + /* isp runs at default lowest clock-rate on power-on, do nothing */ + return 0; + default: + return -EINVAL; + } + + ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPXCLK, xclk, 0); + if (ret) { + drm_err(&adev->ddev, "failed to set xclk %u to %u: %d\n", + xclk, state, ret); + return ret; + } + + ret = amdgpu_dpm_set_soft_freq_range(adev, PP_ISPICLK, iclk, 0); + if (ret) { + drm_err(&adev->ddev, "failed to set iclk %u to %u: %d\n", + iclk, state, ret); + return ret; + } + + return 0; +} + +static int isp_genpd_add_device(struct device *dev, void *data) +{ + struct generic_pm_domain *gpd = data; + struct platform_device *pdev = container_of(dev, struct platform_device, dev); + struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + int ret; + + if (!pdev) + return -EINVAL; + + if (!dev->type->name) { + drm_dbg(&adev->ddev, "Invalid device type to add\n"); + goto exit; + } + + if (strcmp(dev->type->name, "mfd_device")) { + drm_dbg(&adev->ddev, "Invalid isp mfd device %s to add\n", pdev->mfd_cell->name); + goto exit; + } + + ret = pm_genpd_add_device(gpd, dev); + if (ret) { + drm_err(&adev->ddev, "Failed to add dev %s to genpd %d\n", + pdev->mfd_cell->name, ret); + return -ENODEV; + } + +exit: + /* Continue to add */ + return 0; +} + +static int isp_genpd_remove_device(struct device *dev, void *data) +{ + struct 
generic_pm_domain *gpd = data; + struct platform_device *pdev = container_of(dev, struct platform_device, dev); + struct amdgpu_isp *isp = container_of(gpd, struct amdgpu_isp, ispgpd); + struct amdgpu_device *adev = isp->adev; + int ret; + + if (!pdev) + return -EINVAL; + + if (!dev->type->name) { + drm_dbg(&adev->ddev, "Invalid device type to remove\n"); + goto exit; + } + + if (strcmp(dev->type->name, "mfd_device")) { + drm_dbg(&adev->ddev, "Invalid isp mfd device %s to remove\n", + pdev->mfd_cell->name); + goto exit; + } + + ret = pm_genpd_remove_device(dev); + if (ret) { + drm_err(&adev->ddev, "Failed to remove dev from genpd %d\n", ret); + return -ENODEV; + } + +exit: + /* Continue to remove */ + return 0; +} + static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) { + const struct software_node *amd_camera_node, *isp4_node; struct amdgpu_device *adev = isp->adev; + struct acpi_device *acpi_dev; int idx, int_idx, num_res, r; - u8 isp_dev_hid[ACPI_ID_LEN]; u64 isp_base; if (adev->rmmio_size == 0 || adev->rmmio_size < 0x5289) return -EINVAL; - r = amdgpu_acpi_get_isp4_dev_hid(&isp_dev_hid); + r = amdgpu_acpi_get_isp4_dev(&acpi_dev); if (r) { drm_dbg(&adev->ddev, "Invalid isp platform detected (%d)", r); /* allow GPU init to progress */ @@ -74,18 +200,28 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) } /* add GPIO resources required for OMNI5C10 sensor */ - if (!strcmp("OMNI5C10", isp_dev_hid)) { + if (!strcmp("OMNI5C10", acpi_device_hid(acpi_dev))) { gpiod_add_lookup_table(&isp_gpio_table); gpiod_add_lookup_table(&isp_sensor_gpio_table); } isp_base = adev->rmmio_base; + isp->ispgpd.name = "ISP_v_4_1_1"; + isp->ispgpd.power_off = isp_poweroff; + isp->ispgpd.power_on = isp_poweron; + isp->ispgpd.set_performance_state = isp_set_performance_state; + + r = pm_genpd_init(&isp->ispgpd, NULL, true); + if (r) { + drm_err(&adev->ddev, "failed to initialize genpd (%d)\n", r); + return -EINVAL; + } + isp->isp_cell = kcalloc(3, sizeof(struct mfd_cell), GFP_KERNEL); if (!isp->isp_cell) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp mfd cell alloc failed\n", __func__); + drm_err(&adev->ddev, "isp mfd cell alloc failed (%d)\n", r); goto failure; } @@ -95,19 +231,20 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) GFP_KERNEL); if (!isp->isp_res) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp mfd res alloc failed\n", __func__); + drm_err(&adev->ddev, "isp mfd resource alloc failed (%d)\n", r); goto failure; } isp->isp_pdata = kzalloc(sizeof(*isp->isp_pdata), GFP_KERNEL); if (!isp->isp_pdata) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp platform data alloc failed\n", __func__); + drm_err(&adev->ddev, "isp platform data alloc failed (%d)\n", r); goto failure; } + amd_camera_node = (const struct software_node *)acpi_dev->driver_data; + isp4_node = software_node_find_by_name(amd_camera_node, "isp4"); + /* initialize isp platform data */ isp->isp_pdata->adev = (void *)adev; isp->isp_pdata->asic_type = adev->asic_type; @@ -136,14 +273,14 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) isp->isp_cell[0].num_resources = num_res; isp->isp_cell[0].resources = &isp->isp_res[0]; isp->isp_cell[0].platform_data = isp->isp_pdata; + isp->isp_cell[0].swnode = isp4_node; isp->isp_cell[0].pdata_size = sizeof(struct isp_platform_data); /* initialize isp i2c platform data */ isp->isp_i2c_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL); if (!isp->isp_i2c_res) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp mfd res alloc failed\n", __func__); + drm_err(&adev->ddev, "isp mfd res alloc 
failed (%d)\n", r); goto failure; } @@ -162,8 +299,7 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) isp->isp_gpio_res = kcalloc(1, sizeof(struct resource), GFP_KERNEL); if (!isp->isp_gpio_res) { r = -ENOMEM; - drm_err(&adev->ddev, - "%s: isp gpio res alloc failed\n", __func__); + drm_err(&adev->ddev, "isp gpio resource alloc failed (%d)\n", r); goto failure; } @@ -179,10 +315,23 @@ static int isp_v4_1_1_hw_init(struct amdgpu_isp *isp) isp->isp_cell[2].platform_data = isp->isp_pdata; isp->isp_cell[2].pdata_size = sizeof(struct isp_platform_data); - r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 3); + /* add only amd_isp_capture and amd_isp_i2c_designware to genpd */ + r = mfd_add_hotplug_devices(isp->parent, isp->isp_cell, 2); if (r) { - drm_err(&adev->ddev, - "%s: add mfd hotplug device failed\n", __func__); + drm_err(&adev->ddev, "add mfd hotplug device failed (%d)\n", r); + goto failure; + } + + r = device_for_each_child(isp->parent, &isp->ispgpd, + isp_genpd_add_device); + if (r) { + drm_err(&adev->ddev, "failed to add devices to genpd (%d)\n", r); + goto failure; + } + + r = mfd_add_hotplug_devices(isp->parent, &isp->isp_cell[2], 1); + if (r) { + drm_err(&adev->ddev, "add pinctl hotplug device failed (%d)\n", r); goto failure; } @@ -201,6 +350,9 @@ failure: static int isp_v4_1_1_hw_fini(struct amdgpu_isp *isp) { + device_for_each_child(isp->parent, NULL, + isp_genpd_remove_device); + mfd_remove_devices(isp->parent); kfree(isp->isp_res); diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index 4cde8a8bcc83..58239c405fda 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -118,7 +118,10 @@ static int jpeg_v2_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return r; @@ -764,11 +767,20 @@ static int jpeg_v2_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v2_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { - jpeg_v2_0_stop(ring->adev); - jpeg_v2_0_start(ring->adev); - return amdgpu_ring_test_helper(ring); + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v2_0_stop(ring->adev); + if (r) + return r; + r = jpeg_v2_0_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v2_0_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c index 8b39e114f3be..3e2c389242db 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c @@ -167,7 +167,10 @@ static int jpeg_v2_5_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return r; @@ -643,11 +646,14 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev, return 0; } -static int 
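
The reworked jpeg_v2_0_ring_reset() above is the template every ring ->reset callback in this series converges on: bracket a stop/start of the block with amdgpu_ring_reset_helper_begin()/_end() instead of calling amdgpu_ring_test_helper() directly. A sketch of the generic shape; what the helpers do internally (capturing unsignaled fences, re-running the ring test) is an assumption, since their bodies are not part of these hunks:

static int my_ring_reset(struct amdgpu_ring *ring, unsigned int vmid,
			 struct amdgpu_fence *timedout_fence)
{
	int r;

	/* assumed: records state/fences tied to the hung submission */
	amdgpu_ring_reset_helper_begin(ring, timedout_fence);

	r = my_block_stop(ring->adev);	/* hypothetical stop/start pair */
	if (r)
		return r;
	r = my_block_start(ring->adev);
	if (r)
		return r;

	/* assumed: re-tests the ring and settles the timed-out fence */
	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
}
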
jpeg_v2_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v2_5_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { + amdgpu_ring_reset_helper_begin(ring, timedout_fence); jpeg_v2_5_stop_inst(ring->adev, ring->me); jpeg_v2_5_start_inst(ring->adev, ring->me); - return amdgpu_ring_test_helper(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v2_5_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index 2f8510c2986b..a44eb2667664 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -132,7 +132,10 @@ static int jpeg_v3_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return r; @@ -555,11 +558,20 @@ static int jpeg_v3_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v3_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { - jpeg_v3_0_stop(ring->adev); - jpeg_v3_0_start(ring->adev); - return amdgpu_ring_test_helper(ring); + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v3_0_stop(ring->adev); + if (r) + return r; + r = jpeg_v3_0_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v3_0_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c index f17ec5414fd6..da3ee69f1a3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c @@ -143,7 +143,10 @@ static int jpeg_v4_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return r; @@ -720,14 +723,20 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev, return 0; } -static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v4_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { - if (amdgpu_sriov_vf(ring->adev)) - return -EINVAL; + int r; - jpeg_v4_0_stop(ring->adev); - jpeg_v4_0_start(ring->adev); - return amdgpu_ring_test_helper(ring); + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v4_0_stop(ring->adev); + if (r) + return r; + r = jpeg_v4_0_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v4_0_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c index 79e342d5ab28..b86288a69e7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c @@ -216,12 +216,11 @@ static int jpeg_v4_0_3_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - if (!amdgpu_sriov_vf(adev)) { - 
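
Each jpeg_v*_sw_init in the series now derives the reset capabilities the same way instead of hardcoding AMDGPU_RESET_TYPE_PER_QUEUE: query the soft/full reset mask for the decode ring, then advertise per-queue reset only on bare metal, since a VF cannot drive it. The recurring fragment (the ring-pointer shape varies slightly per IP version):

adev->jpeg.supported_reset =
	amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec);
if (!amdgpu_sriov_vf(adev))	/* per-queue reset is host-only */
	adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE;
r = amdgpu_jpeg_sysfs_reset_mask_init(adev);
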
adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; - r = amdgpu_jpeg_sysfs_reset_mask_init(adev); - if (r) - return r; - } + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(adev->jpeg.inst[0].ring_dec); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); return 0; } @@ -242,8 +241,7 @@ static int jpeg_v4_0_3_sw_fini(struct amdgpu_ip_block *ip_block) if (r) return r; - if (!amdgpu_sriov_vf(adev)) - amdgpu_jpeg_sysfs_reset_mask_fini(adev); + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); @@ -1143,14 +1141,17 @@ static void jpeg_v4_0_3_core_stall_reset(struct amdgpu_ring *ring) WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00); } -static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v4_0_3_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { if (amdgpu_sriov_vf(ring->adev)) return -EOPNOTSUPP; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); jpeg_v4_0_3_core_stall_reset(ring); jpeg_v4_0_3_start_jrbc(ring); - return amdgpu_ring_test_helper(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v4_0_3_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c index 974030a5c03c..481d1a2dbe5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c @@ -174,9 +174,10 @@ static int jpeg_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - /* TODO: Add queue reset mask when FW fully supports it */ adev->jpeg.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); if (r) return r; @@ -767,6 +768,22 @@ static int jpeg_v4_0_5_process_interrupt(struct amdgpu_device *adev, return 0; } +static int jpeg_v4_0_5_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v4_0_5_stop(ring->adev); + if (r) + return r; + r = jpeg_v4_0_5_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + static const struct amd_ip_funcs jpeg_v4_0_5_ip_funcs = { .name = "jpeg_v4_0_5", .early_init = jpeg_v4_0_5_early_init, @@ -812,6 +829,7 @@ static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = { .emit_wreg = jpeg_v2_0_dec_ring_emit_wreg, .emit_reg_wait = jpeg_v2_0_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = jpeg_v4_0_5_ring_reset, }; static void jpeg_v4_0_5_set_dec_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c index 31d213ccbe0a..e0a71909252b 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c @@ -120,13 +120,13 @@ static int jpeg_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - /* TODO: Add queue reset mask when FW fully supports it */ adev->jpeg.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_jpeg_sysfs_reset_mask_init(adev); - if (r) 
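
For jpeg_v4_0_5 (above) and jpeg_v5_0_0 (below) the reset callback is new rather than reworked, so each hunk also has to wire it into the ring function table; without that initializer, the per-queue bit set in sw_init would advertise a reset the ring cannot actually service. The hookup is the single extra member:

static const struct amdgpu_ring_funcs jpeg_v4_0_5_dec_ring_vm_funcs = {
	/* ...unchanged emit/test callbacks elided... */
	.reset = jpeg_v4_0_5_ring_reset,
};
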
- return r; - return 0; + + return r; } /** @@ -644,6 +644,22 @@ static int jpeg_v5_0_0_process_interrupt(struct amdgpu_device *adev, return 0; } +static int jpeg_v5_0_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) +{ + int r; + + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = jpeg_v5_0_0_stop(ring->adev); + if (r) + return r; + r = jpeg_v5_0_0_start(ring->adev); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); +} + static const struct amd_ip_funcs jpeg_v5_0_0_ip_funcs = { .name = "jpeg_v5_0_0", .early_init = jpeg_v5_0_0_early_init, @@ -689,6 +705,7 @@ static const struct amdgpu_ring_funcs jpeg_v5_0_0_dec_ring_vm_funcs = { .emit_wreg = jpeg_v4_0_3_dec_ring_emit_wreg, .emit_reg_wait = jpeg_v4_0_3_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = jpeg_v5_0_0_ring_reset, }; static void jpeg_v5_0_0_set_dec_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c index 3b6f65a25646..54523dc1f702 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_1.c @@ -200,14 +200,13 @@ static int jpeg_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) if (r) return r; - if (!amdgpu_sriov_vf(adev)) { - adev->jpeg.supported_reset = AMDGPU_RESET_TYPE_PER_QUEUE; - r = amdgpu_jpeg_sysfs_reset_mask_init(adev); - if (r) - return r; - } + adev->jpeg.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->jpeg.inst[0].ring_dec[0]); + if (!amdgpu_sriov_vf(adev)) + adev->jpeg.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + r = amdgpu_jpeg_sysfs_reset_mask_init(adev); - return 0; + return r; } /** @@ -226,8 +225,7 @@ static int jpeg_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block) if (r) return r; - if (!amdgpu_sriov_vf(adev)) - amdgpu_jpeg_sysfs_reset_mask_fini(adev); + amdgpu_jpeg_sysfs_reset_mask_fini(adev); r = amdgpu_jpeg_sw_fini(adev); @@ -834,14 +832,14 @@ static void jpeg_v5_0_1_core_stall_reset(struct amdgpu_ring *ring) WREG32_SOC15(JPEG, jpeg_inst, regJPEG_CORE_RST_CTRL, 0x00); } -static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int jpeg_v5_0_1_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { - if (amdgpu_sriov_vf(ring->adev)) - return -EOPNOTSUPP; - + amdgpu_ring_reset_helper_begin(ring, timedout_fence); jpeg_v5_0_1_core_stall_reset(ring); jpeg_v5_0_1_init_jrbc(ring); - return amdgpu_ring_test_helper(ring); + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amd_ip_funcs jpeg_v5_0_1_ip_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index c9eba537de09..28eb846280dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -1630,10 +1630,12 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block) if (r) goto failure; - r = mes_v11_0_set_hw_resources_1(&adev->mes); - if (r) { - DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r); - goto failure; + if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x50) { + r = mes_v11_0_set_hw_resources_1(&adev->mes); + if (r) { + DRM_ERROR("failed mes_v11_0_set_hw_resources_1, r=%d\n", r); + goto failure; + } } r = mes_v11_0_query_sched_status(&adev->mes); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c index 
b4f17332d466..6b222630f3fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c @@ -1742,7 +1742,8 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block) if (r) goto failure; - mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE); + if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x4b) + mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_SCHED_PIPE); mes_v12_0_init_aggregated_doorbell(&adev->mes); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c index 76167fadb292..cc688ae79e84 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_8.c @@ -76,6 +76,8 @@ static void mmhub_v1_8_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmi static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) { + uint64_t gart_start = amdgpu_virt_xgmi_migrate_enabled(adev) ? + adev->gmc.vram_start : adev->gmc.fb_start; uint64_t pt_base; u32 inst_mask; int i; @@ -95,10 +97,10 @@ static void mmhub_v1_8_init_gart_aperture_regs(struct amdgpu_device *adev) if (adev->gmc.pdb0_bo) { WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, - (u32)(adev->gmc.fb_start >> 12)); + (u32)(gart_start >> 12)); WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32, - (u32)(adev->gmc.fb_start >> 44)); + (u32)(gart_start >> 44)); WREG32_SOC15(MMHUB, i, regVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c index 134c4ec10887..910337dc28d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_0_1.c @@ -36,40 +36,47 @@ static const char *mmhub_client_ids_v3_0_1[][2] = { [0][0] = "VMC", + [1][0] = "ISPXT", + [2][0] = "ISPIXT", [4][0] = "DCEDMC", [5][0] = "DCEVGA", [6][0] = "MP0", [7][0] = "MP1", - [8][0] = "MPIO", - [16][0] = "HDP", - [17][0] = "LSDMA", - [18][0] = "JPEG", - [19][0] = "VCNU0", - [21][0] = "VSCH", - [22][0] = "VCNU1", - [23][0] = "VCN1", - [32+20][0] = "VCN0", - [2][1] = "DBGUNBIO", + [8][0] = "MPM", + [12][0] = "ISPTNR", + [14][0] = "ISPCRD0", + [15][0] = "ISPCRD1", + [16][0] = "ISPCRD2", + [22][0] = "HDP", + [23][0] = "LSDMA", + [24][0] = "JPEG", + [27][0] = "VSCH", + [28][0] = "VCNU", + [29][0] = "VCN", + [1][1] = "ISPXT", + [2][1] = "ISPIXT", [3][1] = "DCEDWB", [4][1] = "DCEDMC", [5][1] = "DCEVGA", [6][1] = "MP0", [7][1] = "MP1", - [8][1] = "MPIO", - [10][1] = "DBGU0", - [11][1] = "DBGU1", - [12][1] = "DBGU2", - [13][1] = "DBGU3", - [14][1] = "XDP", - [15][1] = "OSSSYS", - [16][1] = "HDP", - [17][1] = "LSDMA", - [18][1] = "JPEG", - [19][1] = "VCNU0", - [20][1] = "VCN0", - [21][1] = "VSCH", - [22][1] = "VCNU1", - [23][1] = "VCN1", + [8][1] = "MPM", + [10][1] = "ISPMWR0", + [11][1] = "ISPMWR1", + [12][1] = "ISPTNR", + [13][1] = "ISPSWR", + [14][1] = "ISPCWR0", + [15][1] = "ISPCWR1", + [16][1] = "ISPCWR2", + [17][1] = "ISPCWR3", + [18][1] = "XDP", + [21][1] = "OSSSYS", + [22][1] = "HDP", + [23][1] = "LSDMA", + [24][1] = "JPEG", + [27][1] = "VSCH", + [28][1] = "VCNU", + [29][1] = "VCN", }; static uint32_t mmhub_v3_0_1_get_invalidate_req(unsigned int vmid, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c index bc3d6c2fc87a..f6fc9778bc30 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v3_3.c @@ -40,30 +40,129 @@ static const char *mmhub_client_ids_v3_3[][2] = { [0][0] = "VMC", + [1][0] = "ISPXT", + [2][0] 
= "ISPIXT", [4][0] = "DCEDMC", [6][0] = "MP0", [7][0] = "MP1", [8][0] = "MPM", + [9][0] = "ISPPDPRD", + [10][0] = "ISPCSTATRD", + [11][0] = "ISPBYRPRD", + [12][0] = "ISPRGBPRD", + [13][0] = "ISPMCFPRD", + [14][0] = "ISPMCFPRD1", + [15][0] = "ISPYUVPRD", + [16][0] = "ISPMCSCRD", + [17][0] = "ISPGDCRD", + [18][0] = "ISPLMERD", + [22][0] = "ISPXT1", + [23][0] = "ISPIXT1", [24][0] = "HDP", [25][0] = "LSDMA", [26][0] = "JPEG", [27][0] = "VPE", + [28][0] = "VSCH", [29][0] = "VCNU", [30][0] = "VCN", + [1][1] = "ISPXT", + [2][1] = "ISPIXT", [3][1] = "DCEDWB", [4][1] = "DCEDMC", + [5][1] = "ISPCSISWR", [6][1] = "MP0", [7][1] = "MP1", [8][1] = "MPM", + [9][1] = "ISPPDPWR", + [10][1] = "ISPCSTATWR", + [11][1] = "ISPBYRPWR", + [12][1] = "ISPRGBPWR", + [13][1] = "ISPMCFPWR", + [14][1] = "ISPMWR0", + [15][1] = "ISPYUVPWR", + [16][1] = "ISPMCSCWR", + [17][1] = "ISPGDCWR", + [18][1] = "ISPLMEWR", + [20][1] = "ISPMWR2", [21][1] = "OSSSYS", + [22][1] = "ISPXT1", + [23][1] = "ISPIXT1", [24][1] = "HDP", [25][1] = "LSDMA", [26][1] = "JPEG", [27][1] = "VPE", + [28][1] = "VSCH", [29][1] = "VCNU", [30][1] = "VCN", }; +static const char *mmhub_client_ids_v3_3_1[][2] = { + [0][0] = "VMC", + [4][0] = "DCEDMC", + [6][0] = "MP0", + [7][0] = "MP1", + [8][0] = "MPM", + [24][0] = "HDP", + [25][0] = "LSDMA", + [26][0] = "JPEG0", + [27][0] = "VPE0", + [28][0] = "VSCH", + [29][0] = "VCNU0", + [30][0] = "VCN0", + [32+1][0] = "ISPXT", + [32+2][0] = "ISPIXT", + [32+9][0] = "ISPPDPRD", + [32+10][0] = "ISPCSTATRD", + [32+11][0] = "ISPBYRPRD", + [32+12][0] = "ISPRGBPRD", + [32+13][0] = "ISPMCFPRD", + [32+14][0] = "ISPMCFPRD1", + [32+15][0] = "ISPYUVPRD", + [32+16][0] = "ISPMCSCRD", + [32+17][0] = "ISPGDCRD", + [32+18][0] = "ISPLMERD", + [32+22][0] = "ISPXT1", + [32+23][0] = "ISPIXT1", + [32+26][0] = "JPEG1", + [32+27][0] = "VPE1", + [32+29][0] = "VCNU1", + [32+30][0] = "VCN1", + [3][1] = "DCEDWB", + [4][1] = "DCEDMC", + [6][1] = "MP0", + [7][1] = "MP1", + [8][1] = "MPM", + [21][1] = "OSSSYS", + [24][1] = "HDP", + [25][1] = "LSDMA", + [26][1] = "JPEG0", + [27][1] = "VPE0", + [28][1] = "VSCH", + [29][1] = "VCNU0", + [30][1] = "VCN0", + [32+1][1] = "ISPXT", + [32+2][1] = "ISPIXT", + [32+5][1] = "ISPCSISWR", + [32+9][1] = "ISPPDPWR", + [32+10][1] = "ISPCSTATWR", + [32+11][1] = "ISPBYRPWR", + [32+12][1] = "ISPRGBPWR", + [32+13][1] = "ISPMCFPWR", + [32+14][1] = "ISPMWR0", + [32+15][1] = "ISPYUVPWR", + [32+16][1] = "ISPMCSCWR", + [32+17][1] = "ISPGDCWR", + [32+18][1] = "ISPLMEWR", + [32+19][1] = "ISPMWR1", + [32+20][1] = "ISPMWR2", + [32+22][1] = "ISPXT1", + [32+23][1] = "ISPIXT1", + [32+26][1] = "JPEG1", + [32+27][1] = "VPE1", + [32+29][1] = "VCNU1", + [32+30][1] = "VCN1", +}; + static uint32_t mmhub_v3_3_get_invalidate_req(unsigned int vmid, uint32_t flush_type) { @@ -102,12 +201,16 @@ mmhub_v3_3_print_l2_protection_fault_status(struct amdgpu_device *adev, switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) { case IP_VERSION(3, 3, 0): - case IP_VERSION(3, 3, 1): case IP_VERSION(3, 3, 2): mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3) ? mmhub_client_ids_v3_3[cid][rw] : cid == 0x140 ? "UMSCH" : NULL; break; + case IP_VERSION(3, 3, 1): + mmhub_cid = cid < ARRAY_SIZE(mmhub_client_ids_v3_3_1) ? + mmhub_client_ids_v3_3_1[cid][rw] : + cid == 0x140 ? 
"UMSCH" : NULL; + break; default: mmhub_cid = NULL; break; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c index f2ab5001b492..951998454b25 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v4_1_0.c @@ -37,39 +37,31 @@ static const char *mmhub_client_ids_v4_1_0[][2] = { [0][0] = "VMC", [4][0] = "DCEDMC", - [5][0] = "DCEVGA", [6][0] = "MP0", [7][0] = "MP1", [8][0] = "MPIO", - [16][0] = "HDP", - [17][0] = "LSDMA", - [18][0] = "JPEG", - [19][0] = "VCNU0", - [21][0] = "VSCH", - [22][0] = "VCNU1", - [23][0] = "VCN1", - [32+20][0] = "VCN0", - [2][1] = "DBGUNBIO", + [16][0] = "LSDMA", + [17][0] = "JPEG", + [19][0] = "VCNU", + [22][0] = "VSCH", + [23][0] = "HDP", + [32+23][0] = "VCNRD", [3][1] = "DCEDWB", [4][1] = "DCEDMC", - [5][1] = "DCEVGA", [6][1] = "MP0", [7][1] = "MP1", [8][1] = "MPIO", [10][1] = "DBGU0", [11][1] = "DBGU1", - [12][1] = "DBGU2", - [13][1] = "DBGU3", + [12][1] = "DBGUNBIO", [14][1] = "XDP", [15][1] = "OSSSYS", - [16][1] = "HDP", - [17][1] = "LSDMA", - [18][1] = "JPEG", - [19][1] = "VCNU0", - [20][1] = "VCN0", - [21][1] = "VSCH", - [22][1] = "VCNU1", - [23][1] = "VCN1", + [16][1] = "LSDMA", + [17][1] = "JPEG", + [18][1] = "VCNWR", + [19][1] = "VCNU", + [22][1] = "VSCH", + [23][1] = "HDP", }; static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c index a376f072700d..1c22bc11c1f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_9.c @@ -31,9 +31,6 @@ #define NPS_MODE_MASK 0x000000FFL -/* Core 0 Port 0 counter */ -#define smnPCIEP_NAK_COUNTER 0x1A340218 - static void nbio_v7_9_remap_hdp_registers(struct amdgpu_device *adev) { WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL, @@ -467,22 +464,6 @@ static void nbio_v7_9_init_registers(struct amdgpu_device *adev) } } -static u64 nbio_v7_9_get_pcie_replay_count(struct amdgpu_device *adev) -{ - u32 val, nak_r, nak_g; - - if (adev->flags & AMD_IS_APU) - return 0; - - /* Get the number of NAKs received and generated */ - val = RREG32_PCIE(smnPCIEP_NAK_COUNTER); - nak_r = val & 0xFFFF; - nak_g = val >> 16; - - /* Add the total number of NAKs, i.e the number of replays */ - return (nak_r + nak_g); -} - #define MMIO_REG_HOLE_OFFSET 0x1A000 static void nbio_v7_9_set_reg_remap(struct amdgpu_device *adev) @@ -524,7 +505,6 @@ const struct amdgpu_nbio_funcs nbio_v7_9_funcs = { .get_memory_partition_mode = nbio_v7_9_get_memory_partition_mode, .is_nps_switch_requested = nbio_v7_9_is_nps_switch_requested, .init_registers = nbio_v7_9_init_registers, - .get_pcie_replay_count = nbio_v7_9_get_pcie_replay_count, .set_reg_remap = nbio_v7_9_set_reg_remap, }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index f4a91b126c73..73f87131a7e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -106,7 +106,9 @@ enum psp_gfx_cmd_id /*IDs of performance monitoring/profiling*/ GFX_CMD_ID_CONFIG_SQ_PERFMON = 0x00000046, /* Config CGTT_SQ_CLK_CTRL */ /* Dynamic memory partitioninig (NPS mode change)*/ - GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */ + GFX_CMD_ID_FB_NPS_MODE = 0x00000048, /* Configure memory partitioning mode */ + GFX_CMD_ID_FB_FW_RESERV_ADDR = 0x00000050, /* Query FW reservation addr */ + GFX_CMD_ID_FB_FW_RESERV_EXT_ADDR = 0x00000051, /* Query FW reservation extended addr */ }; 
/* PSP boot config sub-commands */ @@ -404,11 +406,19 @@ struct psp_gfx_uresp_bootcfg { uint32_t boot_cfg; /* boot config data */ }; +/* Command-specific response for fw reserve info */ +struct psp_gfx_uresp_fw_reserve_info { + uint32_t reserve_base_address_hi; + uint32_t reserve_base_address_lo; + uint32_t reserve_size; +}; + /* Union of command-specific responses for GPCOM ring. */ union psp_gfx_uresp { struct psp_gfx_uresp_reserved reserved; struct psp_gfx_uresp_bootcfg boot_cfg; struct psp_gfx_uresp_fwar_db_info fwar_db_info; + struct psp_gfx_uresp_fw_reserve_info fw_reserve_info; }; /* Structure of GFX Response buffer. diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 145186a1e48f..3584b8c18fd9 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -94,7 +94,7 @@ static int psp_v10_0_ring_create(struct psp_context *psp, /* Wait for response flag (bit 31) in C2PMSG_64 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } @@ -115,7 +115,7 @@ static int psp_v10_0_ring_stop(struct psp_context *psp, /* Wait for response flag (bit 31) in C2PMSG_64 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 215543575f47..6cc05d36e359 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -152,11 +152,9 @@ static int psp_v11_0_wait_for_bootloader(struct psp_context *psp) for (retry_loop = 0; retry_loop < 10; retry_loop++) { /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, - 0x80000000, - false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) return 0; @@ -252,8 +250,8 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -277,11 +275,13 @@ static int psp_v11_0_ring_stop(struct psp_context *psp, /* Wait for response flag (bit 31) */ if (amdgpu_sriov_vf(adev)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } @@ -317,13 +317,15 @@ static int psp_v11_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for 
sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for sOS ready for ring creation\n"); return ret; @@ -347,8 +349,9 @@ static int psp_v11_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -381,7 +384,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64); - ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG, + MBOX_TOS_READY_MASK, 0); if (ret) { DRM_INFO("psp is not working correctly before mode1 reset!\n"); @@ -395,7 +399,8 @@ static int psp_v11_0_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); - ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, + 0); if (ret) { DRM_INFO("psp mode 1 reset failed!\n"); @@ -421,8 +426,9 @@ static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg) max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; for (i = 0; i < max_wait; i++) { - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) break; } @@ -601,7 +607,7 @@ static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20)); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -638,7 +644,7 @@ static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (!ret) *fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c index 5697760a819b..93787a90d598 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0_8.c @@ -41,8 +41,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Write the ring destroy command*/ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, @@ -50,8 +51,9 @@ static int psp_v11_0_8_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 
0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -87,13 +89,15 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); return ret; @@ -117,8 +121,9 @@ static int psp_v11_0_8_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index 80153f837470..4c6450d62299 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -82,7 +82,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp) /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -97,7 +97,7 @@ static int psp_v12_0_bootloader_load_sysdrv(struct psp_context *psp) psp_gfxdrv_command_reg); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); return ret; } @@ -118,7 +118,7 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp) /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -133,8 +133,8 @@ static int psp_v12_0_bootloader_load_sos(struct psp_context *psp) psp_gfxdrv_command_reg); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -163,7 +163,7 @@ static int psp_v12_0_ring_create(struct psp_context *psp, /* Wait for response flag (bit 31) in C2PMSG_64 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } @@ -184,11 +184,13 @@ static int psp_v12_0_ring_stop(struct psp_context *psp, /* Wait for response flag (bit 31) */ if (amdgpu_sriov_vf(adev)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 
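
The churn through every psp_v*.c file here is one mechanical migration: psp_wait_for() loses its trailing bool check_changed in favor of a flags word, so false becomes 0, true becomes PSP_WAITREG_CHANGED, and the quiet bootloader retry loops pass PSP_WAITREG_NOVERBOSE. In parallel, the raw 0x80000000/0x8000FFFF TOS-mailbox literals become MBOX_TOS_RESP_FLAG/MBOX_TOS_RESP_MASK and MBOX_TOS_READY_FLAG/MBOX_TOS_READY_MASK, which by direct substitution must expand to those same values. The helper body is outside these hunks, so the following is only a sketch of the semantics the call sites imply:

/* assumed behavior of the reworked helper, inferred from call sites */
static int psp_wait_for_sketch(struct psp_context *psp, uint32_t reg_index,
			       uint32_t reg_val, uint32_t mask, uint32_t flags)
{
	struct amdgpu_device *adev = psp->adev;
	uint32_t val;
	int i;

	for (i = 0; i < adev->usec_timeout; i++) {
		val = RREG32(reg_index);
		if (flags & PSP_WAITREG_CHANGED) {
			if (val != reg_val)	/* wait for movement, not a match */
				return 0;
		} else if ((val & mask) == reg_val) {
			return 0;
		}
		udelay(1);
	}

	if (!(flags & PSP_WAITREG_NOVERBOSE))
		dev_err(adev->dev, "reg 0x%x wait timed out\n", reg_index);

	return -ETIME;
}
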
0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); return ret; } @@ -219,7 +221,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64); - ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for(psp, offset, MBOX_TOS_READY_FLAG, + MBOX_TOS_READY_MASK, 0); if (ret) { DRM_INFO("psp is not working correctly before mode1 reset!\n"); @@ -233,7 +236,8 @@ static int psp_v12_0_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); - ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, offset, MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, + 0); if (ret) { DRM_INFO("psp mode 1 reset failed!\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index df612fd9cc50..af4a7d7c4abd 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -42,7 +42,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin"); @@ -180,7 +182,7 @@ static int psp_v13_0_wait_for_vmbx_ready(struct psp_context *psp) ready having bit 31 of C2PMSG_33 set to 1 */ ret = psp_wait_for( psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_33), - 0x80000000, 0xffffffff, false); + 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE); if (ret == 0) break; @@ -211,7 +213,7 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp) for (retry_loop = 0; retry_loop < retry_cnt; retry_loop++) { ret = psp_wait_for( psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, 0xffffffff, false); + 0x80000000, 0xffffffff, PSP_WAITREG_NOVERBOSE); if (ret == 0) return 0; @@ -360,8 +362,8 @@ static int psp_v13_0_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); if (!ret) psp_v13_0_init_sos_version(psp); @@ -382,8 +384,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Write the ring destroy command*/ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64, @@ -391,8 +394,9 @@ static int psp_v13_0_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, 
MBOX_TOS_RESP_MASK, 0); } return ret; @@ -428,13 +432,15 @@ static int psp_v13_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); return ret; @@ -458,8 +464,9 @@ static int psp_v13_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -522,8 +529,9 @@ static int psp_v13_0_memory_training_send_msg(struct psp_context *psp, int msg) max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; for (i = 0; i < max_wait; i++) { - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) break; } @@ -675,7 +683,7 @@ static int psp_v13_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20)); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -712,7 +720,7 @@ static int psp_v13_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (!ret) *fw_ver = RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_36); @@ -737,8 +745,9 @@ static int psp_v13_0_exec_spi_cmd(struct psp_context *psp, int cmd) ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret); return ret; @@ -762,7 +771,7 @@ static int psp_v13_0_update_spirom(struct psp_context *psp, /* Confirm PSP is ready to start */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret); return ret; @@ -797,7 +806,7 @@ static int psp_v13_0_dump_spirom(struct psp_context *psp, /* Confirm PSP is ready to start */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { 
dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret); return ret; @@ -924,8 +933,9 @@ static int psp_v13_0_reg_program_no_ring(struct psp_context *psp, uint32_t val, WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_102, id); WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_103, val); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, 0); } return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c index eaa5512a21da..5f39a2edcc95 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0_4.c @@ -76,11 +76,9 @@ static int psp_v13_0_4_wait_for_bootloader(struct psp_context *psp) for (retry_loop = 0; retry_loop < 10; retry_loop++) { /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), - 0x80000000, - 0x80000000, - false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) return 0; @@ -185,8 +183,8 @@ static int psp_v13_0_4_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -204,8 +202,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Write the ring destroy command*/ WREG32_SOC15(MP0, 0, regMP0_SMN_C2PMSG_64, @@ -213,8 +212,9 @@ static int psp_v13_0_4_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -250,13 +250,15 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); return ret; @@ -280,8 +282,9 @@ static int psp_v13_0_4_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, 
regMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c index 256288c6cd78..38dfc5c19f2a 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v14_0.c @@ -34,7 +34,9 @@ MODULE_FIRMWARE("amdgpu/psp_14_0_2_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_2_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_3_sos_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_14_0_3_ta_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_5_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_14_0_5_ta.bin"); @@ -109,11 +111,9 @@ static int psp_v14_0_wait_for_bootloader(struct psp_context *psp) for (retry_loop = 0; retry_loop < 10; retry_loop++) { /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ - ret = psp_wait_for(psp, - SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), - 0x80000000, - 0x80000000, - false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) return 0; @@ -228,9 +228,10 @@ static int psp_v14_0_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81), - 0, true); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_81), + RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -248,8 +249,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Write the ring destroy command*/ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_64, @@ -257,8 +259,9 @@ static int psp_v14_0_ring_stop(struct psp_context *psp, /* there might be handshake issue with hardware which needs delay */ mdelay(20); /* Wait for response flag (bit 31) */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -294,13 +297,15 @@ static int psp_v14_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_101), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } else { /* Wait for sOS ready for ring creation */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), + MBOX_TOS_READY_FLAG, MBOX_TOS_READY_MASK, 0); if (ret) { DRM_ERROR("Failed to wait for trust OS ready for ring creation\n"); return ret; @@ -324,8 +329,9 @@ static int 
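
The *_sos_kicker.bin/*_ta_kicker.bin MODULE_FIRMWARE entries for psp 13.0.0 and 14.0.3 above mirror the IMU change at the top of this section: kicker SKUs load a _kicker-suffixed image. Modeled on the visible IMU fragment, the PSP side presumably selects the file the same way; the amdgpu_is_kicker_fw() gate and the sos_fw field are assumptions, as that code is not shown in these hunks:

if (amdgpu_is_kicker_fw(adev))	/* assumed predicate */
	err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_sos_kicker.bin", ucode_prefix);
else
	err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, AMDGPU_UCODE_REQUIRED,
				   "amdgpu/%s_sos.bin", ucode_prefix);
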
psp_v14_0_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_64), + MBOX_TOS_RESP_FLAG, MBOX_TOS_RESP_MASK, 0); } return ret; @@ -388,8 +394,9 @@ static int psp_v14_0_memory_training_send_msg(struct psp_context *psp, int msg) max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; for (i = 0; i < max_wait; i++) { - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, PSP_WAITREG_NOVERBOSE); if (ret == 0) break; } @@ -540,8 +547,9 @@ static int psp_v14_0_load_usbc_pd_fw(struct psp_context *psp, uint64_t fw_pri_mc */ WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36, (fw_pri_mc_addr >> 20)); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -577,8 +585,9 @@ static int psp_v14_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver) WREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_35), + 0x80000000, 0x80000000, 0); if (!ret) *fw_ver = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_36); @@ -602,11 +611,13 @@ static int psp_v14_0_exec_spi_cmd(struct psp_context *psp, int cmd) ret = psp_wait_for_spirom_update(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), MBOX_READY_FLAG, MBOX_READY_MASK, PSP_SPIROM_UPDATE_TIMEOUT); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, 0); - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "SPI cmd %x timed out, ret = %d", cmd, ret); return ret; @@ -629,8 +640,9 @@ static int psp_v14_0_update_spirom(struct psp_context *psp, int ret; /* Confirm PSP is ready to start */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), - MBOX_READY_FLAG, MBOX_READY_MASK, false); + ret = psp_wait_for(psp, + SOC15_REG_OFFSET(MP0, 0, regMPASP_SMN_C2PMSG_115), + MBOX_READY_FLAG, MBOX_READY_MASK, 0); if (ret) { dev_err(adev->dev, "PSP Not ready to start processing, ret = %d", ret); return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index f6b75e3e47ff..833830bc3e2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -91,7 +91,7 @@ static int psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -109,7 +109,7 @@ static int 
psp_v3_1_bootloader_load_sysdrv(struct psp_context *psp) mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); return ret; } @@ -130,7 +130,7 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), - 0x80000000, 0x80000000, false); + 0x80000000, 0x80000000, 0); if (ret) return ret; @@ -147,8 +147,8 @@ static int psp_v3_1_bootloader_load_sos(struct psp_context *psp) /* there might be handshake issue with hardware which needs delay */ mdelay(20); ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_81), - RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), - 0, true); + RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81), 0, + PSP_WAITREG_CHANGED); return ret; } @@ -168,7 +168,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp) mdelay(20); psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + 0x80000000, 0x8000FFFF, 0); /* Change IH ring for UMC */ tmp = REG_SET_FIELD(0, IH_CLIENT_CFG_DATA, CREDIT_RETURN_ADDR, 0x1216b); @@ -180,7 +180,7 @@ static void psp_v3_1_reroute_ih(struct psp_context *psp) mdelay(20); psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x8000FFFF, false); + 0x80000000, 0x8000FFFF, 0); } static int psp_v3_1_ring_create(struct psp_context *psp, @@ -217,9 +217,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_101 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, - mmMP0_SMN_C2PMSG_101), 0x80000000, - 0x8000FFFF, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x8000FFFF, 0); } else { /* Write low address of the ring to C2PMSG_69 */ @@ -240,10 +240,9 @@ static int psp_v3_1_ring_create(struct psp_context *psp, mdelay(20); /* Wait for response flag (bit 31) in C2PMSG_64 */ - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, - mmMP0_SMN_C2PMSG_64), 0x80000000, - 0x8000FFFF, false); - + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x8000FFFF, 0); } return ret; } @@ -267,11 +266,13 @@ static int psp_v3_1_ring_stop(struct psp_context *psp, /* Wait for response flag (bit 31) */ if (amdgpu_sriov_vf(adev)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, 0); else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); + ret = psp_wait_for( + psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, 0); return ret; } @@ -311,7 +312,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64); - ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false); + ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, 0); if (ret) { DRM_INFO("psp is not working correctly before mode1 reset!\n"); @@ -325,7 +326,7 @@ static int psp_v3_1_mode1_reset(struct psp_context *psp) offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33); - ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false); + ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, 0); if (ret) { DRM_INFO("psp mode 1 reset failed!\n"); diff 
--git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c index bcde34e4e0a1..36b1ca73c2ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4_2.c @@ -45,6 +45,7 @@ #include "amdgpu_ras.h" MODULE_FIRMWARE("amdgpu/sdma_4_4_2.bin"); +MODULE_FIRMWARE("amdgpu/sdma_4_4_4.bin"); MODULE_FIRMWARE("amdgpu/sdma_4_4_5.bin"); static const struct amdgpu_hwip_reg_entry sdma_reg_list_4_4_2[] = { @@ -109,6 +110,8 @@ static void sdma_v4_4_2_set_ras_funcs(struct amdgpu_device *adev); static void sdma_v4_4_2_update_reset_mask(struct amdgpu_device *adev); static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring); static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring); +static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev, + u32 instance_id); static u32 sdma_v4_4_2_get_reg_offset(struct amdgpu_device *adev, u32 instance, u32 offset) @@ -490,7 +493,7 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev, { struct amdgpu_ring *sdma[AMDGPU_MAX_SDMA_INSTANCES]; u32 doorbell_offset, doorbell; - u32 rb_cntl, ib_cntl; + u32 rb_cntl, ib_cntl, sdma_cntl; int i; for_each_inst(i, inst_mask) { @@ -502,6 +505,9 @@ static void sdma_v4_4_2_inst_gfx_stop(struct amdgpu_device *adev, ib_cntl = RREG32_SDMA(i, regSDMA_GFX_IB_CNTL); ib_cntl = REG_SET_FIELD(ib_cntl, SDMA_GFX_IB_CNTL, IB_ENABLE, 0); WREG32_SDMA(i, regSDMA_GFX_IB_CNTL, ib_cntl); + sdma_cntl = RREG32_SDMA(i, regSDMA_CNTL); + sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA_CNTL, UTC_L1_ENABLE, 0); + WREG32_SDMA(i, regSDMA_CNTL, sdma_cntl); if (sdma[i]->use_doorbell) { doorbell = RREG32_SDMA(i, regSDMA_GFX_DOORBELL); @@ -995,6 +1001,7 @@ static int sdma_v4_4_2_inst_start(struct amdgpu_device *adev, /* set utc l1 enable flag always to 1 */ temp = RREG32_SDMA(i, regSDMA_CNTL); temp = REG_SET_FIELD(temp, SDMA_CNTL, UTC_L1_ENABLE, 1); + WREG32_SDMA(i, regSDMA_CNTL, temp); if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) < IP_VERSION(4, 4, 5)) { /* enable context empty interrupt during initialization */ @@ -1337,6 +1344,7 @@ static bool sdma_v4_4_2_fw_support_paging_queue(struct amdgpu_device *adev) static const struct amdgpu_sdma_funcs sdma_v4_4_2_sdma_funcs = { .stop_kernel_queue = &sdma_v4_4_2_stop_queue, .start_kernel_queue = &sdma_v4_4_2_restore_queue, + .soft_reset_kernel_queue = &sdma_v4_4_2_soft_reset_engine, }; static int sdma_v4_4_2_early_init(struct amdgpu_ip_block *ip_block) @@ -1648,45 +1656,24 @@ static bool sdma_v4_4_2_is_queue_selected(struct amdgpu_device *adev, uint32_t i return (context_status & SDMA_GFX_CONTEXT_STATUS__SELECTED_MASK) != 0; } -static bool sdma_v4_4_2_ring_is_guilty(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t instance_id = ring->me; - - return sdma_v4_4_2_is_queue_selected(adev, instance_id, false); -} - -static bool sdma_v4_4_2_page_ring_is_guilty(struct amdgpu_ring *ring) -{ - struct amdgpu_device *adev = ring->adev; - uint32_t instance_id = ring->me; - - if (!adev->sdma.has_page_queue) - return false; - - return sdma_v4_4_2_is_queue_selected(adev, instance_id, true); -} - -static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v4_4_2_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - u32 id = GET_INST(SDMA0, ring->me); + u32 id = ring->me; int r; - if (!(adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) - return -EOPNOTSUPP; - - 
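
sdma_v4_4_2_reset_queue() keeps its overall bracket but now passes true to the KFD suspend/resume pair and false as amdgpu_sdma_reset_engine()'s new third argument, and the is_guilty callbacks become redundant once the ring-reset helpers carry the timed-out fence directly. The resulting shape (the booleans' exact meanings are assumptions; their definitions live outside these hunks):

static int my_sdma_reset_queue(struct amdgpu_ring *ring, unsigned int vmid,
			       struct amdgpu_fence *timedout_fence)
{
	struct amdgpu_device *adev = ring->adev;
	int r;

	amdgpu_amdkfd_suspend(adev, true);	/* park KFD queues first */
	r = amdgpu_sdma_reset_engine(adev, ring->me, false);
	amdgpu_amdkfd_resume(adev, true);	/* resume even on failure */
	return r;
}
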
amdgpu_amdkfd_suspend(adev, false); - r = amdgpu_sdma_reset_engine(adev, id); - amdgpu_amdkfd_resume(adev, false); - + amdgpu_amdkfd_suspend(adev, true); + r = amdgpu_sdma_reset_engine(adev, id, false); + amdgpu_amdkfd_resume(adev, true); return r; } static int sdma_v4_4_2_stop_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; - u32 instance_id = GET_INST(SDMA0, ring->me); + u32 instance_id = ring->me; u32 inst_mask; uint64_t rptr; @@ -1725,7 +1712,7 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 inst_mask; - int i; + int i, r; inst_mask = 1 << ring->me; udelay(50); @@ -1742,7 +1729,18 @@ static int sdma_v4_4_2_restore_queue(struct amdgpu_ring *ring) return -ETIMEDOUT; } - return sdma_v4_4_2_inst_start(adev, inst_mask, true); + r = sdma_v4_4_2_inst_start(adev, inst_mask, true); + + return r; +} + +static int sdma_v4_4_2_soft_reset_engine(struct amdgpu_device *adev, + u32 instance_id) +{ + /* For SDMA 4.x, use the existing DPM interface for backward compatibility + * we need to convert the logical instance ID to physical instance ID before reset. + */ + return amdgpu_dpm_reset_sdma(adev, 1 << GET_INST(SDMA0, instance_id)); } static int sdma_v4_4_2_set_trap_irq_state(struct amdgpu_device *adev, @@ -2139,7 +2137,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_ring_funcs = { .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, .reset = sdma_v4_4_2_reset_queue, - .is_guilty = sdma_v4_4_2_ring_is_guilty, }; static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = { @@ -2172,7 +2169,6 @@ static const struct amdgpu_ring_funcs sdma_v4_4_2_page_ring_funcs = { .emit_reg_wait = sdma_v4_4_2_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, .reset = sdma_v4_4_2_reset_queue, - .is_guilty = sdma_v4_4_2_page_ring_is_guilty, }; static void sdma_v4_4_2_set_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 9505ae96fbec..7dc67a22a7a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -1399,6 +1399,7 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block) return r; for (i = 0; i < adev->sdma.num_instances; i++) { + mutex_init(&adev->sdma.instance[i].engine_reset_mutex); adev->sdma.instance[i].funcs = &sdma_v5_0_sdma_funcs; ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; @@ -1427,7 +1428,8 @@ static int sdma_v5_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(5, 0, 0): case IP_VERSION(5, 0, 2): case IP_VERSION(5, 0, 5): - if (adev->sdma.instance[0].fw_version >= 35) + if ((adev->sdma.instance[0].fw_version >= 35) && + !amdgpu_sriov_vf(adev)) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: @@ -1538,12 +1540,27 @@ static int sdma_v5_0_soft_reset(struct amdgpu_ip_block *ip_block) return 0; } -static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v5_0_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - u32 inst_id = ring->me; + int r; + + if (ring->me >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance not found\n"); + return -EINVAL; + } - return amdgpu_sdma_reset_engine(adev, inst_id); + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + + 
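/*
 * [Editorial note] The reset paths above (sdma_v4_4_2 here, sdma_v5_x in the
 * following hunks) share one bracket: suspend KFD — now with process
 * suspension, hence the second argument flipping to true — reset the engine,
 * then resume KFD unconditionally so a failed reset never leaves user queues
 * parked. Generic shape of that ordering, with hypothetical callback names:
 */
struct quiesce_ops {
	void (*suspend)(void *ctx);
	int  (*reset)(void *ctx);
	void (*resume)(void *ctx);
};

static int reset_with_quiesce(const struct quiesce_ops *ops, void *ctx)
{
	int r;

	ops->suspend(ctx);	/* park user-mode queues first */
	r = ops->reset(ctx);	/* may fail ... */
	ops->resume(ctx);	/* ... but always undo the quiesce */
	return r;
}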
amdgpu_amdkfd_suspend(adev, true); + r = amdgpu_sdma_reset_engine(adev, ring->me, true); + amdgpu_amdkfd_resume(adev, true); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int sdma_v5_0_stop_queue(struct amdgpu_ring *ring) @@ -1610,6 +1627,7 @@ static int sdma_v5_0_restore_queue(struct amdgpu_ring *ring) r = sdma_v5_0_gfx_resume_instance(adev, inst_id, true); amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index a6e612b4a892..3bd44c24f692 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -1318,6 +1318,7 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) } for (i = 0; i < adev->sdma.num_instances; i++) { + mutex_init(&adev->sdma.instance[i].engine_reset_mutex); adev->sdma.instance[i].funcs = &sdma_v5_2_sdma_funcs; ring = &adev->sdma.instance[i].ring; ring->ring_obj = NULL; @@ -1346,11 +1347,13 @@ static int sdma_v5_2_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(5, 2, 2): case IP_VERSION(5, 2, 3): case IP_VERSION(5, 2, 4): - if (adev->sdma.instance[0].fw_version >= 76) + if ((adev->sdma.instance[0].fw_version >= 76) && + !amdgpu_sriov_vf(adev)) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; case IP_VERSION(5, 2, 5): - if (adev->sdma.instance[0].fw_version >= 34) + if ((adev->sdma.instance[0].fw_version >= 34) && + !amdgpu_sriov_vf(adev)) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: @@ -1451,12 +1454,27 @@ static int sdma_v5_2_wait_for_idle(struct amdgpu_ip_block *ip_block) return -ETIMEDOUT; } -static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v5_2_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - u32 inst_id = ring->me; + int r; + + if (ring->me >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance not found\n"); + return -EINVAL; + } - return amdgpu_sdma_reset_engine(adev, inst_id); + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + + amdgpu_amdkfd_suspend(adev, true); + r = amdgpu_sdma_reset_engine(adev, ring->me, true); + amdgpu_amdkfd_resume(adev, true); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int sdma_v5_2_stop_queue(struct amdgpu_ring *ring) @@ -1526,6 +1544,7 @@ static int sdma_v5_2_restore_queue(struct amdgpu_ring *ring) r = sdma_v5_2_gfx_resume_instance(adev, inst_id, true); amdgpu_gfx_rlc_exit_safe_mode(adev, 0); + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c index 5a70ae17be04..e6d8eddda2bf 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v6_0.c @@ -1355,7 +1355,8 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) case IP_VERSION(6, 0, 0): case IP_VERSION(6, 0, 2): case IP_VERSION(6, 0, 3): - if (adev->sdma.instance[0].fw_version >= 21) + if ((adev->sdma.instance[0].fw_version >= 21) && + !amdgpu_sriov_vf(adev)) adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; break; default: @@ -1374,9 +1375,42 @@ static int sdma_v6_0_sw_init(struct amdgpu_ip_block *ip_block) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); - /* add firmware version checks here */ - if (0 && !adev->sdma.disable_uq) - adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + switch 
(amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(6, 0, 0): + if ((adev->sdma.instance[0].fw_version >= 24) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 0, 1): + if ((adev->sdma.instance[0].fw_version >= 18) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 0, 2): + if ((adev->sdma.instance[0].fw_version >= 21) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 0, 3): + if ((adev->sdma.instance[0].fw_version >= 25) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 1, 0): + if ((adev->sdma.instance[0].fw_version >= 14) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 1, 1): + if ((adev->sdma.instance[0].fw_version >= 17) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 1, 2): + if ((adev->sdma.instance[0].fw_version >= 15) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + case IP_VERSION(6, 1, 3): + if ((adev->sdma.instance[0].fw_version >= 10) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + default: + break; + } r = amdgpu_sdma_sysfs_reset_mask_init(adev); if (r) @@ -1537,29 +1571,29 @@ static int sdma_v6_0_ring_preempt_ib(struct amdgpu_ring *ring) return r; } -static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v6_0_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - int i, r; + int r; - if (amdgpu_sriov_vf(adev)) + if (ring->me >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance not found\n"); return -EINVAL; - - for (i = 0; i < adev->sdma.num_instances; i++) { - if (ring == &adev->sdma.instance[i].ring) - break; } - if (i == adev->sdma.num_instances) { - DRM_ERROR("sdma instance not found\n"); - return -EINVAL; - } + amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true); if (r) return r; - return sdma_v6_0_gfx_resume_instance(adev, i, true); + r = sdma_v6_0_gfx_resume_instance(adev, ring->me, true); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static int sdma_v6_0_set_trap_irq_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c index ad47d0bdf777..326ecc8d37d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v7_0.c @@ -802,29 +802,29 @@ static bool sdma_v7_0_check_soft_reset(struct amdgpu_ip_block *ip_block) return false; } -static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, unsigned int vmid) +static int sdma_v7_0_reset_queue(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; - int i, r; + int r; - if (amdgpu_sriov_vf(adev)) + if (ring->me >= adev->sdma.num_instances) { + dev_err(adev->dev, "sdma instance not found\n"); return -EINVAL; - - for (i = 0; i < adev->sdma.num_instances; i++) { - if (ring == &adev->sdma.instance[i].ring) - break; } - if (i == adev->sdma.num_instances) { - DRM_ERROR("sdma instance not found\n"); - return -EINVAL; - } + 
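/*
 * [Editorial note] The switch above replaces the "if (0 && ...)" placeholder
 * and enables MES user queues only once the SDMA 6.x firmware is new enough
 * for the given IP version. The same gate written as a lookup table; the
 * version/firmware pairs are copied from the hunk, IP_VERSION() is modeled
 * after the driver's (major << 16 | minor << 8 | rev) packing, and the table
 * form itself is only an illustration.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

static const struct { uint32_t ip; uint32_t min_fw; } sdma6_uq_min_fw[] = {
	{ IP_VERSION(6, 0, 0), 24 }, { IP_VERSION(6, 0, 1), 18 },
	{ IP_VERSION(6, 0, 2), 21 }, { IP_VERSION(6, 0, 3), 25 },
	{ IP_VERSION(6, 1, 0), 14 }, { IP_VERSION(6, 1, 1), 17 },
	{ IP_VERSION(6, 1, 2), 15 }, { IP_VERSION(6, 1, 3), 10 },
};

static bool sdma6_userq_supported(uint32_t ip, uint32_t fw, bool disable_uq)
{
	if (disable_uq)
		return false;
	for (size_t i = 0; i < sizeof(sdma6_uq_min_fw) / sizeof(sdma6_uq_min_fw[0]); i++)
		if (sdma6_uq_min_fw[i].ip == ip)
			return fw >= sdma6_uq_min_fw[i].min_fw;
	return false;
}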
amdgpu_ring_reset_helper_begin(ring, timedout_fence); r = amdgpu_mes_reset_legacy_queue(adev, ring, vmid, true); if (r) return r; - return sdma_v7_0_gfx_resume_instance(adev, i, true); + r = sdma_v7_0_gfx_resume_instance(adev, ring->me, true); + if (r) + return r; + + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } /** @@ -1337,7 +1337,8 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) adev->sdma.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->sdma.instance[0].ring); - adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (!amdgpu_sriov_vf(adev)) + adev->sdma.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_sdma_sysfs_reset_mask_init(adev); if (r) @@ -1349,9 +1350,15 @@ static int sdma_v7_0_sw_init(struct amdgpu_ip_block *ip_block) else DRM_ERROR("Failed to allocated memory for SDMA IP Dump\n"); - /* add firmware version checks here */ - if (0 && !adev->sdma.disable_uq) - adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) { + case IP_VERSION(7, 0, 0): + case IP_VERSION(7, 0, 1): + if ((adev->sdma.instance[0].fw_version >= 7966358) && !adev->sdma.disable_uq) + adev->userq_funcs[AMDGPU_HW_IP_DMA] = &userq_mes_funcs; + break; + default: + break; + } return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index c457be3a3c56..9e74c9822e62 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1218,6 +1218,8 @@ static int soc15_common_early_init(struct amdgpu_ip_block *ip_block) AMD_PG_SUPPORT_JPEG; /*TODO: need a new external_rev_id for GC 9.4.4? */ adev->external_rev_id = adev->rev_id + 0x46; + if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) + adev->external_rev_id = adev->rev_id + 0x50; break; default: /* FIXME: not supported yet */ diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index ef7c603b50ae..c8ac11a9cdef 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -118,7 +118,6 @@ int vega10_reg_base_init(struct amdgpu_device *adev); int vega20_reg_base_init(struct amdgpu_device *adev); int arct_reg_base_init(struct amdgpu_device *adev); int aldebaran_reg_base_init(struct amdgpu_device *adev); -void aqua_vanjaram_ip_map_init(struct amdgpu_device *adev); u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id); int aqua_vanjaram_init_soc_config(struct amdgpu_device *adev); ssize_t aqua_vanjaram_get_reg_state(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 148b651be7ca..68b4371df0f1 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -98,6 +98,8 @@ static int vcn_v2_0_set_pg_state(struct amdgpu_vcn_inst *vinst, static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, struct dpg_pause_state *new_state); static int vcn_v2_0_start_sriov(struct amdgpu_device *adev); +static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst); + /** * vcn_v2_0_early_init - set function pointers and load microcode * @@ -213,6 +215,12 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block) } adev->vcn.inst[0].pause_dpg_mode = vcn_v2_0_pause_dpg_mode; + adev->vcn.inst[0].reset = vcn_v2_0_reset; + + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = 
amdgpu_virt_alloc_mm_table(adev); if (r) @@ -233,6 +241,10 @@ static int vcn_v2_0_sw_init(struct amdgpu_ip_block *ip_block) adev->vcn.ip_dump = ptr; } + r = amdgpu_vcn_sysfs_reset_mask_init(adev); + if (r) + return r; + return 0; } @@ -260,6 +272,8 @@ static int vcn_v2_0_sw_fini(struct amdgpu_ip_block *ip_block) if (r) return r; + amdgpu_vcn_sysfs_reset_mask_fini(adev); + r = amdgpu_vcn_sw_fini(adev, 0); kfree(adev->vcn.ip_dump); @@ -1355,6 +1369,16 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, return 0; } +static int vcn_v2_0_reset(struct amdgpu_vcn_inst *vinst) +{ + int r; + + r = vcn_v2_0_stop(vinst); + if (r) + return r; + return vcn_v2_0_start(vinst); +} + static bool vcn_v2_0_is_idle(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -2176,6 +2200,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_dec_ring_vm_funcs = { .emit_wreg = vcn_v2_0_dec_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = amdgpu_vcn_ring_reset, }; static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = { @@ -2205,6 +2230,7 @@ static const struct amdgpu_ring_funcs vcn_v2_0_enc_ring_vm_funcs = { .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = amdgpu_vcn_ring_reset, }; static void vcn_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 58b527a6b795..bc30a5326866 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -102,6 +102,7 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, struct dpg_pause_state *new_state); static int vcn_v2_5_sriov_start(struct amdgpu_device *adev); static void vcn_v2_5_set_ras_funcs(struct amdgpu_device *adev); +static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst); static int amdgpu_ih_clientid_vcns[] = { SOC15_IH_CLIENTID_VCN, @@ -404,8 +405,14 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.inst[j].pause_dpg_mode = vcn_v2_5_pause_dpg_mode; + adev->vcn.inst[j].reset = vcn_v2_5_reset; } + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_alloc_mm_table(adev); if (r) @@ -425,6 +432,10 @@ static int vcn_v2_5_sw_init(struct amdgpu_ip_block *ip_block) adev->vcn.ip_dump = ptr; } + r = amdgpu_vcn_sysfs_reset_mask_init(adev); + if (r) + return r; + return 0; } @@ -455,6 +466,8 @@ static int vcn_v2_5_sw_fini(struct amdgpu_ip_block *ip_block) if (amdgpu_sriov_vf(adev)) amdgpu_virt_free_mm_table(adev); + amdgpu_vcn_sysfs_reset_mask_fini(adev); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { r = amdgpu_vcn_suspend(adev, i); if (r) @@ -1816,6 +1829,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_dec_ring_vm_funcs = { .emit_wreg = vcn_v2_0_dec_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = amdgpu_vcn_ring_reset, }; /** @@ -1914,6 +1928,7 @@ static const struct amdgpu_ring_funcs vcn_v2_5_enc_ring_vm_funcs = { .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, 
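/*
 * [Editorial note] vcn_v2_0_reset() above — and the v2_5/v3_0 variants in
 * the hunks that follow — implement per-instance reset as "stop, then
 * start", starting only if the stop succeeded; vcn_v3_0 additionally
 * re-enables clock and static power gating in between. Abstracted control
 * flow (the function-pointer names are hypothetical):
 */
static int vcn_instance_reset(void *vinst,
			      int (*stop)(void *),
			      void (*regate)(void *),
			      int (*start)(void *))
{
	int r = stop(vinst);

	if (r)
		return r;
	if (regate)		/* vcn_v3_0 re-gates clocks/power here */
		regate(vinst);
	return start(vinst);
}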
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = amdgpu_vcn_ring_reset, }; static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev) @@ -1942,6 +1957,16 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev) } } +static int vcn_v2_5_reset(struct amdgpu_vcn_inst *vinst) +{ + int r; + + r = vcn_v2_5_stop(vinst); + if (r) + return r; + return vcn_v2_5_start(vinst); +} + static bool vcn_v2_5_is_idle(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 9fb0d5380589..4b8f4407047f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -110,6 +110,7 @@ static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst, enum amd_powergating_state state); static int vcn_v3_0_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, struct dpg_pause_state *new_state); +static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst); static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring); static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring); @@ -289,8 +290,14 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block) if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) adev->vcn.inst[i].pause_dpg_mode = vcn_v3_0_pause_dpg_mode; + adev->vcn.inst[i].reset = vcn_v3_0_reset; } + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_alloc_mm_table(adev); if (r) @@ -306,6 +313,10 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block) adev->vcn.ip_dump = ptr; } + r = amdgpu_vcn_sysfs_reset_mask_init(adev); + if (r) + return r; + return 0; } @@ -338,6 +349,8 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block) if (amdgpu_sriov_vf(adev)) amdgpu_virt_free_mm_table(adev); + amdgpu_vcn_sysfs_reset_mask_fini(adev); + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { r = amdgpu_vcn_suspend(adev, i); if (r) @@ -2033,6 +2046,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = { .emit_wreg = vcn_v2_0_dec_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = amdgpu_vcn_ring_reset, }; /** @@ -2131,6 +2145,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = { .emit_wreg = vcn_v2_0_enc_ring_emit_wreg, .emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait, .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, + .reset = amdgpu_vcn_ring_reset, }; static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev) @@ -2164,6 +2179,18 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev) } } +static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst) +{ + int r; + + r = vcn_v3_0_stop(vinst); + if (r) + return r; + vcn_v3_0_enable_clock_gating(vinst); + vcn_v3_0_enable_static_power_gating(vinst); + return vcn_v3_0_start(vinst); +} + static bool vcn_v3_0_is_idle(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c index b5071f77f78d..1924e075b66f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c @@ -241,7 +241,8 @@ static int vcn_v4_0_sw_init(struct amdgpu_ip_block *ip_block) 
adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); - adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; if (amdgpu_sriov_vf(adev)) { r = amdgpu_virt_alloc_mm_table(adev); @@ -1967,18 +1968,22 @@ static int vcn_v4_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, return 0; } -static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int vcn_v4_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me]; + int r; - if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) - return -EOPNOTSUPP; - - vcn_v4_0_stop(vinst); - vcn_v4_0_start(vinst); - - return amdgpu_ring_test_helper(ring); + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = vcn_v4_0_stop(vinst); + if (r) + return r; + r = vcn_v4_0_start(vinst); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static struct amdgpu_ring_funcs vcn_v4_0_unified_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c index 5a33140f5723..2a3663b551af 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c @@ -1594,18 +1594,16 @@ static void vcn_v4_0_3_unified_ring_set_wptr(struct amdgpu_ring *ring) } } -static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { int r = 0; int vcn_inst; struct amdgpu_device *adev = ring->adev; struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me]; - if (amdgpu_sriov_vf(ring->adev)) - return -EOPNOTSUPP; - - if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) - return -EOPNOTSUPP; + amdgpu_ring_reset_helper_begin(ring, timedout_fence); vcn_inst = GET_INST(VCN, ring->me); r = amdgpu_dpm_reset_vcn(adev, 1 << vcn_inst); @@ -1620,9 +1618,8 @@ static int vcn_v4_0_3_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED); vcn_v4_0_3_hw_init_inst(vinst); vcn_v4_0_3_start_dpg_mode(vinst, adev->vcn.inst[ring->me].indirect_sram); - r = amdgpu_ring_test_helper(ring); - return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amdgpu_ring_funcs vcn_v4_0_3_unified_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c index 16ade84facc7..caf2d95a85d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c @@ -220,7 +220,8 @@ static int vcn_v4_0_5_sw_init(struct amdgpu_ip_block *ip_block) } adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); - adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; r = amdgpu_vcn_sysfs_reset_mask_init(adev); if (r) @@ -1465,18 +1466,22 @@ static void vcn_v4_0_5_unified_ring_set_wptr(struct amdgpu_ring *ring) } } -static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int vcn_v4_0_5_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_vcn_inst *vinst = 
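/*
 * [Editorial note] Each converted ring_reset callback above now brackets its
 * stop/start work between amdgpu_ring_reset_helper_begin() and
 * amdgpu_ring_reset_helper_end(), and propagates stop/start errors instead
 * of unconditionally running the ring test as the old code did. Shape of the
 * contract; what the helpers do internally is not visible in this diff, so
 * the declarations below are placeholders only.
 */
struct ring;
struct fence;

void reset_helper_begin(struct ring *ring, struct fence *timedout);
int reset_helper_end(struct ring *ring, struct fence *timedout);

static int generic_ring_reset(struct ring *ring, struct fence *timedout,
			      int (*stop)(struct ring *),
			      int (*start)(struct ring *))
{
	int r;

	reset_helper_begin(ring, timedout);
	r = stop(ring);
	if (r)
		return r;
	r = start(ring);
	if (r)
		return r;
	return reset_helper_end(ring, timedout);
}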
&adev->vcn.inst[ring->me]; + int r; - if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) - return -EOPNOTSUPP; - - vcn_v4_0_5_stop(vinst); - vcn_v4_0_5_start(vinst); - - return amdgpu_ring_test_helper(ring); + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = vcn_v4_0_5_stop(vinst); + if (r) + return r; + r = vcn_v4_0_5_start(vinst); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static struct amdgpu_ring_funcs vcn_v4_0_5_unified_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c index f8e3f0b882da..07a6e9582880 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c @@ -198,7 +198,8 @@ static int vcn_v5_0_0_sw_init(struct amdgpu_ip_block *ip_block) adev->vcn.supported_reset = amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); - adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; + if (!amdgpu_sriov_vf(adev)) + adev->vcn.supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE; vcn_v5_0_0_alloc_ip_dump(adev); @@ -1192,18 +1193,22 @@ static void vcn_v5_0_0_unified_ring_set_wptr(struct amdgpu_ring *ring) } } -static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring, unsigned int vmid) +static int vcn_v5_0_0_ring_reset(struct amdgpu_ring *ring, + unsigned int vmid, + struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[ring->me]; + int r; - if (!(adev->vcn.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) - return -EOPNOTSUPP; - - vcn_v5_0_0_stop(vinst); - vcn_v5_0_0_start(vinst); - - return amdgpu_ring_test_helper(ring); + amdgpu_ring_reset_helper_begin(ring, timedout_fence); + r = vcn_v5_0_0_stop(vinst); + if (r) + return r; + r = vcn_v5_0_0_start(vinst); + if (r) + return r; + return amdgpu_ring_reset_helper_end(ring, timedout_fence); } static const struct amdgpu_ring_funcs vcn_v5_0_0_unified_ring_vm_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c index 338cf43c45fe..cdefd7fcb0da 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c @@ -669,6 +669,9 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, if (indirect) amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); + /* resetting ring, fw should not check RB ring */ + fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; + /* Pause dpg */ vcn_v5_0_1_pause_dpg_mode(vinst, &state); @@ -681,7 +684,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK); WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); - fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0); WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0); @@ -692,6 +695,7 @@ static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); tmp |= VCN_RB_ENABLE__RB1_EN_MASK; WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); + /* resetting done, fw can check RB ring */ fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL, diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig index 62e88e5362e9..16e12c9913f9 100644 --- a/drivers/gpu/drm/amd/amdkfd/Kconfig +++ b/drivers/gpu/drm/amd/amdkfd/Kconfig @@ -5,7 +5,7 @@ 
config HSA_AMD bool "HSA kernel driver for AMD GPU devices" - depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT)) + depends on DRM_AMDGPU && (X86_64 || ARM64 || PPC64 || (RISCV && 64BIT) || (LOONGARCH && 64BIT)) select HMM_MIRROR select MMU_NOTIFIER select DRM_AMDGPU_USERPTR diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index a2149afa5803..828a9ceef1e7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -22,7 +22,6 @@ */ #include <linux/device.h> -#include <linux/export.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/file.h> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index bf0854bd5555..7e749f9b6d69 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -971,7 +971,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd, kfd_smi_event_update_gpu_reset(node, false, reset_context); } - kgd2kfd_suspend(kfd, false); + kgd2kfd_suspend(kfd, true); for (i = 0; i < kfd->num_nodes; i++) kfd_signal_reset_event(kfd->nodes[i]); @@ -1013,13 +1013,33 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) return 0; } -bool kfd_is_locked(void) +bool kfd_is_locked(struct kfd_dev *kfd) { + uint8_t id = 0; + struct kfd_node *dev; + lockdep_assert_held(&kfd_processes_mutex); - return (kfd_locked > 0); + + /* check reset/suspend lock */ + if (kfd_locked > 0) + return true; + + if (kfd) + return kfd->kfd_dev_lock > 0; + + /* check lock on all cgroup accessible devices */ + while (kfd_topology_enum_kfd_devices(id++, &dev) == 0) { + if (!dev || kfd_devcgroup_check_permission(dev)) + continue; + + if (dev->kfd->kfd_dev_lock > 0) + return true; + } + + return false; } -void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) +void kgd2kfd_suspend(struct kfd_dev *kfd, bool suspend_proc) { struct kfd_node *node; int i; @@ -1027,14 +1047,8 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) if (!kfd->init_complete) return; - /* for runtime suspend, skip locking kfd */ - if (!run_pm) { - mutex_lock(&kfd_processes_mutex); - /* For first KFD device suspend all the KFD processes */ - if (++kfd_locked == 1) - kfd_suspend_all_processes(); - mutex_unlock(&kfd_processes_mutex); - } + if (suspend_proc) + kgd2kfd_suspend_process(kfd); for (i = 0; i < kfd->num_nodes; i++) { node = kfd->nodes[i]; @@ -1042,7 +1056,7 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) } } -int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) +int kgd2kfd_resume(struct kfd_dev *kfd, bool resume_proc) { int ret, i; @@ -1055,14 +1069,36 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) return ret; } - /* for runtime resume, skip unlocking kfd */ - if (!run_pm) { - mutex_lock(&kfd_processes_mutex); - if (--kfd_locked == 0) - ret = kfd_resume_all_processes(); - WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. 
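/*
 * [Editorial note] kgd2kfd_suspend_process()/kgd2kfd_resume_process() above
 * factor the old run_pm branches into a counted lock: the first suspender
 * parks every KFD process, the last resumer wakes them, all under
 * kfd_processes_mutex. Modeled in plain C with a pthread mutex standing in:
 */
#include <pthread.h>

static pthread_mutex_t proc_mutex = PTHREAD_MUTEX_INITIALIZER;
static int locked_count;

static void suspend_process(void (*suspend_all)(void))
{
	pthread_mutex_lock(&proc_mutex);
	if (++locked_count == 1)	/* first device: park everyone */
		suspend_all();
	pthread_mutex_unlock(&proc_mutex);
}

static int resume_process(int (*resume_all)(void))
{
	int ret = 0;

	pthread_mutex_lock(&proc_mutex);
	if (--locked_count == 0)	/* last device: wake everyone */
		ret = resume_all();
	pthread_mutex_unlock(&proc_mutex);
	return ret;
}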
error"); - mutex_unlock(&kfd_processes_mutex); - } + if (resume_proc) + ret = kgd2kfd_resume_process(kfd); + + return ret; +} + +void kgd2kfd_suspend_process(struct kfd_dev *kfd) +{ + if (!kfd->init_complete) + return; + + mutex_lock(&kfd_processes_mutex); + /* For first KFD device suspend all the KFD processes */ + if (++kfd_locked == 1) + kfd_suspend_all_processes(); + mutex_unlock(&kfd_processes_mutex); +} + +int kgd2kfd_resume_process(struct kfd_dev *kfd) +{ + int ret = 0; + + if (!kfd->init_complete) + return 0; + + mutex_lock(&kfd_processes_mutex); + if (--kfd_locked == 0) + ret = kfd_resume_all_processes(); + WARN_ONCE(kfd_locked < 0, "KFD suspend / resume ref. error"); + mutex_unlock(&kfd_processes_mutex); return ret; } @@ -1442,24 +1478,53 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) kfd_get_num_sdma_engines(node); } -int kgd2kfd_check_and_lock_kfd(void) +int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd) { + struct kfd_process *p; + int r = 0, temp, idx; + mutex_lock(&kfd_processes_mutex); - if (!hash_empty(kfd_processes_table) || kfd_is_locked()) { - mutex_unlock(&kfd_processes_mutex); - return -EBUSY; + + if (hash_empty(kfd_processes_table) && !kfd_is_locked(kfd)) + goto out; + + /* fail under system reset/resume or kfd device is partition switching. */ + if (kfd_is_locked(kfd)) { + r = -EBUSY; + goto out; + } + + /* + * ensure all running processes are cgroup excluded from device before mode switch. + * i.e. no pdd was created on the process socket. + */ + idx = srcu_read_lock(&kfd_processes_srcu); + hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { + int i; + + for (i = 0; i < p->n_pdds; i++) { + if (p->pdds[i]->dev->kfd != kfd) + continue; + + r = -EBUSY; + goto proc_check_unlock; + } } - ++kfd_locked; +proc_check_unlock: + srcu_read_unlock(&kfd_processes_srcu, idx); +out: + if (!r) + ++kfd->kfd_dev_lock; mutex_unlock(&kfd_processes_mutex); - return 0; + return r; } -void kgd2kfd_unlock_kfd(void) +void kgd2kfd_unlock_kfd(struct kfd_dev *kfd) { mutex_lock(&kfd_processes_mutex); - --kfd_locked; + --kfd->kfd_dev_lock; mutex_unlock(&kfd_processes_mutex); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 76359c6a3f3a..6c5c7c1bf5ed 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -2312,7 +2312,7 @@ static int reset_hung_queues_sdma(struct device_queue_manager *dqm) continue; /* Reset engine and check. */ - if (amdgpu_sdma_reset_engine(dqm->dev->adev, i) || + if (amdgpu_sdma_reset_engine(dqm->dev->adev, i, false) || dqm->dev->kfd2kgd->hqd_sdma_get_doorbell(dqm->dev->adev, i, j) || !set_sdma_queue_as_reset(dqm, doorbell_off)) { r = -ENOTRECOVERABLE; @@ -2339,9 +2339,18 @@ reset_fail: static int reset_queues_on_hws_hang(struct device_queue_manager *dqm, bool is_sdma) { + struct amdgpu_device *adev = dqm->dev->adev; + while (halt_if_hws_hang) schedule(); + if (adev->debug_disable_gpu_ring_reset) { + dev_info_once(adev->dev, + "%s queue hung, but ring reset disabled", + is_sdma ? 
"sdma" : "compute"); + + return -EPERM; + } if (!amdgpu_gpu_recovery) return -ENOTRECOVERABLE; @@ -2716,7 +2725,7 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm, dqm_lock(dqm); mqd_mgr = dqm->mqd_mgrs[mqd_type]; - *mqd_size = mqd_mgr->mqd_size; + *mqd_size = mqd_mgr->mqd_size * NUM_XCC(mqd_mgr->dev->xcc_mask); *ctl_stack_size = 0; if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index dbcb60eb54b2..1d170dc50df3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -23,7 +23,6 @@ */ #include <linux/device.h> -#include <linux/export.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/sched.h> diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index aee2212e52f6..33aa23450b3f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -78,8 +78,8 @@ err_ioctl: static void kfd_exit(void) { kfd_cleanup_processes(); - kfd_debugfs_fini(); kfd_process_destroy_wq(); + kfd_debugfs_fini(); kfd_procfs_shutdown(); kfd_topology_shutdown(); kfd_chardev_exit(); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 97933d2a3803..f2dee320fada 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -373,7 +373,7 @@ static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stac { struct v9_mqd *m = get_mqd(mqd); - *ctl_stack_size = m->cp_hqd_cntl_stack_size; + *ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask); } static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst) @@ -388,6 +388,24 @@ static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, voi memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size); } +static void checkpoint_mqd_v9_4_3(struct mqd_manager *mm, + void *mqd, + void *mqd_dst, + void *ctl_stack_dst) +{ + struct v9_mqd *m; + int xcc; + uint64_t size = get_mqd(mqd)->cp_mqd_stride_size; + + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { + m = get_mqd(mqd + size * xcc); + + checkpoint_mqd(mm, m, + (uint8_t *)mqd_dst + sizeof(*m) * xcc, + (uint8_t *)ctl_stack_dst + m->cp_hqd_cntl_stack_size * xcc); + } +} + static void restore_mqd(struct mqd_manager *mm, void **mqd, struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, struct queue_properties *qp, @@ -764,13 +782,35 @@ static void restore_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, const void *mqd_src, const void *ctl_stack_src, u32 ctl_stack_size) { - restore_mqd(mm, mqd, mqd_mem_obj, gart_addr, qp, mqd_src, ctl_stack_src, ctl_stack_size); - if (amdgpu_sriov_multi_vf_mode(mm->dev->adev)) { - struct v9_mqd *m; + struct kfd_mem_obj xcc_mqd_mem_obj; + u32 mqd_ctl_stack_size; + struct v9_mqd *m; + u32 num_xcc; + int xcc; - m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr; - m->cp_hqd_pq_doorbell_control |= 1 << - CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_MODE__SHIFT; + uint64_t offset = mm->mqd_stride(mm, qp); + + mm->dev->dqm->current_logical_xcc_start++; + + num_xcc = NUM_XCC(mm->dev->xcc_mask); + mqd_ctl_stack_size = ctl_stack_size / num_xcc; + + memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); + + /* Set the MQD pointer and gart address to XCC0 MQD */ + *mqd = mqd_mem_obj->cpu_ptr; + if 
(gart_addr) + *gart_addr = mqd_mem_obj->gpu_addr; + + for (xcc = 0; xcc < num_xcc; xcc++) { + get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset * xcc); + restore_mqd(mm, (void **)&m, + &xcc_mqd_mem_obj, + NULL, + qp, + (uint8_t *)mqd_src + xcc * sizeof(*m), + (uint8_t *)ctl_stack_src + xcc * mqd_ctl_stack_size, + mqd_ctl_stack_size); } } static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, @@ -906,7 +946,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->free_mqd = kfd_free_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->get_checkpoint_info = get_checkpoint_info; - mqd->checkpoint_mqd = checkpoint_mqd; mqd->mqd_size = sizeof(struct v9_mqd); mqd->mqd_stride = mqd_stride_v9; #if defined(CONFIG_DEBUG_FS) @@ -918,16 +957,18 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->init_mqd = init_mqd_v9_4_3; mqd->load_mqd = load_mqd_v9_4_3; mqd->update_mqd = update_mqd_v9_4_3; - mqd->restore_mqd = restore_mqd_v9_4_3; mqd->destroy_mqd = destroy_mqd_v9_4_3; mqd->get_wave_state = get_wave_state_v9_4_3; + mqd->checkpoint_mqd = checkpoint_mqd_v9_4_3; + mqd->restore_mqd = restore_mqd_v9_4_3; } else { mqd->init_mqd = init_mqd; mqd->load_mqd = load_mqd; mqd->update_mqd = update_mqd; - mqd->restore_mqd = restore_mqd; mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->get_wave_state = get_wave_state; + mqd->checkpoint_mqd = checkpoint_mqd; + mqd->restore_mqd = restore_mqd; } break; case KFD_MQD_TYPE_HIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 8fa6489b6f5d..505036968a77 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -240,7 +240,7 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, packet->bitfields2.engine_sel = engine_sel__mes_map_queues__compute_vi; - packet->bitfields2.gws_control_queue = q->gws ? 1 : 0; + packet->bitfields2.gws_control_queue = q->properties.is_gws ? 1 : 0; packet->bitfields2.extended_engine_sel = extended_engine_sel__mes_map_queues__legacy_engine_sel; packet->bitfields2.queue_type = diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index d221c58dccc3..67694bcd9464 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -372,6 +372,9 @@ struct kfd_dev { /* bitmap for dynamic doorbell allocation from doorbell object */ unsigned long *doorbell_bitmap; + + /* for dynamic partitioning */ + int kfd_dev_lock; }; enum kfd_mempool { @@ -1536,7 +1539,7 @@ static inline bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) int kfd_send_exception_to_runtime(struct kfd_process *p, unsigned int queue_id, uint64_t error_reason); -bool kfd_is_locked(void); +bool kfd_is_locked(struct kfd_dev *kfd); /* Compute profile */ void kfd_inc_compute_active(struct kfd_node *dev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 722ac1662bdc..5be28c6c4f6a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -854,7 +854,7 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) */ mutex_lock(&kfd_processes_mutex); - if (kfd_is_locked()) { + if (kfd_is_locked(NULL)) { pr_debug("KFD is locked! 
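/*
 * [Editorial note] checkpoint_mqd_v9_4_3()/restore_mqd_v9_4_3() above
 * (de)serialize one MQD and one control stack per XCC, so the CRIU blob
 * layout becomes mqd[0..n-1] followed by ctl_stack[0..n-1] — matching the
 * NUM_XCC() scaling of mqd_size and ctl_stack_size elsewhere in this series.
 * The offset arithmetic as a flat-buffer sketch; placing the control stack
 * directly behind each MQD is an assumption made for illustration:
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static void checkpoint_all_xcc(const uint8_t *mqd_base, size_t mqd_stride,
			       size_t mqd_size, size_t stack_size,
			       unsigned int num_xcc,
			       uint8_t *mqd_dst, uint8_t *stack_dst)
{
	for (unsigned int xcc = 0; xcc < num_xcc; xcc++) {
		const uint8_t *m = mqd_base + mqd_stride * xcc;

		memcpy(mqd_dst + mqd_size * xcc, m, mqd_size);
		memcpy(stack_dst + stack_size * xcc, m + mqd_size, stack_size);
	}
}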
Cannot create process"); process = ERR_PTR(-EINVAL); goto out; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index c643e0ccec52..7fbb5c274ccc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -914,7 +914,10 @@ static int criu_checkpoint_queues_device(struct kfd_process_device *pdd, q_data = (struct kfd_criu_queue_priv_data *)q_private_data; - /* data stored in this order: priv_data, mqd, ctl_stack */ + /* + * data stored in this order: + * priv_data, mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]... + */ q_data->mqd_size = mqd_size; q_data->ctl_stack_size = ctl_stack_size; @@ -963,7 +966,7 @@ int kfd_criu_checkpoint_queues(struct kfd_process *p, } static void set_queue_properties_from_criu(struct queue_properties *qp, - struct kfd_criu_queue_priv_data *q_data) + struct kfd_criu_queue_priv_data *q_data, uint32_t num_xcc) { qp->is_interop = false; qp->queue_percent = q_data->q_percent; @@ -976,7 +979,11 @@ static void set_queue_properties_from_criu(struct queue_properties *qp, qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size; qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address; qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size; - qp->ctl_stack_size = q_data->ctl_stack_size; + if (q_data->type == KFD_QUEUE_TYPE_COMPUTE) + qp->ctl_stack_size = q_data->ctl_stack_size / num_xcc; + else + qp->ctl_stack_size = q_data->ctl_stack_size; + qp->type = q_data->type; qp->format = q_data->format; } @@ -1036,12 +1043,15 @@ int kfd_criu_restore_queue(struct kfd_process *p, goto exit; } - /* data stored in this order: mqd, ctl_stack */ + /* + * data stored in this order: + * mqd[xcc0], mqd[xcc1],..., ctl_stack[xcc0], ctl_stack[xcc1]... + */ mqd = q_extra_data; ctl_stack = mqd + q_data->mqd_size; memset(&qp, 0, sizeof(qp)); - set_queue_properties_from_criu(&qp, q_data); + set_queue_properties_from_criu(&qp, q_data, NUM_XCC(pdd->dev->adev->gfx.xcc_mask)); print_queue_properties(&qp); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 865dca2547de..a0f22ea6d15a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1171,13 +1171,12 @@ svm_range_split_head(struct svm_range *prange, uint64_t new_start, } static void -svm_range_add_child(struct svm_range *prange, struct mm_struct *mm, - struct svm_range *pchild, enum svm_work_list_ops op) +svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op) { pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n", pchild, pchild->start, pchild->last, prange, op); - pchild->work_item.mm = mm; + pchild->work_item.mm = NULL; pchild->work_item.op = op; list_add_tail(&pchild->child_list, &prange->child_list); } @@ -1278,7 +1277,7 @@ svm_range_get_pte_flags(struct kfd_node *node, mapping_flags |= ext_coherent ? 
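/*
 * [Editorial note] svm_range_add_list_work() above now queues deferred work
 * only when it can still take an mm reference; mmget_not_zero() failing
 * means the process is exiting, and the notifier update is simply dropped
 * rather than touching a dying mm. The guard pattern, with a non-atomic
 * stand-in for the kernel's refcount:
 */
#include <stdbool.h>

struct mm { int users; };

static bool mm_get_not_zero(struct mm *mm)
{
	if (mm->users == 0)	/* the kernel checks and bumps atomically */
		return false;
	mm->users++;
	return true;
}

static bool queue_deferred_work(struct mm *mm, void (*enqueue)(struct mm *))
{
	if (!mm_get_not_zero(mm))
		return false;	/* exiting: skip the update */
	enqueue(mm);		/* paired mm->users-- (mmput) in the worker */
	return true;
}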
AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; /* system memory accessed by the dGPU */ } else { - if (gc_ip_version < IP_VERSION(9, 5, 0)) + if (gc_ip_version < IP_VERSION(9, 5, 0) || ext_coherent) mapping_flags |= AMDGPU_VM_MTYPE_UC; else mapping_flags |= AMDGPU_VM_MTYPE_NC; @@ -2394,15 +2393,17 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, prange->work_item.op != SVM_OP_UNMAP_RANGE) prange->work_item.op = op; } else { - prange->work_item.op = op; - - /* Pairs with mmput in deferred_list_work */ - mmget(mm); - prange->work_item.mm = mm; - list_add_tail(&prange->deferred_list, - &prange->svms->deferred_range_list); - pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n", - prange, prange->start, prange->last, op); + /* Pairs with mmput in deferred_list_work. + * If process is exiting and mm is gone, don't update mmu notifier. + */ + if (mmget_not_zero(mm)) { + prange->work_item.mm = mm; + prange->work_item.op = op; + list_add_tail(&prange->deferred_list, + &prange->svms->deferred_range_list); + pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n", + prange, prange->start, prange->last, op); + } } spin_unlock(&svms->deferred_list_lock); } @@ -2416,8 +2417,7 @@ void schedule_deferred_list_work(struct svm_range_list *svms) } static void -svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent, - struct svm_range *prange, unsigned long start, +svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start, unsigned long last) { struct svm_range *head; @@ -2438,12 +2438,12 @@ svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent, svm_range_split(tail, last + 1, tail->last, &head); if (head != prange && tail != prange) { - svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); - svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); + svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE); + svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE); } else if (tail != prange) { - svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE); + svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE); } else if (head != prange) { - svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); + svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE); } else if (parent != prange) { prange->work_item.op = SVM_OP_UNMAP_RANGE; } @@ -2520,14 +2520,14 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, l = min(last, pchild->last); if (l >= s) svm_range_unmap_from_gpus(pchild, s, l, trigger); - svm_range_unmap_split(mm, prange, pchild, start, last); + svm_range_unmap_split(prange, pchild, start, last); mutex_unlock(&pchild->lock); } s = max(start, prange->start); l = min(last, prange->last); if (l >= s) svm_range_unmap_from_gpus(prange, s, l, trigger); - svm_range_unmap_split(mm, prange, prange, start, last); + svm_range_unmap_split(prange, prange, start, last); if (unmap_parent) svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE); @@ -2570,8 +2570,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, if (range->event == MMU_NOTIFY_RELEASE) return true; - if (!mmget_not_zero(mni->mm)) - return true; start = mni->interval_tree.start; last = mni->interval_tree.last; @@ -2598,7 +2596,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, } svm_range_unlock(prange); - mmput(mni->mm); return true; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index baa2374acdeb..4ec73f33535e 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -510,6 +510,10 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.capability |= HSA_CAP_AQL_QUEUE_DOUBLE_MAP; + if (KFD_GC_VERSION(dev->gpu) < IP_VERSION(10, 0, 0) && + (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE)) + dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED; + sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_fcompute", dev->node_props.max_engine_clk_fcompute); @@ -2008,8 +2012,6 @@ static void kfd_topology_set_capabilities(struct kfd_topology_device *dev) if (!amdgpu_sriov_vf(dev->gpu->adev)) dev->node_props.capability |= HSA_CAP_PER_QUEUE_RESET_SUPPORTED; - if (dev->gpu->adev->sdma.supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE) - dev->node_props.capability2 |= HSA_CAP2_PER_SDMA_QUEUE_RESET_SUPPORTED; } else { dev->node_props.debug_prop |= HSA_DBG_WATCH_ADDR_MASK_LO_BIT_GFX10 | HSA_DBG_WATCH_ADDR_MASK_HI_BIT; diff --git a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c index faed84172dd4..8bc36f04b1b7 100644 --- a/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c +++ b/drivers/gpu/drm/amd/amdxcp/amdgpu_xcp_drv.c @@ -21,6 +21,7 @@ * */ +#include <linux/export.h> #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d3100f641ac6..a0ca3b2c6bd8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1758,10 +1758,11 @@ dm_dmub_send_vbios_gpint_command(struct amdgpu_device *adev, return DMUB_STATUS_TIMEOUT; } -static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev) +static void *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *adev) { - struct dml2_soc_bb *bb; + void *bb; long long addr; + unsigned int bb_size; int i = 0; uint16_t chunk; enum dmub_gpint_command send_addrs[] = { @@ -1774,6 +1775,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { case IP_VERSION(4, 0, 1): + bb_size = sizeof(struct dml2_soc_bb); break; default: return NULL; @@ -1781,7 +1783,7 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device * bb = dm_allocate_gpu_mem(adev, DC_MEM_ALLOC_TYPE_GART, - sizeof(struct dml2_soc_bb), + bb_size, &addr); if (!bb) return NULL; @@ -1847,7 +1849,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) mutex_init(&adev->dm.audio_lock); if (amdgpu_dm_irq_init(adev)) { - drm_err(adev_to_drm(adev), "amdgpu: failed to initialize DM IRQ support.\n"); + drm_err(adev_to_drm(adev), "failed to initialize DM IRQ support.\n"); goto error; } @@ -2037,7 +2039,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev); if (!adev->dm.hpd_rx_offload_wq) { - drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd rx offload workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n"); goto error; } @@ -2053,7 +2055,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { drm_err(adev_to_drm(adev), - "amdgpu: failed to initialize freesync_module.\n"); + "failed to initialize freesync_module.\n"); } else drm_dbg_driver(adev_to_drm(adev), 
"amdgpu: freesync_module init done %p.\n", adev->dm.freesync_module); @@ -2064,7 +2066,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.vblank_control_workqueue = create_singlethread_workqueue("dm_vblank_control_workqueue"); if (!adev->dm.vblank_control_workqueue) - drm_err(adev_to_drm(adev), "amdgpu: failed to initialize vblank_workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to initialize vblank_workqueue.\n"); } if (adev->dm.dc->caps.ips_support && @@ -2075,7 +2077,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); if (!adev->dm.hdcp_workqueue) - drm_err(adev_to_drm(adev), "amdgpu: failed to initialize hdcp_workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to initialize hdcp_workqueue.\n"); else drm_dbg_driver(adev_to_drm(adev), "amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); @@ -2085,20 +2087,20 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_completion(&adev->dm.dmub_aux_transfer_done); adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); if (!adev->dm.dmub_notify) { - drm_info(adev_to_drm(adev), "amdgpu: fail to allocate adev->dm.dmub_notify"); + drm_info(adev_to_drm(adev), "fail to allocate adev->dm.dmub_notify"); goto error; } adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); if (!adev->dm.delayed_hpd_wq) { - drm_err(adev_to_drm(adev), "amdgpu: failed to create hpd offload workqueue.\n"); + drm_err(adev_to_drm(adev), "failed to create hpd offload workqueue.\n"); goto error; } amdgpu_dm_outbox_init(adev); if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, dmub_aux_setconfig_callback, false)) { - drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub aux callback"); + drm_err(adev_to_drm(adev), "fail to register dmub aux callback"); goto error; } @@ -2107,7 +2109,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_FUSED_IO, dmub_aux_fused_io_callback, false)) { - drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub fused io callback"); + drm_err(adev_to_drm(adev), "fail to register dmub fused io callback"); goto error; } /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. 
@@ -2125,7 +2127,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dm_initialize_drm_device(adev)) { drm_err(adev_to_drm(adev), - "amdgpu: failed to initialize sw for display support.\n"); + "failed to initialize sw for display support.\n"); goto error; } @@ -2140,14 +2142,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { drm_err(adev_to_drm(adev), - "amdgpu: failed to initialize sw for display support.\n"); + "failed to initialize sw for display support.\n"); goto error; } #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) amdgpu_dm_crtc_secure_display_create_contexts(adev); if (!adev->dm.secure_display_ctx.crtc_ctx) - drm_err(adev_to_drm(adev), "amdgpu: failed to initialize secure display contexts.\n"); + drm_err(adev_to_drm(adev), "failed to initialize secure display contexts.\n"); if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(4, 0, 1)) adev->dm.secure_display_ctx.support_mul_roi = true; @@ -2404,6 +2406,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev) DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM + DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE }; int r; @@ -2570,7 +2573,7 @@ static int dm_sw_init(struct amdgpu_ip_block *ip_block) adev->dm.cgs_device = amdgpu_cgs_create_device(adev); if (!adev->dm.cgs_device) { - drm_err(adev_to_drm(adev), "amdgpu: failed to create cgs device.\n"); + drm_err(adev_to_drm(adev), "failed to create cgs device.\n"); return -EINVAL; } @@ -3060,6 +3063,77 @@ static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) } } +static int dm_cache_state(struct amdgpu_device *adev) +{ + int r; + + adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); + if (IS_ERR(adev->dm.cached_state)) { + r = PTR_ERR(adev->dm.cached_state); + adev->dm.cached_state = NULL; + } + + return adev->dm.cached_state ? 0 : r; +} + +static void dm_destroy_cached_state(struct amdgpu_device *adev) +{ + struct amdgpu_display_manager *dm = &adev->dm; + struct drm_device *ddev = adev_to_drm(adev); + struct dm_plane_state *dm_new_plane_state; + struct drm_plane_state *new_plane_state; + struct dm_crtc_state *dm_new_crtc_state; + struct drm_crtc_state *new_crtc_state; + struct drm_plane *plane; + struct drm_crtc *crtc; + int i; + + if (!dm->cached_state) + return; + + /* Force mode set in atomic commit */ + for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { + new_crtc_state->active_changed = true; + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + reset_freesync_config_for_crtc(dm_new_crtc_state); + } + + /* + * atomic_check is expected to create the dc states. We need to release + * them here, since they were duplicated as part of the suspend + * procedure. 
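/*
 * [Editorial note] dm_cache_state() above folds the repeated
 * drm_atomic_helper_suspend() call sites into one helper that converts an
 * ERR_PTR result into a negative errno and leaves the cached pointer NULL on
 * failure. Equivalent shape with a user-space model of IS_ERR()/PTR_ERR():
 */
#include <stdint.h>

#define MAX_ERRNO 4095UL

static inline int is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct dm_state_cache { void *cached_state; };

static int cache_state(struct dm_state_cache *dm, void *(*suspend)(void))
{
	dm->cached_state = suspend();
	if (is_err(dm->cached_state)) {
		int r = (int)(intptr_t)dm->cached_state;	/* PTR_ERR() */
		dm->cached_state = NULL;
		return r;
	}
	return 0;
}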
+ */ + for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + if (dm_new_crtc_state->stream) { + WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); + dc_stream_release(dm_new_crtc_state->stream); + dm_new_crtc_state->stream = NULL; + } + dm_new_crtc_state->base.color_mgmt_changed = true; + } + + for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { + dm_new_plane_state = to_dm_plane_state(new_plane_state); + if (dm_new_plane_state->dc_state) { + WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); + dc_plane_state_release(dm_new_plane_state->dc_state); + dm_new_plane_state->dc_state = NULL; + } + } + + drm_atomic_helper_resume(ddev, dm->cached_state); + + dm->cached_state = NULL; +} + +static void dm_complete(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + + dm_destroy_cached_state(adev); +} + static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block) { struct amdgpu_device *adev = ip_block->adev; @@ -3068,11 +3142,8 @@ static int dm_prepare_suspend(struct amdgpu_ip_block *ip_block) return 0; WARN_ON(adev->dm.cached_state); - adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); - if (IS_ERR(adev->dm.cached_state)) - return PTR_ERR(adev->dm.cached_state); - return 0; + return dm_cache_state(adev); } static int dm_suspend(struct amdgpu_ip_block *ip_block) @@ -3106,9 +3177,10 @@ static int dm_suspend(struct amdgpu_ip_block *ip_block) } if (!adev->dm.cached_state) { - adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); - if (IS_ERR(adev->dm.cached_state)) - return PTR_ERR(adev->dm.cached_state); + int r = dm_cache_state(adev); + + if (r) + return r; } s3_handle_hdmi_cec(adev_to_drm(adev), true); @@ -3295,12 +3367,6 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; struct drm_connector_list_iter iter; - struct drm_crtc *crtc; - struct drm_crtc_state *new_crtc_state; - struct dm_crtc_state *dm_new_crtc_state; - struct drm_plane *plane; - struct drm_plane_state *new_plane_state; - struct dm_plane_state *dm_new_plane_state; struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); enum dc_connection_type new_connection_type = dc_connection_none; struct dc_state *dc_state; @@ -3332,8 +3398,10 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); r = dm_dmub_hw_init(adev); - if (r) + if (r) { drm_err(adev_to_drm(adev), "DMUB interface failed to initialize: status=%d\n", r); + return r; + } dc_dmub_srv_set_power_state(dm->dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D0); dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); @@ -3457,40 +3525,7 @@ static int dm_resume(struct amdgpu_ip_block *ip_block) } drm_connector_list_iter_end(&iter); - /* Force mode set in atomic commit */ - for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { - new_crtc_state->active_changed = true; - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - reset_freesync_config_for_crtc(dm_new_crtc_state); - } - - /* - * atomic_check is expected to create the dc states. We need to release - * them here, since they were duplicated as part of the suspend - * procedure. 
- */ - for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - if (dm_new_crtc_state->stream) { - WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); - dc_stream_release(dm_new_crtc_state->stream); - dm_new_crtc_state->stream = NULL; - } - dm_new_crtc_state->base.color_mgmt_changed = true; - } - - for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { - dm_new_plane_state = to_dm_plane_state(new_plane_state); - if (dm_new_plane_state->dc_state) { - WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); - dc_plane_state_release(dm_new_plane_state->dc_state); - dm_new_plane_state->dc_state = NULL; - } - } - - drm_atomic_helper_resume(ddev, dm->cached_state); - - dm->cached_state = NULL; + dm_destroy_cached_state(adev); /* Do mst topology probing after resuming cached state*/ drm_connector_list_iter_begin(ddev, &iter); @@ -3539,6 +3574,7 @@ static const struct amd_ip_funcs amdgpu_dm_funcs = { .prepare_suspend = dm_prepare_suspend, .suspend = dm_suspend, .resume = dm_resume, + .complete = dm_complete, .is_idle = dm_is_idle, .wait_for_idle = dm_wait_for_idle, .check_soft_reset = dm_check_soft_reset, @@ -3610,13 +3646,15 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) luminance_range = &conn_base->display_info.luminance_range; - if (luminance_range->max_luminance) { - caps->aux_min_input_signal = luminance_range->min_luminance; + if (luminance_range->max_luminance) caps->aux_max_input_signal = luminance_range->max_luminance; - } else { - caps->aux_min_input_signal = 0; + else caps->aux_max_input_signal = 512; - } + + if (luminance_range->min_luminance) + caps->aux_min_input_signal = luminance_range->min_luminance; + else + caps->aux_min_input_signal = 1; min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid); if (min_input_signal_override >= 0) @@ -4001,19 +4039,19 @@ static int register_hpd_handlers(struct amdgpu_device *adev) if (dc_is_dmub_outbox_supported(adev->dm.dc)) { if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { - drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback"); + drm_err(adev_to_drm(adev), "fail to register dmub hpd callback"); return -EINVAL; } if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { - drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd callback"); + drm_err(adev_to_drm(adev), "fail to register dmub hpd callback"); return -EINVAL; } if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_SENSE_NOTIFY, dmub_hpd_sense_callback, true)) { - drm_err(adev_to_drm(adev), "amdgpu: fail to register dmub hpd sense callback"); + drm_err(adev_to_drm(adev), "fail to register dmub hpd sense callback"); return -EINVAL; } } @@ -4718,9 +4756,23 @@ static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, return 1; } +/* Rescale from [min..max] to [0..AMDGPU_MAX_BL_LEVEL] */ +static inline u32 scale_input_to_fw(int min, int max, u64 input) +{ + return DIV_ROUND_CLOSEST_ULL(input * AMDGPU_MAX_BL_LEVEL, max - min); +} + +/* Rescale from [0..AMDGPU_MAX_BL_LEVEL] to [min..max] */ +static inline u32 scale_fw_to_input(int min, int max, u64 input) +{ + return min + DIV_ROUND_CLOSEST_ULL(input * (max - min), AMDGPU_MAX_BL_LEVEL); +} + static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *caps, - uint32_t *brightness) + unsigned int min, unsigned int max, + 
uint32_t *user_brightness) { + u32 brightness = scale_input_to_fw(min, max, *user_brightness); u8 prev_signal = 0, prev_lum = 0; int i = 0; @@ -4731,7 +4783,7 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap return; /* choose start to run less interpolation steps */ - if (caps->luminance_data[caps->data_points/2].input_signal > *brightness) + if (caps->luminance_data[caps->data_points/2].input_signal > brightness) i = caps->data_points/2; do { u8 signal = caps->luminance_data[i].input_signal; @@ -4742,17 +4794,18 @@ static void convert_custom_brightness(const struct amdgpu_dm_backlight_caps *cap * brightness < signal: interpolate between previous and current luminance numerator * brightness > signal: find next data point */ - if (*brightness > signal) { + if (brightness > signal) { prev_signal = signal; prev_lum = lum; i++; continue; } - if (*brightness < signal) + if (brightness < signal) lum = prev_lum + DIV_ROUND_CLOSEST((lum - prev_lum) * - (*brightness - prev_signal), + (brightness - prev_signal), signal - prev_signal); - *brightness = DIV_ROUND_CLOSEST(lum * *brightness, 101); + *user_brightness = scale_fw_to_input(min, max, + DIV_ROUND_CLOSEST(lum * brightness, 101)); return; } while (i < caps->data_points); } @@ -4765,11 +4818,10 @@ static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *c if (!get_brightness_range(caps, &min, &max)) return brightness; - convert_custom_brightness(caps, &brightness); + convert_custom_brightness(caps, min, max, &brightness); - // Rescale 0..255 to min..max - return min + DIV_ROUND_CLOSEST((max - min) * brightness, - AMDGPU_MAX_BL_LEVEL); + // Rescale 0..max to min..max + return min + DIV_ROUND_CLOSEST_ULL((u64)(max - min) * brightness, max); } static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, @@ -4782,8 +4834,8 @@ static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *cap if (brightness < min) return 0; - // Rescale min..max to 0..255 - return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), + // Rescale min..max to 0..max + return DIV_ROUND_CLOSEST_ULL((u64)max * (brightness - min), max - min); } @@ -4813,6 +4865,14 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, reallow_idle = true; } + if (trace_amdgpu_dm_brightness_enabled()) { + trace_amdgpu_dm_brightness(__builtin_return_address(0), + user_brightness, + brightness, + caps->aux_support, + power_supply_is_system_supplied() > 0); + } + if (caps->aux_support) { rc = dc_link_set_backlight_level_nits(link, true, brightness, AUX_BL_DEFAULT_TRANSITION_TIME_MS); @@ -4908,7 +4968,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) struct drm_device *drm = aconnector->base.dev; struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; struct backlight_properties props = { 0 }; - struct amdgpu_dm_backlight_caps caps = { 0 }; + struct amdgpu_dm_backlight_caps *caps; char bl_name[16]; int min, max; @@ -4922,22 +4982,21 @@ amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) return; } - amdgpu_acpi_get_backlight_caps(&caps); - if (caps.caps_valid && get_brightness_range(&caps, &min, &max)) { + caps = &dm->backlight_caps[aconnector->bl_idx]; + if (get_brightness_range(caps, &min, &max)) { if (power_supply_is_system_supplied() > 0) - props.brightness = (max - min) * DIV_ROUND_CLOSEST(caps.ac_level, 100); + props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->ac_level, 100); else - props.brightness = 
(max - min) * DIV_ROUND_CLOSEST(caps.dc_level, 100); + props.brightness = DIV_ROUND_CLOSEST((max - min) * caps->dc_level, 100); /* min is zero, so max needs to be adjusted */ props.max_brightness = max - min; drm_dbg(drm, "Backlight caps: min: %d, max: %d, ac %d, dc %d\n", min, max, - caps.ac_level, caps.dc_level); + caps->ac_level, caps->dc_level); } else - props.brightness = AMDGPU_MAX_BL_LEVEL; + props.brightness = props.max_brightness = MAX_BACKLIGHT_LEVEL; - if (caps.data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) + if (caps->data_points && !(amdgpu_dc_debug_mask & DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE)) drm_info(drm, "Using custom brightness curve\n"); - props.max_brightness = AMDGPU_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", @@ -5353,7 +5412,8 @@ fail: static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) { - drm_atomic_private_obj_fini(&dm->atomic_obj); + if (dm->atomic_obj.state) + drm_atomic_private_obj_fini(&dm->atomic_obj); } /****************************************************************************** @@ -7519,7 +7579,7 @@ static enum dc_status dm_validate_stream_and_context(struct dc *dc, dc_result = DC_FAIL_ATTACH_SURFACES; if (dc_result == DC_OK) - dc_result = dc_validate_global_state(dc, dc_state, true); + dc_result = dc_validate_global_state(dc, dc_state, DC_VALIDATE_MODE_ONLY); cleanup: if (dc_state) @@ -7577,7 +7637,7 @@ create_validate_stream_for_sink(struct drm_connector *connector, dc_result = dm_validate_stream_and_context(adev->dm.dc, stream); if (dc_result != DC_OK) { - DRM_DEBUG_KMS("Mode %dx%d (clk %d) pixel_encoding:%s color_depth:%s failed validation -- %s\n", + DRM_DEBUG_KMS("Pruned mode %d x %d (clk %d) %s %s -- %s\n", drm_mode->hdisplay, drm_mode->vdisplay, drm_mode->clock, @@ -7732,6 +7792,9 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); int ret; + if (WARN_ON(unlikely(!old_con_state || !new_con_state))) + return -EINVAL; + trace_amdgpu_dm_connector_atomic_check(new_con_state); if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { @@ -7844,6 +7907,23 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, int clock, bpp = 0; bool is_y420 = false; + if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) || + (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) { + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; + enum drm_mode_status result; + + result = drm_crtc_helper_mode_valid_fixed(encoder->crtc, adjusted_mode, native_mode); + if (result != MODE_OK && dm_new_connector_state->scaling == RMX_OFF) { + drm_dbg_driver(encoder->dev, + "mode %dx%d@%dHz is not native, enabling scaling\n", + adjusted_mode->hdisplay, adjusted_mode->vdisplay, + drm_mode_vrefresh(adjusted_mode)); + dm_new_connector_state->scaling = RMX_FULL; + } + return 0; + } + if (!aconnector->mst_output_port) return 0; @@ -8301,7 +8381,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) drm_add_modes_noedid(connector, 1920, 1080); } else { amdgpu_dm_connector_ddc_get_modes(connector, drm_edid); - if (encoder) + if (encoder && (connector->connector_type != DRM_MODE_CONNECTOR_eDP) && + (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)) amdgpu_dm_connector_add_common_modes(encoder, connector); amdgpu_dm_connector_add_freesync_modes(connector, drm_edid); } @@ 
-12141,7 +12222,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n"); goto fail; } - status = dc_validate_global_state(dc, dm_state->context, true); + status = dc_validate_global_state(dc, dm_state->context, DC_VALIDATE_MODE_ONLY); if (status != DC_OK) { drm_dbg_atomic(dev, "DC global validation failure: %s (%d)", dc_status_to_str(status), status); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index d7d92f9911e4..b937da0a4e4a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -636,8 +636,9 @@ struct amdgpu_display_manager { * @bb_from_dmub: * * Bounding box data read from dmub during early initialization for DCN4+ + * Data is stored as a byte array that should be cast to the appropriate bb struct */ - struct dml2_soc_bb *bb_from_dmub; + void *bb_from_dmub; /** * @oem_i2c: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 87058271b00c..45feb404b097 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -299,6 +299,25 @@ static inline int amdgpu_dm_crtc_set_vblank(struct drm_crtc *crtc, bool enable) irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); if (enable) { + struct dc *dc = adev->dm.dc; + struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc); + struct psr_settings *psr = &acrtc_state->stream->link->psr_settings; + struct replay_settings *pr = &acrtc_state->stream->link->replay_settings; + bool sr_supported = (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED) || + pr->config.replay_supported; + + /* + * IPS & self-refresh features can cause vblank counter resets between + * vblank disable and enable. + * This may cause the system to get stuck waiting for the vblank counter. + * Call this function to estimate missed vblanks by using timestamps and + * update the vblank counter in DRM. + */ + if (dc->caps.ips_support && + dc->config.disable_ips != DMUB_IPS_DISABLE_ALL && + sr_supported && vblank->config.disable_immediate) + drm_crtc_vblank_restore(crtc); + /* vblank irq on -> Only need vupdate irq in vrr mode */ if (amdgpu_dm_crtc_vrr_active(acrtc_state)) rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true); @@ -661,6 +680,15 @@ static int amdgpu_dm_crtc_helper_atomic_check(struct drm_crtc *crtc, return -EINVAL; } + if (!state->legacy_cursor_update && amdgpu_dm_crtc_vrr_active(dm_crtc_state)) { + struct drm_plane_state *primary_state; + + /* Pull in primary plane for correct VRR handling */ + primary_state = drm_atomic_get_plane_state(state, crtc->primary); + if (IS_ERR(primary_state)) + return PTR_ERR(primary_state); + } + /* In some use cases, like reset, no stream is attached */ if (!dm_crtc_state->stream) return 0; @@ -728,7 +756,16 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, * support programmable degamma anywhere. */ is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; - drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0, + /* Don't enable DRM CRTC degamma property for DCN401 since the + * pre-blending degamma LUT doesn't apply to cursor, and therefore + * can't work similarly to a post-blending degamma LUT as in other hw + * versions. + * TODO: revisit it once KMS plane color API is merged.
+ */ + drm_crtc_enable_color_mgmt(&acrtc->base, + (is_dcn && + dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ? + MAX_COLOR_LUT_ENTRIES : 0, true, MAX_COLOR_LUT_ENTRIES); drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index c7d13e743e6c..b726bcd18e29 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -3988,7 +3988,7 @@ static int capabilities_show(struct seq_file *m, void *unused) struct hubbub *hubbub = dc->res_pool->hubbub; - if (hubbub->funcs->get_mall_en) + if (hubbub && hubbub->funcs->get_mall_en) hubbub->funcs->get_mall_en(hubbub, &mall_in_use); if (dc->cap_funcs.get_subvp_en) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index c16962256514..b1d1897f5eaf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -723,8 +723,8 @@ ret: static const struct bin_attribute data_attr = { .attr = {.name = "hdcp_srm", .mode = 0664}, .size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */ - .write_new = srm_data_write, - .read_new = srm_data_read, + .write = srm_data_write, + .read = srm_data_read, }; struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index d4395b92fb85..9e3e51a2dc49 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -1029,6 +1029,10 @@ enum dc_edid_status dm_helpers_read_local_edid( return EDID_NO_RESPONSE; edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw() + if (!edid || + edid->extensions >= sizeof(sink->dc_edid.raw_edid) / EDID_LENGTH) + return EDID_BAD_INPUT; + sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 25e8befbcc47..7187d5aedf0a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -107,7 +107,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, if (payload.write && result >= 0) { if (result) { /*one byte indicating partially written bytes*/ - drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX partially written\n"); + drm_dbg_dp(adev_to_drm(adev), "AUX partially written\n"); result = payload.data[0]; } else if (!payload.reply[0]) /*I2C_ACK|AUX_ACK*/ @@ -133,11 +133,11 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, break; } - drm_dbg_dp(adev_to_drm(adev), "amdgpu: DP AUX transfer fail:%d\n", operation_result); + drm_dbg_dp(adev_to_drm(adev), "DP AUX transfer fail:%d\n", operation_result); } if (payload.reply[0]) - drm_dbg_dp(adev_to_drm(adev), "amdgpu: AUX reply command not ACK: 0x%02x.", + drm_dbg_dp(adev_to_drm(adev), "AUX reply command not ACK: 0x%02x.", payload.reply[0]); return result; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index b7c6e8d13435..eef51652ca35 100644 --- 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -92,9 +92,9 @@ enum dm_micro_swizzle { MICRO_SWIZZLE_R = 3 }; -const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd) +const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier) { - return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); + return amdgpu_lookup_format_info(pixel_format, modifier); } void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h index 615d2ab2b803..ea2619b507db 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h @@ -58,7 +58,7 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, unsigned long possible_crtcs, const struct dc_plane_cap *plane_cap); -const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd); +const struct drm_format_info *amdgpu_dm_plane_get_format_info(u32 pixel_format, u64 modifier); void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, bool *per_pixel_alpha, bool *pre_multiplied_alpha, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index f984cb0cb889..ff7b867ae98b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -119,8 +119,10 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) psr_config.allow_multi_disp_optimizations = (amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT); - if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) - return false; + if (link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) { + if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) + return false; + } ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h index 4686d4b0cbad..95f890fda8aa 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h @@ -726,6 +726,32 @@ TRACE_EVENT(dcn_optc_lock_unlock_state, ) ); +TRACE_EVENT(amdgpu_dm_brightness, + TP_PROTO(void *function, u32 user_brightness, u32 converted_brightness, bool aux, bool ac), + TP_ARGS(function, user_brightness, converted_brightness, aux, ac), + TP_STRUCT__entry( + __field(void *, function) + __field(u32, user_brightness) + __field(u32, converted_brightness) + __field(bool, aux) + __field(bool, ac) + ), + TP_fast_assign( + __entry->function = function; + __entry->user_brightness = user_brightness; + __entry->converted_brightness = converted_brightness; + __entry->aux = aux; + __entry->ac = ac; + ), + TP_printk("%ps: brightness requested=%u converted=%u aux=%s power=%s", + (void *)__entry->function, + (u32)__entry->user_brightness, + (u32)__entry->converted_brightness, + (__entry->aux) ? "true" : "false", + (__entry->ac) ? 
"AC" : "DC" + ) +); + #endif /* _AMDGPU_DM_TRACE_H_ */ #undef TRACE_INCLUDE_PATH diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 67f08495b7e6..154fd2c18e88 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -174,11 +174,8 @@ static struct graphics_object_id bios_parser_get_connector_id( return object_id; } - if (tbl->ucNumberOfObjects <= i) { - dm_error("Can't find connector id %d in connector table of size %d.\n", - i, tbl->ucNumberOfObjects); + if (tbl->ucNumberOfObjects <= i) return object_id; - } id = le16_to_cpu(tbl->asObjects[i].usObjectID); object_id = object_id_from_bios_object_id(id); diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c index 2bcae0643e61..58e88778da7f 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c @@ -993,7 +993,7 @@ static enum bp_result set_pixel_clock_v3( allocation.sPCLKInput.usFbDiv = cpu_to_le16((uint16_t)bp_params->feedback_divider); allocation.sPCLKInput.ucFracFbDiv = - (uint8_t)bp_params->fractional_feedback_divider; + (uint8_t)(bp_params->fractional_feedback_divider / 100000); allocation.sPCLKInput.ucPostDiv = (uint8_t)bp_params->pixel_clock_post_divider; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 2c645dffec18..f2b1720a6a66 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -396,6 +396,7 @@ static enum bp_result transmitter_control_v1_7( process_phy_transition_init_params.display_port_link_rate = link->cur_link_settings.link_rate; process_phy_transition_init_params.transition_bitmask = link->phy_transition_bitmask; } + dig_v1_7.skip_phy_ssc_reduction = link->wa_flags.skip_phy_ssc_reduction; } // Handle PRE_OFF_TO_ON: Process ACPI PHY Transition Interlock diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile index d9955c5d2e5e..60021671b386 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile @@ -112,7 +112,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21) ############################################################################### # DCN30 ############################################################################### -CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o +CLK_MGR_DCN30 = dcn30_clk_mgr.o dcn30_clk_mgr_smu_msg.o dcn30m_clk_mgr.o dcn30m_clk_mgr_smu_msg.o AMD_DAL_CLK_MGR_DCN30 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn30/,$(CLK_MGR_DCN30)) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index 4c3e58c730b1..4071851f9e86 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -67,7 +67,7 @@ int clk_mgr_helper_get_active_display_cnt( if (dc_state_get_stream_subvp_type(context, stream) == SUBVP_PHANTOM) continue; - if (!stream->dpms_off || (stream_status && stream_status->plane_count)) + if (!stream->dpms_off || dc->is_switch_in_progress_dest || (stream_status && stream_status->plane_count)) display_count++; } @@ -158,7 +158,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p return NULL; } dce60_clk_mgr_construct(ctx, 
clk_mgr); - dce_clk_mgr_construct(ctx, clk_mgr); return &clk_mgr->base; } #endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index 26feefbb8990..dbd6ef1b60a0 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -72,9 +72,9 @@ static const struct state_dependent_clocks dce80_max_clks_by_state[] = { /* ClocksStateLow */ { .display_clk_khz = 352000, .pixel_clk_khz = 330000}, /* ClocksStateNominal */ -{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 }, +{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 }, /* ClocksStatePerformance */ -{ .display_clk_khz = 600000, .pixel_clk_khz = 400000 } }; +{ .display_clk_khz = 625000, .pixel_clk_khz = 400000 } }; int dentist_get_divider_from_did(int did) { @@ -245,6 +245,11 @@ int dce_set_clock( pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10; pxl_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; + /* DCE 6.0, DCE 6.4: engine clock is the same as PLL0 */ + if (clk_mgr_base->ctx->dce_version == DCE_VERSION_6_0 || + clk_mgr_base->ctx->dce_version == DCE_VERSION_6_4) + pxl_clk_params.pll_id = CLOCK_SOURCE_ID_PLL0; + if (clk_mgr_dce->dfs_bypass_active) pxl_clk_params.flags.SET_DISPCLK_DFS_BYPASS = true; @@ -386,8 +391,6 @@ static void dce_pplib_apply_display_requirements( { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; - pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); - dce110_fill_display_configs(context, pp_display_cfg); if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) @@ -400,11 +403,9 @@ static void dce_update_clocks(struct clk_mgr *clk_mgr_base, { struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dm_pp_power_level_change_request level_change_req; - int patched_disp_clk = context->bw_ctx.bw.dce.dispclk_khz; - - /*TODO: W/A for dal3 linux, investigate why this works */ - if (!clk_mgr_dce->dfs_bypass_active) - patched_disp_clk = patched_disp_clk * 115 / 100; + const int max_disp_clk = + clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz; + int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz); level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context); /* get max clock state from PPLIB */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c index f8409453434c..13cf415e38e5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c @@ -120,9 +120,15 @@ void dce110_fill_display_configs( const struct dc_state *context, struct dm_pp_display_configuration *pp_display_cfg) { + struct dc *dc = context->clk_mgr->ctx->dc; int j; int num_cfgs = 0; + pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); + pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz; + pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; + pp_display_cfg->crtc_index = dc->res_pool->res_cap->num_timing_generator; + for (j = 0; j < context->stream_count; j++) { int k; @@ -164,6 +170,23 @@ void dce110_fill_display_configs( cfg->v_refresh /= stream->timing.h_total; cfg->v_refresh = (cfg->v_refresh + stream->timing.v_total / 2) / stream->timing.v_total; + + /* Find first CRTC 
index and calculate its line time. + * This is necessary for DPM on SI GPUs. + */ + if (cfg->pipe_idx < pp_display_cfg->crtc_index) { + const struct dc_crtc_timing *timing = + &context->streams[0]->timing; + + pp_display_cfg->crtc_index = cfg->pipe_idx; + pp_display_cfg->line_time_in_us = + timing->h_total * 10000 / timing->pix_clk_100hz; + } + } + + if (!num_cfgs) { + pp_display_cfg->crtc_index = 0; + pp_display_cfg->line_time_in_us = 0; } pp_display_cfg->display_count = num_cfgs; @@ -223,25 +246,8 @@ void dce11_pplib_apply_display_requirements( pp_display_cfg->min_engine_clock_deep_sleep_khz = context->bw_ctx.bw.dce.sclk_deep_sleep_khz; - pp_display_cfg->avail_mclk_switch_time_us = - dce110_get_min_vblank_time_us(context); - /* TODO: dce11.2*/ - pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0; - - pp_display_cfg->disp_clk_khz = dc->clk_mgr->clks.dispclk_khz; - dce110_fill_display_configs(context, pp_display_cfg); - /* TODO: is this still applicable?*/ - if (pp_display_cfg->display_count == 1) { - const struct dc_crtc_timing *timing = - &context->streams[0]->timing; - - pp_display_cfg->crtc_index = - pp_display_cfg->disp_configs[0].pipe_idx; - pp_display_cfg->line_time_in_us = timing->h_total * 10000 / timing->pix_clk_100hz; - } - if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) dm_pp_apply_display_requirements(dc->ctx, pp_display_cfg); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c index 0267644717b2..a39641a0ff09 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce60/dce60_clk_mgr.c @@ -83,22 +83,13 @@ static const struct state_dependent_clocks dce60_max_clks_by_state[] = { static int dce60_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) { struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - int dprefclk_wdivider; - int dp_ref_clk_khz; - int target_div; + struct dc_context *ctx = clk_mgr_base->ctx; + int dp_ref_clk_khz = 0; - /* DCE6 has no DPREFCLK_CNTL to read DP Reference Clock source */ - - /* Read the mmDENTIST_DISPCLK_CNTL to get the currently - * programmed DID DENTIST_DPREFCLK_WDIVIDER*/ - REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPREFCLK_WDIVIDER, &dprefclk_wdivider); - - /* Convert DENTIST_DPREFCLK_WDIVIDERto actual divider*/ - target_div = dentist_get_divider_from_did(dprefclk_wdivider); - - /* Calculate the current DFS clock, in kHz.*/ - dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->base.dentist_vco_freq_khz) / target_div; + if (ASIC_REV_IS_TAHITI_P(ctx->asic_id.hw_internal_rev)) + dp_ref_clk_khz = ctx->dc_bios->fw_info.default_display_engine_pll_frequency; + else + dp_ref_clk_khz = clk_mgr_base->clks.dispclk_khz; return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz); } @@ -109,8 +100,6 @@ static void dce60_pplib_apply_display_requirements( { struct dm_pp_display_configuration *pp_display_cfg = &context->pp_display_cfg; - pp_display_cfg->avail_mclk_switch_time_us = dce110_get_min_vblank_time_us(context); - dce110_fill_display_configs(context, pp_display_cfg); if (memcmp(&dc->current_state->pp_display_cfg, pp_display_cfg, sizeof(*pp_display_cfg)) != 0) @@ -123,11 +112,9 @@ static void dce60_update_clocks(struct clk_mgr *clk_mgr_base, { struct clk_mgr_internal *clk_mgr_dce = TO_CLK_MGR_INTERNAL(clk_mgr_base); struct dm_pp_power_level_change_request level_change_req; - int patched_disp_clk = 
context->bw_ctx.bw.dce.dispclk_khz; - - /*TODO: W/A for dal3 linux, investigate why this works */ - if (!clk_mgr_dce->dfs_bypass_active) - patched_disp_clk = patched_disp_clk * 115 / 100; + const int max_disp_clk = + clk_mgr_dce->max_clks_by_state[DM_PP_CLOCKS_STATE_PERFORMANCE].display_clk_khz; + int patched_disp_clk = MIN(max_disp_clk, context->bw_ctx.bw.dce.dispclk_khz); level_change_req.power_level = dce_get_required_clocks_state(clk_mgr_base, context); /* get max clock state from PPLIB */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h index fa09c594fd36..06da34676965 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dalsmc.h @@ -56,6 +56,7 @@ #define DALSMC_MSG_SetDisplayRefreshFromMall 0xF #define DALSMC_MSG_SetExternalClientDfCstateAllow 0x10 #define DALSMC_MSG_BacoAudioD3PME 0x11 -#define DALSMC_Message_Count 0x12 +#define DALSMC_MSG_SmartAccess 0x12 +#define DALSMC_Message_Count 0x13 #endif diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 8083a553c60e..ef77fcd164ed 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -30,6 +30,7 @@ #include "dce100/dce_clk_mgr.h" #include "dcn30/dcn30_clk_mgr.h" #include "dml/dcn30/dcn30_fpu.h" +#include "dcn30/dcn30m_clk_mgr.h" #include "reg_helper.h" #include "core_types.h" #include "dm_helpers.h" @@ -498,7 +499,8 @@ static struct clk_mgr_funcs dcn3_funcs = { .are_clock_states_equal = dcn3_are_clock_states_equal, .enable_pme_wa = dcn3_enable_pme_wa, .notify_link_rate_change = dcn30_notify_link_rate_change, - .is_smu_present = dcn3_is_smu_present + .is_smu_present = dcn3_is_smu_present, + .set_smartmux_switch = dcn30m_set_smartmux_switch }; static void dcn3_init_clocks_fpga(struct clk_mgr *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c new file mode 100644 index 000000000000..8e8a11c7437e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.c @@ -0,0 +1,36 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "clk_mgr_internal.h" +#include "dcn30/dcn30m_clk_mgr.h" +#include "dcn30m_clk_mgr_smu_msg.h" + + +uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + return dcn30m_smu_set_smart_mux_switch(clk_mgr, pins_to_set); +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h new file mode 100644 index 000000000000..757985b2eadc --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr.h @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DCN30M_CLK_MGR_H__ +#define __DCN30M_CLK_MGR_H__ + +uint32_t dcn30m_set_smartmux_switch(struct clk_mgr *clk_mgr_base, uint32_t pins_to_set); + +#endif //__DCN30M_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c new file mode 100644 index 000000000000..0dd0583ff21e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.c @@ -0,0 +1,118 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "dcn30m_clk_mgr_smu_msg.h" + +#include "clk_mgr_internal.h" +#include "reg_helper.h" +#include "dm_helpers.h" + +#include "dalsmc.h" + +#define mmDAL_MSG_REG 0x1628A +#define mmDAL_ARG_REG 0x16273 +#define mmDAL_RESP_REG 0x16274 + +#define REG(reg_name) \ + mm ## reg_name + +#include "logger_types.h" +#undef DC_LOGGER +#define DC_LOGGER \ + CTX->logger +#define smu_print(str, ...) {DC_LOG_SMU(str, ##__VA_ARGS__); } + + +/* + * Function to be used instead of REG_WAIT macro because the wait ends when + * the register is NOT EQUAL to zero, and because the translation in msg_if.h + * won't work with REG_WAIT. + */ +static uint32_t dcn30m_smu_wait_for_response(struct clk_mgr_internal *clk_mgr, + unsigned int delay_us, unsigned int max_retries) +{ + uint32_t reg = 0; + + do { + reg = REG_READ(DAL_RESP_REG); + if (reg) + break; + + if (delay_us >= 1000) + msleep(delay_us/1000); + else if (delay_us > 0) + udelay(delay_us); + } while (max_retries--); + + /* handle DALSMC_Result_CmdRejectedBusy? */ + + /* Log? */ + + return reg; +} + +static bool dcn30m_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, + uint32_t msg_id, uint32_t param_in, uint32_t *param_out) +{ + uint32_t result; + /* Wait for response register to be ready */ + dcn30m_smu_wait_for_response(clk_mgr, 10, 200000); + + /* Clear response register */ + REG_WRITE(DAL_RESP_REG, 0); + + /* Set the parameter register for the SMU message */ + REG_WRITE(DAL_ARG_REG, param_in); + + /* Trigger the message transaction by writing the message ID */ + REG_WRITE(DAL_MSG_REG, msg_id); + + result = dcn30m_smu_wait_for_response(clk_mgr, 10, 200000); + + if (IS_SMU_TIMEOUT(result)) + dm_helpers_smu_timeout(CTX, msg_id, param_in, 10 * 200000); + + /* If the SMU responded OK, read back the output parameter */ + if (result == DALSMC_Result_OK) { + if (param_out) + *param_out = REG_READ(DAL_ARG_REG); + + return true; + } + + return false; +} + +uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set) +{ + uint32_t response = 0; + + smu_print("SMU Set SmartMux Switch: switch_dgpu = %d\n", pins_to_set); + + dcn30m_smu_send_msg_with_param(clk_mgr, + DALSMC_MSG_SmartAccess, pins_to_set, &response); + + return response; +} diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h new file mode 100644 index 000000000000..8a59a473fc5e --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30m_clk_mgr_smu_msg.h @@ -0,0 +1,34 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ +#define DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ + +#include "core_types.h" + +struct clk_mgr_internal; + +uint32_t dcn30m_smu_set_smart_mux_switch(struct clk_mgr_internal *clk_mgr, uint32_t pins_to_set); +#endif /* DAL_DC_DCN30M_CLK_MGR_SMU_MSG_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c index a3b8e3d4a429..b59703467128 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c @@ -22,8 +22,6 @@ #include "dcn/dcn_4_1_0_offset.h" #include "dcn/dcn_4_1_0_sh_mask.h" -#include "dml/dcn401/dcn401_fpu.h" - #define DCN_BASE__INST0_SEG1 0x000000C0 #define mmCLK01_CLK0_CLK_PLL_REQ 0x16E37 @@ -183,43 +181,36 @@ static void dcn401_init_single_clock(struct clk_mgr_internal *clk_mgr, PPCLK_e c static void dcn401_build_wm_range_table(struct clk_mgr *clk_mgr) { - /* legacy */ - DC_FP_START(); - dcn401_build_wm_range_table_fpu(clk_mgr); - DC_FP_END(); - - if (clk_mgr->ctx->dc->debug.using_dml21) { - /* For min clocks use as reported by PM FW and report those as min */ - uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; - uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - - /* Set A - Normal - default values */ - clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; + /* For min clocks use as reported by PM FW and report those as min */ + uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; + uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - /* Set B - Unused on dcn4 */ - clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false; + /* Set A - Normal - default values */ + clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; - /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */ - /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ - if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; - 
clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF; - } else { - clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false; - } + /* Set B - Unused on dcn4 */ + clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = false; - /* Set 1B - Unused on dcn4 */ - clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false; + /* Set 1A - Dummy P-State - P-State latency set to "dummy p-state" value */ + /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ + if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = true; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_dcfclk = 0xFFFF; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.min_uclk = min_uclk_mhz; + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].pmfw_breakdown.max_uclk = 0xFFFF; + } else { + clk_mgr->bw_params->wm_table.nv_entries[WM_1A].valid = false; } + + /* Set 1B - Unused on dcn4 */ + clk_mgr->bw_params->wm_table.nv_entries[WM_1B].valid = false; } void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) @@ -320,6 +311,25 @@ void dcn401_init_clocks(struct clk_mgr *clk_mgr_base) dcn401_build_wm_range_table(clk_mgr_base); } +bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + return clk_mgr->smu_present && clk_mgr->dpm_present && + ((clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.dcfclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.dtbclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.fclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.memclk_mhz) || + (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && + clk_mgr_base->bw_params->dc_mode_limit.socclk_mhz)); +} + static void dcn401_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass, struct clk_mgr *clk_mgr_base, struct clk_log_info *log_info) { @@ -1490,6 +1500,35 @@ static int dcn401_get_dispclk_from_dentist(struct clk_mgr *clk_mgr_base) return 0; } +unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type) +{ + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + + unsigned int num_clk_levels; + + switch (clk_type) { + case CLK_TYPE_DISPCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; + return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ? 
+ clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 : + clk_mgr->base.boot_snapshot.dispclk; + case CLK_TYPE_DPPCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dppclk_levels; + return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DPPCLK) ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dppclk_mhz * 1000 : + clk_mgr->base.boot_snapshot.dppclk; + case CLK_TYPE_DSCCLK: + num_clk_levels = clk_mgr->base.bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; + return dcn401_is_ppclk_dpm_enabled(clk_mgr, PPCLK_DISPCLK) ? + clk_mgr->base.bw_params->clk_table.entries[num_clk_levels - 1].dispclk_mhz * 1000 / 3 : + clk_mgr->base.boot_snapshot.dispclk / 3; + default: + break; + } + + return 0; +} + static struct clk_mgr_funcs dcn401_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .get_dtb_ref_clk_frequency = dcn401_get_dtb_ref_freq_khz, @@ -1505,6 +1544,8 @@ static struct clk_mgr_funcs dcn401_funcs = { .get_dispclk_from_dentist = dcn401_get_dispclk_from_dentist, .get_hard_min_memclk = dcn401_get_hard_min_memclk, .get_hard_min_fclk = dcn401_get_hard_min_fclk, + .is_dc_mode_present = dcn401_is_dc_mode_present, + .get_max_clock_khz = dcn401_get_max_clock_khz, }; struct clk_mgr_internal *dcn401_clk_mgr_construct( @@ -1565,7 +1606,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct( clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL); if (!clk_mgr->base.bw_params) { BREAK_TO_DEBUGGER(); - kfree(clk_mgr); + kfree(clk_mgr401); return NULL; } @@ -1576,6 +1617,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct( if (!clk_mgr->wm_range_table) { BREAK_TO_DEBUGGER(); kfree(clk_mgr->base.bw_params); + kfree(clk_mgr401); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h index 6c9ae5ca2c7e..97a1ce1e8a9e 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.h @@ -105,10 +105,13 @@ struct dcn401_clk_mgr { }; void dcn401_init_clocks(struct clk_mgr *clk_mgr_base); +bool dcn401_is_dc_mode_present(struct clk_mgr *clk_mgr_base); struct clk_mgr_internal *dcn401_clk_mgr_construct(struct dc_context *ctx, struct dccg *dccg); void dcn401_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr); +unsigned int dcn401_get_max_clock_khz(struct clk_mgr *clk_mgr_base, enum clk_type clk_type); + #endif /* __DCN401_CLK_MGR_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 56d011a1323c..dcc48b5238e5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -217,11 +217,24 @@ static bool create_links( connectors_num, num_virtual_links); - // condition loop on link_count to allow skipping invalid indices + /* When getting the number of connectors, the VBIOS reports the number of valid indices, + * but it doesn't say which indices are valid, and not every index has an actual connector. + * So, if we don't find a connector on an index, that is not an error. + * + * - There is no guarantee that the first N indices will be valid + * - VBIOS may report a higher number of valid indices than there are actual connectors + * - Some VBIOS have valid configurations for more connectors than there actually are + * on the card. This may be because the manufacturer used the same VBIOS for different + * variants of the same card.
+ */ for (i = 0; dc->link_count < connectors_num && i < MAX_LINKS; i++) { + struct graphics_object_id connector_id = bios->funcs->get_connector_id(bios, i); struct link_init_data link_init_params = {0}; struct dc_link *link; + if (connector_id.id == CONNECTOR_ID_UNKNOWN) + continue; + DC_LOG_DC("BIOS object table - printing link object info for connector number: %d, link_index: %d", i, dc->link_count); link_init_params.ctx = dc->ctx; @@ -241,6 +254,7 @@ static bool create_links( DC_LOG_DC("BIOS object table - end"); /* Create a link for each usb4 dpia port */ + dc->lowest_dpia_link_index = MAX_LINKS; for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) { struct link_init_data link_init_params = {0}; struct dc_link *link; @@ -253,6 +267,9 @@ static bool create_links( link = dc->link_srv->create_link(&link_init_params); if (link) { + if (dc->lowest_dpia_link_index > dc->link_count) + dc->lowest_dpia_link_index = dc->link_count; + dc->links[dc->link_count] = link; link->dc = dc; ++dc->link_count; @@ -934,17 +951,18 @@ static void dc_destruct(struct dc *dc) if (dc->link_srv) link_destroy_link_service(&dc->link_srv); - if (dc->ctx->gpio_service) - dal_gpio_service_destroy(&dc->ctx->gpio_service); + if (dc->ctx) { + if (dc->ctx->gpio_service) + dal_gpio_service_destroy(&dc->ctx->gpio_service); - if (dc->ctx->created_bios) - dal_bios_parser_destroy(&dc->ctx->dc_bios); + if (dc->ctx->created_bios) + dal_bios_parser_destroy(&dc->ctx->dc_bios); + kfree(dc->ctx->logger); + dc_perf_trace_destroy(&dc->ctx->perf_trace); - kfree(dc->ctx->logger); - dc_perf_trace_destroy(&dc->ctx->perf_trace); - - kfree(dc->ctx); - dc->ctx = NULL; + kfree(dc->ctx); + dc->ctx = NULL; + } kfree(dc->bw_vbios); dc->bw_vbios = NULL; @@ -972,6 +990,8 @@ static bool dc_construct_ctx(struct dc *dc, if (!dc_ctx) return false; + dc_stream_init_rmcm_3dlut(dc); + dc_ctx->cgs_device = init_params->cgs_device; dc_ctx->driver_context = init_params->driver; dc_ctx->dc = dc; @@ -2377,7 +2397,7 @@ enum dc_status dc_commit_streams(struct dc *dc, struct dc_commit_streams_params context->power_source = params->power_source; - res = dc_validate_with_context(dc, set, params->stream_count, context, false); + res = dc_validate_with_context(dc, set, params->stream_count, context, DC_VALIDATE_MODE_AND_PROGRAMMING); /* * Only update link encoder to stream assignment after bandwidth validation passed. 
@@ -3300,7 +3320,8 @@ static void copy_stream_update_to_stream(struct dc *dc, if (dsc_validate_context) { stream->timing.dsc_cfg = *update->dsc_config; stream->timing.flags.DSC = enable_dsc; - if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, true) != DC_OK) { + if (dc->res_pool->funcs->validate_bandwidth(dc, dsc_validate_context, + DC_VALIDATE_MODE_ONLY) != DC_OK) { stream->timing.dsc_cfg = old_dsc_cfg; stream->timing.flags.DSC = old_dsc_enabled; update->dsc_config = NULL; @@ -3522,7 +3543,7 @@ static bool update_planes_and_stream_state(struct dc *dc, } if (update_type == UPDATE_TYPE_FULL) { - if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) { + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) { BREAK_TO_DEBUGGER(); goto fail; } @@ -4628,7 +4649,8 @@ static struct dc_state *create_minimal_transition_state(struct dc *dc, backup_and_set_minimal_pipe_split_policy(dc, base_context, policy); /* commit minimal state */ - if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, false) == DC_OK) { + if (dc->res_pool->funcs->validate_bandwidth(dc, minimal_transition_context, + DC_VALIDATE_MODE_AND_PROGRAMMING) == DC_OK) { /* prevent underflow and corruption when reconfiguring pipes */ force_vsync_flip_in_minimal_transition_context(minimal_transition_context); } else { @@ -5151,7 +5173,7 @@ static bool update_planes_and_stream_v1(struct dc *dc, copy_stream_update_to_stream(dc, context, stream, stream_update); if (update_type >= UPDATE_TYPE_FULL) { - if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) { + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) { DC_ERROR("Mode validation failed for stream update!\n"); dc_state_release(context); return false; @@ -5435,8 +5457,8 @@ bool dc_update_planes_and_stream(struct dc *dc, else ret = update_planes_and_stream_v2(dc, srf_updates, surface_count, stream, stream_update); - - if (ret) + if (ret && (dc->ctx->dce_version >= DCN_VERSION_3_2 || + dc->ctx->dce_version == DCN_VERSION_3_01)) clear_update_flags(srf_updates, surface_count, stream); return ret; @@ -5467,7 +5489,7 @@ void dc_commit_updates_for_stream(struct dc *dc, ret = update_planes_and_stream_v1(dc, srf_updates, surface_count, stream, stream_update, state); - if (ret) + if (ret && dc->ctx->dce_version >= DCN_VERSION_3_2) clear_update_flags(srf_updates, surface_count, stream); } @@ -5540,6 +5562,15 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state) dc->hwss.init_sys_ctx(dc->hwseq, dc, &dc->vm_pa_config); } break; + case DC_ACPI_CM_POWER_STATE_D3: + if (dc->caps.ips_support) + dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, DC_ACPI_CM_POWER_STATE_D3); + + if (dc->caps.ips_v2_support) { + if (dc->clk_mgr->funcs->set_low_power_state) + dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr); + } + break; default: ASSERT(dc->current_state->stream_count == 0); dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state); @@ -6337,13 +6368,14 @@ void dc_set_edp_power(const struct dc *dc, struct dc_link *edp_link, edp_link->dc->link_srv->edp_set_panel_power(edp_link, powerOn); } -/* - ***************************************************************************** +/** * dc_get_power_profile_for_dc_state() - extracts power profile from dc state * * Called when DM wants to make power policy decisions based on dc_state * - 
***************************************************************************** + * @context: Pointer to the dc_state from which the power profile is extracted. + * + * Return: The power profile structure containing the power level information. */ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state *context) { @@ -6359,13 +6391,14 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state return profile; } -/* - ********************************************************************************** +/** * dc_get_det_buffer_size_from_state() - extracts detile buffer size from dc state * - * Called when DM wants to log detile buffer size from dc_state + * This function is called to log the detile buffer size from the dc_state. + * + * @context: A pointer to the dc_state from which the detile buffer size is extracted. * - ********************************************************************************** + * Return: the size of the detile buffer, or 0 if not available. */ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) { @@ -6377,6 +6410,36 @@ unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context) return 0; } +/** + * dc_get_host_router_index() - Get the host router index of a DPIA link + * + * This function returns the host router index of the target link, provided the target link is a DPIA link. + * + * @link: Pointer to the target link (input) + * @host_router_index: Pointer to store the host router index of the target link (output). + * + * Return: true if the host router index is found and valid. + * + */ +bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index) +{ + struct dc *dc; + + if (!link || !host_router_index || link->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) + return false; + + dc = link->ctx->dc; + + if (link->link_index < dc->lowest_dpia_link_index) + return false; + + *host_router_index = (link->link_index - dc->lowest_dpia_link_index) / dc->caps.num_of_dpias_per_host_router; + if (*host_router_index < dc->caps.num_of_host_routers) + return true; + else + return false; +} + bool dc_is_cursor_limit_pending(struct dc *dc) { uint32_t i; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c index 7551d0a3fe82..bbce751b485f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c @@ -268,6 +268,8 @@ char *dc_status_to_str(enum dc_status status) return "Insufficient DP link bandwidth"; case DC_FAIL_HW_CURSOR_SUPPORT: return "HW Cursor not supported"; + case DC_FAIL_DP_TUNNEL_BW_VALIDATE: + return "Fail DP Tunnel BW validation"; case DC_ERROR_UNEXPECTED: return "Unexpected error"; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c index 7014b8d000bb..ec4e80e5b6eb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_hw_sequencer.c @@ -427,6 +427,32 @@ void get_hdr_visual_confirm_color( } } +/* Visual Confirm color definition for Smart Mux */ +void get_smartmux_visual_confirm_color( + struct dc *dc, + struct tg_color *color) +{ + uint32_t color_value = MAX_TG_COLOR_VALUE; + + const struct tg_color sm_ver_colors[5] = { + {0, 0, 0}, /* SMUX_MUXCONTROL_UNSUPPORTED - Black */ + {0, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_v10 - Green */ + {0, MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_v15 - Cyan */ + 
{MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE, 0}, /* SMUX_MUXCONTROL_MDM - Yellow */ + {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, /* SMUX_MUXCONTROL_vUNKNOWN - Magenta*/ + }; + + if (dc->caps.is_apu) { + /* APU driving the eDP */ + *color = sm_ver_colors[dc->config.smart_mux_version]; + } else { + /* dGPU driving the eDP - red */ + color->color_r_cr = color_value; + color->color_g_y = 0; + color->color_b_cb = 0; + } +} + /* Visual Confirm color definition for VABC */ void get_vabc_visual_confirm_color( struct pipe_ctx *pipe_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c index 71e15da4bb69..130455f2802a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_exports.c @@ -515,7 +515,8 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable) link->dc->link_srv->enable_hpd_filter(link, enable); } -bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, const unsigned int count) +enum dc_status dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx) { - return dc->link_srv->validate_dpia_bandwidth(streams, count); + return dc->link_srv->validate_dp_tunnel_bandwidth(dc, new_ctx); } + diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index 3da25bd8b578..4d6181e7c612 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -3940,7 +3940,9 @@ enum dc_status resource_map_pool_resources( /* TODO: Add check if ASIC support and EDID audio */ if (!stream->converter_disable_audio && dc_is_audio_capable_signal(pipe_ctx->stream->signal) && - stream->audio_info.mode_count && stream->audio_info.flags.all) { + stream->audio_info.mode_count && + (stream->audio_info.flags.all || + (stream->sink && stream->sink->edid_caps.panel_patch.skip_audio_sab_check))) { pipe_ctx->stream_res.audio = find_first_free_audio( &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version); @@ -4053,7 +4055,7 @@ static bool add_all_planes_for_stream( * @set: An array of dc_validation_set with all the current streams reference * @set_count: Total of streams * @context: New context - * @fast_validate: Enable or disable fast validation + * @validate_mode: identify the validation mode * * This function updates the potential new stream in the context object. It * creates multiple lists for the add, remove, and unchanged streams. 
In @@ -4068,7 +4070,7 @@ enum dc_status dc_validate_with_context(struct dc *dc, const struct dc_validation_set set[], int set_count, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { struct dc_stream_state *unchanged_streams[MAX_PIPES] = { 0 }; struct dc_stream_state *del_streams[MAX_PIPES] = { 0 }; @@ -4242,7 +4244,7 @@ enum dc_status dc_validate_with_context(struct dc *dc, dc_state_set_stream_subvp_cursor_limit(context->streams[i], context, false); } - res = dc_validate_global_state(dc, context, fast_validate); + res = dc_validate_global_state(dc, context, validate_mode); /* calculate pixel rate divider after deciding pixel clock & odm combine */ if ((dc->hwss.calculate_pix_rate_divider) && (res == DC_OK)) { @@ -4299,7 +4301,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) * * @dc: dc struct for this driver * @new_ctx: state to be validated - * @fast_validate: set to true if only yes/no to support matters + * @validate_mode: identify the validation mode * * Checks hardware resource availability and bandwidth requirement. * @@ -4309,7 +4311,7 @@ static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) enum dc_status dc_validate_global_state( struct dc *dc, struct dc_state *new_ctx, - bool fast_validate) + enum dc_validate_mode validate_mode) { enum dc_status result = DC_ERROR_UNEXPECTED; int i, j; @@ -4368,7 +4370,7 @@ enum dc_status dc_validate_global_state( result = resource_build_scaling_params_for_context(dc, new_ctx); if (result == DC_OK) - result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate); + result = dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, validate_mode); return result; } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c index 4db7383720fd..883054bb18e7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c @@ -194,11 +194,6 @@ static void init_state(struct dc *dc, struct dc_state *state) struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *params) { struct dc_state *state; -#ifdef CONFIG_DRM_AMD_DC_FP - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); -#endif state = kvzalloc(sizeof(struct dc_state), GFP_KERNEL); @@ -211,14 +206,12 @@ struct dc_state *dc_state_create(struct dc *dc, struct dc_state_create_params *p #ifdef CONFIG_DRM_AMD_DC_FP if (dc->debug.using_dml2) { - dml2_opt->use_clock_dc_limits = false; - if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2)) { + if (!dml2_create(dc, &dc->dml2_options, &state->bw_ctx.dml2)) { dc_state_release(state); return NULL; } - dml2_opt->use_clock_dc_limits = true; - if (!dml2_create(dc, dml2_opt, &state->bw_ctx.dml2_dc_power_source)) { + if (!dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) { dc_state_release(state); return NULL; } @@ -434,6 +427,8 @@ enum dc_status dc_state_remove_stream( return DC_ERROR_UNEXPECTED; } + dc_stream_release_3dlut_for_stream(dc, stream); + dc_stream_release(state->streams[i]); state->stream_count--; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index b883fb24fa12..4d6bc9fd4faa 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -857,6 +857,73 @@ void dc_stream_log(const struct dc *dc, const struct dc_stream_state *stream) } /* +* 
dc_stream_get_3dlut_for_stream() +* Requirements: +* 1. If the stream already owns an RMCM instance, return it. +* 2. If it doesn't and we don't need to allocate, return NULL. +* 3. If there's a free RMCM instance, assign to stream and return it. +* 4. If no free RMCM instances, return NULL. +*/ + +struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream, + bool allocate_one) +{ + unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts; + + // see if one is allocated for this stream + for (int i = 0; i < num_rmcm; i++) { + if (dc->res_pool->rmcm_3dlut[i].isInUse && + dc->res_pool->rmcm_3dlut[i].stream == stream) + return &dc->res_pool->rmcm_3dlut[i]; + } + + // case: none found, and no need to allocate one + if (!allocate_one) + return NULL; + + // see if there is an unused 3dlut to allocate + for (int i = 0; i < num_rmcm; i++) { + if (!dc->res_pool->rmcm_3dlut[i].isInUse) { + dc->res_pool->rmcm_3dlut[i].isInUse = true; + dc->res_pool->rmcm_3dlut[i].stream = stream; + return &dc->res_pool->rmcm_3dlut[i]; + } + } + + // no free 3dlut available + return NULL; +} + + +void dc_stream_release_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream) +{ + struct dc_rmcm_3dlut *rmcm_3dlut = + dc_stream_get_3dlut_for_stream(dc, stream, false); + + if (rmcm_3dlut) { + rmcm_3dlut->isInUse = false; + rmcm_3dlut->stream = NULL; + rmcm_3dlut->protection_bits = 0; + } +} + + +void dc_stream_init_rmcm_3dlut(struct dc *dc) +{ + unsigned int num_rmcm = dc->caps.color.mpc.num_rmcm_3dluts; + + for (int i = 0; i < num_rmcm; i++) { + dc->res_pool->rmcm_3dlut[i].isInUse = false; + dc->res_pool->rmcm_3dlut[i].stream = NULL; + dc->res_pool->rmcm_3dlut[i].protection_bits = 0; + } +} + +/* * Finds the greatest index in refresh_rate_hz that contains a value <= refresh */ static int dc_stream_get_nearest_smallest_index(struct dc_stream_state *stream, int refresh) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 1d917be36fc4..59c07756130d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -46,6 +46,8 @@ #include "dmub/inc/dmub_cmd.h" +#include "sspl/dc_spl_types.h" + struct abm_save_restore; /* forward declaration */ @@ -53,7 +55,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.334" +#define DC_VER "3.2.340" /** * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC */ @@ -66,7 +68,11 @@ struct dmub_notification; #define MAX_STREAMS 6 #define MIN_VIEWPORT_SIZE 12 #define MAX_NUM_EDP 2 -#define MAX_HOST_ROUTERS_NUM 2 +#define MAX_SUPPORTED_FORMATS 7 + +#define MAX_HOST_ROUTERS_NUM 3 +#define MAX_DPIA_PER_HOST_ROUTER 3 +#define MAX_DPIA_NUM (MAX_HOST_ROUTERS_NUM * MAX_DPIA_PER_HOST_ROUTER) /* Display Core Interfaces */ struct dc_versions { @@ -192,6 +198,34 @@ struct dpp_color_caps { struct rom_curve_caps ogam_rom_caps; }; +/* The structure below describes the HW support for mem layout; extend the support + range to match what the OS could handle in the roadmap */ +struct lut3d_caps { + uint32_t dma_3d_lut : 1; /*< DMA mode support for 3D LUT */ + struct { + uint32_t swizzle_3d_rgb : 1; + uint32_t swizzle_3d_bgr : 1; + uint32_t linear_1d : 1; + } mem_layout_support; + struct { + uint32_t unorm_12msb : 1; + uint32_t unorm_12lsb : 1; + uint32_t float_fp1_5_10 : 1; + } mem_format_support; + struct { + uint32_t order_rgba : 1; + uint32_t order_bgra : 1; + } mem_pixel_order_support; + /*< size 
options are 9, 17, 33, 45, 65 */ + struct { + uint32_t dim_9 : 1; /* 3D LUT support for 9x9x9 */ + uint32_t dim_17 : 1; /* 3D LUT support for 17x17x17 */ + uint32_t dim_33 : 1; /* 3D LUT support for 33x33x33 */ + uint32_t dim_45 : 1; /* 3D LUT support for 45x45x45 */ + uint32_t dim_65 : 1; /* 3D LUT support for 65x65x65 */ + } lut_dim_caps; +}; + /** * struct mpc_color_caps - color pipeline capabilities for multiple pipe and * plane combined blocks @@ -203,14 +237,21 @@ struct dpp_color_caps { * @shared_3d_lut: shared 3D LUT flag. Can be either DPP or MPC, but single * instance * @ogam_rom_caps: pre-defined curve caps for regamma 1D LUT + * @mcm_3d_lut_caps: HW support cap for MCM LUT memory + * @rmcm_3d_lut_caps: HW support cap for RMCM LUT memory + * @preblend: whether color manager supports preblend with MPC */ struct mpc_color_caps { uint16_t gamut_remap : 1; uint16_t ogam_ram : 1; uint16_t ocsc : 1; uint16_t num_3dluts : 3; + uint16_t num_rmcm_3dluts : 3; uint16_t shared_3d_lut:1; struct rom_curve_caps ogam_rom_caps; + struct lut3d_caps mcm_3d_lut_caps; + struct lut3d_caps rmcm_3d_lut_caps; + bool preblend; }; /** @@ -270,6 +311,7 @@ struct dc_caps { bool dmcub_support; bool zstate_support; bool ips_support; + bool ips_v2_support; uint32_t num_of_internal_disp; enum dp_protocol_version max_dp_protocol_version; unsigned int mall_size_per_mem_channel; @@ -305,6 +347,10 @@ struct dc_caps { /* Conservative limit for DCC cases which require ODM4:1 to support */ uint32_t dcc_plane_width_limit; struct dc_scl_caps scl_caps; + uint8_t num_of_host_routers; + uint8_t num_of_dpias_per_host_router; + /* limit of the ODM only, could be limited by other factors (like pipe count) */ + uint8_t max_odm_combine_factor; }; struct dc_bug_wa { @@ -459,6 +505,7 @@ struct dc_config { bool use_spl; bool prefer_easf; bool use_pipe_ctx_sync_logic; + int smart_mux_version; bool ignore_dpref_ss; bool enable_mipi_converter_optimization; bool use_default_clock_table; @@ -469,6 +516,7 @@ struct dc_config { bool EnableMinDispClkODM; bool enable_auto_dpm_test_logs; unsigned int disable_ips; + unsigned int disable_ips_rcg; unsigned int disable_ips_in_vpb; bool disable_ips_in_dpms_off; bool usb4_bw_alloc_support; @@ -481,6 +529,8 @@ struct dc_config { bool set_pipe_unlock_order; bool enable_dpia_pre_training; bool unify_link_enc_assignment; + struct spl_sharpness_range dcn_sharpness_range; + struct spl_sharpness_range dcn_override_sharpness_range; }; enum visual_confirm { @@ -492,6 +542,7 @@ enum visual_confirm { VISUAL_CONFIRM_SWAPCHAIN = 6, VISUAL_CONFIRM_FAMS = 7, VISUAL_CONFIRM_SWIZZLE = 9, + VISUAL_CONFIRM_SMARTMUX_DGPU = 10, VISUAL_CONFIRM_REPLAY = 12, VISUAL_CONFIRM_SUBVP = 14, VISUAL_CONFIRM_MCLK_SWITCH = 16, @@ -770,6 +821,7 @@ enum pg_hw_resources { PG_DCHVM, PG_DWB, PG_HPO, + PG_DCOH, PG_HW_RESOURCES_NUM_ELEMENT }; @@ -786,10 +838,8 @@ union dpia_debug_options { uint32_t disable_mst_dsc_work_around:1; /* bit 3 */ uint32_t enable_force_tbt3_work_around:1; /* bit 4 */ uint32_t disable_usb4_pm_support:1; /* bit 5 */ - uint32_t enable_consolidated_dpia_dp_lt:1; /* bit 6 */ - uint32_t enable_dpia_pre_training:1; /* bit 7 */ - uint32_t unify_link_enc_assignment:1; /* bit 8 */ - uint32_t reserved:24; + uint32_t enable_usb4_bw_zero_alloc_patch:1; /* bit 6 */ + uint32_t reserved:25; } bits; uint32_t raw; }; @@ -915,6 +965,9 @@ struct dc_debug_options { bool disable_dsc_power_gate; bool disable_optc_power_gate; bool disable_hpo_power_gate; + bool disable_io_clk_power_gate; + bool disable_mem_power_gate; + bool 
disable_dio_power_gate; int dsc_min_slice_height_override; int dsc_bpp_increment_div; bool disable_pplib_wm_range; @@ -1151,7 +1204,7 @@ struct dc_init_data { uint32_t *dcn_reg_offsets; uint32_t *nbio_reg_offsets; uint32_t *clk_reg_offsets; - struct dml2_soc_bb *bb_from_dmub; + void *bb_from_dmub; }; struct dc_callback_init { @@ -1252,6 +1305,12 @@ union dc_3dlut_state { }; +struct dc_rmcm_3dlut { + bool isInUse; + const struct dc_stream_state *stream; + uint8_t protection_bits; +}; + struct dc_3dlut { struct kref refcount; struct tetrahedral_params lut_3d; @@ -1389,6 +1448,8 @@ struct dc_plane_state { int sharpness_level; enum linear_light_scaling linear_light_scaling; unsigned int sdr_white_level_nits; + struct spl_sharpness_range sharpness_range; + enum sharpness_range_source sharpness_source; }; struct dc_plane_info { @@ -1570,6 +1631,7 @@ struct dc_scratch_space { bool blank_stream_on_ocs_change; bool read_dpcd204h_on_irq_hpd; bool force_dp_ffe_preset; + bool skip_phy_ssc_reduction; } wa_flags; union dc_dp_ffe_preset forced_dp_ffe_preset; struct link_mst_stream_allocation_table mst_stream_alloc_table; @@ -1579,6 +1641,8 @@ struct dc_scratch_space { struct gpio *hpd_gpio; enum dc_link_fec_state fec_state; + bool is_dds; + bool is_display_mux_present; bool link_powered_externally; // Used to bypass hardware sequencing delays when panel is powered down forcibly struct dc_panel_config panel_config; @@ -1603,6 +1667,7 @@ struct dc { uint8_t link_count; struct dc_link *links[MAX_LINKS]; + uint8_t lowest_dpia_link_index; struct link_service *link_srv; struct dc_state *current_state; @@ -1632,6 +1697,10 @@ struct dc { /* Require to maintain clocks and bandwidth for UEFI enabled HW */ + /* For eDP to know the switching state of SmartMux */ + bool is_switch_in_progress_orig; + bool is_switch_in_progress_dest; + /* FBC compressor */ struct compressor *fbc_compressor; @@ -1662,7 +1731,7 @@ struct dc { } scratch; struct dml2_configuration_options dml2_options; - struct dml2_configuration_options dml2_tmp; + struct dml2_configuration_options dml2_dc_power_options; enum dc_acpi_cm_power_state power_state; }; @@ -1767,19 +1836,15 @@ enum dc_status dc_validate_with_context(struct dc *dc, const struct dc_validation_set set[], int set_count, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dc_set_generic_gpio_for_stereo(bool enable, struct gpio_service *gpio_service); -/* - * fast_validate: we return after determining if we can support the new state, - * but before we populate the programming info - */ enum dc_status dc_validate_global_state( struct dc *dc, struct dc_state *new_ctx, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dc_acquire_release_mpc_3dlut( struct dc *dc, bool acquire, @@ -2375,17 +2440,12 @@ void dc_link_dp_dpia_handle_usb4_bandwidth_allocation_for_link( struct dc_link *link, int peak_bw); /* - * Validate the BW of all the valid DPIA links to make sure it doesn't exceed - * available BW for each host router - * - * @dc: pointer to dc struct - * @stream: pointer to all possible streams - * @count: number of valid DPIA streams + * Calculates the DP tunneling bandwidth required for the stream timing + * and aggregates the stream bandwidth for the respective DP tunneling link * - * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE + * return: dc_status */ -bool dc_link_dp_dpia_validate(struct dc *dc, const struct dc_stream_state *streams, - const unsigned int count); +enum dc_status 
dc_link_validate_dp_tunneling_bandwidth(const struct dc *dc, const struct dc_state *new_ctx); /* Sink Interfaces - A sink corresponds to a display output device */ @@ -2595,6 +2655,8 @@ struct dc_power_profile dc_get_power_profile_for_dc_state(const struct dc_state unsigned int dc_get_det_buffer_size_from_state(const struct dc_state *context); +bool dc_get_host_router_index(const struct dc_link *link, unsigned int *host_router_index); + /* DSC Interfaces */ #include "dc_dsc.h" diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c index afbcf866520e..f5ef1a07078e 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c @@ -1269,12 +1269,16 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; new_signals.bits.allow_z10 = 1; + // New in IPSv2.0 + new_signals.bits.allow_ips1z8 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) { new_signals.bits.allow_ips1 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) { + // IPSv1.0 only new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) { + // IPSv1.0 only new_signals.bits.allow_pg = 1; new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; @@ -1286,6 +1290,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_ips1 = 1; new_signals.bits.allow_ips2 = 1; new_signals.bits.allow_z10 = 1; + // New in IPSv2.0 + new_signals.bits.allow_ips1z8 = 1; } else { /* RCG only */ new_signals.bits.allow_pg = 0; @@ -1293,8 +1299,28 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) new_signals.bits.allow_ips2 = 0; new_signals.bits.allow_z10 = 0; } + } else if (dc->config.disable_ips == DMUB_IPS_DISABLE_Z8_RETENTION) { + new_signals.bits.allow_pg = 1; + new_signals.bits.allow_ips1 = 1; + new_signals.bits.allow_ips2 = 1; + new_signals.bits.allow_z10 = 1; + } + // Setting RCG allow bits (IPSv2.0) + if (dc->config.disable_ips_rcg == DMUB_IPS_RCG_ENABLE) { + new_signals.bits.allow_ips0_rcg = 1; + new_signals.bits.allow_ips1_rcg = 1; + } else if (dc->config.disable_ips_rcg == DMUB_IPS0_RCG_DISABLE) { + new_signals.bits.allow_ips1_rcg = 1; + } else if (dc->config.disable_ips_rcg == DMUB_IPS1_RCG_DISABLE) { + new_signals.bits.allow_ips0_rcg = 1; + } + // IPS dynamic allow bits (IPSv2 change, vpb use case) + if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG) { + new_signals.bits.allow_dynamic_ips1 = 1; + } else if (dc->config.disable_ips_in_vpb == DMUB_IPS_VPB_ENABLE_ALL) { + new_signals.bits.allow_dynamic_ips1 = 1; + new_signals.bits.allow_dynamic_ips1_z8 = 1; } - ips_driver->signals = new_signals; dc_dmub_srv->driver_signals = ips_driver->signals; } @@ -1318,7 +1344,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle) static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) { struct dc_dmub_srv *dc_dmub_srv; - uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0; + uint32_t rcg_exit_count = 0, ips1_exit_count = 0, ips2_exit_count = 0, ips1z8_exit_count = 0; if (dc->debug.dmcub_emulation) return; @@ -1338,31 +1364,34 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) rcg_exit_count = ips_fw->rcg_exit_count; ips1_exit_count = ips_fw->ips1_exit_count; ips2_exit_count = ips_fw->ips2_exit_count; + ips1z8_exit_count = 
ips_fw->ips1_z8ret_exit_count; ips_driver->signals.all = 0; dc_dmub_srv->driver_signals = ips_driver->signals; DC_LOG_IPS( - "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u) (count rcg=%u ips1=%u ips2=%u)", + "%s (allow ips1=%u ips2=%u) (commit ips1=%u ips2=%u ips1z8=%u) (count rcg=%u ips1=%u ips2=%u ips1_z8=%u)", __func__, ips_driver->signals.bits.allow_ips1, ips_driver->signals.bits.allow_ips2, ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit, + ips_fw->signals.bits.ips1z8_commit, ips_fw->rcg_entry_count, ips_fw->ips1_entry_count, - ips_fw->ips2_entry_count); + ips_fw->ips2_entry_count, + ips_fw->ips1_z8ret_entry_count); /* Note: register access has technically not resumed for DCN here, but we * need to be message PMFW through our standard register interface. */ dc_dmub_srv->needs_idle_wake = false; - if ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && + if (!dc->caps.ips_v2_support && ((prev_driver_signals.bits.allow_ips2 || prev_driver_signals.all == 0) && (!dc->debug.optimize_ips_handshake || - ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle)) { + ips_fw->signals.bits.ips2_commit || !ips_fw->signals.bits.in_idle))) { DC_LOG_IPS( - "wait IPS2 eval (ips1_commit=%u ips2_commit=%u)", + "wait IPS2 eval (ips1_commit=%u ips2_commit=%u )", ips_fw->signals.bits.ips1_commit, ips_fw->signals.bits.ips2_commit); @@ -1422,28 +1451,31 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc) dc_dmub_srv_notify_idle(dc, false); if (prev_driver_signals.bits.allow_ips1 || prev_driver_signals.all == 0) { DC_LOG_IPS( - "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u)", + "wait for IPS1 commit clear (ips1_commit=%u ips2_commit=%u ips1z8=%u)", ips_fw->signals.bits.ips1_commit, - ips_fw->signals.bits.ips2_commit); + ips_fw->signals.bits.ips2_commit, + ips_fw->signals.bits.ips1z8_commit); while (ips_fw->signals.bits.ips1_commit) udelay(1); DC_LOG_IPS( - "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u)", + "wait for IPS1 commit clear done (ips1_commit=%u ips2_commit=%u ips1z8=%u)", ips_fw->signals.bits.ips1_commit, - ips_fw->signals.bits.ips2_commit); + ips_fw->signals.bits.ips2_commit, + ips_fw->signals.bits.ips1z8_commit); } } if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true)) ASSERT(0); - DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u)", + DC_LOG_IPS("%s exit (count rcg=%u ips1=%u ips2=%u ips1z8=%u)", __func__, rcg_exit_count, ips1_exit_count, - ips2_exit_count); + ips2_exit_count, + ips1z8_exit_count); } void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state) @@ -1656,7 +1688,7 @@ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_com return result; } -void dc_dmub_srv_fams2_update_config(struct dc *dc, +static void dc_dmub_srv_rb_based_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable) { @@ -1722,6 +1754,63 @@ void dc_dmub_srv_fams2_update_config(struct dc *dc, dm_execute_dmub_cmd_list(dc->ctx, num_cmds, cmd, DM_DMUB_WAIT_TYPE_WAIT); } +static void dc_dmub_srv_ib_based_fams2_update_config(struct dc *dc, + struct dc_state *context, + bool enable) +{ + struct dmub_fams2_config_v2 *config = (struct dmub_fams2_config_v2 *)dc->ctx->dmub_srv->dmub->ib_mem_gart.cpu_addr; + union dmub_rb_cmd cmd; + uint32_t i; + + memset(config, 0, sizeof(*config)); + memset(&cmd, 0, sizeof(cmd)); + + cmd.ib_fams2_config.header.type = DMUB_CMD__FW_ASSISTED_MCLK_SWITCH; + cmd.ib_fams2_config.header.sub_type = 
DMUB_CMD__FAMS2_IB_CONFIG; + + cmd.ib_fams2_config.ib_data.src.quad_part = dc->ctx->dmub_srv->dmub->ib_mem_gart.gpu_addr; + cmd.ib_fams2_config.ib_data.size = sizeof(*config); + + if (enable && context->bw_ctx.bw.dcn.fams2_global_config.features.bits.enable) { + /* send global configuration parameters first */ + memcpy(&config->global, &context->bw_ctx.bw.dcn.fams2_global_config, + sizeof(struct dmub_cmd_fams2_global_config)); + + /* then copy the static feature configuration overrides, after the + * copy above so it does not clobber them */ + config->global.features.bits.enable_stall_recovery = dc->debug.fams2_config.bits.enable_stall_recovery; + config->global.features.bits.enable_offload_flip = dc->debug.fams2_config.bits.enable_offload_flip; + config->global.features.bits.enable_debug = dc->debug.fams2_config.bits.enable_debug; + + /* construct per-stream configs */ + for (i = 0; i < context->bw_ctx.bw.dcn.fams2_global_config.num_streams; i++) { + /* copy stream static base state */ + memcpy(&config->stream_v1[i].base, + &context->bw_ctx.bw.dcn.fams2_stream_base_params[i], + sizeof(config->stream_v1[i].base)); + + /* copy stream static sub-state */ + memcpy(&config->stream_v1[i].sub_state, + &context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[i], + sizeof(config->stream_v1[i].sub_state)); + } + } + + config->global.features.bits.enable_visual_confirm = dc->debug.visual_confirm == VISUAL_CONFIRM_FAMS2; + config->global.features.bits.enable = enable; + + dm_execute_dmub_cmd_list(dc->ctx, 1, &cmd, DM_DMUB_WAIT_TYPE_WAIT); +} + +void dc_dmub_srv_fams2_update_config(struct dc *dc, + struct dc_state *context, + bool enable) +{ + if (dc->debug.fams_version.major == 2) + dc_dmub_srv_rb_based_fams2_update_config(dc, context, enable); + else if (dc->debug.fams_version.major == 3) + dc_dmub_srv_ib_based_fams2_update_config(dc, context, enable); +} + void dc_dmub_srv_fams2_drr_update(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, @@ -1847,83 +1936,267 @@ void dc_dmub_srv_fams2_passthrough_flip( } } -bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement) + +bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement) { - bool result; + union dmub_rb_cmd cmd; - if (!dc_dmub_srv || !dc_dmub_srv->dmub) + memset(&cmd, 0, sizeof(cmd)); + + cmd.ips_residency_cntl.header.type = DMUB_CMD__IPS; + cmd.ips_residency_cntl.header.sub_type = DMUB_CMD__IPS_RESIDENCY_CNTL; + cmd.ips_residency_cntl.header.payload_bytes = sizeof(struct dmub_cmd_ips_residency_cntl_data); + + // only panel_inst=0 is supported at the moment + cmd.ips_residency_cntl.cntl_data.panel_inst = panel_inst; + cmd.ips_residency_cntl.cntl_data.start_measurement = start_measurement; + + if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) return false; - result = dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__IPS_RESIDENCY, - start_measurement, NULL, DM_DMUB_WAIT_TYPE_WAIT); + return true; +} + +bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, struct dmub_ips_residency_info *driver_info, + enum ips_residency_mode ips_mode) +{ + union dmub_rb_cmd cmd; + uint32_t bytes = sizeof(struct dmub_ips_residency_info); + + dmub_flush_buffer_mem(&ctx->dmub_srv->dmub->scratch_mem_fb); + memset(&cmd, 0, sizeof(cmd)); + + cmd.ips_query_residency_info.header.type = DMUB_CMD__IPS; + cmd.ips_query_residency_info.header.sub_type = DMUB_CMD__IPS_QUERY_RESIDENCY_INFO; + cmd.ips_query_residency_info.header.payload_bytes = sizeof(struct 
dmub_cmd_ips_query_residency_info_data); + + cmd.ips_query_residency_info.info_data.dest.quad_part = ctx->dmub_srv->dmub->scratch_mem_fb.gpu_addr; + cmd.ips_query_residency_info.info_data.size = bytes; + cmd.ips_query_residency_info.info_data.panel_inst = panel_inst; + cmd.ips_query_residency_info.info_data.ips_mode = (uint32_t)ips_mode; + + if (!dc_wake_and_execute_dmub_cmd(ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY) || + cmd.ips_query_residency_info.header.ret_status == 0) + return false; + + // copy the result to the output since ret_status != 0 means the command returned data + memcpy(driver_info, ctx->dmub_srv->dmub->scratch_mem_fb.cpu_addr, bytes); + + return true; +} + +bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_INIT_CONFIG; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.init_data.gpu_addr_base.quad_part = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.gpu_addr; + lsdma_data->u.init_data.ring_size = dc_ctx->dmub_srv->dmub->lsdma_rb_fb.size; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Init failed in DMUB"); return result; } -void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output) +bool dmub_lsdma_send_linear_copy_packet( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t count) { - uint32_t i; - enum dmub_gpint_command command_code; + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; - if (!dc_dmub_srv || !dc_dmub_srv->dmub) - return; + memset(&cmd, 0, sizeof(cmd)); - switch (output->ips_mode) { - case DMUB_IPS_MODE_IPS1_MAX: - command_code = DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER; - break; - case DMUB_IPS_MODE_IPS2: - command_code = DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER; - break; - case DMUB_IPS_MODE_IPS1_RCG: - command_code = DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER; - break; - case DMUB_IPS_MODE_IPS1_ONO2_ON: - command_code = DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER; - break; - default: - command_code = DMUB_GPINT__INVALID_COMMAND; - break; - } + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_LINEAR_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; - if (command_code == DMUB_GPINT__INVALID_COMMAND) - return; + lsdma_data->u.linear_copy_data.count = count - 1; // LSDMA controller expects bytes to copy -1 + lsdma_data->u.linear_copy_data.src_lo = src_addr & 0xFFFFFFFF; + lsdma_data->u.linear_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.linear_copy_data.dst_lo = dst_addr & 0xFFFFFFFF; + lsdma_data->u.linear_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; - for (i = 0; i < GPINT_RETRY_NUM; i++) { - // false could mean GPINT timeout, in which case we should retry - if (dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT, - (uint16_t)(output->ips_mode), &output->residency_percent, - DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - break; - udelay(100); - } + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Linear Copy 
failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_tiled_to_tiled_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + struct lsdma_send_tiled_to_tiled_copy_command_params params) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_TILED_TO_TILED_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.tiled_copy_data.src_addr_lo = params.src_addr & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.src_addr_hi = (params.src_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.dst_addr_lo = params.dst_addr & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.dst_addr_hi = (params.dst_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.tiled_copy_data.src_x = params.src_x; + lsdma_data->u.tiled_copy_data.src_y = params.src_y; + lsdma_data->u.tiled_copy_data.dst_x = params.dst_x; + lsdma_data->u.tiled_copy_data.dst_y = params.dst_y; + lsdma_data->u.tiled_copy_data.src_width = params.src_width - 1; // LSDMA controller expects width -1 + lsdma_data->u.tiled_copy_data.dst_width = params.dst_width - 1; // LSDMA controller expects width -1 + lsdma_data->u.tiled_copy_data.src_swizzle_mode = params.swizzle_mode; + lsdma_data->u.tiled_copy_data.dst_swizzle_mode = params.swizzle_mode; + lsdma_data->u.tiled_copy_data.src_element_size = params.element_size; + lsdma_data->u.tiled_copy_data.dst_element_size = params.element_size; + lsdma_data->u.tiled_copy_data.rect_x = params.rect_x; + lsdma_data->u.tiled_copy_data.rect_y = params.rect_y; + lsdma_data->u.tiled_copy_data.dcc = params.dcc; + lsdma_data->u.tiled_copy_data.tmz = params.tmz; + lsdma_data->u.tiled_copy_data.read_compress = params.read_compress; + lsdma_data->u.tiled_copy_data.write_compress = params.write_compress; + lsdma_data->u.tiled_copy_data.src_height = params.src_height - 1; // LSDMA controller expects height -1 + lsdma_data->u.tiled_copy_data.dst_height = params.dst_height - 1; // LSDMA controller expects height -1 + lsdma_data->u.tiled_copy_data.data_format = params.data_format; + lsdma_data->u.tiled_copy_data.max_com = params.max_com; + lsdma_data->u.tiled_copy_data.max_uncom = params.max_uncom; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Tiled to Tiled Copy failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_pio_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t overlap_disable) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_COPY; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.pio_copy_data.packet.fields.byte_count = byte_count; + lsdma_data->u.pio_copy_data.packet.fields.overlap_disable = overlap_disable; + lsdma_data->u.pio_copy_data.src_lo = src_addr & 0xFFFFFFFF; + lsdma_data->u.pio_copy_data.src_hi = (src_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.pio_copy_data.dst_lo = dst_addr & 0xFFFFFFFF; + lsdma_data->u.pio_copy_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, 
wait_type); + + if (!result) + DC_ERROR("LSDMA PIO Copy failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_pio_constfill_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t data) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); + + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_PIO_CONSTFILL; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.pio_constfill_data.packet.fields.constant_fill = 1; + lsdma_data->u.pio_constfill_data.packet.fields.byte_count = byte_count; + lsdma_data->u.pio_constfill_data.dst_lo = dst_addr & 0xFFFFFFFF; + lsdma_data->u.pio_constfill_data.dst_hi = (dst_addr >> 32) & 0xFFFFFFFF; + lsdma_data->u.pio_constfill_data.data = data; - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER, - (uint16_t)(output->ips_mode), - &output->entry_counter, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->entry_counter = 0; + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO, - (uint16_t)(output->ips_mode), - &output->total_active_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_active_time_us[0] = 0; - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_HI, - (uint16_t)(output->ips_mode), - &output->total_active_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_active_time_us[1] = 0; + if (!result) + DC_ERROR("LSDMA PIO Constfill failed in DMUB"); + + return result; +} + +bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data) +{ + struct dc_context *dc_ctx = dc_dmub_srv->ctx; + union dmub_rb_cmd cmd; + enum dm_dmub_wait_type wait_type; + struct dmub_cmd_lsdma_data *lsdma_data = &cmd.lsdma.lsdma_data; + bool result; + + memset(&cmd, 0, sizeof(cmd)); - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO, - (uint16_t)(output->ips_mode), - &output->total_inactive_time_us[0], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_inactive_time_us[0] = 0; - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI, - (uint16_t)(output->ips_mode), - &output->total_inactive_time_us[1], DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->total_inactive_time_us[1] = 0; + cmd.cmd_common.header.type = DMUB_CMD__LSDMA; + cmd.cmd_common.header.sub_type = DMUB_CMD__LSDMA_POLL_REG_WRITE; + wait_type = DM_DMUB_WAIT_TYPE_NO_WAIT; + + lsdma_data->u.reg_write_data.reg_addr = reg_addr; + lsdma_data->u.reg_write_data.reg_data = reg_data; + + result = dc_wake_and_execute_dmub_cmd(dc_ctx, &cmd, wait_type); + + if (!result) + DC_ERROR("LSDMA Poll Reg failed in DMUB"); + + return result; +} + +void dc_dmub_srv_release_hw(const struct dc *dc) +{ + struct dc_dmub_srv *dc_dmub_srv = dc->ctx->dmub_srv; + union dmub_rb_cmd cmd = {0}; + + if (!dc_dmub_srv || !dc_dmub_srv->dmub) + return; + + memset(&cmd, 0, sizeof(cmd)); + cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT; + cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_RELEASE_HW; + cmd.idle_opt_notify_idle.header.payload_bytes = + sizeof(cmd.idle_opt_notify_idle) - + 
sizeof(cmd.idle_opt_notify_idle.header); - // NUM_IPS_HISTOGRAM_BUCKETS = 16 - for (i = 0; i < 16; i++) - if (!dc_wake_and_execute_gpint(dc_dmub_srv->ctx, command_code, i, &output->histogram[i], - DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY)) - output->histogram[i] = 0; + dm_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT); } diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h index ada5c2fb2db3..8ea320f21269 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h @@ -210,6 +210,60 @@ void dc_dmub_srv_fams2_passthrough_flip( struct dc_surface_update *srf_updates, int surface_count); +bool dmub_lsdma_init(struct dc_dmub_srv *dc_dmub_srv); +bool dmub_lsdma_send_linear_copy_packet( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t count); +bool dmub_lsdma_send_pio_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t src_addr, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t overlap_disable); +bool dmub_lsdma_send_pio_constfill_command( + struct dc_dmub_srv *dc_dmub_srv, + uint64_t dst_addr, + uint32_t byte_count, + uint32_t data); + +struct lsdma_send_tiled_to_tiled_copy_command_params { + uint64_t src_addr; + uint64_t dst_addr; + + uint32_t src_x : 16; + uint32_t src_y : 16; + + uint32_t dst_x : 16; + uint32_t dst_y : 16; + + uint32_t src_width : 16; + uint32_t dst_width : 16; + + uint32_t rect_x : 16; + uint32_t rect_y : 16; + + uint32_t src_height : 16; + uint32_t dst_height : 16; + + uint32_t data_format : 6; + uint32_t swizzle_mode : 5; + uint32_t element_size : 3; + uint32_t dcc : 1; + uint32_t tmz : 1; + uint32_t read_compress : 2; + uint32_t write_compress : 2; + uint32_t max_com : 2; + uint32_t max_uncom : 1; + uint32_t padding : 9; +}; + +bool dmub_lsdma_send_tiled_to_tiled_copy_command( + struct dc_dmub_srv *dc_dmub_srv, + struct lsdma_send_tiled_to_tiled_copy_command_params params); +bool dmub_lsdma_send_poll_reg_write_command(struct dc_dmub_srv *dc_dmub_srv, uint32_t reg_addr, uint32_t reg_data); + /** * struct ips_residency_info - struct containing info from dmub_ips_residency_stats * @@ -223,7 +277,7 @@ void dc_dmub_srv_fams2_passthrough_flip( * @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c */ struct ips_residency_info { - enum dmub_ips_mode ips_mode; + enum ips_residency_mode ips_mode; unsigned int residency_percent; unsigned int entry_counter; unsigned int total_active_time_us[2]; @@ -231,21 +285,16 @@ struct ips_residency_info { unsigned int histogram[16]; }; -/** - * bool dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status - * - * @dc_dmub_srv: The DC DMUB service pointer - * @start_measurement: Describes whether to start or stop measurement - * - * Return: true if GPINT was sent successfully, false otherwise - */ -bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement); +bool dc_dmub_srv_ips_residency_cntl(const struct dc_context *ctx, uint8_t panel_inst, bool start_measurement); + +bool dc_dmub_srv_ips_query_residency_info(const struct dc_context *ctx, uint8_t panel_inst, + struct dmub_ips_residency_info *driver_info, + enum ips_residency_mode ips_mode); /** - * bool dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info + * dc_dmub_srv_release_hw() - Notifies DMUB service that HW access is no longer required. 
* - * @dc_dmub_srv: The DC DMUB service pointer - * @output: Output struct to copy the the residency info to + * @dc - pointer to DC object */ -void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output); +void dc_dmub_srv_release_hw(const struct dc *dc); #endif /* _DMUB_DC_SRV_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 0bad8304ccf6..5ce1be362534 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -162,6 +162,11 @@ struct dc_link_settings { struct dc_tunnel_settings { bool should_enable_dp_tunneling; bool should_use_dp_bw_allocation; + uint8_t cm_id; + uint8_t group_id; + uint32_t bw_granularity; + uint32_t estimated_bw; + uint32_t allocated_bw; }; union dc_dp_ffe_preset { @@ -957,11 +962,21 @@ union usb4_driver_bw_cap { uint8_t raw; }; +/* DPCD[0xE0021] DP_IN_ADAPTER_TUNNEL_INFORMATION register. */ +union dpia_tunnel_info { + struct { + uint8_t group_id :3; + uint8_t rsvd :5; + } bits; + uint8_t raw; +}; + /* DP Tunneling over USB4 */ struct dpcd_usb4_dp_tunneling_info { union dp_tun_cap_support dp_tun_cap; union dpia_info dpia_info; union usb4_driver_bw_cap driver_bw_cap; + union dpia_tunnel_info dpia_tunnel_info; uint8_t usb4_driver_id; uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN]; }; @@ -1172,8 +1187,8 @@ struct dc_lttpr_caps { union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates; union dp_alpm_lttpr_cap alpm; uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; - uint8_t lttpr_ieee_oui[3]; - uint8_t lttpr_device_id[6]; + uint8_t lttpr_ieee_oui[3]; // Always read from closest LTTPR to host + uint8_t lttpr_device_id[6]; // Always read from closest LTTPR to host }; struct dc_dongle_dfp_cap_ext { diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index d562ddeca512..667852517246 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -68,7 +68,7 @@ enum dc_plane_addr_type { struct dc_plane_address { enum dc_plane_addr_type type; - bool tmz_surface; + uint8_t tmz_surface; union { struct{ PHYSICAL_ADDRESS_LOC addr; @@ -974,6 +974,7 @@ struct dc_crtc_timing { uint32_t pix_clk_100hz; uint32_t min_refresh_in_uhz; + uint32_t max_refresh_in_uhz; uint32_t vic; uint32_t hdmi_vic; @@ -1103,7 +1104,8 @@ enum mpcc_gamut_remap_mode_select { enum mpcc_gamut_remap_id { MPCC_OGAM_GAMUT_REMAP, MPCC_MCM_FIRST_GAMUT_REMAP, - MPCC_MCM_SECOND_GAMUT_REMAP + MPCC_MCM_SECOND_GAMUT_REMAP, + MPCC_RMCM_GAMUT_REMAP, }; enum cursor_matrix_mode { diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c index e3a8283b4098..7f57661433eb 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c +++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c @@ -156,15 +156,16 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl spl_in->adaptive_sharpness.enable = true; spl_in->adaptive_sharpness.sharpness_level = 0; } else if (sharpness_setting == SHARPNESS_CUSTOM) { - spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0; - spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750; - spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750; - spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0; - spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500; - 
spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500; - spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0; - spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750; - spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500; + /* SAT: read sharpness_range from dc_plane_state */ + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = plane_state->sharpness_range.sdr_rgb_min; + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = plane_state->sharpness_range.sdr_rgb_max; + spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = plane_state->sharpness_range.sdr_rgb_mid; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = plane_state->sharpness_range.sdr_yuv_min; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = plane_state->sharpness_range.sdr_yuv_max; + spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = plane_state->sharpness_range.sdr_yuv_mid; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = plane_state->sharpness_range.hdr_rgb_min; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = plane_state->sharpness_range.hdr_rgb_max; + spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = plane_state->sharpness_range.hdr_rgb_mid; if (force_sharpness_level > 0) { if (force_sharpness_level > 10) diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 341d2ffb64b1..5fc6fea211de 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -579,6 +579,17 @@ bool dc_stream_set_gamut_remap(struct dc *dc, bool dc_stream_program_csc_matrix(struct dc *dc, struct dc_stream_state *stream); +struct dc_rmcm_3dlut *dc_stream_get_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream, + bool allocate_one); + +void dc_stream_release_3dlut_for_stream( + const struct dc *dc, + const struct dc_stream_state *stream); + +void dc_stream_init_rmcm_3dlut(struct dc *dc); + struct pipe_ctx *dc_stream_get_pipe_ctx(struct dc_stream_state *stream); void dc_dmub_update_dirty_rect(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index a4cd0eb39a3a..375ca2f13b7a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -175,6 +175,7 @@ struct dc_panel_patch { unsigned int embedded_tiled_slave; unsigned int disable_fams; unsigned int skip_avmute; + unsigned int skip_audio_sab_check; unsigned int mst_start_top_delay; unsigned int remove_sink_ext_caps; unsigned int disable_colorimetry; @@ -263,6 +264,7 @@ enum dc_timing_source { TIMING_SOURCE_EDID_4BYTE, TIMING_SOURCE_EDID_CEA_DISPLAYID_VTDB, TIMING_SOURCE_EDID_CEA_RID, + TIMING_SOURCE_EDID_DISPLAYID_TYPE5, TIMING_SOURCE_VBIOS, TIMING_SOURCE_CV, TIMING_SOURCE_TV, @@ -1255,7 +1257,6 @@ enum dc_cm2_gpu_mem_layout { enum dc_cm2_gpu_mem_pixel_component_order { DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA, - DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA } enum dc_cm2_gpu_mem_format { @@ -1277,7 +1278,6 @@ struct dc_cm2_gpu_mem_format_parameters { enum dc_cm2_gpu_mem_size { DC_CM2_GPU_MEM_SIZE_171717, - DC_CM2_GPU_MEM_SIZE_333333, DC_CM2_GPU_MEM_SIZE_TRANSFORMED, }; @@ -1315,6 +1315,7 @@ struct dc_cm2_func_luts { bool mpc_3dlut_enable; bool rmcm_3dlut_enable; bool mpc_mcm_post_blend; + uint8_t rmcm_tmz; } lut3d_data; const struct dc_transfer_func *lut1d_func; }; @@ -1372,4 +1373,19 @@ struct set_backlight_level_params { uint8_t aux_inst; }; +enum dc_validate_mode { + /* validate the mode 
and program HW */ + DC_VALIDATE_MODE_AND_PROGRAMMING = 0, + /* only validate the mode */ + DC_VALIDATE_MODE_ONLY = 1, + /* validate the mode and get the max state (voltage level) */ + DC_VALIDATE_MODE_AND_STATE_INDEX = 2, +}; + +struct dc_validation_dpia_set { + const struct dc_link *link; + const struct dc_tunnel_settings *tunnel_settings; + uint32_t required_bw; +}; + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c index ffd172231fdf..668ee2d405fd 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c @@ -727,7 +727,7 @@ void dccg401_init(struct dccg *dccg) } } -void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst) +void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h index 55e8718aad22..5947a35363aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.h @@ -209,7 +209,7 @@ void dccg401_disable_symclk32_le( struct dccg *dccg, int hpo_le_inst); void dccg401_disable_dpstreamclk(struct dccg *dccg, int dp_hpo_inst); -void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst); +void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst, uint32_t num_slices_h); void dccg401_set_ref_dscclk(struct dccg *dccg, uint32_t dsc_inst); void dccg401_set_src_sel( @@ -230,7 +230,6 @@ void dccg401_set_dp_dto( const struct dp_dto_params *params); void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst); void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint32_t link_enc_inst); -void dccg401_set_dto_dscclk(struct dccg *dccg, uint32_t inst); void dccg401_set_dtbclk_p_src( struct dccg *dccg, enum streamclk_source src, diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c index d28826c3ae5f..0421b267a0b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c @@ -292,9 +292,35 @@ static void set_speed( FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); } +static bool acquire_engine(struct dce_i2c_hw *dce_i2c_hw) +{ + uint32_t arbitrate = 0; + + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + switch (arbitrate) { + case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW: + return true; + case DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_HW: + return false; + case DC_I2C_STATUS__DC_I2C_STATUS_IDLE: + default: + break; + } + + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_USE_I2C_REG_REQ, true); + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW) + return false; + + return true; +} + static bool setup_engine( struct dce_i2c_hw *dce_i2c_hw) { + // Deassert soft reset to unblock I2C engine registers + REG_UPDATE(DC_I2C_CONTROL, DC_I2C_SOFT_RESET, false); + uint32_t i2c_setup_limit = I2C_SETUP_TIME_LIMIT_DCE; uint32_t reset_length = 0; @@ -309,8 +335,8 @@ static bool setup_engine( REG_UPDATE_N(SETUP, 1, FN(DC_I2C_DDC1_SETUP, DC_I2C_DDC1_CLK_EN), 1); - /* we have checked I2c not used by DMCU, set SW use I2C REQ to 1 to indicate SW using it*/ - REG_UPDATE(DC_I2C_ARBITRATION, 
DC_I2C_SW_USE_I2C_REG_REQ, 1); + if (!acquire_engine(dce_i2c_hw)) + return false; /*set SW requested I2c speed to default, if API calls in it will be override later*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz); @@ -319,9 +345,8 @@ static bool setup_engine( i2c_setup_limit = dce_i2c_hw->setup_limit; /* Program pin select */ - REG_UPDATE_6(DC_I2C_CONTROL, + REG_UPDATE_5(DC_I2C_CONTROL, DC_I2C_GO, 0, - DC_I2C_SOFT_RESET, 0, DC_I2C_SEND_RESET, 0, DC_I2C_SW_STATUS_RESET, 1, DC_I2C_TRANSACTION_COUNT, 0, @@ -351,6 +376,32 @@ static bool setup_engine( return true; } +/** + * cntl_stuck_hw_workaround - Workaround for I2C engine stuck state + * @dce_i2c_hw: Pointer to dce_i2c_hw structure + * + * If we boot without an HDMI display, the I2C engine does not get initialized + * correctly. One of its symptoms is that SW_USE_I2C does not get cleared after + * acquire. After setting SW_DONE_USING_I2C on release, the engine gets + * immediately reacquired by SW, preventing DMUB from using it. + * + * This function checks the I2C arbitration status and applies a release + * workaround if necessary. + */ +static void cntl_stuck_hw_workaround(struct dce_i2c_hw *dce_i2c_hw) +{ + uint32_t arbitrate = 0; + + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + if (arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW) + return; + + // Still acquired after release, release again as a workaround + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true); + REG_GET(DC_I2C_ARBITRATION, DC_I2C_REG_RW_CNTL_STATUS, &arbitrate); + ASSERT(arbitrate != DC_I2C_STATUS__DC_I2C_STATUS_USED_BY_SW); +} + static void release_engine( struct dce_i2c_hw *dce_i2c_hw) { @@ -378,9 +429,9 @@ static void release_engine( /*for HW HDCP Ri polling failure w/a test*/ set_speed(dce_i2c_hw, dce_i2c_hw->ctx->dc->caps.i2c_speed_in_khz_hdcp); - /* Release I2C after reset, so HW or DMCU could use it */ - REG_UPDATE_2(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, 1, - DC_I2C_SW_USE_I2C_REG_REQ, 0); + // Release I2C engine so it can be used by HW or DMCU, automatically clears SW_USE_I2C + REG_UPDATE(DC_I2C_ARBITRATION, DC_I2C_SW_DONE_USING_I2C_REG, true); + cntl_stuck_hw_workaround(dce_i2c_hw); if (dce_i2c_hw->ctx->dc->debug.enable_mem_low_power.bits.i2c) { if (dce_i2c_hw->regs->DIO_MEM_PWR_CTRL) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c index 4a9d07c31bc5..0c50fe266c8a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c @@ -896,13 +896,13 @@ void dce110_link_encoder_construct( enc110->base.id, &bp_cap_info); /* Override features with DCE-specific values */ - if (BP_RESULT_OK == result) { + if (result == BP_RESULT_OK) { enc110->base.features.flags.bits.IS_HBR2_CAPABLE = bp_cap_info.DP_HBR2_EN; enc110->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; - } else { + } else if (result != BP_RESULT_NORECORD) { DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", __func__, result); @@ -1798,13 +1798,13 @@ void dce60_link_encoder_construct( enc110->base.id, &bp_cap_info); /* Override features with DCE-specific values */ - if (BP_RESULT_OK == result) { + if (result == BP_RESULT_OK) { enc110->base.features.flags.bits.IS_HBR2_CAPABLE = bp_cap_info.DP_HBR2_EN; enc110->base.features.flags.bits.IS_HBR3_CAPABLE = bp_cap_info.DP_HBR3_EN; 
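/*
 * Illustrative note, not part of the patch: the else-if below converts the
 * encoder-cap query to a "missing record is not an error" pattern.
 * BP_RESULT_NORECORD only means VBIOS carries no encoder_cap_info entry for
 * this encoder, so the defaults are kept silently and the warning is
 * reserved for genuine failures, i.e. (sketch):
 *
 *	if (result == BP_RESULT_OK)
 *		apply caps from bp_cap_info;
 *	else if (result != BP_RESULT_NORECORD)
 *		DC_LOG_WARNING("Failed to get encoder_cap_info: %d", result);
 */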
enc110->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; - } else { + } else if (result != BP_RESULT_NORECORD) { DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", __func__, result); diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index e1d500633dfa..b357683b4255 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -114,9 +114,6 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_ccflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn401/dcn401_fpu.o := $(dml_rcflags) - ifdef CONFIG_DRM_AMD_DC_FP DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o DML += dcn10/dcn10_fpu.o @@ -137,7 +134,6 @@ DML += dcn303/dcn303_fpu.o DML += dcn314/dcn314_fpu.o DML += dcn35/dcn35_fpu.o DML += dcn351/dcn351_fpu.o -DML += dcn401/dcn401_fpu.o DML += dsc/rc_calc_fpu.o DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c index f1235bf9a596..74962791302f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c @@ -748,7 +748,7 @@ static unsigned int get_highest_allowed_voltage_level(bool is_vmin_only_asic) bool dcn_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { /* * we want a breakdown of the various stages of validation, which the @@ -1119,7 +1119,7 @@ bool dcn_validate_bandwidth( BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (v->voltage_level != number_of_states_plus_one && !fast_validate) { + if (v->voltage_level != number_of_states_plus_one && validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second; if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65) @@ -1286,7 +1286,7 @@ bool dcn_validate_bandwidth( } } else if (v->voltage_level == number_of_states_plus_one) { BW_VAL_TRACE_SKIP(fail); - } else if (fast_validate) { + } else if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index e9fea9c2162e..2a2eaf6adf26 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1315,7 +1315,7 @@ static void swizzle_to_dml_params( int dcn20_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt, i; bool synchronized_vblank = true; @@ -1733,7 +1733,7 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context, int *out_pipe_cnt, int *pipe_split_from, int vlevel, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt, i, pipe_idx; @@ -1780,10 +1780,10 @@ void dcn20_calculate_wm(struct dc *dc, struct dc_state *context, if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - context, pipes, fast_validate); + context, pipes, 
validate_mode); else pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); } *out_pipe_cnt = pipe_cnt; @@ -2027,7 +2027,7 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st } static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes) + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes) { bool out = false; @@ -2040,7 +2040,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co BW_VAL_TRACE_COUNT(); - out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate); + out = dcn20_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode); if (pipe_cnt == 0) goto validate_out; @@ -2050,12 +2050,12 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } - dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); + dcn20_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); @@ -2077,7 +2077,7 @@ validate_out: } bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes) + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes) { bool voltage_supported = false; bool full_pstate_supported = false; @@ -2095,12 +2095,11 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, /*Unsafe due to current pipe merge and split logic*/ ASSERT(context != dc->current_state); - if (fast_validate) { - return dcn20_validate_bandwidth_internal(dc, context, true, pipes); - } + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) + return dcn20_validate_bandwidth_internal(dc, context, validate_mode, pipes); // Best case, we support full UCLK switch latency - voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes); + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes); full_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; if (context->bw_ctx.dml.soc.dummy_pstate_latency_us == 0 || @@ -2113,7 +2112,7 @@ bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.dram_clock_change_latency_us = context->bw_ctx.dml.soc.dummy_pstate_latency_us; memset(pipes, 0, dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st)); - voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false, pipes); + voltage_supported = dcn20_validate_bandwidth_internal(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING, pipes); dummy_pstate_supported = context->bw_ctx.bw.dcn.clk.p_state_change_support; if (voltage_supported && (dummy_pstate_supported || !(context->stream_count))) { @@ -2156,14 +2155,14 @@ void dcn20_fpu_adjust_dppclk(struct vba_vars_st *v, int dcn21_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { uint32_t pipe_cnt; int i; dc_assert_fp_enabled(); - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, 
pipes, fast_validate); + pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); for (i = 0; i < pipe_cnt; i++) { @@ -2239,7 +2238,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context, int *out_pipe_cnt, int *pipe_split_from, int vlevel_req, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt, i, pipe_idx; int vlevel, vlevel_max; @@ -2281,10 +2280,10 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context, if (pipe_cnt != pipe_idx) { if (dc->res_pool->funcs->populate_dml_pipes) pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); else pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, - context, pipes, fast_validate); + context, pipes, validate_mode); } *out_pipe_cnt = pipe_cnt; @@ -2319,7 +2318,7 @@ static void dcn21_calculate_wm(struct dc *dc, struct dc_state *context, } bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes) + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes) { bool out = false; @@ -2337,7 +2336,7 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, /*Unsafe due to current pipe merge and split logic*/ ASSERT(context != dc->current_state); - out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, fast_validate); + out = dcn21_fast_validate_bw(dc, context, pipes, &pipe_cnt, pipe_split_from, &vlevel, validate_mode); if (pipe_cnt == 0) goto validate_out; @@ -2347,12 +2346,12 @@ bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } - dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, fast_validate); + dcn21_calculate_wm(dc, context, pipes, &pipe_cnt, pipe_split_from, vlevel, validate_mode); dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); BW_VAL_TRACE_END_WATERMARKS(); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h index b6c34198ddc8..aed00039ca62 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h @@ -44,14 +44,14 @@ void dcn20_calculate_dlg_params(struct dc *dc, int dcn20_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn20_calculate_wm(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *out_pipe_cnt, int *pipe_split_from, int vlevel, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn20_cap_soc_clocks(struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table max_clocks); void dcn20_update_bounding_box(struct dc *dc, @@ -62,7 +62,7 @@ void dcn20_update_bounding_box(struct dc *dc, void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb); bool dcn20_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, - bool fast_validate, display_e2e_pipe_params_st *pipes); + enum dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes); void dcn20_fpu_set_wm_ranges(int i, struct pp_smu_wm_range_sets *ranges, struct _vcs_dpi_soc_bounding_box_st *loaded_bb); @@ -75,9 +75,9 @@ void 
dcn20_fpu_adjust_dppclk(struct vba_vars_st *v, int dcn21_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); -bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, bool - fast_validate, display_e2e_pipe_params_st *pipes); + enum dc_validate_mode validate_mode); +bool dcn21_validate_bandwidth_fp(struct dc *dc, struct dc_state *context, enum + dc_validate_mode validate_mode, display_e2e_pipe_params_st *pipes); void dcn21_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); void dcn21_clk_mgr_set_bw_params_wm_table(struct clk_bw_params *bw_params); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c index 88789987bdbc..e5f5c0663750 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/dcn30_fpu.c @@ -339,7 +339,8 @@ void dcn30_fpu_calculate_wm_and_dlg( * newly found dummy_latency_index */ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; - dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true); + dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, + DC_VALIDATE_MODE_AND_PROGRAMMING, true); maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; dcfclk = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; pstate_en = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] != dm_dram_clock_change_unsupported; @@ -630,7 +631,8 @@ int dcn30_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, while (dummy_latency_index < max_latency_table_entries) { context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; - dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false, true); + dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, + DC_VALIDATE_MODE_AND_PROGRAMMING, true); if (context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank == dm_allow_self_refresh_and_mclk_switch) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h index d2ae43a82ba5..dfcc5d50071e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/dcn31_fpu.h @@ -55,5 +55,5 @@ int dcn_get_approx_det_segs_required_for_pstate( int dcn31x_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); #endif /* __DCN31_FPU_H__*/ diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index 5ed117e11aa2..df9d50b9b57c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -306,7 +306,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -316,7 +316,7 @@ int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *c dc_assert_fp_enabled(); -
dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h index d32c5bb99f4c..362ac79184ea 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.h @@ -35,6 +35,6 @@ void dcn314_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); int dcn314_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index b0fc1fd20208..6160952245b4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -290,7 +290,7 @@ int dcn32_find_dummy_latency_index_for_fw_based_mclk_switch(struct dc *dc, vba->DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] = temp_clock_change_support; context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING); /* for subvp + DRR case, if subvp pipes are still present we support pstate */ if (vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported && @@ -1479,7 +1479,7 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc, /* Conditions for setting up phantom pipes for SubVP: * 1. Not force disable SubVP - * 2. Full update (i.e. !fast_validate) + * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING) * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) * 4. Display configuration passes validation * 5. (Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) @@ -1517,7 +1517,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc, dc->res_pool->funcs->add_phantom_pipes(dc, context, pipes, *pipe_cnt, dc_pipe_idx); - *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); + *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, + DC_VALIDATE_MODE_AND_PROGRAMMING); // Populate dppclk to trigger a recalculate in dml_get_voltage_level // so the phantom pipe DLG params can be assigned correctly. 
pipes[0].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, *pipe_cnt, 0); @@ -1560,7 +1561,8 @@ static bool dcn32_full_validate_bw_helper(struct dc *dc, dc_state_remove_phantom_streams_and_planes(dc, context); dc_state_release_phantom_streams_and_planes(dc, context); vba->DRAMClockChangeSupport[*vlevel][vba->maxMpcComb] = dm_dram_clock_change_unsupported; - *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, false); + *pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, + DC_VALIDATE_MODE_AND_PROGRAMMING); *vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, *pipe_cnt); /* This may adjust vlevel and maxMpcComb */ @@ -2138,7 +2140,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; bool repopulate_pipes = false; @@ -2162,7 +2164,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, for (i = 0; i < context->stream_count; i++) resource_update_pipes_for_stream_with_slice_count(context, dc->current_state, dc->res_pool, context->streams[i], 1); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); if (!pipe_cnt) { out = true; @@ -2172,13 +2174,13 @@ bool dcn32_internal_validate_bw(struct dc *dc, dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); context->bw_ctx.dml.soc.max_vratio_pre = dcn32_determine_max_vratio_prefetch(dc, context); - if (!fast_validate) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { if (!dcn32_full_validate_bw_helper(dc, context, pipes, &vlevel, split, merge, &pipe_cnt, &repopulate_pipes)) goto validate_fail; } - if (fast_validate || + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING || (dc->debug.dml_disallow_alternate_prefetch_modes && (vlevel == context->bw_ctx.dml.soc.num_states || vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported))) { @@ -2195,7 +2197,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, context->bw_ctx.dml.soc.allow_for_pstate_or_stutter_in_vblank_final = dm_prefetch_support_none; - context->bw_ctx.dml.validate_max_state = fast_validate; + context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING); vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); context->bw_ctx.dml.validate_max_state = false; @@ -2247,7 +2249,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, int flag_vlevel = vlevel; int i; - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); if (!dc->config.enable_windowed_mpo_odm) dcn32_update_dml_pipes_odm_policy_based_on_context(dc, context, pipes); @@ -2343,7 +2345,7 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, } context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, DC_VALIDATE_MODE_AND_PROGRAMMING); maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; if (is_subvp_p_drr) { context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][maxMpcComb] = dm_dram_clock_change_vblank_w_mall_sub_vp; @@ -2389,7 +2391,8 @@ void 
dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; } - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel_temp, + DC_VALIDATE_MODE_AND_PROGRAMMING); if (vlevel_temp < vlevel) { vlevel = vlevel_temp; maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; @@ -2410,7 +2413,8 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, stream_status->fpo_in_use = false; } context->bw_ctx.dml.soc.fclk_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us; - dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); + dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, + DC_VALIDATE_MODE_AND_PROGRAMMING); } } } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h index 276e90e4e0ce..273d2bd79d85 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.h @@ -49,7 +49,7 @@ bool dcn32_internal_validate_bw(struct dc *dc, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c index 92f0a099d089..5d73efa2f0c9 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c @@ -437,7 +437,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -446,7 +446,7 @@ int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, const unsigned int max_allowed_vblank_nom = 1023; dcn31_populate_dml_pipes_from_context(dc, context, pipes, - fast_validate); + validate_mode); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h index 067480fc3691..d121c5afce71 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.h @@ -37,7 +37,7 @@ void dcn35_update_bw_bounding_box_fpu(struct dc *dc, int dcn35_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn35_decide_zstate_support(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c index 17d0b4923b0c..6f516af82956 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c @@ -470,7 +470,7 @@ static unsigned int get_vertical_back_porch(struct dc_crtc_timing *timing) int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, 
struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -479,7 +479,7 @@ int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, const unsigned int max_allowed_vblank_nom = 1023; dcn31_populate_dml_pipes_from_context(dc, context, pipes, - fast_validate); + validate_mode); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { struct dc_crtc_timing *timing; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h index f93efab9a668..f71d9d8d0759 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.h @@ -12,7 +12,7 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc, int dcn351_populate_dml_pipes_from_context_fpu(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c deleted file mode 100644 index 4fbecb5ff349..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.c +++ /dev/null @@ -1,239 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright 2024 Advanced Micro Devices, Inc. - -#include "dcn401_fpu.h" -#include "dcn401/dcn401_resource.h" -// We need this includes for WATERMARKS_* defines -#include "clk_mgr/dcn401/dcn401_smu14_driver_if.h" -#include "link.h" - -#define DC_LOGGER_INIT(logger) - -void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr) -{ - /* defaults */ - double pstate_latency_us = clk_mgr->ctx->dc->dml.soc.dram_clock_change_latency_us; - double fclk_change_latency_us = clk_mgr->ctx->dc->dml.soc.fclk_change_latency_us; - double sr_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_exit_time_us; - double sr_enter_plus_exit_time_us = clk_mgr->ctx->dc->dml.soc.sr_enter_plus_exit_time_us; - /* For min clocks use as reported by PM FW and report those as min */ - uint16_t min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz; - uint16_t min_dcfclk_mhz = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - uint16_t setb_min_uclk_mhz = min_uclk_mhz; - uint16_t dcfclk_mhz_for_the_second_state = clk_mgr->ctx->dc->dml.soc.clock_limits[2].dcfclk_mhz; - - dc_assert_fp_enabled(); - - /* For Set B ranges use min clocks state 2 when available, and report those to PM FW */ - if (dcfclk_mhz_for_the_second_state) - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = dcfclk_mhz_for_the_second_state; - else - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_dcfclk = clk_mgr->bw_params->clk_table.entries[0].dcfclk_mhz; - - if (clk_mgr->bw_params->clk_table.entries[2].memclk_mhz) - setb_min_uclk_mhz = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz; - - /* Set A - Normal - default values */ - clk_mgr->bw_params->wm_table.nv_entries[WM_A].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us = pstate_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_exit_time_us = sr_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.sr_enter_plus_exit_time_us = 
sr_enter_plus_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_A].pmfw_breakdown.max_uclk = 0xFFFF; - - /* Set B - Performance - higher clocks, using DPM[2] DCFCLK and UCLK */ - clk_mgr->bw_params->wm_table.nv_entries[WM_B].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.pstate_latency_us = pstate_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_exit_time_us = sr_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.wm_type = WATERMARKS_CLOCK_RANGE; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.min_uclk = setb_min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_B].pmfw_breakdown.max_uclk = 0xFFFF; - - /* Set C - Dummy P-State - P-State latency set to "dummy p-state" value */ - /* 'DalDummyClockChangeLatencyNs' registry key option set to 0x7FFFFFFF can be used to disable Set C for dummy p-state */ - if (clk_mgr->ctx->dc->bb_overrides.dummy_clock_change_latency_ns != 0x7FFFFFFF) { - clk_mgr->bw_params->wm_table.nv_entries[WM_C].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.pstate_latency_us = 50; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_exit_time_us = sr_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.wm_type = WATERMARKS_DUMMY_PSTATE; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_C].pmfw_breakdown.max_uclk = 0xFFFF; - clk_mgr->bw_params->dummy_pstate_table[0].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[0].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[0].dummy_pstate_latency_us = 50; - clk_mgr->bw_params->dummy_pstate_table[1].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[1].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[1].dummy_pstate_latency_us = 9; - clk_mgr->bw_params->dummy_pstate_table[2].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[2].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[2].dummy_pstate_latency_us = 8; - clk_mgr->bw_params->dummy_pstate_table[3].dram_speed_mts = clk_mgr->bw_params->clk_table.entries[3].memclk_mhz * 16; - clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us = 5; - } - /* Set D - MALL - SR enter and exit time specific to MALL, TBD after bringup or later phase for now use DRAM values / 2 */ - /* For MALL DRAM clock change latency is N/A, for watermak calculations use lowest 
value dummy P state latency */ - clk_mgr->bw_params->wm_table.nv_entries[WM_D].valid = true; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.pstate_latency_us = clk_mgr->bw_params->dummy_pstate_table[3].dummy_pstate_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.fclk_change_latency_us = fclk_change_latency_us; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_exit_time_us = sr_exit_time_us / 2; // TBD - clk_mgr->bw_params->wm_table.nv_entries[WM_D].dml_input.sr_enter_plus_exit_time_us = sr_enter_plus_exit_time_us / 2; // TBD - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.wm_type = WATERMARKS_MALL; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_dcfclk = min_dcfclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_dcfclk = 0xFFFF; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.min_uclk = min_uclk_mhz; - clk_mgr->bw_params->wm_table.nv_entries[WM_D].pmfw_breakdown.max_uclk = 0xFFFF; -} - -/* - * dcn401_update_bw_bounding_box - * - * This would override some dcn4_01 ip_or_soc initial parameters hardcoded from - * spreadsheet with actual values as per dGPU SKU: - * - with passed few options from dc->config - * - with dentist_vco_frequency from Clk Mgr (currently hardcoded, but might - * need to get it from PM FW) - * - with passed latency values (passed in ns units) in dc-> bb override for - * debugging purposes - * - with passed latencies from VBIOS (in 100_ns units) if available for - * certain dGPU SKU - * - with number of DRAM channels from VBIOS (which differ for certain dGPU SKU - * of the same ASIC) - * - clocks levels with passed clk_table entries from Clk Mgr as reported by PM - * FW for different clocks (which might differ for certain dGPU SKU of the - * same ASIC) - */ -void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params) -{ - dc_assert_fp_enabled(); - - /* Override from passed dc->bb_overrides if available*/ - if (dc->bb_overrides.sr_exit_time_ns) - dc->dml2_options.bbox_overrides.sr_exit_latency_us = - dc->bb_overrides.sr_exit_time_ns / 1000.0; - - if (dc->bb_overrides.sr_enter_plus_exit_time_ns) - dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = - dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - - if (dc->bb_overrides.urgent_latency_ns) - dc->dml2_options.bbox_overrides.urgent_latency_us = - dc->bb_overrides.urgent_latency_ns / 1000.0; - - if (dc->bb_overrides.dram_clock_change_latency_ns) - dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = - dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - - if (dc->bb_overrides.fclk_clock_change_latency_ns) - dc->dml2_options.bbox_overrides.fclk_change_latency_us = - dc->bb_overrides.fclk_clock_change_latency_ns / 1000; - - /* Override from VBIOS if VBIOS bb_info available */ - if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { - struct bp_soc_bb_info bb_info = {0}; - if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dc->dml2_options.bbox_overrides.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; - - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dc->dml2_options.bbox_overrides.sr_enter_plus_exit_latency_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; - - if (bb_info.dram_sr_exit_latency_100ns > 0) - dc->dml2_options.bbox_overrides.sr_exit_latency_us = - bb_info.dram_sr_exit_latency_100ns * 10; - } - } - - /* Override from 
VBIOS for num_chan */ - if (dc->ctx->dc_bios->vram_info.num_chans) { - dc->dml2_options.bbox_overrides.dram_num_chan = - dc->ctx->dc_bios->vram_info.num_chans; - - } - - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dc->dml2_options.bbox_overrides.dram_chanel_width_bytes = - dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; - - dc->dml2_options.bbox_overrides.disp_pll_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - dc->dml2_options.bbox_overrides.xtalclk_mhz = dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000.0; - dc->dml2_options.bbox_overrides.dchub_refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; - dc->dml2_options.bbox_overrides.dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000.0; - - if (dc->clk_mgr->bw_params->clk_table.num_entries > 1) { - unsigned int i = 0; - - dc->dml2_options.bbox_overrides.clks_table.num_states = dc->clk_mgr->bw_params->clk_table.num_entries; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_socclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dispclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; - - dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dppclk_levels = - dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dppclk_levels; - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dcfclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dcfclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].fclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].fclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].memclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].socclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].socclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz) - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz = - 
dc->clk_mgr->bw_params->clk_table.entries[i].dtbclk_mhz; - } - - for (i = 0; i < dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels; i++) { - if (dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz) { - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dispclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz; - dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dppclk_mhz = - dc->clk_mgr->bw_params->clk_table.entries[i].dispclk_mhz; - } - } - } -} - diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h deleted file mode 100644 index 329f1788843c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn401/dcn401_fpu.h +++ /dev/null @@ -1,14 +0,0 @@ -// SPDX-License-Identifier: MIT -// -// Copyright 2024 Advanced Micro Devices, Inc. - -#ifndef __DCN401_FPU_H__ -#define __DCN401_FPU_H__ - -#include "clk_mgr.h" - -void dcn401_build_wm_range_table_fpu(struct clk_mgr *clk_mgr); - -void dcn401_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_params); - -#endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/Makefile b/drivers/gpu/drm/amd/display/dc/dml2/Makefile index 157ecf008d6c..4c21ce42054c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml2/Makefile @@ -81,10 +81,11 @@ AMD_DAL_DML2 = $(addprefix $(AMDDALPATH)/dc/dml2/,$(DML2)) AMD_DISPLAY_FILES += $(AMD_DAL_DML2) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := $(dml2_ccflags) $(frame_warn_flag) +CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags) @@ -94,17 +95,16 @@ CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflag CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_factory.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_standalone_libraries/lib_float_math.o := $(dml2_ccflags) - - CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/src/dml21_wrapper.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_translation_helper.o := $(dml2_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_ccflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_factory.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_core/dml2_core_utils.o := 
$(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_interfaces.o := $(dml2_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_top/dml2_top_soc15.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags) @@ -120,6 +120,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2/dml21/dml21_utils.o := $(dml2_rcflags) DML21 := src/dml2_top/dml2_top_interfaces.o DML21 += src/dml2_top/dml2_top_soc15.o DML21 += src/dml2_core/dml2_core_dcn4.o +DML21 += src/dml2_core/dml2_core_utils.o DML21 += src/dml2_core/dml2_core_factory.o DML21 += src/dml2_core/dml2_core_dcn4_calcs.o DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c index 7ae9c0ba0c9e..715f9019a33e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c @@ -10189,7 +10189,7 @@ dml_uint_t dml_mode_support_ex(struct dml_mode_support_ex_params_st *in_out_para result = mode_support_pwr_states(&in_out_params->out_lowest_state_idx, in_out_params->mode_lib, in_out_params->in_display_cfg, - 0, + in_out_params->in_start_state_idx, in_out_params->mode_lib->states.num_states - 1); if (result) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h index 0670e4dc4fd9..dbeb08466092 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/display_mode_core_structs.h @@ -1917,6 +1917,7 @@ struct display_mode_lib_st { struct dml_mode_support_ex_params_st { struct display_mode_lib_st *mode_lib; const struct dml_display_cfg_st *in_display_cfg; + dml_uint_t in_start_state_idx; dml_uint_t out_lowest_state_idx; struct dml_mode_support_info_st *out_evaluation_info; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c index d47cacfdb695..a06217a9eef6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c @@ -2,7 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
- #include "dml21_wrapper.h" #include "dml2_core_dcn4_calcs.h" #include "dml2_internal_shared_types.h" @@ -11,277 +10,263 @@ #include "dml21_translation_helper.h" #include "bounding_boxes/dcn4_soc_bb.h" -static void dml21_init_socbb_params(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config, - const struct dc *in_dc) +static void dml21_populate_pmo_options(struct dml2_pmo_options *pmo_options, + const struct dc *in_dc, + const struct dml2_configuration_options *config) { - const struct dml2_soc_bb *soc_bb; - const struct dml2_soc_qos_parameters *qos_params; - - switch (in_dc->ctx->dce_version) { - case DCN_VERSION_4_01: - default: - if (config->bb_from_dmub) - soc_bb = config->bb_from_dmub; - else - soc_bb = &dml2_socbb_dcn401; + bool disable_fams2 = !in_dc->debug.fams2_config.bits.enable; - qos_params = &dml_dcn4_variant_a_soc_qos_params; - } - - /* patch soc bb */ - memcpy(&dml_init->soc_bb, soc_bb, sizeof(struct dml2_soc_bb)); + /* ODM options */ + pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm; + pmo_options->disable_dyn_odm_for_multi_stream = true; + pmo_options->disable_dyn_odm_for_stream_with_svp = true; - /* patch qos params */ - memcpy(&dml_init->soc_bb.qos_parameters, qos_params, sizeof(struct dml2_soc_qos_parameters)); -} + pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1); -static void dml21_external_socbb_params(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config) -{ - memcpy(&dml_init->soc_bb, &config->external_socbb_ip_params->soc_bb, sizeof(struct dml2_soc_bb)); -} + /* NOTE: DRR and SubVP Require FAMS2 */ + pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) || + in_dc->debug.force_disable_subvp || + disable_fams2; + pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) || + disable_fams2; + pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) || + disable_fams2; + pmo_options->disable_fams2 = disable_fams2; -static void dml21_external_ip_params(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config) -{ - memcpy(&dml_init->ip_caps, &config->external_socbb_ip_params->ip_params, sizeof(struct dml2_ip_capabilities)); + pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE || + in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY; + pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE; } -static void dml21_init_ip_params(struct dml2_initialize_instance_in_out *dml_init, +/* + * Populate dml_init based on default static values in soc bb. The default + * values are for reference and support at least minimal operation of current + * SoC and DCN hardware. The values could be modifed by subsequent override + * functions to reflect our true hardware capability. 
+ */ +static void populate_default_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc) { - const struct dml2_ip_capabilities *ip_caps; - switch (in_dc->ctx->dce_version) { case DCN_VERSION_4_01: + dml_init->options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp; + dml21_populate_pmo_options(&dml_init->options.pmo_options, in_dc, config); + dml_init->soc_bb = dml2_socbb_dcn401; + dml_init->soc_bb.qos_parameters = dml_dcn4_variant_a_soc_qos_params; + dml_init->ip_caps = dml2_dcn401_max_ip_caps; + break; default: - ip_caps = &dml2_dcn401_max_ip_caps; + memset(dml_init, 0, sizeof(*dml_init)); + DC_ERR("unsupported dcn version for DML21!"); + return; } - - memcpy(&dml_init->ip_caps, ip_caps, sizeof(struct dml2_ip_capabilities)); } -void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, +static void override_dml_init_with_values_from_hardware_default(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc) { - if (config->use_native_soc_bb_construction) - dml21_init_socbb_params(dml_init, config, in_dc); - else - dml21_external_socbb_params(dml_init, config); + dml_init->soc_bb.dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; + dml_init->soc_bb.dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000; + dml_init->soc_bb.dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0; } -void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, +/* + * SMU stands for System Management Unit. It is a power management processor. + * It owns the initialization of dc's clock table and the programming of clock + * values based on dc's requests. + * Our clock values in the base soc bb are dummy placeholders. The real clock + * values are retrieved from SMU firmware into dc's clock table at runtime. + * This function overrides our dummy placeholder values with the real values + * from dc's clock table.
+ */ +static void override_dml_init_with_values_from_smu( + struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc) { - if (config->use_native_soc_bb_construction) - dml21_init_ip_params(dml_init, config, in_dc); - else - dml21_external_ip_params(dml_init, config); -} - -void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, - const struct dml2_configuration_options *config, const struct dc *in_dc) -{ int i; - const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params; const struct clk_limit_table *dc_clk_table = &dc_bw_params->clk_table; - struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb; - struct dml2_soc_state_table *dml_clk_table = &dml_soc_bb->clk_table; + struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table; - /* override clocks if smu is present */ - if (in_dc->clk_mgr->funcs->is_smu_present && in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) { - /* dcfclk */ - if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) { - dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dcfclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz && - dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) { - dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000; - dml_clk_table->dcfclk.num_clk_values = i + 1; - } else { - dml_clk_table->dcfclk.clk_values_khz[i] = 0; - dml_clk_table->dcfclk.num_clk_values = i; - } + if (!in_dc->clk_mgr->funcs->is_smu_present || + !in_dc->clk_mgr->funcs->is_smu_present(in_dc->clk_mgr)) + /* skip if smu is not present */ + return; + + /* dcfclk */ + if (dc_clk_table->num_entries_per_clk.num_dcfclk_levels) { + dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dcfclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dcfclk_mhz && + dc_clk_table->entries[i].dcfclk_mhz > dc_bw_params->dc_mode_limit.dcfclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dcfclk_mhz < dc_bw_params->dc_mode_limit.dcfclk_mhz) { + dml_clk_table->dcfclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dcfclk_mhz * 1000; + dml_clk_table->dcfclk.num_clk_values = i + 1; } else { - dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000; + dml_clk_table->dcfclk.clk_values_khz[i] = 0; + dml_clk_table->dcfclk.num_clk_values = i; } } else { - dml_clk_table->dcfclk.clk_values_khz[i] = 0; + dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000; } + } else { + dml_clk_table->dcfclk.clk_values_khz[i] = 0; } } + } - /* fclk */ - if (dc_clk_table->num_entries_per_clk.num_fclk_levels) { - dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->fclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz && - dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < 
dc_bw_params->dc_mode_limit.fclk_mhz) { - dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000; - dml_clk_table->fclk.num_clk_values = i + 1; - } else { - dml_clk_table->fclk.clk_values_khz[i] = 0; - dml_clk_table->fclk.num_clk_values = i; - } + /* fclk */ + if (dc_clk_table->num_entries_per_clk.num_fclk_levels) { + dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_fclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->fclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.fclk_mhz && + dc_clk_table->entries[i].fclk_mhz > dc_bw_params->dc_mode_limit.fclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].fclk_mhz < dc_bw_params->dc_mode_limit.fclk_mhz) { + dml_clk_table->fclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.fclk_mhz * 1000; + dml_clk_table->fclk.num_clk_values = i + 1; } else { - dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000; + dml_clk_table->fclk.clk_values_khz[i] = 0; + dml_clk_table->fclk.num_clk_values = i; } } else { - dml_clk_table->fclk.clk_values_khz[i] = 0; + dml_clk_table->fclk.clk_values_khz[i] = dc_clk_table->entries[i].fclk_mhz * 1000; } + } else { + dml_clk_table->fclk.clk_values_khz[i] = 0; } } + } - /* uclk */ - if (dc_clk_table->num_entries_per_clk.num_memclk_levels) { - dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->uclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz && - dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) { - dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000; - dml_clk_table->uclk.num_clk_values = i + 1; - } else { - dml_clk_table->uclk.clk_values_khz[i] = 0; - dml_clk_table->uclk.num_clk_values = i; - } + /* uclk */ + if (dc_clk_table->num_entries_per_clk.num_memclk_levels) { + dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->uclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.memclk_mhz && + dc_clk_table->entries[i].memclk_mhz > dc_bw_params->dc_mode_limit.memclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].memclk_mhz < dc_bw_params->dc_mode_limit.memclk_mhz) { + dml_clk_table->uclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.memclk_mhz * 1000; + dml_clk_table->uclk.num_clk_values = i + 1; } else { - dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000; + dml_clk_table->uclk.clk_values_khz[i] = 0; + dml_clk_table->uclk.num_clk_values = i; } } else { - dml_clk_table->uclk.clk_values_khz[i] = 0; + dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000; } + } else { + dml_clk_table->uclk.clk_values_khz[i] = 0; } } + } - /* dispclk */ - if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) { - dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dispclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz && - 
dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) { - dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000; - dml_clk_table->dispclk.num_clk_values = i + 1; - } else { - dml_clk_table->dispclk.clk_values_khz[i] = 0; - dml_clk_table->dispclk.num_clk_values = i; - } + /* dispclk */ + if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) { + dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dispclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dispclk_mhz && + dc_clk_table->entries[i].dispclk_mhz > dc_bw_params->dc_mode_limit.dispclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dispclk_mhz < dc_bw_params->dc_mode_limit.dispclk_mhz) { + dml_clk_table->dispclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dispclk_mhz * 1000; + dml_clk_table->dispclk.num_clk_values = i + 1; } else { - dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000; + dml_clk_table->dispclk.clk_values_khz[i] = 0; + dml_clk_table->dispclk.num_clk_values = i; } } else { - dml_clk_table->dispclk.clk_values_khz[i] = 0; + dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000; } + } else { + dml_clk_table->dispclk.clk_values_khz[i] = 0; } } + } - /* dppclk */ - if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) { - dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dppclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz && - dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) { - dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000; - dml_clk_table->dppclk.num_clk_values = i + 1; - } else { - dml_clk_table->dppclk.clk_values_khz[i] = 0; - dml_clk_table->dppclk.num_clk_values = i; - } + /* dppclk */ + if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) { + dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dppclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dppclk_mhz && + dc_clk_table->entries[i].dppclk_mhz > dc_bw_params->dc_mode_limit.dppclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dppclk_mhz < dc_bw_params->dc_mode_limit.dppclk_mhz) { + dml_clk_table->dppclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dppclk_mhz * 1000; + dml_clk_table->dppclk.num_clk_values = i + 1; } else { - dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000; + dml_clk_table->dppclk.clk_values_khz[i] = 0; + dml_clk_table->dppclk.num_clk_values = i; } } else { - dml_clk_table->dppclk.clk_values_khz[i] = 0; + dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000; } + } else { + dml_clk_table->dppclk.clk_values_khz[i] = 0; } } + } - /* dtbclk */ - if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) { - dml_clk_table->dtbclk.num_clk_values = 
dc_clk_table->num_entries_per_clk.num_dtbclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->dtbclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz && - dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) { - dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000; - dml_clk_table->dtbclk.num_clk_values = i + 1; - } else { - dml_clk_table->dtbclk.clk_values_khz[i] = 0; - dml_clk_table->dtbclk.num_clk_values = i; - } + /* dtbclk */ + if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) { + dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->dtbclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.dtbclk_mhz && + dc_clk_table->entries[i].dtbclk_mhz > dc_bw_params->dc_mode_limit.dtbclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].dtbclk_mhz < dc_bw_params->dc_mode_limit.dtbclk_mhz) { + dml_clk_table->dtbclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.dtbclk_mhz * 1000; + dml_clk_table->dtbclk.num_clk_values = i + 1; } else { - dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000; + dml_clk_table->dtbclk.clk_values_khz[i] = 0; + dml_clk_table->dtbclk.num_clk_values = i; } } else { - dml_clk_table->dtbclk.clk_values_khz[i] = 0; + dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000; } + } else { + dml_clk_table->dtbclk.clk_values_khz[i] = 0; } } + } - /* socclk */ - if (dc_clk_table->num_entries_per_clk.num_socclk_levels) { - dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels; - for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { - if (i < dml_clk_table->socclk.num_clk_values) { - if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz && - dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) { - if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) { - dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000; - dml_clk_table->socclk.num_clk_values = i + 1; - } else { - dml_clk_table->socclk.clk_values_khz[i] = 0; - dml_clk_table->socclk.num_clk_values = i; - } + /* socclk */ + if (dc_clk_table->num_entries_per_clk.num_socclk_levels) { + dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels; + for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) { + if (i < dml_clk_table->socclk.num_clk_values) { + if (config->use_clock_dc_limits && dc_bw_params->dc_mode_limit.socclk_mhz && + dc_clk_table->entries[i].socclk_mhz > dc_bw_params->dc_mode_limit.socclk_mhz) { + if (i == 0 || dc_clk_table->entries[i-1].socclk_mhz < dc_bw_params->dc_mode_limit.socclk_mhz) { + dml_clk_table->socclk.clk_values_khz[i] = dc_bw_params->dc_mode_limit.socclk_mhz * 1000; + dml_clk_table->socclk.num_clk_values = i + 1; } else { - dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000; + dml_clk_table->socclk.clk_values_khz[i] = 0; + dml_clk_table->socclk.num_clk_values = i; } } else { - dml_clk_table->socclk.clk_values_khz[i] = 0; + dml_clk_table->socclk.clk_values_khz[i] = 
dc_clk_table->entries[i].socclk_mhz * 1000; } + } else { + dml_clk_table->socclk.clk_values_khz[i] = 0; } } - - /* do not override phyclks for now */ - /* phyclk */ - // dml_clk_table->phyclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_levels; - // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - // dml_clk_table->phyclk.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_mhz * 1000; - // } - - /* phyclk_d18 */ - // dml_clk_table->phyclk_d18.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d18_levels; - // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - // dml_clk_table->phyclk_d18.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d18_mhz * 1000; - // } - - /* phyclk_d32 */ - // dml_clk_table->phyclk_d32.num_clk_values = dc_clk_table->num_entries_per_clk.num_phyclk_d32_levels; - // for (i = 0; i < DML_MAX_CLK_TABLE_SIZE; i++) { - // dml_clk_table->phyclk_d32.clk_values_khz[i] = dc_clk_table->entries[i].phyclk_d32_mhz * 1000; - // } } +} - dml_soc_bb->dchub_refclk_mhz = in_dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000; - dml_soc_bb->dprefclk_mhz = in_dc->clk_mgr->dprefclk_khz / 1000; - dml_soc_bb->xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000; - dml_soc_bb->dispclk_dppclk_vco_speed_mhz = in_dc->clk_mgr->dentist_vco_freq_khz / 1000.0; +static void override_dml_init_with_values_from_vbios( + struct dml2_initialize_instance_in_out *dml_init, + const struct dml2_configuration_options *config, + const struct dc *in_dc) +{ + const struct clk_bw_params *dc_bw_params = in_dc->clk_mgr->bw_params; + struct dml2_soc_bb *dml_soc_bb = &dml_init->soc_bb; + struct dml2_soc_state_table *dml_clk_table = &dml_init->soc_bb.clk_table; - /* override bounding box paramters from VBIOS */ if (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns > 0) dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us = (in_dc->ctx->dc_bios->bb_info.dram_clock_change_latency_100ns + 9) / 10; @@ -308,32 +293,120 @@ void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_in dml_clk_table->dram_config.channel_width_bytes = in_dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; } - /* override bounding box paramters from DC config */ - if (in_dc->bb_overrides.sr_exit_time_ns) { - dml_soc_bb->power_management_parameters.stutter_exit_latency_us = - in_dc->bb_overrides.sr_exit_time_ns / 1000.0; + dml_init->soc_bb.xtalclk_mhz = in_dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency / 1000; +} + + +static void override_dml_init_with_values_from_dmub(struct dml2_initialize_instance_in_out *dml_init, + const struct dml2_configuration_options *config, + const struct dc *in_dc) +{ + /* + * TODO - There seem to be overlaps between the values overridden from + * dmub and vbios. Investigate and identify the values that DMUB needs + * to own.
+ */ +// const struct dmub_soc_bb_params *dmub_bb_params = +// (const struct dmub_soc_bb_params *)config->bb_from_dmub; + +// if (dmub_bb_params == NULL) +// return; + +// if (dmub_bb_params->dram_clk_change_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us = +// (double) dmub_bb_params->dram_clk_change_blackout_ns / 1000.0; +// if (dmub_bb_params->dram_clk_change_read_only_ns > 0) +// dml_init->soc_bb.power_management_parameters.dram_clk_change_read_only_us = +// (double) dmub_bb_params->dram_clk_change_read_only_ns / 1000.0; +// if (dmub_bb_params->dram_clk_change_write_only_ns > 0) +// dml_init->soc_bb.power_management_parameters.dram_clk_change_write_only_us = +// (double) dmub_bb_params->dram_clk_change_write_only_ns / 1000.0; +// if (dmub_bb_params->fclk_change_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us = +// (double) dmub_bb_params->fclk_change_blackout_ns / 1000.0; +// if (dmub_bb_params->g7_ppt_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.g7_ppt_blackout_us = +// (double) dmub_bb_params->g7_ppt_blackout_ns / 1000.0; +// if (dmub_bb_params->stutter_enter_plus_exit_latency_ns > 0) +// dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us = +// (double) dmub_bb_params->stutter_enter_plus_exit_latency_ns / 1000.0; +// if (dmub_bb_params->stutter_exit_latency_ns > 0) +// dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us = +// (double) dmub_bb_params->stutter_exit_latency_ns / 1000.0; +// if (dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns > 0) +// dml_init->soc_bb.power_management_parameters.z8_stutter_enter_plus_exit_latency_us = +// (double) dmub_bb_params->z8_stutter_enter_plus_exit_latency_ns / 1000.0; +// if (dmub_bb_params->z8_stutter_exit_latency_ns > 0) +// dml_init->soc_bb.power_management_parameters.z8_stutter_exit_latency_us = +// (double) dmub_bb_params->z8_stutter_exit_latency_ns / 1000.0; +// if (dmub_bb_params->z8_min_idle_time_ns > 0) +// dml_init->soc_bb.power_management_parameters.z8_min_idle_time = +// (double) dmub_bb_params->z8_min_idle_time_ns / 1000.0; +// #ifndef TRIM_DML2_DCN6B_IP_SENSITIVE +// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.lpddr5_dram_clk_change_blackout_us = +// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0; +// if (dmub_bb_params->type_b_ppt_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.lpddr5_ppt_blackout_us = +// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0; +// #else +// if (dmub_bb_params->type_b_dram_clk_change_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.type_b_dram_clk_change_blackout_us = +// (double) dmub_bb_params->type_b_dram_clk_change_blackout_ns / 1000.0; +// if (dmub_bb_params->type_b_ppt_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.type_b_ppt_blackout_us = +// (double) dmub_bb_params->type_b_ppt_blackout_ns / 1000.0; +// #endif +// if (dmub_bb_params->vmin_limit_dispclk_khz > 0) +// dml_init->soc_bb.vmin_limit.dispclk_khz = dmub_bb_params->vmin_limit_dispclk_khz; +// if (dmub_bb_params->vmin_limit_dcfclk_khz > 0) +// dml_init->soc_bb.vmin_limit.dcfclk_khz = dmub_bb_params->vmin_limit_dcfclk_khz; +// if (dmub_bb_params->g7_temperature_read_blackout_ns > 0) +// dml_init->soc_bb.power_management_parameters.g7_temperature_read_blackout_us = +// (double) dmub_bb_params->g7_temperature_read_blackout_ns / 1000.0; +} 
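The commented-out body above repeats a single pattern: DMUB reports each latency in nanoseconds, a zero value means the firmware did not provide an override, and non-zero values are converted to microseconds before landing in the soc_bb power-management parameters. A minimal sketch of that guarded conversion, assuming a hypothetical helper name that is not part of this patch:

	#include <stdint.h>

	/*
	 * Illustrative only: zero means "not provided by firmware", so the
	 * existing soc_bb default is kept; otherwise convert ns to us.
	 */
	static inline void override_latency_us(double *soc_bb_latency_us, uint32_t latency_ns)
	{
		if (latency_ns > 0)
			*soc_bb_latency_us = (double)latency_ns / 1000.0;
	}

If the TODO above is resolved in favor of DMUB owning these values, a helper of this shape would collapse most of the block into one line per parameter.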
+ +static void override_dml_init_with_values_from_software_policy(struct dml2_initialize_instance_in_out *dml_init, + const struct dml2_configuration_options *config, + const struct dc *in_dc) +{ + if (!config->use_native_soc_bb_construction) { + dml_init->soc_bb = config->external_socbb_ip_params->soc_bb; + dml_init->ip_caps = config->external_socbb_ip_params->ip_params; } - if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns) { - dml_soc_bb->power_management_parameters.stutter_enter_plus_exit_latency_us = + if (in_dc->bb_overrides.sr_exit_time_ns) + dml_init->soc_bb.power_management_parameters.stutter_exit_latency_us = + in_dc->bb_overrides.sr_exit_time_ns / 1000.0; + + if (in_dc->bb_overrides.sr_enter_plus_exit_time_ns) + dml_init->soc_bb.power_management_parameters.stutter_enter_plus_exit_latency_us = in_dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; - } - if (in_dc->bb_overrides.dram_clock_change_latency_ns) { - dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us = + if (in_dc->bb_overrides.dram_clock_change_latency_ns) + dml_init->soc_bb.power_management_parameters.dram_clk_change_blackout_us = in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - } - if (in_dc->bb_overrides.fclk_clock_change_latency_ns) { - dml_soc_bb->power_management_parameters.fclk_change_blackout_us = + if (in_dc->bb_overrides.fclk_clock_change_latency_ns) + dml_init->soc_bb.power_management_parameters.fclk_change_blackout_us = in_dc->bb_overrides.fclk_clock_change_latency_ns / 1000.0; - } +} - //TODO - // if (in_dc->bb_overrides.dummy_clock_change_latency_ns) { - // dml_soc_bb->power_management_parameters.dram_clk_change_blackout_us = - // in_dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; - // } +void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, + const struct dml2_configuration_options *config, + const struct dc *in_dc) +{ + populate_default_dml_init_params(dml_init, config, in_dc); + + override_dml_init_with_values_from_hardware_default(dml_init, config, in_dc); + + override_dml_init_with_values_from_smu(dml_init, config, in_dc); + + override_dml_init_with_values_from_vbios(dml_init, config, in_dc); + + override_dml_init_with_values_from_dmub(dml_init, config, in_dc); + + override_dml_init_with_values_from_software_policy(dml_init, config, in_dc); } static unsigned int calc_max_hardware_v_total(const struct dc_stream_state *stream) @@ -726,7 +799,6 @@ static void populate_dml21_surface_config_from_plane_state( switch (plane_state->tiling_info.gfxversion) { case DcGfxVersion7: case DcGfxVersion8: - // Placeholder for programming the array_mode break; case DcGfxVersion9: case DcGfxVersion10: @@ -788,6 +860,7 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm plane->pixel_format = dml2_420_10; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: plane->pixel_format = dml2_444_64; @@ -888,10 +961,8 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm case DC_CM2_GPU_MEM_SIZE_171717: plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube; break; - case DC_CM2_GPU_MEM_SIZE_333333: - plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube; - break; case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: + default: //plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined break; } diff --git 
a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h index 73a013be1e48..9880d3e0398e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.h @@ -17,9 +17,7 @@ struct dml2_context; struct dml2_configuration_options; struct dml2_initialize_instance_in_out; -void dml21_apply_soc_bb_overrides(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); -void dml21_initialize_soc_bb_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); -void dml21_initialize_ip_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); +void dml21_populate_dml_init_params(struct dml2_initialize_instance_in_out *dml_init, const struct dml2_configuration_options *config, const struct dc *in_dc); bool dml21_map_dc_state_into_dml_display_cfg(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx); void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state *context); void dml21_extract_watermark_sets(const struct dc *in_dc, union dcn_watermark_set *watermarks, struct dml2_context *in_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c index 930e86cdb88a..ee721606b883 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_utils.c @@ -384,6 +384,7 @@ void dml21_build_fams2_programming(const struct dc *dc, /* reset fams2 data */ memset(&context->bw_ctx.bw.dcn.fams2_stream_base_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES); memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params, 0, sizeof(union dmub_cmd_fams2_config) * DML2_MAX_PLANES); + memset(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2, 0, sizeof(union dmub_fams2_stream_static_sub_state_v2) * DML2_MAX_PLANES); memset(&context->bw_ctx.bw.dcn.fams2_global_config, 0, sizeof(struct dmub_cmd_fams2_global_config)); if (dml_ctx->v21.mode_programming.programming->fams2_required) { @@ -414,9 +415,16 @@ void dml21_build_fams2_programming(const struct dc *dc, memcpy(static_base_state, &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_base_params, sizeof(union dmub_cmd_fams2_config)); - memcpy(static_sub_state, - &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params, - sizeof(union dmub_cmd_fams2_config)); + + if (dc->debug.fams_version.major == 3) { + memcpy(&context->bw_ctx.bw.dcn.fams2_stream_sub_params_v2[num_fams2_streams], + &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params_v2, + sizeof(union dmub_fams2_stream_static_sub_state_v2)); + } else { + memcpy(static_sub_state, + &dml_ctx->v21.mode_programming.programming->stream_programming[dml_stream_idx].fams2_sub_params, + sizeof(union dmub_cmd_fams2_config)); + } switch (dc->debug.fams_version.minor) { case 1: diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c index 208d3651b6ba..03de3cf06ae5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c +++ 
b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c @@ -2,8 +2,6 @@ // // Copyright 2024 Advanced Micro Devices, Inc. -#include <linux/vmalloc.h> - #include "dml2_internal_types.h" #include "dml_top.h" #include "dml2_core_dcn4_calcs.h" @@ -37,15 +35,11 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx) return true; } -static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) +static void dml21_populate_configuration_options(const struct dc *in_dc, + struct dml2_context *dml_ctx, + const struct dml2_configuration_options *config) { - bool disable_fams2; - struct dml2_pmo_options *pmo_options = &dml_ctx->v21.dml_init.options.pmo_options; - - /* ODM options */ - pmo_options->disable_dyn_odm = !config->minimize_dispclk_using_odm; - pmo_options->disable_dyn_odm_for_multi_stream = true; - pmo_options->disable_dyn_odm_for_stream_with_svp = true; + dml_ctx->config = *config; /* UCLK P-State options */ if (in_dc->debug.dml21_force_pstate_method) { @@ -55,52 +49,20 @@ static void dml21_apply_debug_options(const struct dc *in_dc, struct dml2_contex } else { dml_ctx->config.pmo.force_pstate_method_enable = false; } - - pmo_options->disable_vblank = ((in_dc->debug.dml21_disable_pstate_method_mask >> 1) & 1); - - /* NOTE: DRR and SubVP Require FAMS2 */ - disable_fams2 = !in_dc->debug.fams2_config.bits.enable; - pmo_options->disable_svp = ((in_dc->debug.dml21_disable_pstate_method_mask >> 2) & 1) || - in_dc->debug.force_disable_subvp || - disable_fams2; - pmo_options->disable_drr_clamped = ((in_dc->debug.dml21_disable_pstate_method_mask >> 3) & 1) || - disable_fams2; - pmo_options->disable_drr_var = ((in_dc->debug.dml21_disable_pstate_method_mask >> 4) & 1) || - disable_fams2; - pmo_options->disable_fams2 = disable_fams2; - - pmo_options->disable_drr_var_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE || - in_dc->debug.disable_fams_gaming == INGAME_FAMS_MULTI_DISP_CLAMPED_ONLY; - pmo_options->disable_drr_clamped_when_var_active = in_dc->debug.disable_fams_gaming == INGAME_FAMS_DISABLE; } -static void dml21_init(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) +static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) { - switch (in_dc->ctx->dce_version) { - case DCN_VERSION_4_01: - (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_dcn4x_stage2_auto_drr_svp; - break; - default: - (*dml_ctx)->v21.dml_init.options.project_id = dml2_project_invalid; - } - (*dml_ctx)->architecture = dml2_architecture_21; + dml_ctx->architecture = dml2_architecture_21; - /* Store configuration options */ - (*dml_ctx)->config = *config; + dml21_populate_configuration_options(in_dc, dml_ctx, config); DC_FP_START(); - /*Initialize SOCBB and DCNIP params */ - dml21_initialize_soc_bb_params(&(*dml_ctx)->v21.dml_init, config, in_dc); - dml21_initialize_ip_params(&(*dml_ctx)->v21.dml_init, config, in_dc); - dml21_apply_soc_bb_overrides(&(*dml_ctx)->v21.dml_init, config, in_dc); + dml21_populate_dml_init_params(&dml_ctx->v21.dml_init, config, in_dc); - /* apply debug overrides */ - dml21_apply_debug_options(in_dc, *dml_ctx, config); - - /*Initialize DML21 instance */ - dml2_initialize_instance(&(*dml_ctx)->v21.dml_init); + dml2_initialize_instance(&dml_ctx->v21.dml_init); DC_FP_END(); } @@ -111,7 +73,7 @@ bool dml21_create(const struct dc *in_dc, struct dml2_context 
**dml_ctx, const s if (!dml21_allocate_memory(dml_ctx)) return false; - dml21_init(in_dc, dml_ctx, config); + dml21_init(in_dc, *dml_ctx, config); return true; } @@ -328,12 +290,13 @@ static bool dml21_check_mode_support(const struct dc *in_dc, struct dc_state *co return true; } -bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate) +bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, + enum dc_validate_mode validate_mode) { bool out = false; - /* Use dml_validate_only for fast_validate path */ - if (fast_validate) + /* Use dml21_check_mode_support for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */ + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) out = dml21_check_mode_support(in_dc, context, dml_ctx); else out = dml21_mode_check_and_programming(in_dc, context, dml_ctx); @@ -496,7 +459,7 @@ bool dml21_create_copy(struct dml2_context **dst_dml_ctx, return true; } -void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config) +void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config) { dml21_init(in_dc, dml_ctx, config); } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h index 42e715024bc9..15f92029d2e5 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.h @@ -14,6 +14,7 @@ struct dc; struct dc_state; struct dml2_configuration_options; struct dml2_context; +enum dc_validate_mode; /** * dml2_create - Creates dml21_context. @@ -33,22 +34,23 @@ void dml21_copy(struct dml2_context *dst_dml_ctx, struct dml2_context *src_dml_ctx); bool dml21_create_copy(struct dml2_context **dst_dml_ctx, struct dml2_context *src_dml_ctx); -void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config); +void dml21_reinit(const struct dc *in_dc, struct dml2_context *dml_ctx, const struct dml2_configuration_options *config); /** * dml21_validate - Determines if a display configuration is supported or not. * @in_dc: dc. * @context: dc_state to be validated. - * @fast_validate: Fast validate will not populate context.res_ctx. + * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX + * will not populate context.res_ctx. * * Based on fast_validate option internally would call: * - * -dml21_mode_check_and_programming - for non fast_validate option + * -dml21_mode_check_and_programming - for DC_VALIDATE_MODE_AND_PROGRAMMING option * Calculates if dc_state can be supported on the input display * configuration. If supported, generates the necessary HW * programming for the new dc_state. * - * -dml21_check_mode_support - for fast_validate option + * -dml21_check_mode_support - for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX option * Calculates if dc_state can be supported for the input display * config. @@ -56,7 +58,8 @@ void dml21_reinit(const struct dc *in_dc, struct dml2_context **dml_ctx, const s * separate dc_states for validation. * Return: True if mode is supported, false otherwise. 
*/ -bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, bool fast_validate); +bool dml21_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx, + enum dc_validate_mode validate_mode); /* Prepare hubp mcache_regs for hubp mcache ID and split coordinate programming */ void dml21_prepare_mcache_programming(struct dc *in_dc, struct dc_state *context, struct dml2_context *dml_ctx); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h index c047d56527c4..a64ec4dcf11a 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top.h @@ -43,5 +43,4 @@ bool dml2_build_mode_programming(struct dml2_build_mode_programming_in_out *in_o */ bool dml2_build_mcache_programming(struct dml2_build_mcache_programming_in_out *in_out); - #endif diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h index 84c90050668c..b05030926ce8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_dchub_registers.h @@ -46,6 +46,7 @@ struct dml2_display_dlg_regs { uint32_t dst_y_delta_drq_limit; uint32_t refcyc_per_vm_dmdata; uint32_t dmdata_dl_delta; + uint32_t dst_y_svp_drq_limit; // MRQ uint32_t refcyc_per_meta_chunk_vblank_l; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h index 255f05de362c..e8dc6471c0be 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_display_cfg_types.h @@ -222,6 +222,7 @@ struct dml2_composition_cfg { struct { bool enabled; + bool upsp_enabled; struct { double h_ratio; double v_ratio; @@ -426,6 +427,7 @@ struct dml2_stream_parameters { struct dml2_display_cfg { bool gpuvm_enable; + bool ffbm_enable; bool hostvm_enable; // Allocate DET proportionally between streams based on pixel rate diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h index 5f0bc42d1d2f..8c9f414aa6bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_soc_parameter_types.h @@ -93,12 +93,15 @@ struct dml2_soc_power_management_parameters { double dram_clk_change_write_only_us; double fclk_change_blackout_us; double g7_ppt_blackout_us; + double g7_temperature_read_blackout_us; double stutter_enter_plus_exit_latency_us; double stutter_exit_latency_us; double z8_stutter_enter_plus_exit_latency_us; double z8_stutter_exit_latency_us; double z8_min_idle_time; double g6_temp_read_blackout_us[DML_MAX_CLK_TABLE_SIZE]; + double type_b_dram_clk_change_blackout_us; + double type_b_ppt_blackout_us; }; struct dml2_clk_table { @@ -130,6 +133,7 @@ struct dml2_soc_state_table { struct dml2_soc_vmin_clock_limits { unsigned long dispclk_khz; + unsigned long dcfclk_khz; }; struct dml2_soc_bb { @@ -138,6 +142,7 @@ struct dml2_soc_bb { struct dml2_soc_power_management_parameters power_management_parameters; struct dml2_soc_vmin_clock_limits vmin_limit; + double lower_bound_bandwidth_dchub; unsigned int dprefclk_mhz; unsigned int 
xtalclk_mhz; unsigned int pcie_refclk_mhz; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h index 0dbf886d8926..98c0234e2f47 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h @@ -53,7 +53,9 @@ enum dml2_output_type_and_rate__rate { dml2_output_rate_hdmi_rate_6x4 = 9, dml2_output_rate_hdmi_rate_8x4 = 10, dml2_output_rate_hdmi_rate_10x4 = 11, - dml2_output_rate_hdmi_rate_12x4 = 12 + dml2_output_rate_hdmi_rate_12x4 = 12, + dml2_output_rate_hdmi_rate_16x4 = 13, + dml2_output_rate_hdmi_rate_20x4 = 14 }; struct dml2_pmo_options { @@ -279,7 +281,10 @@ struct dml2_per_stream_programming { } phantom_stream; union dmub_cmd_fams2_config fams2_base_params; - union dmub_cmd_fams2_config fams2_sub_params; + union { + union dmub_cmd_fams2_config fams2_sub_params; + union dmub_fams2_stream_static_sub_state_v2 fams2_sub_params_v2; + }; }; //----------------- @@ -674,9 +679,14 @@ struct dml2_display_cfg_programming { // unlimited # of mcache struct dml2_mcache_surface_allocation non_optimized_mcache_allocation[DML2_MAX_PLANES]; + bool failed_prefetch; + bool failed_uclk_pstate; bool failed_mcache_validation; bool failed_dpmm; bool failed_mode_programming; + bool failed_mode_programming_dcfclk; + bool failed_mode_programming_prefetch; + bool failed_mode_programming_flip; bool failed_map_watermarks; } informative; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c index c4dad7164d31..b9cff2198511 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c @@ -4685,7 +4685,10 @@ static void calculate_tdlut_setting( //the tdlut is fetched during the 2 row times of prefetch. 
if (p->setup_for_tdlut) { *p->tdlut_groups_per_2row_ub = (unsigned int)math_ceil2((double) *p->tdlut_bytes_per_frame / *p->tdlut_bytes_per_group, 1); - *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate; + if (*p->tdlut_bytes_per_frame > p->cursor_buffer_size * 1024) + *p->tdlut_opt_time = (*p->tdlut_bytes_per_frame - p->cursor_buffer_size * 1024) / tdlut_drain_rate; + else + *p->tdlut_opt_time = 0; *p->tdlut_drain_time = p->cursor_buffer_size * 1024 / tdlut_drain_rate; *p->tdlut_bytes_to_deliver = (unsigned int) (p->cursor_buffer_size * 1024.0); } @@ -4858,7 +4861,7 @@ static double get_urgent_bandwidth_required( double ReadBandwidthChroma[], double PrefetchBandwidthLuma[], double PrefetchBandwidthChroma[], - double PrefetchBandwidthOto[], + double PrefetchBandwidthMax[], double excess_vactive_fill_bw_l[], double excess_vactive_fill_bw_c[], double cursor_bw[], @@ -4922,9 +4925,9 @@ static double get_urgent_bandwidth_required( l->vm_row_bw = NumberOfDPP[k] * prefetch_vmrow_bw[k]; l->flip_and_active_bw = l->per_plane_flip_bw[k] + ReadBandwidthLuma[k] * l->adj_factor_p0 + ReadBandwidthChroma[k] * l->adj_factor_p1 + cursor_bw[k] * l->adj_factor_cur; l->flip_and_prefetch_bw = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthLuma[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; - l->flip_and_prefetch_bw_oto = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthOto[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; + l->flip_and_prefetch_bw_max = l->per_plane_flip_bw[k] + NumberOfDPP[k] * (PrefetchBandwidthMax[k] * l->adj_factor_p0_pre + PrefetchBandwidthChroma[k] * l->adj_factor_p1_pre) + prefetch_cursor_bw[k] * l->adj_factor_cur_pre; l->active_and_excess_bw = (ReadBandwidthLuma[k] + excess_vactive_fill_bw_l[k]) * l->tmp_nom_adj_factor_p0 + (ReadBandwidthChroma[k] + excess_vactive_fill_bw_c[k]) * l->tmp_nom_adj_factor_p1 + dpte_row_bw[k] + meta_row_bw[k]; - surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_oto); + surface_required_bw[k] = math_max5(l->vm_row_bw, l->flip_and_active_bw, l->flip_and_prefetch_bw, l->active_and_excess_bw, l->flip_and_prefetch_bw_max); /* export peak required bandwidth for the surface */ surface_peak_required_bw[k] = math_max2(surface_required_bw[k], surface_peak_required_bw[k]); @@ -5122,7 +5125,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->Tsw_est3 = 0.0; s->cursor_prefetch_bytes = 0; *p->prefetch_cursor_bw = 0; - *p->RequiredPrefetchBWOTO = 0.0; + *p->RequiredPrefetchBWMax = 0.0; dcc_mrq_enable = (p->dcc_enable && p->mrq_present); @@ -5353,7 +5356,7 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch * mp will fail if ms decides to use equ schedule and mp decides to use oto schedule * and the required bandwidth increases when going from ms to mp */ - *p->RequiredPrefetchBWOTO = s->prefetch_bw_oto; + *p->RequiredPrefetchBWMax = s->prefetch_bw_oto; #ifdef __DML_VBA_DEBUG__ DML_LOG_VERBOSE("DML::%s: vactive_sw_bw_l = %f\n", __func__, p->vactive_sw_bw_l); @@ -5715,8 +5718,14 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch s->TimeForFetchingVM = s->Tvm_equ; s->TimeForFetchingRowInVBlank = s->Tr0_equ; - *p->dst_y_per_vm_vblank = math_ceil2(4.0 * 
s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; - *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; + *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0; + *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0; + + /* equ bw should be propagated so a ceiling of the equ bw is accounted for prior to mode programming. + * Overall bandwidth may be lower when going from mode support to mode programming but final pixel data + * bandwidth may end up higher than what was calculated in mode support. + */ + *p->RequiredPrefetchBWMax = math_max2(s->prefetch_bw_equ, *p->RequiredPrefetchBWMax); #ifdef __DML_VBA_DEBUG__ DML_LOG_VERBOSE("DML::%s: Using equ bw scheduling for prefetch\n", __func__); @@ -6112,7 +6121,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, l->zero_array, //PrefetchBandwidthLuma, l->zero_array, //PrefetchBandwidthChroma, - l->zero_array, //PrefetchBWOTO + l->zero_array, //PrefetchBWMax l->zero_array, l->zero_array, l->zero_array, @@ -6149,7 +6158,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, l->zero_array, //PrefetchBandwidthLuma, l->zero_array, //PrefetchBandwidthChroma, - l->zero_array, //PrefetchBWOTO + l->zero_array, //PrefetchBWMax p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6186,7 +6195,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, p->prefetch_bandwidth_l, p->prefetch_bandwidth_c, - p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw + p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6223,7 +6232,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, p->prefetch_bandwidth_l, p->prefetch_bandwidth_c, - p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw + p->prefetch_bandwidth_max, // to prevent ms/mp mismatch where mp prefetch bw > ms prefetch bw p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -6260,7 +6269,7 @@ static void calculate_peak_bandwidth_required( p->surface_read_bandwidth_c, p->prefetch_bandwidth_l, p->prefetch_bandwidth_c, - p->prefetch_bandwidth_oto, // to prevent ms/mp mismatch when oto bw > total vactive bw + p->prefetch_bandwidth_max, // to prevent ms/mp mismatches where mp prefetch bw > ms prefetch bw p->excess_vactive_fill_bw_l, p->excess_vactive_fill_bw_c, p->cursor_bw, @@ -7487,7 +7496,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->ms.VRatioPreC[k]; CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->ms.RequiredPrefetchPixelDataBWLuma[k]; // prefetch_sw_bw_l CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->ms.RequiredPrefetchPixelDataBWChroma[k]; // prefetch_sw_bw_c - CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &mode_lib->ms.RequiredPrefetchBWOTO[k]; + CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &mode_lib->ms.RequiredPrefetchBWMax[k]; CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->ms.NoTimeForDynamicMetadata[k]; CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->ms.Tno_bw[k]; CalculatePrefetchSchedule_params->Tno_bw_flip = 
&mode_lib->ms.Tno_bw_flip[k]; @@ -7632,7 +7641,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c; calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma; calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax; calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l; calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c; calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw; @@ -7799,7 +7808,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->ms.vactive_sw_bw_c; calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->ms.RequiredPrefetchPixelDataBWLuma; calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->ms.RequiredPrefetchPixelDataBWChroma; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = mode_lib->ms.RequiredPrefetchBWOTO; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = mode_lib->ms.RequiredPrefetchBWMax; calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = mode_lib->ms.excess_vactive_fill_bw_l; calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->ms.excess_vactive_fill_bw_c; calculate_peak_bandwidth_params->cursor_bw = mode_lib->ms.cursor_bw; @@ -7905,6 +7914,7 @@ static noinline_for_stack void dml_core_ms_prefetch_check(struct dml2_core_inter } + static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out_params) { struct dml2_core_internal_display_mode_lib *mode_lib = in_out_params->mode_lib; @@ -11253,7 +11263,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex CalculatePrefetchSchedule_params->VRatioPrefetchC = &mode_lib->mp.VRatioPrefetchC[k]; CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWLuma = &mode_lib->mp.RequiredPrefetchPixelDataBWLuma[k]; CalculatePrefetchSchedule_params->RequiredPrefetchPixelDataBWChroma = &mode_lib->mp.RequiredPrefetchPixelDataBWChroma[k]; - CalculatePrefetchSchedule_params->RequiredPrefetchBWOTO = &s->dummy_single_array[0][k]; + CalculatePrefetchSchedule_params->RequiredPrefetchBWMax = &s->dummy_single_array[0][k]; CalculatePrefetchSchedule_params->NotEnoughTimeForDynamicMetadata = &mode_lib->mp.NotEnoughTimeForDynamicMetadata[k]; CalculatePrefetchSchedule_params->Tno_bw = &mode_lib->mp.Tno_bw[k]; CalculatePrefetchSchedule_params->Tno_bw_flip = &mode_lib->mp.Tno_bw_flip[k]; @@ -11396,7 +11406,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_peak_bandwidth_params->surface_read_bandwidth_c = mode_lib->mp.vactive_sw_bw_c; calculate_peak_bandwidth_params->prefetch_bandwidth_l = mode_lib->mp.RequiredPrefetchPixelDataBWLuma; calculate_peak_bandwidth_params->prefetch_bandwidth_c = mode_lib->mp.RequiredPrefetchPixelDataBWChroma; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0]; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0]; calculate_peak_bandwidth_params->excess_vactive_fill_bw_l = 
mode_lib->mp.excess_vactive_fill_bw_l; calculate_peak_bandwidth_params->excess_vactive_fill_bw_c = mode_lib->mp.excess_vactive_fill_bw_c; calculate_peak_bandwidth_params->cursor_bw = mode_lib->mp.cursor_bw; @@ -11536,7 +11546,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex calculate_peak_bandwidth_params->meta_row_bw = mode_lib->mp.meta_row_bw; calculate_peak_bandwidth_params->prefetch_cursor_bw = mode_lib->mp.prefetch_cursor_bw; calculate_peak_bandwidth_params->prefetch_vmrow_bw = mode_lib->mp.prefetch_vmrow_bw; - calculate_peak_bandwidth_params->prefetch_bandwidth_oto = s->dummy_single_array[0]; + calculate_peak_bandwidth_params->prefetch_bandwidth_max = s->dummy_single_array[0]; calculate_peak_bandwidth_params->flip_bw = mode_lib->mp.final_flip_bw; calculate_peak_bandwidth_params->urgent_burst_factor_l = mode_lib->mp.UrgentBurstFactorLuma; calculate_peak_bandwidth_params->urgent_burst_factor_c = mode_lib->mp.UrgentBurstFactorChroma; @@ -11880,7 +11890,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex } //Maximum Bandwidth Used - s->TotalWRBandwidth = 0; + mode_lib->mp.TotalWRBandwidth = 0; for (k = 0; k < display_cfg->num_streams; ++k) { s->WRBandwidth = 0; if (display_cfg->stream_descriptors[k].writeback.active_writebacks_per_stream > 0) { @@ -11889,7 +11899,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex (display_cfg->stream_descriptors[k].timing.h_total * display_cfg->stream_descriptors[k].writeback.writeback_stream[0].input_height / ((double)display_cfg->stream_descriptors[k].timing.pixel_clock_khz / 1000)) * (display_cfg->stream_descriptors[k].writeback.writeback_stream[0].pixel_format == dml2_444_32 ? 4.0 : 8.0); - s->TotalWRBandwidth = s->TotalWRBandwidth + s->WRBandwidth; + mode_lib->mp.TotalWRBandwidth = mode_lib->mp.TotalWRBandwidth + s->WRBandwidth; } } @@ -13059,6 +13069,10 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_10x4; else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_12x4) out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_12x4; + else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_16x4) + out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_16x4; + else if (mode_lib->ms.support.OutputRate[k] == dml2_core_internal_output_rate_hdmi_rate_20x4) + out->informative.mode_support_info.OutputRate[k] = dml2_output_rate_hdmi_rate_20x4; out->informative.mode_support_info.AlignedYPitch[k] = mode_lib->ms.support.AlignedYPitch[k]; out->informative.mode_support_info.AlignedCPitch[k] = mode_lib->ms.support.AlignedCPitch[k]; @@ -13243,7 +13257,7 @@ void dml2_core_calcs_get_informative(const struct dml2_core_internal_display_mod out->informative.misc.DisplayPipeLineDeliveryTimeLumaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeLumaPrefetch[k]; out->informative.misc.DisplayPipeLineDeliveryTimeChromaPrefetch[k] = mode_lib->mp.DisplayPipeLineDeliveryTimeChromaPrefetch[k]; - out->informative.misc.WritebackRequiredBandwidth = mode_lib->scratch.dml_core_mode_programming_locals.TotalWRBandwidth / 1000.0; + out->informative.misc.WritebackRequiredBandwidth = mode_lib->mp.TotalWRBandwidth / 1000.0; out->informative.misc.WritebackAllowDRAMClockChangeEndPosition[k] = mode_lib->mp.WritebackAllowDRAMClockChangeEndPosition[k]; 
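A behavioral detail worth noting in the writeback hunk above: the running total used to be accumulated in scratch (s->TotalWRBandwidth) but consumed later by dml2_core_calcs_get_informative(), after the programming pass had returned and the scratch area could have been reused; the patch moves the accumulator into mode_lib->mp so the informative output reads persistent mode-programming state. A minimal sketch of the pattern, using hypothetical stand-in types rather than the real DML structures:

	#include <stddef.h>

	/* Hypothetical stand-ins for DML's scratch and mode-programming state. */
	struct scratch_state {
		double wr_bandwidth;       /* per-stream temporary */
	};

	struct mode_programming {
		double total_wr_bandwidth; /* must outlive the programming pass */
	};

	static void accumulate_wr_bandwidth(struct mode_programming *mp,
					    struct scratch_state *s,
					    const double *per_stream_bw,
					    size_t num_streams)
	{
		size_t k;

		mp->total_wr_bandwidth = 0.0;
		for (k = 0; k < num_streams; k++) {
			/* Scratch is fine for intermediates used within the pass. */
			s->wr_bandwidth = per_stream_bw[k];
			/* The total is read after the pass, so it lives in mp. */
			mp->total_wr_bandwidth += s->wr_bandwidth;
		}
	}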
out->informative.misc.WritebackAllowFCLKChangeEndPosition[k] = mode_lib->mp.WritebackAllowFCLKChangeEndPosition[k]; out->informative.misc.DSCCLK_calculated[k] = mode_lib->mp.DSCCLK[k]; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h index bdee6ad7bc59..28687565ac22 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h @@ -102,6 +102,7 @@ struct dml2_core_internal_DmlPipe { double DCFClkDeepSleep; unsigned int DPPPerSurface; bool ScalerEnabled; + bool UPSPEnabled; enum dml2_rotation_angle RotationAngle; bool mirrored; unsigned int ViewportHeight; @@ -186,7 +187,9 @@ enum dml2_core_internal_output_type_rate { dml2_core_internal_output_rate_hdmi_rate_6x4 = 9, dml2_core_internal_output_rate_hdmi_rate_8x4 = 10, dml2_core_internal_output_rate_hdmi_rate_10x4 = 11, - dml2_core_internal_output_rate_hdmi_rate_12x4 = 12 + dml2_core_internal_output_rate_hdmi_rate_12x4 = 12, + dml2_core_internal_output_rate_hdmi_rate_16x4 = 13, + dml2_core_internal_output_rate_hdmi_rate_20x4 = 14 }; struct dml2_core_internal_watermarks { @@ -260,12 +263,14 @@ struct dml2_core_internal_mode_support_info { bool AvgBandwidthSupport; bool UrgVactiveBandwidthSupport; bool EnoughUrgentLatencyHidingSupport; + bool PrefetchScheduleSupported; bool PrefetchSupported; bool PrefetchBandwidthSupported; bool DynamicMetadataSupported; bool VRatioInPrefetchSupported; bool DISPCLK_DPPCLK_Support; bool TotalAvailablePipesSupport; + bool ODMSupport; bool ModeSupport; bool ViewportSizeSupport; @@ -314,9 +319,7 @@ struct dml2_core_internal_mode_support_info { double non_urg_bandwidth_required[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; // same as urg_bandwidth, except not scaled by urg burst factor double non_urg_bandwidth_required_flip[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; - bool avg_bandwidth_support_ok[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; - double max_urgent_latency_us; double max_non_urgent_latency_us; double avg_non_urgent_latency_us; @@ -329,6 +332,8 @@ struct dml2_core_internal_mode_support_info { bool temp_read_or_ppt_support; struct dml2_core_internal_watermarks watermarks; + bool dcfclk_support; + bool qos_bandwidth_support; }; struct dml2_core_internal_mode_support { @@ -350,9 +355,11 @@ struct dml2_core_internal_mode_support { double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting double GlobalDPPCLK; /// <brief the Max DPPCLK freq out of all pipes + double GlobalDTBCLK; /// <brief the Max DTBCLK freq out of all pipes double uclk_freq_mhz; double dram_bw_mbps; double max_dram_bw_mbps; + double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps double MaxFabricClock; /// <brief Basically just the clock freq at the min (or given) state double MaxDCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting @@ -394,9 +401,13 @@ struct dml2_core_internal_mode_support { double TWait[DML2_MAX_PLANES]; bool UnboundedRequestEnabled; + unsigned int compbuf_reserved_space_64b; + bool hw_debug5; unsigned int CompressedBufferSizeInkByte; double VRatioPreY[DML2_MAX_PLANES]; double 
VRatioPreC[DML2_MAX_PLANES]; + unsigned int req_per_swath_ub_l[DML2_MAX_PLANES]; + unsigned int req_per_swath_ub_c[DML2_MAX_PLANES]; unsigned int swath_width_luma_ub[DML2_MAX_PLANES]; unsigned int swath_width_chroma_ub[DML2_MAX_PLANES]; unsigned int RequiredSlots[DML2_MAX_PLANES]; @@ -417,8 +428,8 @@ struct dml2_core_internal_mode_support { double dst_y_prefetch[DML2_MAX_PLANES]; double LinesForVM[DML2_MAX_PLANES]; double LinesForDPTERow[DML2_MAX_PLANES]; - double SwathWidthYSingleDPP[DML2_MAX_PLANES]; - double SwathWidthCSingleDPP[DML2_MAX_PLANES]; + unsigned int SwathWidthYSingleDPP[DML2_MAX_PLANES]; + unsigned int SwathWidthCSingleDPP[DML2_MAX_PLANES]; unsigned int BytePerPixelY[DML2_MAX_PLANES]; unsigned int BytePerPixelC[DML2_MAX_PLANES]; double BytePerPixelInDETY[DML2_MAX_PLANES]; @@ -469,13 +480,58 @@ struct dml2_core_internal_mode_support { double mall_prefetch_sdp_overhead_factor[DML2_MAX_PLANES]; // overhead to the imall or phantom pipe double mall_prefetch_dram_overhead_factor[DML2_MAX_PLANES]; + bool is_using_mall_for_ss[DML2_MAX_PLANES]; + unsigned int meta_row_width_chroma[DML2_MAX_PLANES]; + unsigned int PixelPTEReqHeightC[DML2_MAX_PLANES]; + bool PTE_BUFFER_MODE[DML2_MAX_PLANES]; + unsigned int meta_req_height_chroma[DML2_MAX_PLANES]; + unsigned int meta_pte_bytes_per_frame_ub_c[DML2_MAX_PLANES]; + unsigned int dpde0_bytes_per_frame_ub_c[DML2_MAX_PLANES]; + unsigned int dpte_row_width_luma_ub[DML2_MAX_PLANES]; + unsigned int meta_req_width[DML2_MAX_PLANES]; + unsigned int meta_row_width[DML2_MAX_PLANES]; + unsigned int PixelPTEReqWidthY[DML2_MAX_PLANES]; + unsigned int dpte_row_height_linear[DML2_MAX_PLANES]; + unsigned int PTERequestSizeY[DML2_MAX_PLANES]; + unsigned int dpte_row_width_chroma_ub[DML2_MAX_PLANES]; + unsigned int PixelPTEReqWidthC[DML2_MAX_PLANES]; + unsigned int meta_pte_bytes_per_frame_ub_l[DML2_MAX_PLANES]; + unsigned int dpte_row_height_linear_chroma[DML2_MAX_PLANES]; + unsigned int PTERequestSizeC[DML2_MAX_PLANES]; + unsigned int meta_req_height[DML2_MAX_PLANES]; + unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES]; + unsigned int meta_req_width_chroma[DML2_MAX_PLANES]; + unsigned int PixelPTEReqHeightY[DML2_MAX_PLANES]; + unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES]; + unsigned int vm_group_bytes[DML2_MAX_PLANES]; + unsigned int VReadyOffsetPix[DML2_MAX_PLANES]; + unsigned int VUpdateOffsetPix[DML2_MAX_PLANES]; + unsigned int VUpdateWidthPix[DML2_MAX_PLANES]; + double TSetup[DML2_MAX_PLANES]; + double Tdmdl_vm_raw[DML2_MAX_PLANES]; + double Tdmdl_raw[DML2_MAX_PLANES]; + unsigned int VStartupMin[DML2_MAX_PLANES]; /// <brief Minimum vstartup to meet the prefetch schedule (i.e. the prefetch solution can be found at this vstartup time); not the actual global sync vstartup pos. 
+ double MaxActiveDRAMClockChangeLatencySupported[DML2_MAX_PLANES]; + double MaxActiveFCLKChangeLatencySupported; + // Backend bool RequiresDSC[DML2_MAX_PLANES]; bool RequiresFEC[DML2_MAX_PLANES]; double OutputBpp[DML2_MAX_PLANES]; + double DesiredOutputBpp[DML2_MAX_PLANES]; + double PixelClockBackEnd[DML2_MAX_PLANES]; unsigned int DSCDelay[DML2_MAX_PLANES]; enum dml2_core_internal_output_type OutputType[DML2_MAX_PLANES]; enum dml2_core_internal_output_type_rate OutputRate[DML2_MAX_PLANES]; + bool TotalAvailablePipesSupportNoDSC; + bool TotalAvailablePipesSupportDSC; + unsigned int NumberOfDPPNoDSC; + unsigned int NumberOfDPPDSC; + enum dml2_odm_mode ODMModeNoDSC; + enum dml2_odm_mode ODMModeDSC; + double RequiredDISPCLKPerSurfaceNoDSC; + double RequiredDISPCLKPerSurfaceDSC; + unsigned int EstimatedNumberOfDSCSlices[DML2_MAX_PLANES]; // Bandwidth Related Info double BandwidthAvailableForImmediateFlip; @@ -484,8 +540,14 @@ struct dml2_core_internal_mode_support { double WriteBandwidth[DML2_MAX_PLANES][DML2_MAX_WRITEBACK]; double RequiredPrefetchPixelDataBWLuma[DML2_MAX_PLANES]; double RequiredPrefetchPixelDataBWChroma[DML2_MAX_PLANES]; - /* oto bw should also be considered when calculating peak urgent bw to avoid situations oto/equ mismatches between ms and mp */ - double RequiredPrefetchBWOTO[DML2_MAX_PLANES]; + /* Max bandwidth calculated from prefetch schedule should be considered in addition to the pixel data bw to avoid ms/mp mismatches. + * 1. oto bw should also be considered when calculating peak urgent bw to avoid situations where oto/equ mismatches arise between ms and mp + * + * 2. equ bandwidth needs to be considered for calculating peak urgent bw when equ schedule is used in mode support. + * Some slight difference in variables may cause the pixel data bandwidth to be higher + * even though overall equ prefetch bandwidths can be lower going from ms to mp + */ + double RequiredPrefetchBWMax[DML2_MAX_PLANES]; double cursor_bw[DML2_MAX_PLANES]; double prefetch_cursor_bw[DML2_MAX_PLANES]; double prefetch_vmrow_bw[DML2_MAX_PLANES]; @@ -538,7 +600,44 @@ struct dml2_core_internal_mode_support { bool mall_comb_mcache_c[DML2_MAX_PLANES]; bool lc_comb_mcache[DML2_MAX_PLANES]; + unsigned int vmpg_width_y[DML2_MAX_PLANES]; + unsigned int vmpg_height_y[DML2_MAX_PLANES]; + unsigned int vmpg_width_c[DML2_MAX_PLANES]; + unsigned int vmpg_height_c[DML2_MAX_PLANES]; + + unsigned int meta_row_height_luma[DML2_MAX_PLANES]; + unsigned int meta_row_height_chroma[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES]; + + unsigned int pstate_bytes_required_l[DML2_MAX_PLANES]; + unsigned int pstate_bytes_required_c[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_line[DML2_MAX_PLANES]; + + unsigned int MaximumVStartup[DML2_MAX_PLANES]; + + double HostVMInefficiencyFactor; + double HostVMInefficiencyFactorPrefetch; + + unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES]; + double tdlut_opt_time[DML2_MAX_PLANES]; + double tdlut_drain_time[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES]; + + double Tvm_trips_flip[DML2_MAX_PLANES]; + double Tr0_trips_flip[DML2_MAX_PLANES]; + double Tvm_trips_flip_rounded[DML2_MAX_PLANES]; +
double Tr0_trips_flip_rounded[DML2_MAX_PLANES]; + unsigned int DSTYAfterScaler[DML2_MAX_PLANES]; + unsigned int DSTXAfterScaler[DML2_MAX_PLANES]; + + enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; }; /// @brief A mega structure that houses various info for model programming step. @@ -548,6 +647,7 @@ struct dml2_core_internal_mode_program { double FabricClock; /// <brief Basically just the clock freq at the min (or given) state //double DCFCLK; /// <brief Basically just the clock freq at the min (or given) state and max combine setting double dram_bw_mbps; + double min_available_urgent_bandwidth_MBps; /// <brief Minimum guaranteed available urgent return bandwidth in MBps double uclk_freq_mhz; unsigned int NoOfDPP[DML2_MAX_PLANES]; enum dml2_odm_mode ODMMode[DML2_MAX_PLANES]; @@ -599,6 +699,8 @@ struct dml2_core_internal_mode_program { unsigned int MacroTileHeightC[DML2_MAX_PLANES]; unsigned int MacroTileWidthY[DML2_MAX_PLANES]; unsigned int MacroTileWidthC[DML2_MAX_PLANES]; + double MaximumSwathWidthLuma[DML2_MAX_PLANES]; + double MaximumSwathWidthChroma[DML2_MAX_PLANES]; bool surf_linear128_l[DML2_MAX_PLANES]; bool surf_linear128_c[DML2_MAX_PLANES]; @@ -631,6 +733,14 @@ struct dml2_core_internal_mode_program { double UrgentBurstFactorChroma[DML2_MAX_PLANES]; double UrgentBurstFactorChromaPre[DML2_MAX_PLANES]; + double MaximumSwathWidthInLineBufferLuma; + double MaximumSwathWidthInLineBufferChroma; + + unsigned int vmpg_width_y[DML2_MAX_PLANES]; + unsigned int vmpg_height_y[DML2_MAX_PLANES]; + unsigned int vmpg_width_c[DML2_MAX_PLANES]; + unsigned int vmpg_height_c[DML2_MAX_PLANES]; + double meta_row_bw[DML2_MAX_PLANES]; unsigned int meta_row_bytes[DML2_MAX_PLANES]; unsigned int meta_req_width[DML2_MAX_PLANES]; @@ -652,7 +762,9 @@ struct dml2_core_internal_mode_program { unsigned int PTERequestSizeC[DML2_MAX_PLANES]; double TWait[DML2_MAX_PLANES]; + double Tdmdl_vm_raw[DML2_MAX_PLANES]; double Tdmdl_vm[DML2_MAX_PLANES]; + double Tdmdl_raw[DML2_MAX_PLANES]; double Tdmdl[DML2_MAX_PLANES]; double TSetup[DML2_MAX_PLANES]; unsigned int dpde0_bytes_per_frame_ub_l[DML2_MAX_PLANES]; @@ -684,6 +796,38 @@ struct dml2_core_internal_mode_program { double TCalc; unsigned int TotImmediateFlipBytes; + unsigned int MaxTotalDETInKByte; + unsigned int NomDETInKByte; + unsigned int MinCompressedBufferSizeInKByte; + double PixelClockBackEnd[DML2_MAX_PLANES]; + double OutputBpp[DML2_MAX_PLANES]; + bool dsc_enable[DML2_MAX_PLANES]; + unsigned int num_dsc_slices[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_l[DML2_MAX_PLANES]; + unsigned int meta_row_bytes_per_row_ub_c[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_l[DML2_MAX_PLANES]; + unsigned int dpte_row_bytes_per_row_c[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_chunk[DML2_MAX_PLANES]; + unsigned int cursor_bytes_per_line[DML2_MAX_PLANES]; + unsigned int MaxVStartupLines[DML2_MAX_PLANES]; /// <brief more like vblank for the plane's OTG + double HostVMInefficiencyFactor; + double HostVMInefficiencyFactorPrefetch; + unsigned int tdlut_pte_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_frame[DML2_MAX_PLANES]; + unsigned int tdlut_groups_per_2row_ub[DML2_MAX_PLANES]; + double tdlut_opt_time[DML2_MAX_PLANES]; + double tdlut_drain_time[DML2_MAX_PLANES]; + unsigned int tdlut_bytes_per_group[DML2_MAX_PLANES]; + double Tvm_trips_flip[DML2_MAX_PLANES]; + double Tr0_trips_flip[DML2_MAX_PLANES]; + double Tvm_trips_flip_rounded[DML2_MAX_PLANES]; + double Tr0_trips_flip_rounded[DML2_MAX_PLANES]; + bool 
immediate_flip_required; // any pipes need immediate flip + double SOCCLK; /// <brief Basically just the clock freq at the min (or given) state + double TotalWRBandwidth; + double max_urgent_latency_us; + double df_response_time_us; + // ------------------- // Output // ------------------- @@ -694,9 +838,12 @@ struct dml2_core_internal_mode_program { // Support bool UrgVactiveBandwidthSupport; + bool PrefetchScheduleSupported; + bool UrgentBandwidthSupport; bool PrefetchModeSupported; // <brief Is the prefetch mode (bandwidth and latency) supported bool ImmediateFlipSupported; bool ImmediateFlipSupportedForPipe[DML2_MAX_PLANES]; + bool dcfclk_support; // Clock double Dcfclk; @@ -788,7 +935,7 @@ struct dml2_core_internal_mode_program { // RQ registers bool PTE_BUFFER_MODE[DML2_MAX_PLANES]; unsigned int BIGK_FRAGMENT_SIZE[DML2_MAX_PLANES]; - + double VActiveLatencyHidingUs[DML2_MAX_PLANES]; unsigned int SubViewportLinesNeededInMALL[DML2_MAX_PLANES]; bool is_using_mall_for_ss[DML2_MAX_PLANES]; @@ -1001,10 +1148,10 @@ struct dml2_core_calcs_mode_programming_locals { double dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max]; double surface_dummy_bw[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES]; double surface_dummy_bw0[dml2_core_internal_soc_state_max][dml2_core_internal_bw_max][DML2_MAX_PLANES]; - unsigned int dummy_integer_array[2][DML2_MAX_PLANES]; + unsigned int dummy_integer_array[4][DML2_MAX_PLANES]; enum dml2_output_encoder_class dummy_output_encoder_array[DML2_MAX_PLANES]; double dummy_single_array[2][DML2_MAX_PLANES]; - unsigned int dummy_long_array[4][DML2_MAX_PLANES]; + unsigned int dummy_long_array[8][DML2_MAX_PLANES]; bool dummy_boolean_array[2][DML2_MAX_PLANES]; bool dummy_boolean[2]; double dummy_single[2]; @@ -1028,7 +1175,6 @@ struct dml2_core_calcs_mode_programming_locals { double dlg_vblank_start; double LSetup; double blank_lines_remaining; - double TotalWRBandwidth; double WRBandwidth; struct dml2_core_internal_DmlPipe myPipe; double PixelClockBackEndFactor; @@ -1153,6 +1299,7 @@ struct dml2_core_calcs_CalculateVMRowAndSwath_params { unsigned int HostVMMinPageSize; unsigned int DCCMetaBufferSizeBytes; bool mrq_present; + enum dml2_pstate_method pstate_switch_modes[DML2_MAX_PLANES]; // Output bool *PTEBufferSizeNotExceeded; @@ -1389,7 +1536,7 @@ struct dml2_core_shared_get_urgent_bandwidth_required_locals { double vm_row_bw; double flip_and_active_bw; double flip_and_prefetch_bw; - double flip_and_prefetch_bw_oto; + double flip_and_prefetch_bw_max; double active_and_excess_bw; }; @@ -1418,6 +1565,7 @@ struct dml2_core_shared_CalculateFlipSchedule_locals { struct dml2_core_shared_rq_dlg_get_dlg_reg_locals { unsigned int plane_idx; + unsigned int stream_idx; enum dml2_source_format_class source_format; const struct dml2_timing_cfg *timing; bool dual_plane; @@ -1625,6 +1773,9 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params { double *BytePerPixDETC; unsigned int *DPPPerSurface; bool mrq_present; + unsigned int dummy[2][DML2_MAX_PLANES]; + unsigned int swath_width_luma_ub_single_dpp[DML2_MAX_PLANES]; + unsigned int swath_width_chroma_ub_single_dpp[DML2_MAX_PLANES]; // output unsigned int *req_per_swath_ub_l; @@ -1642,6 +1793,8 @@ struct dml2_core_calcs_CalculateSwathAndDETConfiguration_params { unsigned int *DETBufferSizeC; unsigned int *full_swath_bytes_l; unsigned int *full_swath_bytes_c; + unsigned int *full_swath_bytes_single_dpp_l; + unsigned int *full_swath_bytes_single_dpp_c; bool 
*UnboundedRequestEnabled; unsigned int *compbuf_reserved_space_64b; unsigned int *CompressedBufferSizeInkByte; @@ -1801,7 +1954,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_params { double *VRatioPrefetchC; double *RequiredPrefetchPixelDataBWLuma; double *RequiredPrefetchPixelDataBWChroma; - double *RequiredPrefetchBWOTO; + double *RequiredPrefetchBWMax; bool *NotEnoughTimeForDynamicMetadata; double *Tno_bw; double *Tno_bw_flip; @@ -2038,7 +2191,7 @@ struct dml2_core_calcs_calculate_peak_bandwidth_required_params { double *surface_read_bandwidth_c; double *prefetch_bandwidth_l; double *prefetch_bandwidth_c; - double *prefetch_bandwidth_oto; + double *prefetch_bandwidth_max; double *excess_vactive_fill_bw_l; double *excess_vactive_fill_bw_c; double *cursor_bw; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c index 7a220c0141c2..5f301befed16 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_utils.c @@ -464,7 +464,7 @@ bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode) { - return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements); + return sw_mode == dml2_sw_linear; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c index f486b090bbfc..22969a533a7b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c @@ -389,9 +389,6 @@ static bool map_min_clocks_to_dpm(const struct dml2_core_mode_support_result *mo if (result) result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.dispclk_khz, &state_table->dispclk); - if (result) - result = round_up_to_next_dpm(&display_cfg->min_clocks.dcn4x.deepsleep_dcfclk_khz, &state_table->dcfclk); - for (i = 0; i < DML2_MAX_DCN_PIPES; i++) { if (result) result = round_up_to_next_dpm(&display_cfg->plane_programming[i].min_clocks.dcn4x.dppclk_khz, &state_table->dppclk); diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h index b226225103c3..611c80f4f1bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_debug.h @@ -10,15 +10,74 @@ #define DML_LOG_LEVEL_DEFAULT DML_LOG_LEVEL_WARN #define DML_LOG_INTERNAL(fmt, ...) dm_output_to_console(fmt, ## __VA_ARGS__) -/* ASSERT with message output */ -#define DML_ASSERT_MSG(condition, fmt, ...) \ - do { \ - if (!(condition)) { \ - DML_LOG_ERROR("DML ASSERT hit in %s line %d\n", __func__, __LINE__); \ - DML_LOG_ERROR(fmt, ## __VA_ARGS__); \ - DML_ASSERT(condition); \ - } \ - } while (0) +/* private helper macros */ +#define _BOOL_FORMAT(field) "%s", field ? 
"true" : "false" +#define _UINT_FORMAT(field) "%u", field +#define _INT_FORMAT(field) "%d", field +#define _DOUBLE_FORMAT(field) "%lf", field +#define _ELEMENT_FUNC "function" +#define _ELEMENT_COMP_IF "component_interface" +#define _ELEMENT_TOP_IF "top_interface" +#define _LOG_ENTRY(element) do { \ + DML_LOG_INTERNAL("<"element" name=\""); \ + DML_LOG_INTERNAL(__func__); \ + DML_LOG_INTERNAL("\">\n"); \ +} while (0) +#define _LOG_EXIT(element) DML_LOG_INTERNAL("</"element">\n") +#define _LOG_SCALAR(field, format) do { \ + DML_LOG_INTERNAL(#field" = "format(field)); \ + DML_LOG_INTERNAL("\n"); \ +} while (0) +#define _LOG_ARRAY(field, size, format) do { \ + DML_LOG_INTERNAL(#field " = ["); \ + for (int _i = 0; _i < (int) size; _i++) { \ + DML_LOG_INTERNAL(format(field[_i])); \ + if (_i + 1 == (int) size) \ + DML_LOG_INTERNAL("]\n"); \ + else \ + DML_LOG_INTERNAL(", "); \ +}} while (0) +#define _LOG_2D_ARRAY(field, size0, size1, format) do { \ + DML_LOG_INTERNAL(#field" = ["); \ + for (int _i = 0; _i < (int) size0; _i++) { \ + DML_LOG_INTERNAL("\n\t["); \ + for (int _j = 0; _j < (int) size1; _j++) { \ + DML_LOG_INTERNAL(format(field[_i][_j])); \ + if (_j + 1 == (int) size1) \ + DML_LOG_INTERNAL("]"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ + if (_i + 1 == (int) size0) \ + DML_LOG_INTERNAL("]\n"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ +} while (0) +#define _LOG_3D_ARRAY(field, size0, size1, size2, format) do { \ + DML_LOG_INTERNAL(#field" = ["); \ + for (int _i = 0; _i < (int) size0; _i++) { \ + DML_LOG_INTERNAL("\n\t["); \ + for (int _j = 0; _j < (int) size1; _j++) { \ + DML_LOG_INTERNAL("["); \ + for (int _k = 0; _k < (int) size2; _k++) { \ + DML_LOG_INTERNAL(format(field[_i][_j][_k])); \ + if (_k + 1 == (int) size2) \ + DML_LOG_INTERNAL("]"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ + if (_j + 1 == (int) size1) \ + DML_LOG_INTERNAL("]"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ + if (_i + 1 == (int) size0) \ + DML_LOG_INTERNAL("]\n"); \ + else \ + DML_LOG_INTERNAL(", "); \ + } \ +} while (0) /* fatal errors for unrecoverable DML states until a full reset */ #define DML_LOG_LEVEL_FATAL 0 @@ -28,7 +87,7 @@ #define DML_LOG_LEVEL_WARN 2 /* high level tracing of DML interfaces */ #define DML_LOG_LEVEL_INFO 3 -/* detailed tracing of DML internal components */ +/* tracing of DML internal executions */ #define DML_LOG_LEVEL_DEBUG 4 /* detailed tracing of DML calculation procedure */ #define DML_LOG_LEVEL_VERBOSE 5 @@ -37,30 +96,94 @@ #define DML_LOG_LEVEL DML_LOG_LEVEL_DEFAULT #endif /* #ifndef DML_LOG_LEVEL */ +/* public macros for DML_LOG_LEVEL_FATAL and up */ #define DML_LOG_FATAL(fmt, ...) DML_LOG_INTERNAL("[DML FATAL] " fmt, ## __VA_ARGS__) + +/* public macros for DML_LOG_LEVEL_ERROR and up */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_ERROR #define DML_LOG_ERROR(fmt, ...) DML_LOG_INTERNAL("[DML ERROR] "fmt, ## __VA_ARGS__) +#define DML_ASSERT_MSG(condition, fmt, ...) \ + do { \ + if (!(condition)) { \ + DML_LOG_ERROR("ASSERT hit in %s line %d\n", __func__, __LINE__); \ + DML_LOG_ERROR(fmt, ## __VA_ARGS__); \ + DML_ASSERT(condition); \ + } \ + } while (0) #else #define DML_LOG_ERROR(fmt, ...) ((void)0) +#define DML_ASSERT_MSG(condition, fmt, ...) ((void)0) #endif + +/* public macros for DML_LOG_LEVEL_WARN and up */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_WARN #define DML_LOG_WARN(fmt, ...) DML_LOG_INTERNAL("[DML WARN] "fmt, ## __VA_ARGS__) #else #define DML_LOG_WARN(fmt, ...) 
((void)0) #endif + +/* public macros for DML_LOG_LEVEL_INFO and up */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_INFO #define DML_LOG_INFO(fmt, ...) DML_LOG_INTERNAL("[DML INFO] "fmt, ## __VA_ARGS__) +#define DML_LOG_TOP_IF_ENTER() _LOG_ENTRY(_ELEMENT_TOP_IF) +#define DML_LOG_TOP_IF_EXIT() _LOG_EXIT(_ELEMENT_TOP_IF) #else #define DML_LOG_INFO(fmt, ...) ((void)0) +#define DML_LOG_TOP_IF_ENTER() ((void)0) +#define DML_LOG_TOP_IF_EXIT() ((void)0) #endif + +/* public macros for DML_LOG_LEVEL_DEBUG and up */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_DEBUG -#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL("[DML DEBUG] "fmt, ## __VA_ARGS__) +#define DML_LOG_DEBUG(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__) +#define DML_LOG_COMP_IF_ENTER() _LOG_ENTRY(_ELEMENT_COMP_IF) +#define DML_LOG_COMP_IF_EXIT() _LOG_EXIT(_ELEMENT_COMP_IF) +#define DML_LOG_FUNC_ENTER() _LOG_ENTRY(_ELEMENT_FUNC) +#define DML_LOG_FUNC_EXIT() _LOG_EXIT(_ELEMENT_FUNC) +#define DML_LOG_DEBUG_BOOL(field) _LOG_SCALAR(field, _BOOL_FORMAT) +#define DML_LOG_DEBUG_UINT(field) _LOG_SCALAR(field, _UINT_FORMAT) +#define DML_LOG_DEBUG_INT(field) _LOG_SCALAR(field, _INT_FORMAT) +#define DML_LOG_DEBUG_DOUBLE(field) _LOG_SCALAR(field, _DOUBLE_FORMAT) +#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) _LOG_ARRAY(field, size, _BOOL_FORMAT) +#define DML_LOG_DEBUG_ARRAY_UINT(field, size) _LOG_ARRAY(field, size, _UINT_FORMAT) +#define DML_LOG_DEBUG_ARRAY_INT(field, size) _LOG_ARRAY(field, size, _INT_FORMAT) +#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) _LOG_ARRAY(field, size, _DOUBLE_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _BOOL_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _UINT_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _INT_FORMAT) +#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) _LOG_2D_ARRAY(field, size0, size1, _DOUBLE_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _BOOL_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _UINT_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _INT_FORMAT) +#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) _LOG_3D_ARRAY(field, size0, size1, size2, _DOUBLE_FORMAT) #else #define DML_LOG_DEBUG(fmt, ...) 
((void)0) +#define DML_LOG_COMP_IF_ENTER() ((void)0) +#define DML_LOG_COMP_IF_EXIT() ((void)0) +#define DML_LOG_FUNC_ENTER() ((void)0) +#define DML_LOG_FUNC_EXIT() ((void)0) +#define DML_LOG_DEBUG_BOOL(field) ((void)0) +#define DML_LOG_DEBUG_UINT(field) ((void)0) +#define DML_LOG_DEBUG_INT(field) ((void)0) +#define DML_LOG_DEBUG_DOUBLE(field) ((void)0) +#define DML_LOG_DEBUG_ARRAY_BOOL(field, size) ((void)0) +#define DML_LOG_DEBUG_ARRAY_UINT(field, size) ((void)0) +#define DML_LOG_DEBUG_ARRAY_INT(field, size) ((void)0) +#define DML_LOG_DEBUG_ARRAY_DOUBLE(field, size) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_BOOL(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_UINT(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_INT(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_2D_ARRAY_DOUBLE(field, size0, size1) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_BOOL(field, size0, size1, size2) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_UINT(field, size0, size1, size2) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_INT(field, size0, size1, size2) ((void)0) +#define DML_LOG_DEBUG_3D_ARRAY_DOUBLE(field, size0, size1, size2) ((void)0) #endif + +/* public macros for DML_LOG_LEVEL_VERBOSE */ #if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE -#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL("[DML VERBOSE] "fmt, ## __VA_ARGS__) +#define DML_LOG_VERBOSE(fmt, ...) DML_LOG_INTERNAL(fmt, ## __VA_ARGS__) #else #define DML_LOG_VERBOSE(fmt, ...) ((void)0) -#endif +#endif /* #if DML_LOG_LEVEL >= DML_LOG_LEVEL_VERBOSE */ #endif /* __DML2_DEBUG_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h index 00688b9f1df4..d52aa82283b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/inc/dml2_internal_shared_types.h @@ -202,6 +202,8 @@ struct dml2_core_mode_support_result { } active; unsigned int dispclk_khz; + unsigned int dpprefclk_khz; + unsigned int dtbrefclk_khz; unsigned int dcfclk_deepsleep_khz; unsigned int socclk_khz; @@ -446,13 +448,17 @@ struct dml2_core_internal_state_intermediates { }; struct dml2_core_mode_support_locals { - struct dml2_core_calcs_mode_support_ex mode_support_ex_params; + union { + struct dml2_core_calcs_mode_support_ex mode_support_ex_params; + }; struct dml2_display_cfg svp_expanded_display_cfg; struct dml2_calculate_mcache_allocation_in_out calc_mcache_allocation_params; }; struct dml2_core_mode_programming_locals { - struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params; + union { + struct dml2_core_calcs_mode_programming_ex mode_programming_ex_params; + }; struct dml2_display_cfg svp_expanded_display_cfg; }; diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c index 6b3b8803e0ae..a56e75cdf712 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_mall_phantom.c @@ -868,7 +868,7 @@ bool dml2_svp_remove_all_phantom_pipes(struct dml2_context *ctx, struct dc_state /* Conditions for setting up phantom pipes for SubVP: * 1. Not force disable SubVP - * 2. Full update (i.e. !fast_validate) + * 2. Full update (i.e. DC_VALIDATE_MODE_AND_PROGRAMMING) * 3. Enough pipes are available to support SubVP (TODO: Which pipes will use VACTIVE / VBLANK / SUBVP?) * 4. Display configuration passes validation * 5. 
(Config doesn't support MCLK in VACTIVE/VBLANK || dc->debug.force_subvp_mclk_switch) diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c index 5de775fd8fce..3b866e876bf4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c @@ -953,6 +953,7 @@ static void populate_dml_surface_cfg_from_plane_state(enum dml_project_id dml2_p out->SourcePixelFormat[location] = dml_420_10; break; case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: out->SourcePixelFormat[location] = dml_444_64; @@ -1188,22 +1189,6 @@ static unsigned int map_plane_to_dml_display_cfg(const struct dml2_context *dml2 return location; } -static void apply_legacy_svp_drr_settings(struct dml2_context *dml2, const struct dc_state *state, struct dml_display_cfg_st *dml_dispcfg) -{ - int i; - - if (state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { - ASSERT(state->stream_count == 1); - dml_dispcfg->timing.DRRDisplay[0] = true; - } else if (state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid) { - - for (i = 0; i < dml_dispcfg->num_timings; i++) { - if (dml2->v20.scratch.dml_to_dc_pipe_mapping.disp_cfg_to_stream_id[i] == state->streams[state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index]->stream_id) - dml_dispcfg->timing.DRRDisplay[i] = true; - } - } -} - static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2, struct dc_state *state) { unsigned int i; @@ -1436,9 +1421,6 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat } } } - - if (!dml2->config.use_native_pstate_optimization) - apply_legacy_svp_drr_settings(dml2, context, dml_dispcfg); } void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs, diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c index 525b7d04bf84..0318260370ed 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c @@ -24,8 +24,6 @@ * */ -#include <linux/vmalloc.h> - #include "display_mode_core.h" #include "dml2_internal_types.h" #include "dml2_utils.h" @@ -95,12 +93,17 @@ static void map_hw_resources(struct dml2_context *dml2, static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2, const struct dml_display_cfg_st *display_cfg, - struct dml_mode_support_info_st *evaluation_info) + struct dml_mode_support_info_st *evaluation_info, + enum dc_validate_mode validate_mode) { struct dml2_wrapper_scratch *s = &dml2->v20.scratch; s->mode_support_params.mode_lib = &dml2->v20.dml_core_ctx; s->mode_support_params.in_display_cfg = display_cfg; + if (validate_mode == DC_VALIDATE_MODE_ONLY) + s->mode_support_params.in_start_state_idx = dml2->v20.dml_core_ctx.states.num_states - 1; + else + s->mode_support_params.in_start_state_idx = 0; s->mode_support_params.out_evaluation_info = evaluation_info; memset(evaluation_info, 0, sizeof(struct dml_mode_support_info_st)); @@ -112,10 +115,8 @@ static unsigned int pack_and_call_dml_mode_support_ex(struct dml2_context *dml2, static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrapper_optimize_configuration_params *p) { int unused_dpps = p->ip_params->max_num_dpp; - int i, j; - int odms_needed, refresh_rate_hz, dpps_needed, 
subvp_height, pstate_width_fw_delay_lines, surface_count; - int subvp_timing_to_add, new_timing_index, subvp_surface_to_add, new_surface_index; - float frame_time_sec, max_frame_time_sec; + int i; + int odms_needed; int largest_blend_and_timing = 0; bool optimization_done = false; @@ -130,79 +131,6 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe if (p->new_display_config != p->cur_display_config) *p->new_display_config = *p->cur_display_config; - // Optimize P-State Support - if (dml2->config.use_native_pstate_optimization) { - if (p->cur_mode_support_info->DRAMClockChangeSupport[0] == dml_dram_clock_change_unsupported) { - // Find a display with < 120Hz refresh rate with maximal refresh rate that's not already subvp - subvp_timing_to_add = -1; - subvp_surface_to_add = -1; - max_frame_time_sec = 0; - surface_count = 0; - for (i = 0; i < (int) p->cur_display_config->num_timings; i++) { - refresh_rate_hz = (int)div_u64((unsigned long long) p->cur_display_config->timing.PixelClock[i] * 1000 * 1000, - (p->cur_display_config->timing.HTotal[i] * p->cur_display_config->timing.VTotal[i])); - if (refresh_rate_hz < 120) { - // Check its upstream surfaces to see if this one could be converted to subvp. - dpps_needed = 0; - for (j = 0; j < (int) p->cur_display_config->num_surfaces; j++) { - if (p->cur_display_config->plane.BlendingAndTiming[j] == i && - p->cur_display_config->plane.UseMALLForPStateChange[j] == dml_use_mall_pstate_change_disable) { - dpps_needed += p->cur_mode_support_info->DPPPerSurface[j]; - subvp_surface_to_add = j; - surface_count++; - } - } - - if (surface_count == 1 && dpps_needed > 0 && dpps_needed <= unused_dpps) { - frame_time_sec = (float)1 / refresh_rate_hz; - if (frame_time_sec > max_frame_time_sec) { - max_frame_time_sec = frame_time_sec; - subvp_timing_to_add = i; - } - } - } - } - if (subvp_timing_to_add >= 0) { - new_timing_index = p->new_display_config->num_timings++; - new_surface_index = p->new_display_config->num_surfaces++; - // Add a phantom pipe reflecting the main pipe's timing - dml2_util_copy_dml_timing(&p->new_display_config->timing, new_timing_index, subvp_timing_to_add); - - pstate_width_fw_delay_lines = (int)(((double)(p->config->svp_pstate.subvp_fw_processing_delay_us + - p->config->svp_pstate.subvp_pstate_allow_width_us) / 1000000) * - (p->new_display_config->timing.PixelClock[subvp_timing_to_add] * 1000 * 1000) / - (double)p->new_display_config->timing.HTotal[subvp_timing_to_add]); - - subvp_height = p->cur_mode_support_info->SubViewportLinesNeededInMALL[subvp_timing_to_add] + pstate_width_fw_delay_lines; - - p->new_display_config->timing.VActive[new_timing_index] = subvp_height; - p->new_display_config->timing.VTotal[new_timing_index] = subvp_height + - p->new_display_config->timing.VTotal[subvp_timing_to_add] - p->new_display_config->timing.VActive[subvp_timing_to_add]; - - p->new_display_config->output.OutputDisabled[new_timing_index] = true; - - p->new_display_config->plane.UseMALLForPStateChange[subvp_surface_to_add] = dml_use_mall_pstate_change_sub_viewport; - - dml2_util_copy_dml_plane(&p->new_display_config->plane, new_surface_index, subvp_surface_to_add); - dml2_util_copy_dml_surface(&p->new_display_config->surface, new_surface_index, subvp_surface_to_add); - - p->new_display_config->plane.ViewportHeight[new_surface_index] = subvp_height; - p->new_display_config->plane.ViewportHeightChroma[new_surface_index] = subvp_height; - p->new_display_config->plane.ViewportStationary[new_surface_index] = false; - - 
p->new_display_config->plane.UseMALLForStaticScreen[new_surface_index] = dml_use_mall_static_screen_disable; - p->new_display_config->plane.UseMALLForPStateChange[new_surface_index] = dml_use_mall_pstate_change_phantom_pipe; - - p->new_display_config->plane.NumberOfCursors[new_surface_index] = 0; - - p->new_policy->ImmediateFlipRequirement[new_surface_index] = dml_immediate_flip_not_required; - - p->new_display_config->plane.BlendingAndTiming[new_surface_index] = new_timing_index; - - optimization_done = true; - } - } - } // Optimize Clocks if (!optimization_done) { @@ -226,7 +154,8 @@ static bool optimize_configuration(struct dml2_context *dml2, struct dml2_wrappe return optimization_done; } -static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state) +static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *dml2, struct dc_state *display_state, + enum dc_validate_mode validate_mode) { struct dml2_calculate_lowest_supported_state_for_temp_read_scratch *s = &dml2->v20.scratch.dml2_calculate_lowest_supported_state_for_temp_read_scratch; struct dml2_wrapper_scratch *s_global = &dml2->v20.scratch; @@ -268,7 +197,8 @@ static int calculate_lowest_supported_state_for_temp_read(struct dml2_context *d dml2->v20.dml_core_ctx.states.state_array[j].dram_clock_change_latency_us = s_global->dummy_pstate_table[i].dummy_pstate_latency_us; } - dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info); + dml_result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, &s->evaluation_info, + validate_mode); if (dml_result && s->evaluation_info.DRAMClockChangeSupport[0] == dml_dram_clock_change_vactive) { map_hw_resources(dml2, &s->cur_display_config, &s->evaluation_info); @@ -333,7 +263,8 @@ static bool does_configuration_meet_sw_policies(struct dml2_context *ctx, const } static bool dml_mode_support_wrapper(struct dml2_context *dml2, - struct dc_state *display_state) + struct dc_state *display_state, + enum dc_validate_mode validate_mode) { struct dml2_wrapper_scratch *s = &dml2->v20.scratch; unsigned int result = 0, i; @@ -369,7 +300,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, - &s->mode_support_info); + &s->mode_support_info, + validate_mode); if (result) result = does_configuration_meet_sw_policies(dml2, &s->cur_display_config, &s->mode_support_info); @@ -390,7 +322,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, dml2->v20.dml_core_ctx.policy = s->new_policy; optimized_result = pack_and_call_dml_mode_support_ex(dml2, &s->new_display_config, - &s->mode_support_info); + &s->mode_support_info, + validate_mode); if (optimized_result) optimized_result = does_configuration_meet_sw_policies(dml2, &s->new_display_config, &s->mode_support_info); @@ -409,7 +342,8 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, if (!optimized_result) { result = pack_and_call_dml_mode_support_ex(dml2, &s->cur_display_config, - &s->mode_support_info); + &s->mode_support_info, + validate_mode); } } @@ -419,118 +353,7 @@ static bool dml_mode_support_wrapper(struct dml2_context *dml2, return result; } -static int find_drr_eligible_stream(struct dc_state *display_state) -{ - int i; - - for (i = 0; i < display_state->stream_count; i++) { - if (dc_state_get_stream_subvp_type(display_state, display_state->streams[i]) == SUBVP_NONE - && 
display_state->streams[i]->ignore_msa_timing_param) { - // Use ignore_msa_timing_param flag to identify as DRR - return i; - } - } - - return -1; -} - -static bool optimize_pstate_with_svp_and_drr(struct dml2_context *dml2, struct dc_state *display_state) -{ - struct dml2_wrapper_scratch *s = &dml2->v20.scratch; - bool pstate_optimization_done = false; - bool pstate_optimization_success = false; - bool result = false; - int drr_display_index = 0, non_svp_streams = 0; - bool force_svp = dml2->config.svp_pstate.force_enable_subvp; - - display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false; - - result = dml_mode_support_wrapper(dml2, display_state); - - if (!result) { - pstate_optimization_done = true; - } else if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported && !force_svp) { - pstate_optimization_success = true; - pstate_optimization_done = true; - } - - if (display_state->stream_count == 1 && dml2->config.callbacks.can_support_mclk_switch_using_fw_based_vblank_stretch(dml2->config.callbacks.dc, display_state)) { - display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = true; - - result = dml_mode_support_wrapper(dml2, display_state); - } else { - non_svp_streams = display_state->stream_count; - - while (!pstate_optimization_done) { - result = dml_mode_programming(&dml2->v20.dml_core_ctx, s->mode_support_params.out_lowest_state_idx, &s->cur_display_config, true); - - // Always try adding SVP first - if (result) - result = dml2_svp_add_phantom_pipe_to_dc_state(dml2, display_state, &s->mode_support_info); - else - pstate_optimization_done = true; - - - if (result) { - result = dml_mode_support_wrapper(dml2, display_state); - } else { - pstate_optimization_done = true; - } - - if (result) { - non_svp_streams--; - - if (s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) { - if (dml2_svp_validate_static_schedulability(dml2, display_state, s->mode_support_info.DRAMClockChangeSupport[0])) { - pstate_optimization_success = true; - pstate_optimization_done = true; - } else { - pstate_optimization_success = false; - pstate_optimization_done = false; - } - } else { - drr_display_index = find_drr_eligible_stream(display_state); - - // If there is only 1 remaining non SubVP pipe that is DRR, check static - // schedulability for SubVP + DRR. 
- if (non_svp_streams == 1 && drr_display_index >= 0) { - if (dml2_svp_drr_schedulable(dml2, display_state, &display_state->streams[drr_display_index]->timing)) { - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = true; - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index = drr_display_index; - result = dml_mode_support_wrapper(dml2, display_state); - } - - if (result && s->mode_support_info.DRAMClockChangeSupport[0] != dml_dram_clock_change_unsupported) { - pstate_optimization_success = true; - pstate_optimization_done = true; - } else { - pstate_optimization_success = false; - pstate_optimization_done = false; - } - } - - if (pstate_optimization_success) { - pstate_optimization_done = true; - } else { - pstate_optimization_done = false; - } - } - } - } - - if (!pstate_optimization_success) { - dml2_svp_remove_all_phantom_pipes(dml2, display_state); - display_state->bw_ctx.bw.dcn.clk.fw_based_mclk_switching = false; - display_state->bw_ctx.bw.dcn.legacy_svp_drr_stream_index_valid = false; - result = dml_mode_support_wrapper(dml2, display_state); - } - - return result; -} - -static bool call_dml_mode_support_and_programming(struct dc_state *context) +static bool call_dml_mode_support_and_programming(struct dc_state *context, enum dc_validate_mode validate_mode) { unsigned int result = 0; unsigned int min_state = 0; @@ -544,16 +367,13 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context) struct dml2_wrapper_scratch *s = &dml2->v20.scratch; if (!context->streams[0]->sink->link->dc->caps.is_apu) { - min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context); + min_state_for_g6_temp_read = calculate_lowest_supported_state_for_temp_read(dml2, context, + validate_mode); ASSERT(min_state_for_g6_temp_read >= 0); } - if (!dml2->config.use_native_pstate_optimization) { - result = optimize_pstate_with_svp_and_drr(dml2, context); - } else { - result = dml_mode_support_wrapper(dml2, context); - } + result = dml_mode_support_wrapper(dml2, context, validate_mode); /* Upon trying to set certain frequencies in FRL, min_state_for_g6_temp_read is reported as -1. This leads to an invalid value of min_state causing crashes later on. * Use the default logic for min_state only when min_state_for_g6_temp_read is a valid value. In other cases, use the value calculated by the DML directly. @@ -575,7 +395,8 @@ static bool call_dml_mode_support_and_programming(struct dc_state *context) return result; } -static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context) +static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_state *context, + enum dc_validate_mode validate_mode) { struct dml2_context *dml2 = context->bw_ctx.dml2; struct dml2_wrapper_scratch *s = &dml2->v20.scratch; @@ -611,7 +432,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s copy_dummy_pstate_table(s->dummy_pstate_table, in_dc->clk_mgr->bw_params->dummy_pstate_table, 4); - result = call_dml_mode_support_and_programming(context); + result = call_dml_mode_support_and_programming(context, validate_mode); /* Call map dc pipes to map the pipes based on the DML output. For correctly determining if recalculation * is required or not, the resource context needs to correctly reflect the number of active pipes. We would * only know the correct number of active pipes after dml2_map_dc_pipes is called.
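For context, a minimal sketch (not part of the patch; the helper name is hypothetical) of what the new enum dc_validate_mode plumbing buys in the DML mode-support query. As shown earlier in this file, pack_and_call_dml_mode_support_ex() starts the state sweep at the last (highest) power state for DC_VALIDATE_MODE_ONLY, since a pure support check only needs a pass/fail answer, while the programming path still sweeps up from state 0:

```c
/* Illustrative sketch only: start-state selection for the DML sweep. */
static unsigned int pick_start_state_idx(unsigned int num_states,
					 enum dc_validate_mode validate_mode)
{
	/* Mode-only validation: test just the max state. */
	if (validate_mode == DC_VALIDATE_MODE_ONLY)
		return num_states - 1;

	/* Programming/state-index paths sweep from the lowest state. */
	return 0;
}
```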
@@ -628,7 +449,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s need_recalculation = dml2_verify_det_buffer_configuration(dml2, context, &dml2->det_helper_scratch); if (need_recalculation) { /* Engage the DML again if recalculation is required. */ - call_dml_mode_support_and_programming(context); + call_dml_mode_support_and_programming(context, validate_mode); if (!dml2->config.skip_hw_state_mapping) { dml2_map_dc_pipes(dml2, context, &s->cur_display_config, &s->dml_to_dc_pipe_mapping, in_dc->current_state); } @@ -684,7 +505,7 @@ static bool dml2_validate_and_build_resource(const struct dc *in_dc, struct dc_s return result; } -static bool dml2_validate_only(struct dc_state *context) +static bool dml2_validate_only(struct dc_state *context, enum dc_validate_mode validate_mode) { struct dml2_context *dml2; unsigned int result = 0; @@ -708,7 +529,8 @@ static bool dml2_validate_only(struct dc_state *context) result = pack_and_call_dml_mode_support_ex(dml2, &dml2->v20.scratch.cur_display_config, - &dml2->v20.scratch.mode_support_info); + &dml2->v20.scratch.mode_support_info, + validate_mode); if (result) result = does_configuration_meet_sw_policies(dml2, &dml2->v20.scratch.cur_display_config, &dml2->v20.scratch.mode_support_info); @@ -723,7 +545,8 @@ static void dml2_apply_debug_options(const struct dc *dc, struct dml2_context *d } } -bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, bool fast_validate) +bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, + enum dc_validate_mode validate_mode) { bool out = false; @@ -733,17 +556,17 @@ bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2 /* DML2.1 validation path */ if (dml2->architecture == dml2_architecture_21) { - out = dml21_validate(in_dc, context, dml2, fast_validate); + out = dml21_validate(in_dc, context, dml2, validate_mode); return out; } DC_FP_START(); - /* Use dml_validate_only for fast_validate path */ - if (fast_validate) - out = dml2_validate_only(context); + /* Use dml_validate_only for DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX path */ + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) + out = dml2_validate_only(context, validate_mode); else - out = dml2_validate_and_build_resource(in_dc, context); + out = dml2_validate_and_build_resource(in_dc, context, validate_mode); DC_FP_END(); @@ -757,8 +580,8 @@ static inline struct dml2_context *dml2_allocate_memory(void) static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { - if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) { - dml21_reinit(in_dc, dml2, config); + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) { + dml21_reinit(in_dc, *dml2, config); return; } @@ -803,9 +626,7 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { // TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete. 
- if ((in_dc->debug.using_dml21) - && (in_dc->ctx->dce_version == DCN_VERSION_4_01 - )) + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) return dml21_create(in_dc, dml2, config); // Allocate Mode Lib Ctx @@ -874,8 +695,8 @@ void dml2_reinit(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2) { - if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version == DCN_VERSION_4_01)) { - dml21_reinit(in_dc, dml2, config); + if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) { + dml21_reinit(in_dc, *dml2, config); return; } diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h index 5100f269368e..c384e141cebc 100644 --- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h +++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h @@ -240,7 +240,7 @@ struct dml2_configuration_options { bool use_clock_dc_limits; bool gpuvm_enable; bool force_tdlut_enable; - struct dml2_soc_bb *bb_from_dmub; + void *bb_from_dmub; }; /* @@ -272,7 +272,7 @@ void dml2_reinit(const struct dc *in_dc, * dml2_validate - Determines if a display configuration is supported or not. * @in_dc: dc. * @context: dc_state to be validated. - * @fast_validate: Fast validate will not populate context.res_ctx. + * @validate_mode: DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX will not populate context.res_ctx. * * DML1.0 compatible interface for validation. * @@ -295,7 +295,7 @@ void dml2_reinit(const struct dc *in_dc, bool dml2_validate(const struct dc *in_dc, struct dc_state *context, struct dml2_context *dml2, - bool fast_validate); + enum dc_validate_mode validate_mode); /* * dml2_extract_dram_and_fclk_change_support - Extracts the FCLK and UCLK change support info. 
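A caller-side view of the dml2_validate() interface change documented above (illustrative call sites, not lines from the patch): the old fast_validate boolean widens into a three-state enum, with the two non-programming modes leaving context.res_ctx unpopulated.

```c
/* before: boolean fast-validate flag */
out = dml2_validate(in_dc, context, dml2, true /* fast_validate */);

/* after: mode-support check only, context.res_ctx is not populated */
out = dml2_validate(in_dc, context, dml2, DC_VALIDATE_MODE_ONLY);

/* after: full validation that also builds resource/HW state */
out = dml2_validate(in_dc, context, dml2, DC_VALIDATE_MODE_AND_PROGRAMMING);
```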
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c index 97bf26fa3573..36187f890d5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.c @@ -231,7 +231,7 @@ static struct dpp_funcs dcn401_dpp_funcs = { .dpp_program_regamma_pwl = NULL, .dpp_set_pre_degam = dpp3_set_pre_degam, .dpp_program_input_lut = NULL, - .dpp_full_bypass = dpp401_full_bypass, + .dpp_full_bypass = NULL, .dpp_setup = dpp401_dpp_setup, .dpp_program_degamma_pwl = NULL, .dpp_program_cm_dealpha = dpp3_program_cm_dealpha, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h index ecaa976e1f52..5a6a861402b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp.h @@ -641,6 +641,7 @@ uint32_t ISHARP_DELTA_DATA; \ uint32_t ISHARP_DELTA_INDEX; \ uint32_t ISHARP_NLDELTA_SOFT_CLIP + struct dcn401_dpp_registers { DPP_REG_VARIABLE_LIST_DCN401; }; @@ -683,8 +684,6 @@ void dpp401_dscl_set_scaler_manual_scale( struct dpp *dpp_base, const struct scaler_data *scl_data); -void dpp401_full_bypass(struct dpp *dpp_base); - void dpp401_dpp_setup( struct dpp *dpp_base, enum surface_pixel_format format, diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c index 712aff7e17f7..7aab77b58869 100644 --- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c +++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_cm.c @@ -88,30 +88,6 @@ enum dscl_mode_sel { DSCL_MODE_DSCL_BYPASS = 6 }; -void dpp401_full_bypass(struct dpp *dpp_base) -{ - struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base); - - /* Input pixel format: ARGB8888 */ - REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0, - CNVC_SURFACE_PIXEL_FORMAT, 0x8); - - /* Zero expansion */ - REG_SET_3(FORMAT_CONTROL, 0, - CNVC_BYPASS, 0, - FORMAT_CONTROL__ALPHA_EN, 0, - FORMAT_EXPANSION_MODE, 0); - - /* COLOR_KEYER_CONTROL.COLOR_KEYER_EN = 0 this should be default */ - if (dpp->tf_mask->CM_BYPASS_EN) - REG_SET(CM_CONTROL, 0, CM_BYPASS_EN, 1); - else - REG_SET(CM_CONTROL, 0, CM_BYPASS, 1); - - /* Setting degamma bypass for now */ - REG_SET(CM_DGAM_CONTROL, 0, CM_DGAM_LUT_MODE, 0); -} - void dpp401_set_cursor_attributes( struct dpp *dpp_base, struct dc_cursor_attributes *cursor_attributes) diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 11535922b5ff..1f53a9f0c0ac 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -30,6 +30,9 @@ #include "rc_calc.h" #include "fixed31_32.h" +#include "clk_mgr.h" +#include "resource.h" + #define DC_LOGGER \ dsc->ctx->logger @@ -149,6 +152,11 @@ uint32_t dc_bandwidth_in_kbps_from_timing( } /* Forward Declarations */ +static unsigned int get_min_dsc_slice_count_for_odm( + const struct display_stream_compressor *dsc, + const struct dsc_enc_caps *dsc_enc_caps, + const struct dc_crtc_timing *timing); + static bool decide_dsc_bandwidth_range( const uint32_t min_bpp_x16, const uint32_t max_bpp_x16, @@ -183,6 +191,7 @@ static bool setup_dsc_config( const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, const enum dc_link_encoding_format link_encoding, + int min_slice_count, struct dc_dsc_config *dsc_cfg); static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int
*buff_block_size) @@ -442,7 +451,6 @@ bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, return true; } - /* If DSC is possible, get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range and * timing's pixel clock and uncompressed bandwidth. * If DSC is not possible, leave '*range' untouched. @@ -458,6 +466,7 @@ bool dc_dsc_compute_bandwidth_range( struct dc_dsc_bw_range *range) { bool is_dsc_possible = false; + unsigned int min_dsc_slice_count; struct dsc_enc_caps dsc_enc_caps; struct dsc_enc_caps dsc_common_caps; struct dc_dsc_config config = {0}; @@ -469,12 +478,14 @@ bool dc_dsc_compute_bandwidth_range( get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); + min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing); + is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps, timing->pixel_encoding, &dsc_common_caps); if (is_dsc_possible) is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, - &options, link_encoding, &config); + &options, link_encoding, min_dsc_slice_count, &config); if (is_dsc_possible) is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16, @@ -525,20 +536,153 @@ void dc_dsc_dump_decoder_caps(const struct display_stream_compressor *dsc, DC_LOG_DSC("\tis_dp %d", dsc_sink_caps->is_dp); } + +static void build_dsc_enc_combined_slice_caps( + const struct dsc_enc_caps *single_dsc_enc_caps, + struct dsc_enc_caps *dsc_enc_caps, + unsigned int max_odm_combine_factor) +{ + /* 1-16 slice configurations, single DSC */ + dsc_enc_caps->slice_caps.raw |= single_dsc_enc_caps->slice_caps.raw; + + /* 2x DSC's */ + if (max_odm_combine_factor >= 2) { + /* 1 + 1 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1; + + /* 2 + 2 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2; + + /* 4 + 4 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; + + /* 8 + 8 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_8; + } + + /* 3x DSC's */ + if (max_odm_combine_factor >= 3) { + /* 4 + 4 + 4 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; + } + + /* 4x DSC's */ + if (max_odm_combine_factor >= 4) { + /* 1 + 1 + 1 + 1 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_1; + + /* 2 + 2 + 2 + 2 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_2; + + /* 3 + 3 + 3 + 3 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_3; + + /* 4 + 4 + 4 + 4 */ + dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 |= single_dsc_enc_caps->slice_caps.bits.NUM_SLICES_4; + } +} + +static void build_dsc_enc_caps( + const struct display_stream_compressor *dsc, + struct dsc_enc_caps *dsc_enc_caps) +{ + unsigned int max_dscclk_khz; + unsigned int num_dsc; + unsigned int max_odm_combine_factor; + struct dsc_enc_caps single_dsc_enc_caps; + + struct dc *dc; + + if (!dsc || !dsc->ctx || !dsc->ctx->dc || !dsc->funcs->dsc_get_single_enc_caps) + return; + + dc = dsc->ctx->dc; + + if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_max_clock_khz || !dc->res_pool || dc->debug.disable_dsc) + return; + + /* get max DSCCLK from clk_mgr */ + max_dscclk_khz = dc->clk_mgr->funcs->get_max_clock_khz(dc->clk_mgr, CLK_TYPE_DSCCLK); + 
dsc->funcs->dsc_get_single_enc_caps(&single_dsc_enc_caps, max_dscclk_khz); + + /* global capabilities */ + dsc_enc_caps->dsc_version = single_dsc_enc_caps.dsc_version; + dsc_enc_caps->lb_bit_depth = single_dsc_enc_caps.lb_bit_depth; + dsc_enc_caps->is_block_pred_supported = single_dsc_enc_caps.is_block_pred_supported; + dsc_enc_caps->max_slice_width = single_dsc_enc_caps.max_slice_width; + dsc_enc_caps->bpp_increment_div = single_dsc_enc_caps.bpp_increment_div; + dsc_enc_caps->color_formats.raw = single_dsc_enc_caps.color_formats.raw; + dsc_enc_caps->color_depth.raw = single_dsc_enc_caps.color_depth.raw; + + /* expand per DSC capabilities to global */ + max_odm_combine_factor = dc->caps.max_odm_combine_factor; + num_dsc = dc->res_pool->res_cap->num_dsc; + max_odm_combine_factor = min(max_odm_combine_factor, num_dsc); + dsc_enc_caps->max_total_throughput_mps = + single_dsc_enc_caps.max_total_throughput_mps * + max_odm_combine_factor; + + /* check slice counts possible with ODM combine */ + build_dsc_enc_combined_slice_caps(&single_dsc_enc_caps, dsc_enc_caps, max_odm_combine_factor); +} + +static inline uint32_t dsc_div_by_10_round_up(uint32_t value) +{ + return (value + 9) / 10; +} + +static unsigned int get_min_dsc_slice_count_for_odm( + const struct display_stream_compressor *dsc, + const struct dsc_enc_caps *dsc_enc_caps, + const struct dc_crtc_timing *timing) +{ + unsigned int max_dispclk_khz; + + /* get max pixel rate and combine caps */ + max_dispclk_khz = dsc_enc_caps->max_total_throughput_mps * 1000; + if (dsc && dsc->ctx->dc) { + if (dsc->ctx->dc->clk_mgr && + dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz) { + /* dispclk is available */ + max_dispclk_khz = dsc->ctx->dc->clk_mgr->funcs->get_max_clock_khz(dsc->ctx->dc->clk_mgr, CLK_TYPE_DISPCLK); + } + } + + /* validate parameters */ + if (max_dispclk_khz == 0 || dsc_enc_caps->max_slice_width == 0) + return 1; + + /* consider minimum odm slices required due to + * 1) display pipe throughput (dispclk) + * 2) max image width per slice + */ + return dc_fixpt_ceil(dc_fixpt_max( + dc_fixpt_div_int(dc_fixpt_from_int(dsc_div_by_10_round_up(timing->pix_clk_100hz)), + max_dispclk_khz), // throughput + dc_fixpt_div_int(dc_fixpt_from_int(timing->h_addressable + timing->h_border_left + timing->h_border_right), + dsc_enc_caps->max_slice_width))); // slice width +} + static void get_dsc_enc_caps( const struct display_stream_compressor *dsc, struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) { - // This is a static HW query, so we can use any DSC - memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); - if (dsc) { - if (!dsc->ctx->dc->debug.disable_dsc) - dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); - if (dsc->ctx->dc->debug.native422_support) - dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; + + if (!dsc || !dsc->ctx || !dsc->ctx->dc || dsc->ctx->dc->debug.disable_dsc) + return; + + /* check if reported cap global or only for a single DCN DSC enc */ + if (dsc->funcs->dsc_get_enc_caps) { + dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); + } else { + build_dsc_enc_caps(dsc, dsc_enc_caps); } + + if (dsc->ctx->dc->debug.native422_support) + dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; } /* Returns 'false' if no intersection was found for at least one capability.
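A worked example of the get_min_dsc_slice_count_for_odm() calculation above, with the dc_fixpt_* fixed-point helpers replaced by plain floating point for clarity. The clock and timing numbers are illustrative, not taken from the patch:

```c
#include <math.h>

/* Sketch: minimum DSC slice count from the two constraints the patch
 * evaluates, (1) pipe throughput vs. DISPCLK and (2) max slice width. */
static unsigned int min_dsc_slices_example(void)
{
	unsigned int pix_clk_khz = 1200000;     /* 1.2 GHz pixel clock (assumed) */
	unsigned int max_dispclk_khz = 1950000; /* DISPCLK cap from clk_mgr (assumed) */
	unsigned int pic_width = 7680;          /* h_addressable + borders */
	unsigned int max_slice_width = 5184;    /* cap reported by the encoder */

	/* throughput constraint: 1200000 / 1950000 ~= 0.62 slices */
	double throughput = (double)pix_clk_khz / max_dispclk_khz;

	/* slice-width constraint: 7680 / 5184 ~= 1.48 slices */
	double width = (double)pic_width / max_slice_width;

	/* the larger constraint wins, rounded up: 2 slices minimum */
	return (unsigned int)ceil(fmax(throughput, width));
}
```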
@@ -621,11 +765,6 @@ static bool intersect_dsc_caps( return true; } -static inline uint32_t dsc_div_by_10_round_up(uint32_t value) -{ - return (value + 9) / 10; -} - static uint32_t compute_bpp_x16_from_target_bandwidth( const uint32_t bandwidth_in_kbps, const struct dc_crtc_timing *timing, @@ -910,11 +1049,11 @@ static bool setup_dsc_config( const struct dc_crtc_timing *timing, const struct dc_dsc_config_options *options, const enum dc_link_encoding_format link_encoding, + int min_slices_h, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; int max_slices_h = 0; - int min_slices_h = 0; int num_slices_h = 0; int pic_width; int slice_width; @@ -1018,12 +1157,9 @@ static bool setup_dsc_config( if (!is_dsc_possible) goto done; - min_slices_h = pic_width / dsc_common_caps.max_slice_width; - if (pic_width % dsc_common_caps.max_slice_width) - min_slices_h++; - min_slices_h = fit_num_slices_up(dsc_common_caps.slice_caps, min_slices_h); + /* increase minimum slice count to meet sink throughput limitations */ while (min_slices_h <= max_slices_h) { int pix_clk_per_slice_khz = dsc_div_by_10_round_up(timing->pix_clk_100hz) / min_slices_h; if (pix_clk_per_slice_khz <= sink_per_slice_throughput_mps * 1000) @@ -1032,14 +1168,12 @@ static bool setup_dsc_config( min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h); } - is_dsc_possible = (min_slices_h <= max_slices_h); - - if (pic_width % min_slices_h != 0) - min_slices_h = 0; // DSC TODO: Maybe try increasing the number of slices first? - - if (min_slices_h == 0 && max_slices_h == 0) - is_dsc_possible = false; + /* increase minimum slice count to meet divisibility requirements */ + while (pic_width % min_slices_h != 0 && min_slices_h <= max_slices_h) { + min_slices_h = inc_num_slices(dsc_common_caps.slice_caps, min_slices_h); + } + is_dsc_possible = (min_slices_h <= max_slices_h) && max_slices_h != 0; if (!is_dsc_possible) goto done; @@ -1162,12 +1296,19 @@ bool dc_dsc_compute_config( { bool is_dsc_possible = false; struct dsc_enc_caps dsc_enc_caps; - + unsigned int min_dsc_slice_count; get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); + + min_dsc_slice_count = get_min_dsc_slice_count_for_odm(dsc, &dsc_enc_caps, timing); + is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, target_bandwidth_kbps, - timing, options, link_encoding, dsc_cfg); + timing, + options, + link_encoding, + min_dsc_slice_count, + dsc_cfg); return is_dsc_possible; } diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c index 4222679fd4c9..7bd92ae8b13e 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.c @@ -9,19 +9,14 @@ #include "dsc/dscc_types.h" #include "dsc/rc_calc.h" -#define MAX_THROUGHPUT_PER_DSC_100HZ 20000000 -#define MAX_DSC_UNIT_COMBINE 4 - static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals); /* Object I/F functions */ //static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); //static bool dsc401_get_packed_pps(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps); -static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc); -static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); +static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int 
max_dscclk_khz); static const struct dsc_funcs dcn401_dsc_funcs = { - .dsc_get_enc_caps = dsc401_get_enc_caps, .dsc_read_state = dsc401_read_state, .dsc_validate_stream = dsc401_validate_stream, .dsc_set_config = dsc401_set_config, @@ -30,6 +25,7 @@ static const struct dsc_funcs dcn401_dsc_funcs = { .dsc_disable = dsc401_disable, .dsc_disconnect = dsc401_disconnect, .dsc_wait_disconnect_pending_clear = dsc401_wait_disconnect_pending_clear, + .dsc_get_single_enc_caps = dsc401_get_single_enc_caps, }; /* Macro definitions for REG_SET macros */ @@ -66,22 +62,14 @@ void dsc401_construct(struct dcn401_dsc *dsc, dsc->max_image_width = 5184; } -static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) +static void dsc401_get_single_enc_caps(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz) { - int min_dsc_unit_required = (pixel_clock_100Hz + MAX_THROUGHPUT_PER_DSC_100HZ - 1) / MAX_THROUGHPUT_PER_DSC_100HZ; - dsc_enc_caps->dsc_version = 0x21; /* v1.2 - DP spec defined it in reverse order and we kept it */ - /* 1 slice is only supported with 1 DSC unit */ - dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = min_dsc_unit_required == 1 ? 1 : 0; - /* 2 slice is only supported with 1 or 2 DSC units */ - dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = (min_dsc_unit_required == 1 || min_dsc_unit_required == 2) ? 1 : 0; - /* 3 slice is only supported with 1 DSC unit */ - dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = min_dsc_unit_required == 1 ? 1 : 0; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_1 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_2 = 1; + dsc_enc_caps->slice_caps.bits.NUM_SLICES_3 = 1; dsc_enc_caps->slice_caps.bits.NUM_SLICES_4 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_8 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_12 = 1; - dsc_enc_caps->slice_caps.bits.NUM_SLICES_16 = 1; dsc_enc_caps->lb_bit_depth = 13; dsc_enc_caps->is_block_pred_supported = true; @@ -95,7 +83,7 @@ static void dsc401_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clo dsc_enc_caps->color_depth.bits.COLOR_DEPTH_8_BPC = 1; dsc_enc_caps->color_depth.bits.COLOR_DEPTH_10_BPC = 1; dsc_enc_caps->color_depth.bits.COLOR_DEPTH_12_BPC = 1; - dsc_enc_caps->max_total_throughput_mps = MAX_THROUGHPUT_PER_DSC_100HZ * MAX_DSC_UNIT_COMBINE; + dsc_enc_caps->max_total_throughput_mps = max_dscclk_khz * 3 / 1000; dsc_enc_caps->max_slice_width = 5184; /* (including 64 overlap pixels for eDP MSO mode) */ dsc_enc_caps->bpp_increment_div = 16; /* 1/16th of a bit */ @@ -191,7 +179,7 @@ void dsc401_disable(struct display_stream_compressor *dsc) DSC_CLOCK_EN, 0); } -static void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc) +void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc) { struct dcn401_dsc *dsc401 = TO_DCN401_DSC(dsc); diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h index e3ca70058e64..7acd57eb4f42 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dcn401/dcn401_dsc.h @@ -341,5 +341,6 @@ void dsc401_set_config(struct display_stream_compressor *dsc, const struct dsc_c void dsc401_enable(struct display_stream_compressor *dsc, int opp_pipe); void dsc401_disable(struct display_stream_compressor *dsc); void dsc401_disconnect(struct display_stream_compressor *dsc); +void dsc401_wait_disconnect_pending_clear(struct display_stream_compressor *dsc); #endif diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h
b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h index 1ebce5426a58..b0bd1f9425b5 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dsc/dsc.h @@ -108,6 +108,7 @@ struct dsc_funcs { void (*dsc_disable)(struct display_stream_compressor *dsc); void (*dsc_disconnect)(struct display_stream_compressor *dsc); void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc); + void (*dsc_get_single_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, unsigned int max_dscclk_khz); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h index c7765e6f09e6..f8f991785d4f 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn10/dcn10_hubp.h @@ -666,10 +666,29 @@ struct dcn_mi_mask { DCN_HUBP_REG_FIELD_LIST(uint32_t); }; +struct dcn_fl_regs_st { + uint32_t lut_enable; + uint32_t lut_done; + uint32_t lut_addr_mode; + uint32_t lut_width; + uint32_t lut_tmz; + uint32_t lut_crossbar_sel_r; + uint32_t lut_crossbar_sel_g; + uint32_t lut_crossbar_sel_b; + uint32_t lut_addr_hi; + uint32_t lut_addr_lo; + uint32_t refcyc_3dlut_group; + uint32_t lut_fl_bias; + uint32_t lut_fl_scale; + uint32_t lut_fl_mode; + uint32_t lut_fl_format; +}; + struct dcn_hubp_state { struct _vcs_dpi_display_dlg_regs_st dlg_attr; struct _vcs_dpi_display_ttu_regs_st ttu_attr; struct _vcs_dpi_display_rq_regs_st rq_regs; + struct dcn_fl_regs_st fl_regs; uint32_t pixel_format; uint32_t inuse_addr_hi; uint32_t inuse_addr_lo; diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c index baed31611477..705b98b1b6cc 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c @@ -86,11 +86,11 @@ void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width); } -void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled) +void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits) { struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp); - REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_enabled ? 
1 : 0); + REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_bits); } void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp, diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h index 6e1d4c90ddd4..608e6153fa68 100644 --- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h @@ -333,7 +333,7 @@ void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r); -void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, bool protection_enabled); +void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits); void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c index e8730cc40edb..4ea13d0bf815 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c @@ -1186,8 +1186,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) if (dccg) { dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst); dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst); - if (dccg && dccg->funcs->set_dtbclk_dto) - dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + if (!(dc->ctx->dce_version >= DCN_VERSION_3_5)) { + if (dccg && dccg->funcs->set_dtbclk_dto) + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } } } else if (dccg && dccg->funcs->disable_symclk_se) { dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst, @@ -1225,7 +1227,7 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) return; if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) { - if (!link->skip_implict_edp_power_control) + if (!link->skip_implict_edp_power_control && hws) hws->funcs.edp_backlight_control(link, false); link->dc->hwss.set_abm_immediate_disable(pipe_ctx); } @@ -1379,7 +1381,7 @@ static void populate_audio_dp_link_info( } } -static void build_audio_output( +void build_audio_output( struct dc_state *state, const struct pipe_ctx *pipe_ctx, struct audio_output *audio_output) @@ -1684,6 +1686,19 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw( if (dc_is_dp_signal(pipe_ctx->stream->signal)) dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG); + /* Temporary workaround to perform DSC programming ahead of stream enablement + * for smartmux/SPRS + * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest) + || link->is_dds || link->skip_implict_edp_power_control)) && + (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal))) + dc->link_srv->set_dsc_enable(pipe_ctx, true); + } + if (!stream->dpms_off) dc->link_srv->set_dpms_on(context, pipe_ctx); @@ -1925,6 +1940,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) can_apply_edp_fast_boot = dc_validate_boot_timing(dc, edp_stream->sink, &edp_stream->timing); + + // For Mux-platform, the default value is false. + // Disable fast boot during mux switching. + // The flag would be clean after switching done. 
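The smartmux/SPRS guard added to dce110_apply_single_controller_ctx_to_hw() above is dense; it reads more easily factored out. A hypothetical restatement of the condition (this helper does not exist in DC; the field names are exactly those used in the hunk):

/* Sketch: when DSC must be programmed ahead of stream enablement. */
static bool needs_early_dsc_programming(const struct pipe_ctx *pipe_ctx,
                                        const struct dc_link *link)
{
        if (!pipe_ctx->stream->timing.flags.DSC ||
            pipe_ctx->stream->signal != SIGNAL_TYPE_EDP)
                return false;
        /* eDP mid-switch on a smartmux platform, a DDS panel, or a link
         * that skips implicit eDP power control */
        return (link->dc->config.smart_mux_version &&
                link->dc->is_switch_in_progress_dest) ||
               link->is_dds || link->skip_implict_edp_power_control;
}

The same guard reappears further down in dcn31_reset_back_end_for_pipe, there calling set_dsc_enable(pipe_ctx, false) on the teardown path.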
+ if (dc->is_switch_in_progress_dest && edp_link->is_dds) + can_apply_edp_fast_boot = false; + edp_stream->apply_edp_fast_boot_optimization = can_apply_edp_fast_boot; if (can_apply_edp_fast_boot) { DC_LOG_EVENT_LINK_TRAINING("eDP fast boot Enable\n"); @@ -1968,6 +1990,10 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) if (edp_with_sink_num) edp_link_with_sink = edp_links_with_sink[0]; + // During a mux switch, powering down the HW blocks and then enabling + // the link via a DPCD SET_POWER write causes a brief flash + keep_edp_vdd_on |= dc->is_switch_in_progress_dest; + if (!can_apply_edp_fast_boot && !can_apply_seamless_boot) { if (edp_link_with_sink && !keep_edp_vdd_on) { /*turn off backlight before DP_blank and encoder powered down*/ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h index 06789ac3a224..7cd8c1576988 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.h @@ -110,5 +110,9 @@ void dce110_enable_dp_link_output( enum signal_type signal, enum clock_source_id clock_source, const struct dc_link_settings *link_settings); +void build_audio_output( + struct dc_state *state, + const struct pipe_ctx *pipe_ctx, + struct audio_output *audio_output); #endif /* __DC_HWSS_DCE110_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c index f9ee55998b6b..39910f73ecd0 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c @@ -327,6 +327,35 @@ static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx) } } + DTN_INFO("\n=======HUBP FL======\n"); + DTN_INFO( + "HUBP FL: Enabled Done adr_mode width tmz xbar_sel_R xbar_sel_G xbar_sel_B adr_hi adr_low REFCYC Bias Scale Mode Format\n"); + for (i = 0; i < pool->pipe_count; i++) { + struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state); + struct dcn_fl_regs_st *fl_regs = &s->fl_regs; + + if (!s->blank_en) { + DTN_INFO("[%2d]: %5xh %6xh %5d %6d %8xh %2xh %6xh %6d %8d %8d %7d %8xh %5x %5x %5x", + pool->hubps[i]->inst, + fl_regs->lut_enable, + fl_regs->lut_done, + fl_regs->lut_addr_mode, + fl_regs->lut_width, + fl_regs->lut_tmz, + fl_regs->lut_crossbar_sel_r, + fl_regs->lut_crossbar_sel_g, + fl_regs->lut_crossbar_sel_b, + fl_regs->lut_addr_hi, + fl_regs->lut_addr_lo, + fl_regs->refcyc_3dlut_group, + fl_regs->lut_fl_bias, + fl_regs->lut_fl_scale, + fl_regs->lut_fl_mode, + fl_regs->lut_fl_format); + DTN_INFO("\n"); + } + } + DTN_INFO("\n=========RQ========\n"); DTN_INFO("HUBP: drq_exp_m prq_exp_m mrq_exp_m crq_exp_m plane1_ba L:chunk_s min_chu_s meta_ch_s" " min_m_c_s dpte_gr_s mpte_gr_s swath_hei pte_row_h C:chunk_s min_chu_s meta_ch_s" @@ -511,6 +540,36 @@ static void dcn10_log_color_state(struct dc *dc, dc->caps.color.mpc.num_3dluts, dc->caps.color.mpc.ogam_ram, dc->caps.color.mpc.ocsc); + DTN_INFO("===== MPC RMCM 3DLUT =====\n"); + DTN_INFO("MPCC: SIZE MODE MODE_CUR RD_SEL 30BIT_EN WR_EN_MASK RAM_SEL OUT_NORM_FACTOR FL_SEL OUT_OFFSET OUT_SCALE FL_DONE SOFT_UNDERFLOW HARD_UNDERFLOW MEM_PWR_ST FORCE DIS MODE\n"); + for (i = 0; i < pool->mpcc_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + if (s.opp_id != 0xf) + DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x %4x %4x %4x\n", + i, s.rmcm_regs.rmcm_3dlut_size, 
s.rmcm_regs.rmcm_3dlut_mode, s.rmcm_regs.rmcm_3dlut_mode_cur, + s.rmcm_regs.rmcm_3dlut_read_sel, s.rmcm_regs.rmcm_3dlut_30bit_en, s.rmcm_regs.rmcm_3dlut_wr_en_mask, + s.rmcm_regs.rmcm_3dlut_ram_sel, s.rmcm_regs.rmcm_3dlut_out_norm_factor, s.rmcm_regs.rmcm_3dlut_fl_sel, + s.rmcm_regs.rmcm_3dlut_out_offset_r, s.rmcm_regs.rmcm_3dlut_out_scale_r, s.rmcm_regs.rmcm_3dlut_fl_done, + s.rmcm_regs.rmcm_3dlut_fl_soft_underflow, s.rmcm_regs.rmcm_3dlut_fl_hard_underflow, s.rmcm_regs.rmcm_3dlut_mem_pwr_state, + s.rmcm_regs.rmcm_3dlut_mem_pwr_force, s.rmcm_regs.rmcm_3dlut_mem_pwr_dis, s.rmcm_regs.rmcm_3dlut_mem_pwr_mode); + } + DTN_INFO("\n"); + DTN_INFO("===== MPC RMCM Shaper =====\n"); + DTN_INFO("MPCC: CNTL LUT_MODE MODE_CUR WR_EN_MASK WR_SEL OFFSET SCALE START_B START_SEG_B END_B END_BASE_B MEM_PWR_ST FORCE DIS MODE\n"); + for (i = 0; i < pool->mpcc_count; i++) { + struct mpcc_state s = {0}; + + pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s); + if (s.opp_id != 0xf) + DTN_INFO("[%2d]: %4xh %4xh %6xh %4x %4x %4x %4x %4x %4xh %4xh %6xh %4x %4x %4x %4x\n", + i, s.rmcm_regs.rmcm_cntl, s.rmcm_regs.rmcm_shaper_lut_mode, s.rmcm_regs.rmcm_shaper_mode_cur, + s.rmcm_regs.rmcm_shaper_lut_write_en_mask, s.rmcm_regs.rmcm_shaper_lut_write_sel, s.rmcm_regs.rmcm_shaper_offset_b, + s.rmcm_regs.rmcm_shaper_scale_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_start_seg_b, + s.rmcm_regs.rmcm_shaper_rama_exp_region_end_b, s.rmcm_regs.rmcm_shaper_rama_exp_region_end_base_b, s.rmcm_regs.rmcm_shaper_mem_pwr_state, + s.rmcm_regs.rmcm_shaper_mem_pwr_force, s.rmcm_regs.rmcm_shaper_mem_pwr_dis, s.rmcm_regs.rmcm_shaper_mem_pwr_mode); + } } void dcn10_log_hw_state(struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c index c277df12c817..3207addbd4eb 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c @@ -283,14 +283,13 @@ void dcn20_setup_gsl_group_as_lock( } /* at this point we want to program whether it's to enable or disable */ - if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL && - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) { + if (pipe_ctx->stream_res.tg->funcs->set_gsl != NULL) { pipe_ctx->stream_res.tg->funcs->set_gsl( pipe_ctx->stream_res.tg, &gsl); - - pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( - pipe_ctx->stream_res.tg, group_idx, enable ? 4 : 0); + if (pipe_ctx->stream_res.tg->funcs->set_gsl_source_select != NULL) + pipe_ctx->stream_res.tg->funcs->set_gsl_source_select( + pipe_ctx->stream_res.tg, group_idx, enable ? 
4 : 0); } else BREAK_TO_DEBUGGER(); } @@ -956,7 +955,7 @@ enum dc_status dcn20_enable_stream_timing( return DC_ERROR_UNEXPECTED; } - hws->funcs.wait_for_blank_complete(pipe_ctx->stream_res.opp); + fsleep(stream->timing.v_total * (stream->timing.h_total * 10000u / stream->timing.pix_clk_100hz)); params.vertical_total_min = stream->adjust.v_total_min; params.vertical_total_max = stream->adjust.v_total_max; @@ -1971,14 +1970,6 @@ static void dcn20_program_pipe( pipe_ctx->plane_state->update_flags.bits.hdr_mult)) hws->funcs.set_hdr_multiplier(pipe_ctx); - if (hws->funcs.populate_mcm_luts) { - if (pipe_ctx->plane_state) { - hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts, - pipe_ctx->plane_state->lut_bank_a); - pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a; - } - } - if (pipe_ctx->plane_state && (pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change || @@ -2492,7 +2483,7 @@ bool dcn20_update_bandwidth( struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ - if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) return false; /* apply updated bandwidth parameters */ @@ -2816,6 +2807,8 @@ void dcn20_reset_back_end_for_pipe( { struct dc_link *link = pipe_ctx->stream->link; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); + struct dccg *dccg = dc->res_pool->dccg; + struct dtbclk_dto_params dto_params = {0}; DC_LOGGER_INIT(dc->ctx->logger); if (pipe_ctx->stream_res.stream_enc == NULL) { @@ -2876,6 +2869,13 @@ void dcn20_reset_back_end_for_pipe( &pipe_ctx->link_res, pipe_ctx->stream->signal); link->phy_state.symclk_state = SYMCLK_OFF_TX_OFF; } + if (dc->link_srv->dp_is_128b_132b_signal(pipe_ctx) && dccg + && dc->ctx->dce_version >= DCN_VERSION_3_5) { + dto_params.otg_inst = pipe_ctx->stream_res.tg->inst; + dto_params.timing = &pipe_ctx->stream->timing; + if (dccg && dccg->funcs->set_dtbclk_dto) + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); + } } /* diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c index 5ba3999991b0..8ba934b83957 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c @@ -562,6 +562,19 @@ static void dcn31_reset_back_end_for_pipe( else if (pipe_ctx->stream_res.audio) dc->hwss.disable_audio_stream(pipe_ctx); + /* Temporary workaround to perform DSC programming ahead of pipe reset + * for smartmux/SPRS + * TODO: Remove SmartMux/SPRS checks once movement of DSC programming is generalized + */ + if (pipe_ctx->stream->timing.flags.DSC) { + if ((pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + ((link->dc->config.smart_mux_version && link->dc->is_switch_in_progress_dest) + || link->is_dds || link->skip_implict_edp_power_control)) && + (dc_is_dp_signal(pipe_ctx->stream->signal) || + dc_is_virtual_signal(pipe_ctx->stream->signal))) + dc->link_srv->set_dsc_enable(pipe_ctx, false); + } + /* free acquired resources */ if (pipe_ctx->stream_res.audio) { /*disable az_endpoint*/ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c index a0b05b9ef660..416b1dca3dac 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c @@ 
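A note on the fsleep() that replaces wait_for_blank_complete() in dcn20_enable_stream_timing above: the expression computes one frame time in microseconds from the quoted timing fields. h_total * 10000 / pix_clk_100hz is the line time in microseconds (pix_clk_100hz is the pixel clock in 100 Hz units), and multiplying by v_total gives the frame. For example, with 1080p60 CEA timing (h_total = 2200, v_total = 1125, pix_clk_100hz = 1485000): 2200 * 10000 / 1485000 truncates to 14 us per line, and 1125 * 14 = 15750 us, on the order of the 16.7 ms frame period, so the sleep covers roughly one frame of blanking.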
-1063,15 +1063,17 @@ void dcn32_update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; ASSERT(odm_dsc); + if (!odm_dsc) + continue; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index c814d957305a..a267f574b619 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -1047,6 +1047,15 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (dc->caps.sequential_ono) { update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false; update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false; + + /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */ + if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp && + pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) { + for (j = 0; j < dc->res_pool->pipe_count; ++j) { + update_state->pg_pipe_res_update[PG_HUBP][j] = false; + update_state->pg_pipe_res_update[PG_DPP][j] = false; + } + } } } @@ -1193,6 +1202,25 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true; if (dc->caps.sequential_ono) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + + if (new_pipe->stream_res.dsc && !new_pipe->top_pipe && + update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) { + update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true; + update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true; + + /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */ + if (new_pipe->plane_res.hubp && + new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) { + for (j = 0; j < dc->res_pool->pipe_count; ++j) { + update_state->pg_pipe_res_update[PG_HUBP][j] = true; + update_state->pg_pipe_res_update[PG_DPP][j] = true; + } + } + } + } + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c index c4177a9a662f..cc9f40d97af2 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c @@ -2,6 +2,8 @@ // // Copyright 2024 Advanced Micro Devices, Inc. 
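The two dcn35 sequential-ONO hunks above implement a symmetric rule: whenever a pipe's DSC instance differs from its HUBP instance (the DSC is borrowed from another ONO region), every HUBP/DPP power-gate decision is overridden, since gating any one instance could take down the shared DSC. A compact restatement as a sketch; the helper is illustrative, with types as used by the dcn35 functions above, and `value` is false on the gating path (calc_blocks_to_gate) and true on the ungating path:

/* Sketch of the dcn35 invariant: DSC inst != HUBP inst implies all
 * HUBP/DPP power-gate decisions are overridden for this transition. */
static void apply_cross_ono_dsc_rule(struct dc *dc, struct pipe_ctx *pipe_ctx,
                                     struct pg_block_update *update_state,
                                     bool value)
{
        int j;

        if (pipe_ctx->top_pipe || !pipe_ctx->plane_res.hubp ||
            !pipe_ctx->stream_res.dsc ||
            pipe_ctx->plane_res.hubp->inst == pipe_ctx->stream_res.dsc->inst)
                return;

        for (j = 0; j < dc->res_pool->pipe_count; ++j) {
                update_state->pg_pipe_res_update[PG_HUBP][j] = value;
                update_state->pg_pipe_res_update[PG_DPP][j] = value;
        }
}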
+ +#include "os_types.h" #include "dm_services.h" #include "basics/dc_common.h" #include "dm_helpers.h" @@ -49,7 +51,7 @@ #define FN(reg_name, field_name) \ hws->shifts->field_name, hws->masks->field_name -static void dcn401_initialize_min_clocks(struct dc *dc) +void dcn401_initialize_min_clocks(struct dc *dc) { struct dc_clocks *clocks = &dc->current_state->bw_ctx.bw.dcn.clk; @@ -143,13 +145,8 @@ void dcn401_init_hw(struct dc *dc) dc->clk_mgr->funcs->init_clocks(dc->clk_mgr); // mark dcmode limits present if any clock has distinct AC and DC values from SMU - dc->caps.dcmode_power_limits_present = - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dcfclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dispclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dispclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.dtbclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_fclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.fclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_memclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.memclk_mhz) || - (dc->clk_mgr->bw_params->clk_table.num_entries_per_clk.num_socclk_levels && dc->clk_mgr->bw_params->dc_mode_limit.socclk_mhz); + dc->caps.dcmode_power_limits_present = dc->clk_mgr->funcs->is_dc_mode_present && + dc->clk_mgr->funcs->is_dc_mode_present(dc->clk_mgr); } // Initialize the dccg @@ -396,249 +393,6 @@ static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ct } } -static void dcn401_set_mcm_location_post_blend(struct dc *dc, struct pipe_ctx *pipe_ctx, bool bPostBlend) -{ - struct mpc *mpc = dc->res_pool->mpc; - int mpcc_id = pipe_ctx->plane_res.hubp->inst; - - if (!pipe_ctx->plane_state) - return; - - mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id); - pipe_ctx->plane_state->mcm_location = (bPostBlend) ? 
- MPCC_MOVABLE_CM_LOCATION_AFTER : - MPCC_MOVABLE_CM_LOCATION_BEFORE; -} - -static void dc_get_lut_mode( - enum dc_cm2_gpu_mem_layout layout, - enum hubp_3dlut_fl_mode *mode, - enum hubp_3dlut_fl_addressing_mode *addr_mode) -{ - switch (layout) { - case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB: - *mode = hubp_3dlut_fl_mode_native_1; - *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; - break; - case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR: - *mode = hubp_3dlut_fl_mode_native_2; - *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; - break; - case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR: - *mode = hubp_3dlut_fl_mode_transform; - *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear; - break; - default: - *mode = hubp_3dlut_fl_mode_disable; - *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear; - break; - } -} - -static void dc_get_lut_format( - enum dc_cm2_gpu_mem_format dc_format, - enum hubp_3dlut_fl_format *format) -{ - switch (dc_format) { - case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB: - *format = hubp_3dlut_fl_format_unorm_12msb_bitslice; - break; - case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB: - *format = hubp_3dlut_fl_format_unorm_12lsb_bitslice; - break; - case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10: - *format = hubp_3dlut_fl_format_float_fp1_5_10; - break; - } -} - -static void dc_get_lut_xbar( - enum dc_cm2_gpu_mem_pixel_component_order order, - enum hubp_3dlut_fl_crossbar_bit_slice *cr_r, - enum hubp_3dlut_fl_crossbar_bit_slice *y_g, - enum hubp_3dlut_fl_crossbar_bit_slice *cb_b) -{ - switch (order) { - case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA: - *cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47; - *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31; - *cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15; - break; - case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA: - *cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15; - *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31; - *cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47; - break; - } -} - -static void dc_get_lut_width( - enum dc_cm2_gpu_mem_size size, - enum hubp_3dlut_fl_width *width) -{ - switch (size) { - case DC_CM2_GPU_MEM_SIZE_333333: - *width = hubp_3dlut_fl_width_33; - break; - case DC_CM2_GPU_MEM_SIZE_171717: - *width = hubp_3dlut_fl_width_17; - break; - case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: - *width = hubp_3dlut_fl_width_transformed; - break; - } -} -static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc) -{ - if (mpc->funcs->rmcm.update_3dlut_fast_load_select && - mpc->funcs->rmcm.program_lut_read_write_control && - hubp->funcs->hubp_program_3dlut_fl_addr && - mpc->funcs->rmcm.program_bit_depth && - hubp->funcs->hubp_program_3dlut_fl_mode && - hubp->funcs->hubp_program_3dlut_fl_addressing_mode && - hubp->funcs->hubp_program_3dlut_fl_format && - hubp->funcs->hubp_update_3dlut_fl_bias_scale && - mpc->funcs->rmcm.program_bias_scale && - hubp->funcs->hubp_program_3dlut_fl_crossbar && - hubp->funcs->hubp_program_3dlut_fl_width && - mpc->funcs->rmcm.update_3dlut_fast_load_select && - mpc->funcs->rmcm.populate_lut && - mpc->funcs->rmcm.program_lut_mode && - hubp->funcs->hubp_enable_3dlut_fl && - mpc->funcs->rmcm.enable_3dlut_fl) - return true; - - return false; -} - -bool dcn401_program_rmcm_luts( - struct hubp *hubp, - struct pipe_ctx *pipe_ctx, - enum dc_cm2_transfer_func_source lut3d_src, - struct dc_cm2_func_luts *mcm_luts, - struct mpc *mpc, - bool lut_bank_a, - int mpcc_id) -{ - struct dpp *dpp_base = pipe_ctx->plane_res.dpp; - union mcm_lut_params m_lut_params; - enum MCM_LUT_XABLE 
shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable; - enum hubp_3dlut_fl_mode mode; - enum hubp_3dlut_fl_addressing_mode addr_mode; - enum hubp_3dlut_fl_format format = 0; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0; - enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0; - enum hubp_3dlut_fl_width width = 0; - struct dc *dc = hubp->ctx->dc; - - bool bypass_rmcm_3dlut = false; - bool bypass_rmcm_shaper = false; - - dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); - - /* 3DLUT */ - switch (lut3d_src) { - case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM: - memset(&m_lut_params, 0, sizeof(m_lut_params)); - // Don't know what to do in this case. - //case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM: - break; - case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM: - dc_get_lut_width(mcm_luts->lut3d_data.gpu_mem_params.size, &width); - if (!dc_is_rmcm_3dlut_supported(hubp, mpc) || - !mpc->funcs->rmcm.is_config_supported(width)) - return false; - - //0. disable fl on mpc - mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xF); - - //1. power down the block - mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, false); - - //2. program RMCM - //2a. 3dlut reg programming - mpc->funcs->rmcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, - (!bypass_rmcm_3dlut) && lut3d_xable != MCM_LUT_DISABLE, mpcc_id); - - hubp->funcs->hubp_program_3dlut_fl_addr(hubp, - mcm_luts->lut3d_data.gpu_mem_params.addr); - - mpc->funcs->rmcm.program_bit_depth(mpc, - mcm_luts->lut3d_data.gpu_mem_params.bit_depth, mpcc_id); - - // setting native or transformed mode, - dc_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout, &mode, &addr_mode); - - //these program the mcm 3dlut - hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode); - - hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode); - - //seems to be only for the MCM - dc_get_lut_format(mcm_luts->lut3d_data.gpu_mem_params.format_params.format, &format); - hubp->funcs->hubp_program_3dlut_fl_format(hubp, format); - - mpc->funcs->rmcm.program_bias_scale(mpc, - mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias, - mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale, - mpcc_id); - hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp, - mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias, - mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale); - - dc_get_lut_xbar( - mcm_luts->lut3d_data.gpu_mem_params.component_order, - &crossbar_bit_slice_cr_r, - &crossbar_bit_slice_y_g, - &crossbar_bit_slice_cb_b); - - hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, - crossbar_bit_slice_cr_r, - crossbar_bit_slice_y_g, - crossbar_bit_slice_cb_b); - - mpc->funcs->rmcm.program_3dlut_size(mpc, width, mpcc_id); - - mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst); - - //2b. 
shaper reg programming - memset(&m_lut_params, 0, sizeof(m_lut_params)); - - if (mcm_luts->shaper->type == TF_TYPE_HWPWL) { - m_lut_params.pwl = &mcm_luts->shaper->pwl; - } else if (mcm_luts->shaper->type == TF_TYPE_DISTRIBUTED_POINTS) { - ASSERT(false); - cm_helper_translate_curve_to_hw_format( - dc->ctx, - mcm_luts->shaper, - &dpp_base->regamma_params, true); - m_lut_params.pwl = &dpp_base->regamma_params; - } - if (m_lut_params.pwl) { - mpc->funcs->rmcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id); - mpc->funcs->rmcm.program_lut_mode(mpc, !bypass_rmcm_shaper, lut_bank_a, mpcc_id); - } else { - //RMCM 3dlut won't work without its shaper - return false; - } - - //3. Select the hubp connected to this RMCM - hubp->funcs->hubp_enable_3dlut_fl(hubp, true); - mpc->funcs->rmcm.enable_3dlut_fl(mpc, true, mpcc_id); - - //4. power on the block - if (m_lut_params.pwl) - mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true); - - break; - default: - return false; - } - - return true; -} - void dcn401_populate_mcm_luts(struct dc *dc, struct pipe_ctx *pipe_ctx, struct dc_cm2_func_luts mcm_luts, @@ -664,25 +418,6 @@ void dcn401_populate_mcm_luts(struct dc *dc, dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable); - //MCM - setting its location (Before/After) blender - //set to post blend (true) - dcn401_set_mcm_location_post_blend( - dc, - pipe_ctx, - mcm_luts.lut3d_data.mpc_mcm_post_blend); - - //RMCM - 3dLUT+Shaper - if (mcm_luts.lut3d_data.rmcm_3dlut_enable) { - dcn401_program_rmcm_luts( - hubp, - pipe_ctx, - lut3d_src, - &mcm_luts, - mpc, - lut_bank_a, - mpcc_id); - } - /* 1D LUT */ if (mcm_luts.lut1d_func) { memset(&m_lut_params, 0, sizeof(m_lut_params)); @@ -740,15 +475,15 @@ void dcn401_populate_mcm_luts(struct dc *dc, break; case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM: switch (mcm_luts.lut3d_data.gpu_mem_params.size) { - case DC_CM2_GPU_MEM_SIZE_333333: - width = hubp_3dlut_fl_width_33; - break; case DC_CM2_GPU_MEM_SIZE_171717: width = hubp_3dlut_fl_width_17; break; case DC_CM2_GPU_MEM_SIZE_TRANSFORMED: width = hubp_3dlut_fl_width_transformed; break; + default: + //TODO: handle default case + break; } //check for support @@ -817,11 +552,14 @@ void dcn401_populate_mcm_luts(struct dc *dc, //navi 4x has a bug and r and blue are swapped and need to be worked around here in //TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x - dc_get_lut_xbar( - mcm_luts.lut3d_data.gpu_mem_params.component_order, - &crossbar_bit_slice_cr_r, - &crossbar_bit_slice_y_g, - &crossbar_bit_slice_cb_b); + switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) { + case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA: + default: + crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15; + crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31; + crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47; + break; + } if (hubp->funcs->hubp_program_3dlut_fl_crossbar) hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, @@ -2269,14 +2007,6 @@ void dcn401_program_pipe( pipe_ctx->plane_state->update_flags.bits.hdr_mult)) hws->funcs.set_hdr_multiplier(pipe_ctx); - if (hws->funcs.populate_mcm_luts) { - if (pipe_ctx->plane_state) { - hws->funcs.populate_mcm_luts(dc, pipe_ctx, pipe_ctx->plane_state->mcm_luts, - pipe_ctx->plane_state->lut_bank_a); - pipe_ctx->plane_state->lut_bank_a = !pipe_ctx->plane_state->lut_bank_a; - } - } - if (pipe_ctx->plane_state && 
(pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || pipe_ctx->plane_state->update_flags.bits.gamma_change || @@ -2651,7 +2381,7 @@ bool dcn401_update_bandwidth( struct dce_hwseq *hws = dc->hwseq; /* recalculate DML parameters */ - if (dc->res_pool->funcs->validate_bandwidth(dc, context, false) != DC_OK) + if (dc->res_pool->funcs->validate_bandwidth(dc, context, DC_VALIDATE_MODE_AND_PROGRAMMING) != DC_OK) return false; /* apply updated bandwidth parameters */ @@ -2902,10 +2632,12 @@ void dcn401_plane_atomic_power_down(struct dc *dc, DC_LOGGER_INIT(dc->ctx->logger); - REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); - if (org_ip_request_cntl == 0) - REG_SET(DC_IP_REQUEST_CNTL, 0, - IP_REQUEST_EN, 1); + if (REG(DC_IP_REQUEST_CNTL)) { + REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl); + if (org_ip_request_cntl == 0) + REG_SET(DC_IP_REQUEST_CNTL, 0, + IP_REQUEST_EN, 1); + } if (hws->funcs.dpp_pg_control) hws->funcs.dpp_pg_control(hws, dpp->inst, false); @@ -2916,7 +2648,7 @@ void dcn401_plane_atomic_power_down(struct dc *dc, hubp->funcs->hubp_reset(hubp); dpp->funcs->dpp_reset(dpp); - if (org_ip_request_cntl == 0) + if (org_ip_request_cntl == 0 && REG(DC_IP_REQUEST_CNTL)) REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h index ce65b4f6c672..2621b7725267 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.h @@ -109,12 +109,5 @@ void dcn401_detect_pipe_changes( void dcn401_plane_atomic_power_down(struct dc *dc, struct dpp *dpp, struct hubp *hubp); -bool dcn401_program_rmcm_luts( - struct hubp *hubp, - struct pipe_ctx *pipe_ctx, - enum dc_cm2_transfer_func_source lut3d_src, - struct dc_cm2_func_luts *mcm_luts, - struct mpc *mpc, - bool lut_bank_a, - int mpcc_id); +void dcn401_initialize_min_clocks(struct dc *dc); #endif /* __DC_HWSS_DCN401_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h index 3a0795045bc6..9df8030e37f7 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/hwss/hw_sequencer.h @@ -502,6 +502,9 @@ void get_hdr_visual_confirm_color( void get_mpctree_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); +void get_smartmux_visual_confirm_color( + struct dc *dc, + struct tg_color *color); void get_vabc_visual_confirm_color( struct pipe_ctx *pipe_ctx, struct tg_color *color); diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_status.h b/drivers/gpu/drm/amd/display/dc/inc/core_status.h index f3696143590c..82085d9c3f40 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_status.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_status.h @@ -59,6 +59,7 @@ enum dc_status { DC_FAIL_DP_PAYLOAD_ALLOCATION = 27, DC_FAIL_DP_LINK_BANDWIDTH = 28, DC_FAIL_HW_CURSOR_SUPPORT = 29, + DC_FAIL_DP_TUNNEL_BW_VALIDATE = 30, DC_ERROR_UNEXPECTED = -1 }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 0cf349cafb3e..f0d7185153b2 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -67,6 +67,8 @@ struct resource_context; struct clk_bw_params; struct dc_mcache_params; +#define MAX_RMCM_INST 2 + struct resource_funcs { enum engine_id (*get_preferred_eng_id_dpia)(unsigned int 
dpia_index); void (*destroy)(struct resource_pool **pool); @@ -82,7 +84,7 @@ struct resource_funcs { enum dc_status (*validate_bandwidth)( struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void (*calculate_wm_and_dlg)( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -107,7 +109,7 @@ struct resource_funcs { struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); /* * Algorithm for assigning available link encoders to links. @@ -223,6 +225,11 @@ struct resource_funcs { const struct dc_stream_state *stream); bool (*program_mcache_pipe_config)(struct dc_state *context, const struct dc_mcache_params *mcache_params); + enum dc_status (*update_dc_state_for_encoder_switch)(struct dc_link *link, + struct dc_link_settings *link_setting, + uint8_t pipe_count, + struct pipe_ctx *pipes, + struct audio_output *audio_output); }; struct audio_support{ @@ -281,6 +288,7 @@ struct resource_pool { struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS]; struct dc_3dlut *mpc_lut[MAX_PIPES]; struct dc_transfer_func *mpc_shaper[MAX_PIPES]; + struct dc_rmcm_3dlut rmcm_3dlut[MAX_RMCM_INST]; struct { unsigned int xtalin_clock_inKhz; @@ -556,7 +564,10 @@ struct dcn_bw_output { struct dml2_mcache_surface_allocation mcache_allocations[DML2_MAX_PLANES]; struct dmub_cmd_fams2_global_config fams2_global_config; union dmub_cmd_fams2_config fams2_stream_base_params[DML2_MAX_PLANES]; - union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES]; + union { + union dmub_cmd_fams2_config fams2_stream_sub_params[DML2_MAX_PLANES]; + union dmub_fams2_stream_static_sub_state_v2 fams2_stream_sub_params_v2[DML2_MAX_PLANES]; + }; struct dml2_display_arb_regs arb_regs; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h index d19a595c2be4..134091d5842d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dcn_calcs.h @@ -622,7 +622,7 @@ extern const struct dcn_ip_params dcn10_ip_defaults; bool dcn_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn_get_soc_clks( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index c14d64687a3d..2c9a4a12bd8a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -100,6 +100,17 @@ struct dcn301_clk_internal { #define MAX_NUM_DPM_LVL 8 #define WM_SET_COUNT 4 +enum clk_type { + CLK_TYPE_DCFCLK, + CLK_TYPE_FCLK, + CLK_TYPE_MCLK, + CLK_TYPE_SOCCLK, + CLK_TYPE_DTBCLK, + CLK_TYPE_DISPCLK, + CLK_TYPE_DPPCLK, + CLK_TYPE_DSCCLK, + CLK_TYPE_COUNT +}; struct clk_limit_table_entry { unsigned int voltage; /* millivolts with 2 fractional bits */ @@ -324,6 +335,11 @@ struct clk_mgr_funcs { int (*get_dispclk_from_dentist)(struct clk_mgr *clk_mgr_base); + bool (*is_dc_mode_present)(struct clk_mgr *clk_mgr); + + uint32_t (*set_smartmux_switch)(struct clk_mgr *clk_mgr, uint32_t pins_to_set); + + unsigned int (*get_max_clock_khz)(struct clk_mgr *clk_mgr_base, enum clk_type clk_type); }; struct clk_mgr { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index e94e9ba60f55..61c4d2a7db1c 100644 --- +++
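The new clk_mgr hooks above replace open-coded clock-table walks (compare the dcn401_init_hw hunk earlier, where six per-domain AC/DC checks collapse into a single is_dc_mode_present() call). A minimal caller sketch for the other new hook, get_max_clock_khz(); the wrapper itself is hypothetical and follows DC's usual optional-callback convention:

/* Hypothetical wrapper: query a per-domain maximum, 0 when unsupported. */
static unsigned int query_max_clock_khz(struct clk_mgr *clk_mgr,
                                        enum clk_type type)
{
        if (clk_mgr->funcs->get_max_clock_khz)
                return clk_mgr->funcs->get_max_clock_khz(clk_mgr, type);
        return 0;
}

e.g. query_max_clock_khz(clk_mgr, CLK_TYPE_DISPCLK) for the highest attainable display clock.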
b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -211,7 +211,7 @@ struct dccg_funcs { struct dccg *dccg, enum streamclk_source src, uint32_t otg_inst); - void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst); + void (*set_dto_dscclk)(struct dccg *dccg, uint32_t dsc_inst, uint32_t num_slices_h); void (*set_ref_dscclk)(struct dccg *dccg, uint32_t dsc_inst); void (*dccg_root_gate_disable_control)(struct dccg *dccg, uint32_t pipe_idx, uint32_t disable_clock_gating); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h index b610beb075d5..cee29e89ec5c 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h @@ -282,7 +282,7 @@ struct hubp_funcs { void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable); void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode); void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width); - void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, bool protection_enabled); + void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, uint8_t protection_bits); void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g, enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h index 6e303b81bfb0..7641439f6ca0 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h @@ -190,6 +190,42 @@ struct mpc_grph_gamut_adjustment { enum mpcc_gamut_remap_id mpcc_gamut_remap_block_id; }; +struct mpc_rmcm_regs { + uint32_t rmcm_3dlut_mem_pwr_state; + uint32_t rmcm_3dlut_mem_pwr_force; + uint32_t rmcm_3dlut_mem_pwr_dis; + uint32_t rmcm_3dlut_mem_pwr_mode; + uint32_t rmcm_3dlut_size; + uint32_t rmcm_3dlut_mode; + uint32_t rmcm_3dlut_mode_cur; + uint32_t rmcm_3dlut_read_sel; + uint32_t rmcm_3dlut_30bit_en; + uint32_t rmcm_3dlut_wr_en_mask; + uint32_t rmcm_3dlut_ram_sel; + uint32_t rmcm_3dlut_out_norm_factor; + uint32_t rmcm_3dlut_fl_sel; + uint32_t rmcm_3dlut_out_offset_r; + uint32_t rmcm_3dlut_out_scale_r; + uint32_t rmcm_3dlut_fl_done; + uint32_t rmcm_3dlut_fl_soft_underflow; + uint32_t rmcm_3dlut_fl_hard_underflow; + uint32_t rmcm_cntl; + uint32_t rmcm_shaper_mem_pwr_state; + uint32_t rmcm_shaper_mem_pwr_force; + uint32_t rmcm_shaper_mem_pwr_dis; + uint32_t rmcm_shaper_mem_pwr_mode; + uint32_t rmcm_shaper_lut_mode; + uint32_t rmcm_shaper_mode_cur; + uint32_t rmcm_shaper_lut_write_en_mask; + uint32_t rmcm_shaper_lut_write_sel; + uint32_t rmcm_shaper_offset_b; + uint32_t rmcm_shaper_scale_b; + uint32_t rmcm_shaper_rama_exp_region_start_b; + uint32_t rmcm_shaper_rama_exp_region_start_seg_b; + uint32_t rmcm_shaper_rama_exp_region_end_b; + uint32_t rmcm_shaper_rama_exp_region_end_base_b; +}; + struct mpcc_sm_cfg { bool enable; /* 0-single plane,2-row subsampling,4-column subsampling,6-checkboard subsampling */ @@ -301,6 +337,7 @@ struct mpcc_state { uint32_t rgam_mode; uint32_t rgam_lut; struct mpc_grph_gamut_adjustment gamut_remap; + struct mpc_rmcm_regs rmcm_regs; }; /** @@ -1038,6 +1075,11 @@ struct mpc_funcs { */ void (*program_3dlut_size)(struct mpc *mpc, bool is_17x17x17, int mpcc_id); + /** + * @mcm: + * + * MPC MCM new HW sequential programming functions + */ struct { void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id); void (*program_bias_scale)(struct 
mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id); @@ -1050,6 +1092,11 @@ struct mpc_funcs { bool lut_bank_a, int mpcc_id); } mcm; + /** + * @rmcm: + * + * MPC RMCM new HW sequential programming functions + */ struct { void (*enable_3dlut_fl)(struct mpc *mpc, bool enable, int mpcc_id); void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h index 00ea3864dd4d..44f86cc2d1d6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/pg_cntl.h @@ -46,6 +46,8 @@ struct pg_cntl_funcs { void (*opp_pg_control)(struct pg_cntl *pg_cntl, unsigned int opp_inst, bool power_on); void (*optc_pg_control)(struct pg_cntl *pg_cntl, unsigned int optc_inst, bool power_on); void (*dwb_pg_control)(struct pg_cntl *pg_cntl, bool power_on); + void (*mem_pg_control)(struct pg_cntl *pg_cntl, bool power_on); + void (*dio_pg_control)(struct pg_cntl *pg_cntl, bool power_on); void (*init_pg_status)(struct pg_cntl *pg_cntl); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index fe7f3137f228..27f950ae45ee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -117,6 +117,7 @@ struct stream_encoder { uint32_t stream_enc_inst; struct vpg *vpg; struct afmt *afmt; + struct apg *apg; }; struct enc_state { diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index 7d16351bba99..f2503402c10e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -144,9 +144,9 @@ struct link_service { uint32_t (*dp_link_bandwidth_kbps)( const struct dc_link *link, const struct dc_link_settings *link_settings); - bool (*validate_dpia_bandwidth)( - const struct dc_stream_state *stream, - const unsigned int num_streams); + enum dc_status (*validate_dp_tunnel_bandwidth)( + const struct dc *dc, + const struct dc_state *new_ctx); uint32_t (*dp_required_hblank_size_bytes)( const struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c index 96febabf464a..2956c2b3ad1a 100644 --- a/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c +++ b/drivers/gpu/drm/amd/display/dc/link/accessories/link_dp_cts.c @@ -34,6 +34,7 @@ #include "dm_helpers.h" #include "dc_dmub_srv.h" #include "dce/dmub_hw_lock_mgr.h" +#include "clk_mgr.h" #define DC_LOGGER \ link->ctx->logger @@ -67,10 +68,17 @@ static void dp_retrain_link_dp_test(struct dc_link *link, { struct pipe_ctx *pipes[MAX_PIPES]; struct dc_state *state = link->dc->current_state; + struct dc_stream_update stream_update = { 0 }; + bool dpms_off = false; + bool needs_divider_update = false; bool was_hpo_acquired = resource_is_hpo_acquired(link->dc->current_state); bool is_hpo_acquired; uint8_t count; int i; + struct audio_output audio_output[MAX_PIPES]; + + needs_divider_update = (link->dc->link_srv->dp_get_encoding_format(link_setting) != + link->dc->link_srv->dp_get_encoding_format((const struct dc_link_settings *) &link->cur_link_settings)); udelay(100); @@ -83,16 +91,59 @@ static void dp_retrain_link_dp_test(struct dc_link *link, link->dc, state, pipes[i]); + + // Disable OTG and re-enable after updating clocks + 
pipes[i]->stream_res.tg->funcs->disable_crtc(pipes[i]->stream_res.tg); } - if (link->dc->hwss.setup_hpo_hw_control) { - is_hpo_acquired = resource_is_hpo_acquired(state); - if (was_hpo_acquired != is_hpo_acquired) - link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired); + if (needs_divider_update && link->dc->res_pool->funcs->update_dc_state_for_encoder_switch) { + link->dc->res_pool->funcs->update_dc_state_for_encoder_switch(link, + link_setting, count, + *pipes, &audio_output[0]); + for (i = 0; i < count; i++) { + pipes[i]->clock_source->funcs->program_pix_clk( + pipes[i]->clock_source, + &pipes[i]->stream_res.pix_clk_params, + link->dc->link_srv->dp_get_encoding_format(&pipes[i]->link_config.dp_link_settings), + &pipes[i]->pll_settings); + + if (pipes[i]->stream_res.audio != NULL) { + const struct link_hwss *link_hwss = get_link_hwss( + link, &pipes[i]->link_res); + + link_hwss->setup_audio_output(pipes[i], &audio_output[i], + pipes[i]->stream_res.audio->inst); + + pipes[i]->stream_res.audio->funcs->az_configure( + pipes[i]->stream_res.audio, + pipes[i]->stream->signal, + &audio_output[i].crtc_info, + &pipes[i]->stream->audio_info, + &audio_output[i].dp_link_info); + + if (link->dc->config.disable_hbr_audio_dp2 && + pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio && + link->dc->link_srv->dp_is_128b_132b_signal(pipes[i])) + pipes[i]->stream_res.audio->funcs->az_disable_hbr_audio(pipes[i]->stream_res.audio); + } + } } - for (i = count-1; i >= 0; i--) - link_set_dpms_on(state, pipes[i]); + // Toggle on HPO I/O if necessary + is_hpo_acquired = resource_is_hpo_acquired(state); + if (was_hpo_acquired != is_hpo_acquired && link->dc->hwss.setup_hpo_hw_control) + link->dc->hwss.setup_hpo_hw_control(link->dc->hwseq, is_hpo_acquired); + + for (i = 0; i < count; i++) + pipes[i]->stream_res.tg->funcs->enable_crtc(pipes[i]->stream_res.tg); + + // Set DPMS on with stream update + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) { + stream_update.stream = state->streams[i]; + stream_update.dpms_off = &dpms_off; + dc_update_planes_and_stream(state->clk_mgr->ctx->dc, NULL, 0, state->streams[i], &stream_update); + } } static void dp_test_send_link_training(struct dc_link *link) diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c index 116ff37126e7..55c5148de800 100644 --- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c @@ -74,7 +74,7 @@ static void dp_hpo_fixed_vs_pe_retimer_set_tx_ffe(struct dc_link *link, static void dp_hpo_fixed_vs_pe_retimer_program_override_test_pattern(struct dc_link *link, struct encoder_set_dp_phy_pattern_param *tp_params) { - uint8_t clk_src = 0x4C; + uint8_t clk_src = 0xC4; uint8_t pattern = 0x4F; /* SQ128 */ const uint8_t vendor_lttpr_write_data_pg0[4] = {0x1, 0x11, 0x0, 0x0}; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 9655e6fa53a4..827b630daf49 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -593,8 +593,9 @@ static bool detect_dp(struct dc_link *link, if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; - if 
(!detect_dp_sink_caps(link)) + if (!detect_dp_sink_caps(link)) { return false; + } if (is_dp_branch_device(link)) /* DP SST branch */ diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c index 273a3be6d593..8c8682f743d6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c @@ -140,7 +140,8 @@ void link_blank_dp_stream(struct dc_link *link, bool hw_init) } } - if ((!link->wa_flags.dp_keep_receiver_powered) || hw_init) + if (((!dc->is_switch_in_progress_dest) && ((!link->wa_flags.dp_keep_receiver_powered) || hw_init)) && + (link->type != dc_connection_none)) dpcd_write_rx_power_ctrl(link, false); } } @@ -842,14 +843,14 @@ void link_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); dsc->funcs->dsc_set_config(dsc, &dsc_cfg, &dsc_optc_cfg); dsc->funcs->dsc_enable(dsc, pipe_ctx->stream_res.opp->inst); for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) { struct display_stream_compressor *odm_dsc = odm_pipe->stream_res.dsc; if (should_use_dto_dscclk) - dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst); + dccg->funcs->set_dto_dscclk(dccg, odm_dsc->inst, dsc_cfg.dc_dsc_cfg.num_slices_h); odm_dsc->funcs->dsc_set_config(odm_dsc, &dsc_cfg, &dsc_optc_cfg); odm_dsc->funcs->dsc_enable(odm_dsc, odm_pipe->stream_res.opp->inst); } @@ -2296,8 +2297,7 @@ static bool allocate_usb4_bandwidth_for_stream(struct dc_stream_state *stream, i link->dpia_bw_alloc_config.remote_sink_req_bw[sink_index] = bw; } - /* get dp overhead for dp tunneling */ - link->dpia_bw_alloc_config.dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(link); + link->dpia_bw_alloc_config.dp_overhead = link_dpia_get_dp_overhead(link); req_bw += link->dpia_bw_alloc_config.dp_overhead; link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, req_bw); @@ -2537,6 +2537,14 @@ void link_set_dpms_on( !pipe_ctx->next_odm_pipe) { pipe_ctx->stream->dpms_off = false; update_psp_stream_config(pipe_ctx, false); + + if (link->is_dds) { + uint32_t post_oui_delay = 30; // 30ms + + dpcd_set_source_specific_data(link); + msleep(post_oui_delay); + } + return; } @@ -2629,6 +2637,15 @@ void link_set_dpms_on( dp_is_128b_132b_signal(pipe_ctx)) update_sst_payload(pipe_ctx, true); + /* Corruption was observed on systems with display mux when stream gets + * enabled after the mux switch. Having a small delay between link + * training and stream unblank resolves the corruption issue. + * This is a workaround.
+ */ + if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && + link->is_display_mux_present) + msleep(20); + dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/link/link_factory.c b/drivers/gpu/drm/amd/display/dc/link/link_factory.c index 1a04f4b74585..de1143dbbd25 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_factory.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_factory.c @@ -100,7 +100,7 @@ static void construct_link_service_validation(struct link_service *link_srv) { link_srv->validate_mode_timing = link_validate_mode_timing; link_srv->dp_link_bandwidth_kbps = dp_link_bandwidth_kbps; - link_srv->validate_dpia_bandwidth = link_validate_dpia_bandwidth; + link_srv->validate_dp_tunnel_bandwidth = link_validate_dp_tunnel_bandwidth; link_srv->dp_required_hblank_size_bytes = dp_required_hblank_size_bytes; } @@ -539,10 +539,16 @@ static bool construct_phy(struct dc_link *link, break; case CONNECTOR_ID_EDP: + // If smartmux is supported, only create the link on the primary eDP. + // Dual eDP is not supported with smartmux. + if (!(!link->dc->config.smart_mux_version || dc_ctx->dc_edp_id_count == 0)) + goto create_fail; + link->connector_signal = SIGNAL_TYPE_EDP; if (link->hpd_gpio) { - if (!link->dc->config.allow_edp_hotplug_detection) + if (!link->dc->config.allow_edp_hotplug_detection + && !is_smartmux_suported(link)) link->irq_source_hpd = DC_IRQ_SOURCE_INVALID; switch (link->dc->config.allow_edp_hotplug_detection) { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.c b/drivers/gpu/drm/amd/display/dc/link/link_validation.c index 29606fda029d..aecaf37eee35 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.c @@ -86,6 +86,10 @@ static bool dp_active_dongle_validate_timing( if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) return false; break; + case PIXEL_ENCODING_UNDEFINED: + /* These color depths are currently not supported */ + ASSERT(false); + break; default: /* Invalid Pixel Encoding*/ return false; @@ -104,6 +108,10 @@ static bool dp_active_dongle_validate_timing( if (dongle_caps->dp_hdmi_max_bpc < 12) return false; break; + case COLOR_DEPTH_UNDEFINED: + /* These color depths are currently not supported */ + ASSERT(false); + break; case COLOR_DEPTH_141414: case COLOR_DEPTH_161616: default: @@ -255,6 +263,14 @@ uint32_t dp_link_bandwidth_kbps( return link_rate_per_lane_kbps * link_settings->lane_count / 10000 * total_data_bw_efficiency_x10000; } +static uint32_t dp_get_timing_bandwidth_kbps( + const struct dc_crtc_timing *timing, + const struct dc_link *link) +{ + return dc_bandwidth_in_kbps_from_timing(timing, + dc_link_get_highest_encoding_format(link)); +} + static bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing) @@ -351,63 +367,81 @@ enum dc_status link_validate_mode_timing( return DC_OK; } +static const struct dc_tunnel_settings *get_dp_tunnel_settings(const struct dc_state *context, + const struct dc_stream_state *stream) +{ + int i; + const struct dc_tunnel_settings *dp_tunnel_settings = NULL; + + for (i = 0; i < MAX_PIPES; i++) { + if (context->res_ctx.pipe_ctx[i].stream && (context->res_ctx.pipe_ctx[i].stream == stream)) { + dp_tunnel_settings = &context->res_ctx.pipe_ctx[i].link_config.dp_tunnel_settings; + break; + } + } + + return dp_tunnel_settings; +} + /* - * This function calculates the bandwidth required for the stream timing - * and aggregates the stream bandwidth 
for the respective dpia link - * - * @stream: pointer to the dc_stream_state struct instance - * @num_streams: number of streams to be validated + * Calculates the DP tunneling bandwidth required for the stream timing + * and aggregates the stream bandwidth for the respective DP tunneling link * - * return: true if validation is succeeded + * return: dc_status */ -bool link_validate_dpia_bandwidth(const struct dc_stream_state *stream, const unsigned int num_streams) +enum dc_status link_validate_dp_tunnel_bandwidth(const struct dc *dc, const struct dc_state *new_ctx) { - int bw_needed[MAX_DPIA_NUM] = {0}; - struct dc_link *dpia_link[MAX_DPIA_NUM] = {0}; - int num_dpias = 0; + struct dc_validation_dpia_set dpia_link_sets[MAX_DPIA_NUM] = { 0 }; + uint8_t link_count = 0; + enum dc_status result = DC_OK; - for (unsigned int i = 0; i < num_streams; ++i) { - if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT) { - /* new dpia sst stream, check whether it exceeds max dpia */ - if (num_dpias >= MAX_DPIA_NUM) - return false; + // Iterate through streams in the new context + for (uint8_t i = 0; (i < MAX_PIPES && i < new_ctx->stream_count); i++) { + const struct dc_stream_state *stream = new_ctx->streams[i]; + const struct dc_link *link; + const struct dc_tunnel_settings *dp_tunnel_settings; + uint32_t timing_bw; - dpia_link[num_dpias] = stream[i].link; - bw_needed[num_dpias] = dc_bandwidth_in_kbps_from_timing(&stream[i].timing, - dc_link_get_highest_encoding_format(dpia_link[num_dpias])); - num_dpias++; - } else if (stream[i].signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { - uint8_t j = 0; - /* check whether its a known dpia link */ - for (; j < num_dpias; ++j) { - if (dpia_link[j] == stream[i].link) - break; - } + if (stream == NULL) + continue; + + link = stream->link; + + if (!(link && (stream->signal == SIGNAL_TYPE_DISPLAY_PORT + || stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) + && link->hpd_status)) + continue; + + dp_tunnel_settings = get_dp_tunnel_settings(new_ctx, stream); - if (j == num_dpias) { - /* new dpia mst stream, check whether it exceeds max dpia */ - if (num_dpias >= MAX_DPIA_NUM) - return false; - else { - dpia_link[j] = stream[i].link; - num_dpias++; - } + if ((dp_tunnel_settings == NULL) || (dp_tunnel_settings->should_use_dp_bw_allocation == false)) + continue; + + timing_bw = dp_get_timing_bandwidth_kbps(&stream->timing, link); + + // Find an existing entry for this 'link' in 'dpia_link_sets' + for (uint8_t j = 0; j < MAX_DPIA_NUM; j++) { + bool is_new_slot = false; + + if (dpia_link_sets[j].link == NULL) { + is_new_slot = true; + link_count++; + dpia_link_sets[j].required_bw = 0; + dpia_link_sets[j].link = link; } - bw_needed[j] += dc_bandwidth_in_kbps_from_timing(&stream[i].timing, - dc_link_get_highest_encoding_format(dpia_link[j])); + if (is_new_slot || (dpia_link_sets[j].link == link)) { + dpia_link_sets[j].tunnel_settings = dp_tunnel_settings; + dpia_link_sets[j].required_bw += timing_bw; + break; + } } } - /* Include dp overheads */ - for (uint8_t i = 0; i < num_dpias; ++i) { - int dp_overhead = 0; - - dp_overhead = link_dp_dpia_get_dp_overhead_in_dp_tunneling(dpia_link[i]); - bw_needed[i] += dp_overhead; - } + if (link_count && link_dpia_validate_dp_tunnel_bandwidth(dpia_link_sets, link_count) == false) + result = DC_FAIL_DP_TUNNEL_BW_VALIDATE; - return dpia_validate_usb4_bw(dpia_link, bw_needed, num_dpias); + return result; } struct dp_audio_layout_config { diff --git a/drivers/gpu/drm/amd/display/dc/link/link_validation.h 
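The rewritten validator above changes the contract: instead of receiving a flat stream array, it walks the new dc_state, skips streams whose pipes have no bandwidth-allocation-capable tunnel, and accumulates per-link totals before handing the set to the DPIA layer. A self-contained toy model of that aggregation (names and the fixed slot count are illustrative, not DC code):

/* Toy model of per-tunnel bandwidth aggregation: streams sharing a link
 * accumulate into one slot; each slot must fit the granted bandwidth. */
#include <stdbool.h>
#include <stddef.h>

struct toy_stream { const void *link; int bw_kbps; };

static bool toy_tunnel_bw_ok(const struct toy_stream *streams, int n,
                             int granted_bw_kbps)
{
        const void *links[4] = { NULL };
        int need[4] = { 0 };

        for (int i = 0; i < n; i++) {
                for (int j = 0; j < 4; j++) {
                        if (!links[j] || links[j] == streams[i].link) {
                                links[j] = streams[i].link;
                                need[j] += streams[i].bw_kbps; /* timing BW + overhead */
                                break;
                        }
                }
        }
        for (int j = 0; j < 4 && links[j]; j++)
                if (need[j] > granted_bw_kbps)
                        return false; /* maps to DC_FAIL_DP_TUNNEL_BW_VALIDATE */
        return true;
}

With two streams of 6,000,000 kbps each on one tunnel, that tunnel's slot needs 12,000,000 kbps, and validation fails if the connection manager granted less.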
b/drivers/gpu/drm/amd/display/dc/link/link_validation.h index bf398c49c3e8..9553c81053fe 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_validation.h +++ b/drivers/gpu/drm/amd/display/dc/link/link_validation.h @@ -30,9 +30,9 @@ enum dc_status link_validate_mode_timing( const struct dc_stream_state *stream, struct dc_link *link, const struct dc_crtc_timing *timing); -bool link_validate_dpia_bandwidth( - const struct dc_stream_state *stream, - const unsigned int num_streams); +enum dc_status link_validate_dp_tunnel_bandwidth( + const struct dc *dc, + const struct dc_state *new_ctx); uint32_t dp_link_bandwidth_kbps( const struct dc_link *link, const struct dc_link_settings *link_settings); diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c index a5127c2d47ef..651926e547b9 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c @@ -385,9 +385,15 @@ bool dp_is_128b_132b_signal(struct pipe_ctx *pipe_ctx) bool dp_is_lttpr_present(struct dc_link *link) { /* Some sink devices report invalid LTTPR revision, so don't validate against that cap */ - return (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && + uint32_t lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + bool is_lttpr_present = (lttpr_count > 0 && link->dpcd_caps.lttpr_caps.max_lane_count > 0 && link->dpcd_caps.lttpr_caps.max_lane_count <= 4); + + if (lttpr_count > 0 && !is_lttpr_present) + DC_LOG_ERROR("LTTPR count is nonzero but invalid lane count reported. Assuming no LTTPR present.\n"); + + return is_lttpr_present; } /* in DP compliance test, DPR-120 may have @@ -1382,6 +1388,21 @@ void dpcd_set_source_specific_data(struct dc_link *link) struct dpcd_amd_signature amd_signature = {0}; struct dpcd_amd_device_id amd_device_id = {0}; + if (link->is_dds) { + uint8_t dpcd_dp_edp_backlight_mode = 0; + + /* + * Write 0 to bits 0:1 for dp_edp_backlight_mode_set register + * if platform is DDS + */ + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &dpcd_dp_edp_backlight_mode, sizeof(uint8_t)); + dpcd_dp_edp_backlight_mode &= ~0x3; + + core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &dpcd_dp_edp_backlight_mode, sizeof(uint8_t)); + } + amd_device_id.device_id_byte1 = (uint8_t)(link->ctx->asic_id.chip_id); amd_device_id.device_id_byte2 = @@ -1537,6 +1558,10 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) return false; link->dpcd_sink_ext_caps.raw = dpcd_data; + if (link->is_dds && !link->dpcd_sink_ext_caps.bits.oled) { + link->dpcd_sink_ext_caps.raw = 0; + return false; + } if (core_link_read_dpcd(link, DP_EDP_GENERAL_CAP_2, &edp_general_cap2, 1) != DC_OK) return false; @@ -1551,6 +1576,8 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) uint8_t lttpr_dpcd_data[10] = {0}; enum dc_status status; bool is_lttpr_present; + uint32_t lttpr_count; + uint32_t closest_lttpr_offset; /* Logic to determine LTTPR support*/ bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; @@ -1602,20 +1629,22 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) lttpr_dpcd_data[DP_LTTPR_ALPM_CAPABILITIES - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; + lttpr_count = dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt); + /* If this chip cap is set, at least one retimer must exist in the chain * 
Override count to 1 if we receive a known bad count (0 or an invalid value) */ if (((link->chip_caps & AMD_EXT_DISPLAY_PATH_CAPS__EXT_CHIP_MASK) == AMD_EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && - (dp_parse_lttpr_repeater_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) == 0)) { + lttpr_count == 0) { /* If you see this message consistently, either the host platform has FIXED_VS flag * incorrectly configured or the sink device is returning an invalid count. */ DC_LOG_ERROR("lttpr_caps phy_repeater_cnt is 0x%x, forcing it to 0x80.", link->dpcd_caps.lttpr_caps.phy_repeater_cnt); link->dpcd_caps.lttpr_caps.phy_repeater_cnt = 0x80; + lttpr_count = 1; DC_LOG_DC("lttpr_caps forced phy_repeater_cnt = %d\n", link->dpcd_caps.lttpr_caps.phy_repeater_cnt); } - /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */ is_lttpr_present = dp_is_lttpr_present(link); DC_LOG_DC("is_lttpr_present = %d\n", is_lttpr_present); @@ -1623,11 +1652,25 @@ enum dc_status dp_retrieve_lttpr_cap(struct dc_link *link) if (is_lttpr_present) { CONN_DATA_DETECT(link, lttpr_dpcd_data, sizeof(lttpr_dpcd_data), "LTTPR Caps: "); - core_link_read_dpcd(link, DP_LTTPR_IEEE_OUI, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui)); - CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui), "LTTPR IEEE OUI: "); + // Identify closest LTTPR to determine if workarounds required for known embedded LTTPR + closest_lttpr_offset = dp_get_closest_lttpr_offset(lttpr_count); - core_link_read_dpcd(link, DP_LTTPR_DEVICE_ID, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id)); - CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id), "LTTPR Device ID: "); + core_link_read_dpcd(link, (DP_LTTPR_IEEE_OUI + closest_lttpr_offset), + link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui)); + core_link_read_dpcd(link, (DP_LTTPR_DEVICE_ID + closest_lttpr_offset), + link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id)); + + if (lttpr_count > 1) { + CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui), + "Closest LTTPR To Host's IEEE OUI: "); + CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id), + "Closest LTTPR To Host's LTTPR Device ID: "); + } else { + CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_ieee_oui, sizeof(link->dpcd_caps.lttpr_caps.lttpr_ieee_oui), + "LTTPR IEEE OUI: "); + CONN_DATA_DETECT(link, link->dpcd_caps.lttpr_caps.lttpr_device_id, sizeof(link->dpcd_caps.lttpr_caps.lttpr_device_id), + "LTTPR Device ID: "); + } } return status; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c index 22bfdced64ab..9b2f1a7da1d1 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia.c @@ -75,12 +75,15 @@ enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link) if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc) { status = core_link_read_dpcd(link, USB4_DRIVER_BW_CAPABILITY, - dpcd_dp_tun_data, 1); + dpcd_dp_tun_data, 2); if (status != DC_OK) goto err; - link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = 
dpcd_dp_tun_data[0]; + link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.raw = + dpcd_dp_tun_data[USB4_DRIVER_BW_CAPABILITY - USB4_DRIVER_BW_CAPABILITY]; + link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.raw = + dpcd_dp_tun_data[DP_IN_ADAPTER_TUNNEL_INFO - USB4_DRIVER_BW_CAPABILITY]; } DC_LOG_DEBUG("%s: Link[%d] DP tunneling support (RouterId=%d AdapterId=%d) " @@ -155,8 +158,14 @@ void link_decide_dp_tunnel_settings(struct dc_stream_state *stream, link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dp_tunneling; if (link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.bits.dpia_bw_alloc - && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) + && link->dpcd_caps.usb4_dp_tun_info.driver_bw_cap.bits.driver_bw_alloc_support) { dp_tunnel_setting->should_use_dp_bw_allocation = true; + dp_tunnel_setting->cm_id = link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id & 0x0F; + dp_tunnel_setting->group_id = link->dpcd_caps.usb4_dp_tun_info.dpia_tunnel_info.bits.group_id; + dp_tunnel_setting->estimated_bw = link->dpia_bw_alloc_config.estimated_bw; + dp_tunnel_setting->allocated_bw = link->dpia_bw_alloc_config.allocated_bw; + dp_tunnel_setting->bw_granularity = link->dpia_bw_alloc_config.bw_granularity; + } } } diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c index 3af7564a84f1..819bf2d8ba53 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.c @@ -35,6 +35,8 @@ #define Kbps_TO_Gbps (1000 * 1000) +#define MST_TIME_SLOT_COUNT 64 + // ------------------------------------------------------------------ // PRIVATE FUNCTIONS // ------------------------------------------------------------------ @@ -160,78 +162,6 @@ static void retrieve_usb4_dp_bw_allocation_info(struct dc_link *link) link->dpia_bw_alloc_config.nrd_max_lane_count); } -static uint8_t get_lowest_dpia_index(struct dc_link *link) -{ - const struct dc *dc_struct = link->dc; - uint8_t idx = 0xFF; - int i; - - for (i = 0; i < MAX_LINKS; ++i) { - - if (!dc_struct->links[i] || - dc_struct->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) - continue; - - if (idx > dc_struct->links[i]->link_index) { - idx = dc_struct->links[i]->link_index; - break; - } - } - - return idx; -} - -/* - * Get the maximum dp tunnel banwidth of host router - * - * @dc: pointer to the dc struct instance - * @hr_index: host router index - * - * return: host router maximum dp tunnel bandwidth - */ -static int get_host_router_total_dp_tunnel_bw(const struct dc *dc, uint8_t hr_index) -{ - uint8_t lowest_dpia_index = get_lowest_dpia_index(dc->links[0]); - uint8_t hr_index_temp = 0; - struct dc_link *link_dpia_primary, *link_dpia_secondary; - int total_bw = 0; - - for (uint8_t i = 0; i < MAX_LINKS - 1; ++i) { - - if (!dc->links[i] || dc->links[i]->ep_type != DISPLAY_ENDPOINT_USB4_DPIA) - continue; - - hr_index_temp = (dc->links[i]->link_index - lowest_dpia_index) / 2; - - if (hr_index_temp == hr_index) { - link_dpia_primary = dc->links[i]; - link_dpia_secondary = dc->links[i + 1]; - - /** - * If BW allocation enabled on both DPIAs, then - * HR BW = Estimated(dpia_primary) + Allocated(dpia_secondary) - * otherwise HR BW = Estimated(bw alloc enabled dpia) - */ - if ((link_dpia_primary->hpd_status && - link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) && - (link_dpia_secondary->hpd_status && - link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled)) { - total_bw += 
link_dpia_primary->dpia_bw_alloc_config.estimated_bw + - link_dpia_secondary->dpia_bw_alloc_config.allocated_bw; - } else if (link_dpia_primary->hpd_status && - link_dpia_primary->dpia_bw_alloc_config.bw_alloc_enabled) { - total_bw = link_dpia_primary->dpia_bw_alloc_config.estimated_bw; - } else if (link_dpia_secondary->hpd_status && - link_dpia_secondary->dpia_bw_alloc_config.bw_alloc_enabled) { - total_bw += link_dpia_secondary->dpia_bw_alloc_config.estimated_bw; - } - break; - } - } - - return total_bw; -} - /* * Cleanup function for when the dpia is unplugged to reset struct * and perform any required clean up @@ -251,32 +181,40 @@ static void dpia_bw_alloc_unplug(struct dc_link *link) static void link_dpia_send_bw_alloc_request(struct dc_link *link, int req_bw) { - uint8_t requested_bw; - uint32_t temp; + uint8_t request_reg_val; + uint32_t temp, request_bw; - /* Error check whether request bw greater than allocated */ - if (req_bw > link->dpia_bw_alloc_config.estimated_bw) { - DC_LOG_ERROR("%s: Request BW greater than estimated BW for link(%d)\n", - __func__, link->link_index); - req_bw = link->dpia_bw_alloc_config.estimated_bw; + if (link->dpia_bw_alloc_config.bw_granularity == 0) { + DC_LOG_ERROR("%s: Link[%d]: bw_granularity is zero!", __func__, link->link_index); + return; } temp = req_bw * link->dpia_bw_alloc_config.bw_granularity; - requested_bw = temp / Kbps_TO_Gbps; + request_reg_val = temp / Kbps_TO_Gbps; /* Always make sure to add more to account for floating points */ if (temp % Kbps_TO_Gbps) - ++requested_bw; + ++request_reg_val; - /* Error check whether requested and allocated are equal */ - req_bw = requested_bw * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); - if (req_bw && (req_bw == link->dpia_bw_alloc_config.allocated_bw)) { - DC_LOG_ERROR("%s: Request BW equals to allocated BW for link(%d)\n", - __func__, link->link_index); + request_bw = request_reg_val * (Kbps_TO_Gbps / link->dpia_bw_alloc_config.bw_granularity); + + if (request_bw > link->dpia_bw_alloc_config.estimated_bw) { + DC_LOG_ERROR("%s: Link[%d]: Request BW (%d --> %d) > Estimated BW (%d)... Set to Estimated BW!", + __func__, link->link_index, + req_bw, request_bw, link->dpia_bw_alloc_config.estimated_bw); + req_bw = link->dpia_bw_alloc_config.estimated_bw; + + temp = req_bw * link->dpia_bw_alloc_config.bw_granularity; + request_reg_val = temp / Kbps_TO_Gbps; + if (temp % Kbps_TO_Gbps) + ++request_reg_val; } + link->dpia_bw_alloc_config.allocated_bw = request_bw; + DC_LOG_DC("%s: Link[%d]: Request BW: %d", __func__, link->link_index, request_bw); + core_link_write_dpcd(link, REQUESTED_BW, - &requested_bw, + &request_reg_val, sizeof(uint8_t)); } @@ -304,14 +242,16 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) link->dpia_bw_alloc_config.bw_alloc_enabled = true; ret = true; - /* - * During DP tunnel creation, CM preallocates BW and reduces estimated BW of other - * DPIA. CM release preallocation only when allocation is complete. Do zero alloc - * to make the CM to release preallocation and update estimated BW correctly for - * all DPIAs per host router - */ - // TODO: Zero allocation can be removed once the MSFT CM fix has been released - link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0); + if (link->dc->debug.dpia_debug.bits.enable_usb4_bw_zero_alloc_patch) { + /* + * During DP tunnel creation, the CM preallocates BW + * and reduces the estimated BW of other DPIAs. + * The CM releases the preallocation only when the allocation is complete. 
+ * Perform a zero allocation to make the CM release the preallocation + * and correctly update the estimated BW for all DPIAs per host router. + */ + link_dp_dpia_allocate_usb4_bandwidth_for_stream(link, 0); + } } else DC_LOG_DEBUG("%s: link[%d] failed to enable DPTX BW allocation mode", __func__, link->link_index); } @@ -329,19 +269,17 @@ bool link_dpia_enable_usb4_dp_bw_alloc_mode(struct dc_link *link) */ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status) { + link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); + if (status & DP_TUNNELING_BW_REQUEST_SUCCEEDED) { DC_LOG_DEBUG("%s: BW Allocation request succeeded on link(%d)", __func__, link->link_index); } else if (status & DP_TUNNELING_BW_REQUEST_FAILED) { - link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); - DC_LOG_DEBUG("%s: BW Allocation request failed on link(%d) allocated/estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); link_dpia_send_bw_alloc_request(link, link->dpia_bw_alloc_config.estimated_bw); } else if (status & DP_TUNNELING_ESTIMATED_BW_CHANGED) { - link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); - DC_LOG_DEBUG("%s: Estimated BW changed on link(%d) new estimated BW=%d", __func__, link->link_index, link->dpia_bw_alloc_config.estimated_bw); } @@ -374,9 +312,13 @@ void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int pe void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int req_bw) { - DC_LOG_DEBUG("%s: ENTER: link(%d), hpd_status(%d), current allocated_bw(%d), req_bw(%d)\n", + link->dpia_bw_alloc_config.estimated_bw = get_estimated_bw(link); + + DC_LOG_DEBUG("%s: ENTER: link[%d] hpd(%d) Allocated_BW: %d Estimated_BW: %d Req_BW: %d", __func__, link->link_index, link->hpd_status, - link->dpia_bw_alloc_config.allocated_bw, req_bw); + link->dpia_bw_alloc_config.allocated_bw, + link->dpia_bw_alloc_config.estimated_bw, + req_bw); if (link_dp_is_bw_alloc_available(link)) link_dpia_send_bw_alloc_request(link, req_bw); @@ -384,73 +326,116 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r DC_LOG_DEBUG("%s: BW Allocation mode not available", __func__); } -bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed_per_dpia, const unsigned int num_dpias) +uint32_t link_dpia_get_dp_overhead(const struct dc_link *link) { - bool ret = true; - int bw_needed_per_hr[MAX_HR_NUM] = { 0, 0 }, host_router_total_dp_bw = 0; - uint8_t lowest_dpia_index, i, hr_index; + uint32_t link_dp_overhead = 0; + + if ((link->type == dc_connection_mst_branch) && + !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { + /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH + * MST overhead is 1/64 of link bandwidth (excluding any overhead) + */ + const struct dc_link_settings *link_cap = dc_link_get_link_cap(link); + + if (link_cap) { + uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate * + (uint32_t)link_cap->lane_count * + LINK_RATE_REF_FREQ_IN_KHZ * 8; + link_dp_overhead = (link_bw_in_kbps / MST_TIME_SLOT_COUNT) + + ((link_bw_in_kbps % MST_TIME_SLOT_COUNT) ? 1 : 0); + } + } - if (!num_dpias || num_dpias > MAX_DPIA_NUM) - return ret; + return link_dp_overhead; +} - lowest_dpia_index = get_lowest_dpia_index(link[0]); +/* + * Aggregates the DPIA bandwidth usage for the respective USB4 Router. + * It then validates that the required bandwidth is within the router's capacity.
+ * + * @dpia_link_sets: pointer to the array of dc_validation_dpia_set entries + * @count: number of DPIA validation sets + * + * return: true if validation succeeded + */ +bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count) +{ + uint32_t granularity_Gbps; + const struct dc_link *link; + uint32_t link_bw_granularity; + uint32_t link_required_bw; + struct usb4_router_validation_set router_sets[MAX_HOST_ROUTERS_NUM] = { 0 }; + uint8_t i; + bool is_success = true; + uint8_t router_count = 0; - /* get total Host Router BW with granularity for the given modes */ - for (i = 0; i < num_dpias; ++i) { - int granularity_Gbps = 0; - int bw_granularity = 0; + if ((dpia_link_sets == NULL) || (count == 0)) + return is_success; - if (!link[i]->dpia_bw_alloc_config.bw_alloc_enabled) - continue; + // Iterate through each DP tunneling link (DPIA). + // Aggregate its bandwidth requirements onto the respective USB4 router. + for (i = 0; i < count; i++) { + link = dpia_link_sets[i].link; + link_required_bw = dpia_link_sets[i].required_bw; + const struct dc_tunnel_settings *dp_tunnel_settings = dpia_link_sets[i].tunnel_settings; - if (link[i]->link_index < lowest_dpia_index) - continue; + if ((link == NULL) || (dp_tunnel_settings == NULL) || dp_tunnel_settings->bw_granularity == 0) + break; - granularity_Gbps = (Kbps_TO_Gbps / link[i]->dpia_bw_alloc_config.bw_granularity); - bw_granularity = (bw_needed_per_dpia[i] / granularity_Gbps) * granularity_Gbps + - ((bw_needed_per_dpia[i] % granularity_Gbps) ? granularity_Gbps : 0); + if (link->type == dc_connection_mst_branch) + link_required_bw += link_dpia_get_dp_overhead(link); - hr_index = (link[i]->link_index - lowest_dpia_index) / 2; - bw_needed_per_hr[hr_index] += bw_granularity; - } + granularity_Gbps = (Kbps_TO_Gbps / dp_tunnel_settings->bw_granularity); + link_bw_granularity = (link_required_bw / granularity_Gbps) * granularity_Gbps + + ((link_required_bw % granularity_Gbps) ? granularity_Gbps : 0); - /* validate against each Host Router max BW */ - for (hr_index = 0; hr_index < MAX_HR_NUM; ++hr_index) { - if (bw_needed_per_hr[hr_index]) { - host_router_total_dp_bw = get_host_router_total_dp_tunnel_bw(link[0]->dc, hr_index); - if (bw_needed_per_hr[hr_index] > host_router_total_dp_bw) { - ret = false; + // Find or add the USB4 router associated with the current DPIA link + for (uint8_t j = 0; j < MAX_HOST_ROUTERS_NUM; j++) { + bool is_new_slot = false; + + if (router_sets[j].is_valid == false) { + router_sets[j].is_valid = true; + router_sets[j].cm_id = dp_tunnel_settings->cm_id; + router_count++; + } + + if (router_sets[j].cm_id == dp_tunnel_settings->cm_id) { + uint32_t remaining_bw = + dp_tunnel_settings->estimated_bw - dp_tunnel_settings->allocated_bw; + + router_sets[j].allocated_bw += dp_tunnel_settings->allocated_bw; + + if (remaining_bw > router_sets[j].remaining_bw) + router_sets[j].remaining_bw = remaining_bw; + + // Get the max estimated BW within the same CM_ID + if (dp_tunnel_settings->estimated_bw > router_sets[j].estimated_bw) + router_sets[j].estimated_bw = dp_tunnel_settings->estimated_bw; + + router_sets[j].required_bw += link_bw_granularity; + router_sets[j].dpia_count++; break; } } } - return ret; -} + // Validate bandwidth for each unique router found.
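/*
 * [Editorial aside, not part of the patch] The expression assigning
 * link_bw_granularity above is the usual "round n up to a multiple of g"
 * idiom, expressed in kbps. A minimal, self-contained sketch of that
 * arithmetic follows; the helper name and the 0.25 Gbps granularity
 * (bw_granularity == 4, hence granularity_Gbps == Kbps_TO_Gbps / 4 ==
 * 250000 kbps) are illustrative assumptions, not values read from DPCD.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t round_up_to_granularity(uint32_t bw_kbps, uint32_t granularity_kbps)
{
	/* floor(bw / g) * g, plus one more granularity unit when a remainder exists */
	return (bw_kbps / granularity_kbps) * granularity_kbps +
	       ((bw_kbps % granularity_kbps) ? granularity_kbps : 0);
}

int main(void)
{
	/* 600000 kbps required at 0.25 Gbps granularity rounds up to 750000 kbps */
	assert(round_up_to_granularity(600000, 250000) == 750000);
	return 0;
}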
+ for (i = 0; i < router_count; i++) { + uint32_t total_bw = 0; -int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link) -{ - int dp_overhead = 0, link_mst_overhead = 0; + if (router_sets[i].is_valid == false) + break; - if (!link_dp_is_bw_alloc_available(link)) - return dp_overhead; + // Determine the total available bandwidth for the current router based on aggregated data + if ((router_sets[i].dpia_count == 1) || (router_sets[i].allocated_bw == 0)) + total_bw = router_sets[i].estimated_bw; + else + total_bw = router_sets[i].allocated_bw + router_sets[i].remaining_bw; - /* if its mst link, add MTPH overhead */ - if ((link->type == dc_connection_mst_branch) && - !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { - /* For 8b/10b encoding: MTP is 64 time slots long, slot 0 is used for MTPH - * MST overhead is 1/64 of link bandwidth (excluding any overhead) - */ - const struct dc_link_settings *link_cap = - dc_link_get_link_cap(link); - uint32_t link_bw_in_kbps = (uint32_t)link_cap->link_rate * - (uint32_t)link_cap->lane_count * - LINK_RATE_REF_FREQ_IN_KHZ * 8; - link_mst_overhead = (link_bw_in_kbps / 64) + ((link_bw_in_kbps % 64) ? 1 : 0); + if (router_sets[i].required_bw > total_bw) { + is_success = false; + break; + } } - /* add all the overheads */ - dp_overhead = link_mst_overhead; - - return dp_overhead; + return is_success; } + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h index 801965b5f9a4..41efcb3e44e2 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_dpia_bw.h @@ -28,10 +28,6 @@ #include "link.h" -/* Number of Host Routers per motherboard is 2 */ -#define MAX_HR_NUM 2 -/* Number of DPIA per host router is 2 */ -#define MAX_DPIA_NUM (MAX_HR_NUM * 2) /* * Host Router BW type @@ -42,6 +38,16 @@ enum bw_type { HOST_ROUTER_BW_INVALID, }; +struct usb4_router_validation_set { + bool is_valid; + uint8_t cm_id; + uint8_t dpia_count; + uint32_t required_bw; + uint32_t allocated_bw; + uint32_t estimated_bw; + uint32_t remaining_bw; +}; + /* * Enable USB4 DP BW allocation mode * @@ -74,25 +80,13 @@ void link_dp_dpia_allocate_usb4_bandwidth_for_stream(struct dc_link *link, int r void dpia_handle_usb4_bandwidth_allocation_for_link(struct dc_link *link, int peak_bw); /* - * Handle the validation of total BW here and confirm that the bw used by each - * DPIA doesn't exceed available BW for each host router (HR) - * - * @link[]: array of link pointer to all possible DPIA links - * @bw_needed[]: bw needed for each DPIA link based on timing - * @num_dpias: Number of DPIAs for the above 2 arrays. 
Should always be <= MAX_DPIA_NUM - * - * return: TRUE if bw used by DPIAs doesn't exceed available BW else return FALSE - */ -bool dpia_validate_usb4_bw(struct dc_link **link, int *bw_needed, const unsigned int num_dpias); - -/* * Obtain all the DP overheads in dp tunneling for the dpia link * * @link: pointer to the dc_link struct instance * * return: DP overheads in DP tunneling */ -int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link); +uint32_t link_dpia_get_dp_overhead(const struct dc_link *link); /* * Handle DP BW allocation status register * @@ -104,4 +98,15 @@ int link_dp_dpia_get_dp_overhead_in_dp_tunneling(struct dc_link *link); */ void link_dp_dpia_handle_bw_alloc_status(struct dc_link *link, uint8_t status); +/* + * Aggregates the DPIA bandwidth usage for the respective USB4 Router. + * + * @dpia_link_sets: pointer to the array of dc_validation_dpia_set entries + * @count: number of DPIA validation sets + * + * return: true if validation succeeded + */ +bool link_dpia_validate_dp_tunnel_bandwidth(const struct dc_validation_dpia_set *dpia_link_sets, uint8_t count); + #endif /* DC_INC_LINK_DP_DPIA_BW_H_ */ + diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index da74c2b5854f..98ec9b5a559c 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -161,6 +161,9 @@ bool edp_set_backlight_level_nits(struct dc_link *link, link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; + if (link->is_dds && !link->dpcd_caps.panel_luminance_control) + return true; + // use internal backlight control if dmub capabilities are not present if (link->backlight_control_type == BACKLIGHT_CONTROL_VESA_AUX && !link->dc->caps.dmub_caps.aux_backlight_support) { @@ -173,6 +176,15 @@ bool edp_set_backlight_level_nits(struct dc_link *link, target_luminance = (struct target_luminance_value *)&backlight_millinits; + //make sure we disable AMD ABC first. + core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_enable, sizeof(uint8_t)); + if (backlight_enable) { + backlight_enable = 0; + core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL, + &backlight_enable, 1); + } + core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &backlight_enable, sizeof(uint8_t)); @@ -193,10 +205,22 @@ bool edp_set_backlight_level_nits(struct dc_link *link, *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms; uint8_t backlight_control = isHDR ? 1 : 0; + uint8_t backlight_enable = 0; + // OLEDs have no PWM, they can only use AUX if (link->dpcd_sink_ext_caps.bits.oled == 1) backlight_control = 1; + //make sure we disable VESA ABC first.
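/*
 * [Editorial aside, not part of the patch] The backlight hunks in this file
 * share one DPCD read-modify-write shape: read the register, clear the
 * offending mode bits, and write back only when something was actually set
 * (the DDS hunk above clears bits 0:1; the lines that follow clear the VESA
 * luminance-control enable bit). A self-contained sketch of that shape; the
 * flat register array, the 0x721 address, and the sample values are
 * illustrative assumptions standing in for the sink's DPCD space.
 */
#include <assert.h>
#include <stdint.h>

static uint8_t dpcd[0x800];	/* hypothetical stand-in for the sink's DPCD */

static void dpcd_clear_bits(uint32_t address, uint8_t mask)
{
	uint8_t val = dpcd[address];		/* read */

	if (val & mask)				/* write back only when needed */
		dpcd[address] = val & ~mask;	/* modify + write */
}

int main(void)
{
	dpcd[0x721] = 0xA3;		/* pretend backlight mode bits 0:1 are set */
	dpcd_clear_bits(0x721, 0x3);	/* clear bits 0:1, as the DDS path does */
	assert(dpcd[0x721] == 0xA0);
	return 0;
}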
+ core_link_read_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &backlight_enable, sizeof(uint8_t)); + + if (backlight_enable & DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE) { + backlight_enable &= ~DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE; + core_link_write_dpcd(link, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, + &backlight_enable, sizeof(backlight_enable)); + } + if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL, (uint8_t *)(&dpcd_backlight_set), sizeof(dpcd_backlight_set)) != DC_OK) @@ -222,6 +246,8 @@ bool edp_get_backlight_level_nits(struct dc_link *link, link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; + if (link->is_dds) + return false; if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK, dpcd_backlight_get.raw, sizeof(union dpcd_source_backlight_get))) @@ -248,6 +274,8 @@ bool edp_backlight_enable_aux(struct dc_link *link, bool enable) link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)) return false; + if (link->is_dds) + return true; if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE, &backlight_enable, 1) != DC_OK) return false; @@ -1173,6 +1201,16 @@ int edp_get_target_backlight_pwm(const struct dc_link *link) return (int) abm->funcs->get_target_backlight(abm); } +bool is_smartmux_suported(struct dc_link *link) +{ + if (link->dc->caps.is_apu) + return false; + if (!link->dc->config.smart_mux_version) + return false; + + return true; +} + static void edp_set_assr_enable(const struct dc *pDC, struct dc_link *link, struct link_resource *link_res, bool enable) { diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index bcfa6ac5d4e7..4a475d5b9dde 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -30,6 +30,7 @@ enum dp_panel_mode dp_get_panel_mode(struct dc_link *link); void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); bool set_default_brightness_aux(struct dc_link *link); +bool is_smartmux_suported(struct dc_link *link); void edp_panel_backlight_power_on(struct dc_link *link, bool wait_for_hpd); int edp_get_backlight_level(const struct dc_link *link); bool edp_get_backlight_level_nits(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/mpc/Makefile b/drivers/gpu/drm/amd/display/dc/mpc/Makefile index 1e2e66508192..5402c3529f5e 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/mpc/Makefile @@ -68,5 +68,5 @@ MPC_DCN401 = dcn401_mpc.o AMD_DAL_MPC_DCN401 = $(addprefix $(AMDDALPATH)/dc/mpc/dcn401/,$(MPC_DCN401)) AMD_DISPLAY_FILES += $(AMD_DAL_MPC_DCN401) -endif +endif diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c index b4cea2b8cb2a..6f0e017a8ae2 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.c @@ -30,7 +30,6 @@ #include "basics/conversion.h" #include "dcn10/dcn10_cm_common.h" #include "dc.h" -#include "dcn401/dcn401_mpc.h" #define REG(reg)\ mpc30->mpc_regs->reg @@ -879,7 +878,7 @@ void mpc32_set3dlut_ram10( } -static void mpc32_set_3dlut_mode( +void mpc32_set_3dlut_mode( struct mpc *mpc, enum dc_lut_mode mode, bool is_color_channel_12bits, @@ -1022,8 +1021,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = { .power_on_mpc_mem_pwr = mpc3_power_on_ogam_lut, .get_mpc_out_mux = mpc1_get_mpc_out_mux, .set_bg_color = 
mpc1_set_bg_color, - .set_movable_cm_location = mpc401_set_movable_cm_location, - .populate_lut = mpc401_populate_lut, }; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h index 9622518826c9..8c9b20bcca85 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn32/dcn32_mpc.h @@ -391,4 +391,12 @@ void mpc32_select_3dlut_ram( enum dc_lut_mode mode, bool is_color_channel_12bits, uint32_t mpcc_id); + +void mpc32_set_3dlut_mode( + struct mpc *mpc, + enum dc_lut_mode mode, + bool is_color_channel_12bits, + bool is_lut_size17x17x17, + uint32_t mpcc_id); + #endif //__DC_MPCC_DCN32_H__ diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c index 98cf0cbd59ba..f3fb3fe13757 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c @@ -294,7 +294,7 @@ void mpc401_program_3dlut_size(struct mpc *mpc, bool is_17x17x17, int mpcc_id) REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, is_17x17x17 ? 0 : 1); } -static void program_gamut_remap( +void mpc_program_gamut_remap( struct mpc *mpc, unsigned int mpcc_id, const uint16_t *regval, @@ -426,7 +426,7 @@ void mpc401_set_gamut_remap( if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) { /* Bypass / Disable if type is bypass or hw */ - program_gamut_remap(mpc, mpcc_id, NULL, + mpc_program_gamut_remap(mpc, mpcc_id, NULL, adjust->mpcc_gamut_remap_block_id, MPCC_GAMUT_REMAP_MODE_SELECT_0); } else { struct fixed31_32 arr_matrix[12]; @@ -460,12 +460,12 @@ void mpc401_set_gamut_remap( else mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_2; - program_gamut_remap(mpc, mpcc_id, arr_reg_val, + mpc_program_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, mode_select); } } -static void read_gamut_remap(struct mpc *mpc, +void mpc_read_gamut_remap(struct mpc *mpc, int mpcc_id, uint16_t *regval, enum mpcc_gamut_remap_id gamut_remap_block_id, @@ -561,9 +561,9 @@ void mpc401_get_gamut_remap(struct mpc *mpc, struct mpc_grph_gamut_adjustment *adjust) { uint16_t arr_reg_val[12] = {0}; - uint32_t mode_select; + uint32_t mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_0; - read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select); + mpc_read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select); if (mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) { adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h index 8e35ebc603a9..eb0c68d0b0c7 100644 --- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h +++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h @@ -241,6 +241,19 @@ void mpc401_update_3dlut_fast_load_select( int mpcc_id, int hubp_idx); +void mpc_program_gamut_remap( + struct mpc *mpc, + unsigned int mpcc_id, + const uint16_t *regval, + enum mpcc_gamut_remap_id gamut_remap_block_id, + enum mpcc_gamut_remap_mode_select mode_select); + +void mpc_read_gamut_remap(struct mpc *mpc, + int mpcc_id, + uint16_t *regval, + enum mpcc_gamut_remap_id gamut_remap_block_id, + uint32_t *mode_select); + void mpc401_update_3dlut_fast_load_select( struct mpc *mpc, int mpcc_id, diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h index 
f2ba76c1e0c0..782316348941 100644 --- a/drivers/gpu/drm/amd/display/dc/os_types.h +++ b/drivers/gpu/drm/amd/display/dc/os_types.h @@ -31,6 +31,7 @@ #include <linux/kgdb.h> #include <linux/delay.h> #include <linux/mm.h> +#include <linux/vmalloc.h> #include <asm/byteorder.h> diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c index 84f73fdb0f95..3a51be63f020 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce100/dce100_resource.c @@ -839,7 +839,7 @@ static enum dc_status build_mapped_resource( static enum dc_status dce100_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i; bool at_least_one_pipe = false; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c index f3d5baac11bf..cccde5a6f3cd 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce110/dce110_resource.c @@ -963,7 +963,7 @@ static enum dc_status build_mapped_resource( static enum dc_status dce110_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool result = false; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c index 4225cae68c10..164ba796f64c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.c @@ -886,7 +886,7 @@ static enum dc_status build_mapped_resource( enum dc_status dce112_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool result = false; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h index 6221d749246d..3efc4c55d2d2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dce112/dce112_resource.h @@ -45,7 +45,7 @@ enum dc_status dce112_validate_with_context( enum dc_status dce112_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); enum dc_status dce112_add_stream_to_ctx( struct dc *dc, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c index d9ffdded5ce1..53b60044653f 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c @@ -373,7 +373,7 @@ static const struct resource_caps res_cap = { .num_timing_generator = 6, .num_audio = 6, .num_stream_encoder = 6, - .num_pll = 2, + .num_pll = 3, .num_ddc = 6, }; @@ -389,7 +389,7 @@ static const struct resource_caps res_cap_64 = { .num_timing_generator = 2, .num_audio = 2, .num_stream_encoder = 2, - .num_pll = 2, + .num_pll = 3, .num_ddc = 2, }; @@ -866,7 +866,7 @@ static void dce60_resource_destruct(struct dce110_resource_pool *pool) static enum dc_status dce60_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i; bool at_least_one_pipe = false; @@ 
-973,21 +973,24 @@ static bool dce60_construct( if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. */ pool->base.clock_sources[0] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], false); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } else { pool->base.dp_clock_source = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); - pool->base.clk_src_count = 1; + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[1] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { @@ -1365,21 +1368,24 @@ static bool dce64_construct( if (bp->fw_info_valid && bp->fw_info.external_clock_source_frequency_for_dp != 0) { pool->base.dp_clock_source = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_EXTERNAL, NULL, true); + /* DCE 6.0 and 6.4: PLL0 can only be used with DP. Don't initialize it here. 
*/ pool->base.clock_sources[0] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], false); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); pool->base.clock_sources[1] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); pool->base.clk_src_count = 2; } else { pool->base.dp_clock_source = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[0], true); + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL0, &clk_src_regs[0], true); pool->base.clock_sources[0] = - dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[1], false); - pool->base.clk_src_count = 1; + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL1, &clk_src_regs[1], false); + pool->base.clock_sources[1] = + dce60_clock_source_create(ctx, bp, CLOCK_SOURCE_ID_PLL2, &clk_src_regs[2], false); + pool->base.clk_src_count = 2; } if (pool->base.dp_clock_source == NULL) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c index bd5811f97531..3e8b0ac11d90 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dce80/dce80_resource.c @@ -872,7 +872,7 @@ static void dce80_resource_destruct(struct dce110_resource_pool *pool) static enum dc_status dce80_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i; bool at_least_one_pipe = false; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c index be4ade0853e9..652c05c35494 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn10/dcn10_resource.c @@ -1129,12 +1129,12 @@ static void dcn10_destroy_resource_pool(struct resource_pool **pool) static enum dc_status dcn10_validate_bandwidth( struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool voltage_supported; DC_FP_START(); - voltage_supported = dcn_validate_bandwidth(dc, context, fast_validate); + voltage_supported = dcn_validate_bandwidth(dc, context, validate_mode); DC_FP_END(); return voltage_supported ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c index 3405be07f5e3..f9cbdad3ef37 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c @@ -2007,7 +2007,7 @@ bool dcn20_fast_validate_bw( int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; int split[MAX_PIPES] = { 0 }; @@ -2021,7 +2021,7 @@ bool dcn20_fast_validate_bw( dcn20_merge_pipes_for_validate(dc, context); DC_FP_START(); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); DC_FP_END(); *pipe_cnt_out = pipe_cnt; @@ -2125,7 +2125,7 @@ validate_out: } enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool voltage_supported; display_e2e_pipe_params_st *pipes; @@ -2135,7 +2135,7 @@ enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, return DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); - voltage_supported = dcn20_validate_bandwidth_fp(dc, context, fast_validate, pipes); + voltage_supported = dcn20_validate_bandwidth_fp(dc, context, validate_mode, pipes); DC_FP_END(); kfree(pipes); @@ -2736,6 +2736,8 @@ static bool dcn20_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 2; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h index c0e062c7407d..e997d35a8b86 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.h @@ -119,7 +119,7 @@ void dcn20_set_mcif_arb_params( struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt); -enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +enum dc_status dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode); void dcn20_merge_pipes_for_validate( struct dc *dc, struct dc_state *context); @@ -158,7 +158,7 @@ bool dcn20_fast_validate_bw( int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate); + enum dc_validate_mode validate_mode); enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state *context, struct dc_stream_state *stream); enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream); diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c index 43fa2cb117f3..e4a1338d21e0 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn201/dcn201_resource.c @@ -1285,6 +1285,8 @@ static bool dcn201_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 2; + dc->cap_funcs = cap_funcs; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c 
b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c index 9ab01b65b177..918742a42ded 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.c @@ -769,7 +769,7 @@ bool dcn21_fast_validate_bw(struct dc *dc, int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; int split[MAX_PIPES] = { 0 }; @@ -783,7 +783,7 @@ bool dcn21_fast_validate_bw(struct dc *dc, dcn20_merge_pipes_for_validate(dc, context); DC_FP_START(); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); DC_FP_END(); *pipe_cnt_out = pipe_cnt; @@ -924,7 +924,7 @@ validate_out: * dcn20_validate_bandwidth in dcn20_resource.c. */ static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool voltage_supported; display_e2e_pipe_params_st *pipes; @@ -934,7 +934,7 @@ static enum dc_status dcn21_validate_bandwidth(struct dc *dc, struct dc_state *c return DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); - voltage_supported = dcn21_validate_bandwidth_fp(dc, context, fast_validate, pipes); + voltage_supported = dcn21_validate_bandwidth_fp(dc, context, validate_mode, pipes); DC_FP_END(); kfree(pipes); @@ -1684,6 +1684,8 @@ static bool dcn21_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 2; + dc->cap_funcs = cap_funcs; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h index f7ecc002c2f7..a017fd9854d1 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn21/dcn21_resource.h @@ -51,6 +51,6 @@ bool dcn21_fast_validate_bw( int *pipe_cnt_out, int *pipe_split_from, int *vlevel_out, - bool fast_validate); + enum dc_validate_mode validate_mode); #endif /* _DCN21_RESOURCE_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c index f631ae34e320..895349d9ca07 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c @@ -1319,13 +1319,13 @@ static struct clock_source *dcn30_clock_source_create( int dcn30_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; DC_FP_START(); - dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1627,7 +1627,7 @@ noinline bool dcn30_internal_validate_bw( display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate, + enum dc_validate_mode validate_mode, bool allow_self_refresh_only) { bool out = false; @@ -1646,7 +1646,7 @@ noinline bool dcn30_internal_validate_bw( context->bw_ctx.dml.vba.VoltageLevel = 0; context->bw_ctx.dml.vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; 
dc->res_pool->funcs->update_soc_for_wm_a(dc, context); - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); if (!pipe_cnt) { out = true; @@ -1655,7 +1655,7 @@ noinline bool dcn30_internal_validate_bw( dml_log_pipe_params(&context->bw_ctx.dml, pipes, pipe_cnt); - if (!fast_validate || !allow_self_refresh_only) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING || !allow_self_refresh_only) { /* * DML favors voltage over p-state, but we're more interested in * supporting p-state over voltage. We can't support p-state in @@ -1669,7 +1669,7 @@ noinline bool dcn30_internal_validate_bw( vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, merge); } if (allow_self_refresh_only && - (fast_validate || vlevel == context->bw_ctx.dml.soc.num_states || + (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING || vlevel == context->bw_ctx.dml.soc.num_states || vba->DRAMClockChangeSupport[vlevel][vba->maxMpcComb] == dm_dram_clock_change_unsupported)) { /* * If mode is unsupported or there's still no p-state support @@ -1678,7 +1678,7 @@ noinline bool dcn30_internal_validate_bw( * We don't actually support prefetch mode 2, so require that we * at least support prefetch mode 1. */ - context->bw_ctx.dml.validate_max_state = fast_validate; + context->bw_ctx.dml.validate_max_state = (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING); context->bw_ctx.dml.soc.allow_dram_self_refresh_or_dram_clock_change_in_vblank = dm_allow_self_refresh; @@ -1865,7 +1865,7 @@ noinline bool dcn30_internal_validate_bw( } if (repopulate_pipes) - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, validate_mode); context->bw_ctx.dml.vba.VoltageLevel = vlevel; *vlevel_out = vlevel; *pipe_cnt_out = pipe_cnt; @@ -2037,7 +2037,7 @@ void dcn30_calculate_wm_and_dlg( enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; @@ -2055,7 +2055,7 @@ enum dc_status dcn30_validate_bandwidth(struct dc *dc, goto validate_fail; DC_FP_START(); - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true); DC_FP_END(); if (pipe_cnt == 0) @@ -2066,7 +2066,7 @@ enum dc_status dcn30_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -2586,6 +2586,8 @@ static bool dcn30_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h index 689d9bdace81..2c967fe55712 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.h @@ -57,14 +57,14 @@ unsigned int dcn30_calc_max_scaled_time( unsigned int urgent_watermark); enum dc_status dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); 
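/*
 * [Editorial aside, not part of the patch] The recurring mechanical change in
 * the hunks above and below is the migration from the boolean fast_validate
 * parameter to a three-state enum dc_validate_mode. The enumerator names are
 * the ones used throughout the patch; their declaration order and the
 * comments here are illustrative assumptions, not the dc.h definition.
 */
#include <stdbool.h>

enum dc_validate_mode {
	DC_VALIDATE_MODE_AND_PROGRAMMING,	/* full validation, results drive HW programming */
	DC_VALIDATE_MODE_AND_STATE_INDEX,	/* validate and report the supported state index */
	DC_VALIDATE_MODE_ONLY,			/* mode-support check only */
};

/* Old boolean call sites translate mechanically, matching the substitutions
 * visible in the hunks (fast_validate == true maps to a non-programming mode):
 */
static inline enum dc_validate_mode from_fast_validate(bool fast_validate)
{
	return fast_validate ? DC_VALIDATE_MODE_ONLY
			     : DC_VALIDATE_MODE_AND_PROGRAMMING;
}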
bool dcn30_internal_validate_bw( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *pipe_cnt_out, int *vlevel_out, - bool fast_validate, + enum dc_validate_mode validate_mode, bool allow_self_refresh_only); void dcn30_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, @@ -78,7 +78,7 @@ void dcn30_populate_dml_writeback_from_context( int dcn30_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); bool dcn30_acquire_post_bldn_3dlut( struct resource_context *res_ctx, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c index 121a86a59833..82a205a7c25c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn301/dcn301_resource.c @@ -1706,6 +1706,8 @@ static bool dcn301_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; return true; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c index 012c5fd52cb1..3345068a878c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c @@ -1481,6 +1481,8 @@ static bool dcn302_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c index a8d0b4686f9a..3479e1eab4cd 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c @@ -1414,6 +1414,8 @@ static bool dcn303_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c index 7e0af5297dc4..3ed7f50554e2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c @@ -1616,14 +1616,14 @@ static bool is_dual_plane(enum surface_pixel_format format) int dcn31x_populate_dml_pipes_from_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { uint32_t pipe_cnt; int i; dc_assert_fp_enabled(); - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); for (i = 0; i < pipe_cnt; i++) { pipes[i].pipe.src.gpuvm = 1; @@ -1641,7 +1641,7 @@ int dcn31x_populate_dml_pipes_from_context(struct dc *dc, int dcn31_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -1649,7 +1649,7 
@@ int dcn31_populate_dml_pipes_from_context( bool upscaled = false; DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1760,7 +1760,7 @@ dcn31_set_mcif_arb_params(struct dc *dc, enum dc_status dcn31_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; @@ -1778,19 +1778,19 @@ enum dc_status dcn31_validate_bandwidth(struct dc *dc, goto validate_fail; DC_FP_START(); - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, true); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, true); DC_FP_END(); - // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + // Disable DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX to set min dcfclk in calculate_wm_and_dlg if (pipe_cnt == 0) - fast_validate = false; + validate_mode = DC_VALIDATE_MODE_AND_PROGRAMMING; if (!out) goto validate_fail; BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -1850,7 +1850,9 @@ static struct resource_funcs dcn31_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn31_get_panel_config_defaults, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static struct clock_source *dcn30_clock_source_create( @@ -1954,6 +1956,9 @@ static bool dcn31_resource_construct( dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; + /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; dc->config.disable_hbr_audio_dp2 = true; @@ -2199,6 +2204,8 @@ static bool dcn31_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp; @@ -2228,3 +2235,35 @@ struct resource_pool *dcn31_create_resource_pool( kfree(pool); return NULL; } + +enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, + struct dc_link_settings *link_setting, + uint8_t pipe_count, + struct pipe_ctx *pipes, + struct audio_output *audio_output) +{ + struct dc_state *state = link->dc->current_state; + int i; + +#if defined(CONFIG_DRM_AMD_DC_FP) + for (i = 0; i < state->stream_count; i++) + if (state->streams[i] && state->streams[i]->link && state->streams[i]->link == link) + link->dc->hwss.calculate_pix_rate_divider((struct dc *)link->dc, state, state->streams[i]); + + for (i = 0; i < pipe_count; i++) { + link->dc->res_pool->funcs->build_pipe_pix_clk_params(&pipes[i]); + + // Setup audio + if (pipes[i].stream_res.audio != NULL) + build_audio_output(state, &pipes[i], &audio_output[i]); + } +#else + /* This DCN requires rate divider updates and audio reprogramming to allow DP1<-->DP2 link rate switching, + * but the above will not compile on architectures without an FPU. 
+ */ + DC_LOG_WARNING("%s: DP1<-->DP2 link retraining will not work on this DCN on non-FPU platforms", __func__); + ASSERT(0); +#endif + + return DC_OK; +} diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h index dd82815d7efe..c32c85ef0ba4 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h @@ -39,7 +39,7 @@ struct dcn31_resource_pool { enum dc_status dcn31_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn31_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -48,7 +48,7 @@ void dcn31_calculate_wm_and_dlg( int dcn31_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn31_populate_dml_writeback_from_context(struct dc *dc, struct resource_context *res_ctx, @@ -66,6 +66,12 @@ struct resource_pool *dcn31_create_resource_pool( unsigned int dcn31_get_det_buffer_size( const struct dc_state *context); +enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, + struct dc_link_settings *link_setting, + uint8_t pipe_count, + struct pipe_ctx *pipes, + struct audio_output *audio_output); + /*temp: B0 specific before switch to dcn313 headers*/ #ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL #define regPHYPLLF_PIXCLK_RESYNC_CNTL 0x007e diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c index d96bc6cb73ad..663c49cce4aa 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.c @@ -926,6 +926,7 @@ static const struct dc_debug_options debug_defaults_drv = { .seamless_boot_odm_combine = true, .enable_legacy_fast_update = true, .using_dml2 = false, + .disable_dsc_power_gate = true, }; static const struct dc_panel_config panel_config_defaults = { @@ -1667,12 +1668,12 @@ static struct clock_source *dcn31_clock_source_create( static int dcn314_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int pipe_cnt; DC_FP_START(); - pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, fast_validate); + pipe_cnt = dcn314_populate_dml_pipes_from_context_fpu(dc, context, pipes, validate_mode); DC_FP_END(); return pipe_cnt; @@ -1696,7 +1697,7 @@ static void dcn314_get_panel_config_defaults(struct dc_panel_config *panel_confi enum dc_status dcn314_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; @@ -1715,19 +1716,19 @@ enum dc_status dcn314_validate_bandwidth(struct dc *dc, DC_FP_START(); // do not support self refresh only - out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate, false); + out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode, false); DC_FP_END(); - // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg + // Disable DC_VALIDATE_MODE_ONLY and DC_VALIDATE_MODE_AND_STATE_INDEX to set min dcfclk in calculate_wm_and_dlg if (pipe_cnt == 0) - fast_validate = false; + validate_mode = 
DC_VALIDATE_MODE_AND_PROGRAMMING; if (!out) goto validate_fail; BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -1779,7 +1780,9 @@ static struct resource_funcs dcn314_res_pool_funcs = { .get_panel_config_defaults = dcn314_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn314_get_preferred_eng_id_dpia, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static struct clock_source *dcn30_clock_source_create( @@ -1885,6 +1888,9 @@ static bool dcn314_resource_construct( dc->caps.max_disp_clock_khz_at_vmin = 650000; + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; + /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; @@ -2114,6 +2120,8 @@ static bool dcn314_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_14_ip.max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h index f8ba531d6342..ac9bb7f097d5 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn314/dcn314_resource.h @@ -41,7 +41,7 @@ struct dcn314_resource_pool { enum dc_status dcn314_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); struct resource_pool *dcn314_create_resource_pool( const struct dc_init_data *init_data, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c index 6c2bb3f63be1..82cc78c291d8 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn315/dcn315_resource.c @@ -1664,7 +1664,7 @@ static bool allow_pixel_rate_crb(struct dc *dc, struct dc_state *context) static int dcn315_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt, crb_idx, crb_pipes; struct resource_context *res_ctx = &context->res_ctx; @@ -1674,7 +1674,7 @@ static int dcn315_populate_dml_pipes_from_context( bool pixel_rate_crb = allow_pixel_rate_crb(dc, context); DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0, crb_pipes = 0; i < dc->res_pool->pipe_count; i++) { @@ -1844,7 +1844,9 @@ static struct resource_funcs dcn315_res_pool_funcs = { .get_panel_config_defaults = dcn315_get_panel_config_defaults, .get_power_profile = dcn315_get_power_profile, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool 
dcn315_resource_construct( @@ -2140,6 +2142,8 @@ static bool dcn315_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_15_ip.max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c index 568094827212..636110e48d01 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn316/dcn316_resource.c @@ -1610,7 +1610,7 @@ static bool is_dual_plane(enum surface_pixel_format format) static int dcn316_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -1618,7 +1618,7 @@ static int dcn316_populate_dml_pipes_from_context( const int max_usable_det = context->bw_ctx.dml.ip.config_return_buffer_size_in_kbytes - DCN3_16_MIN_COMPBUF_SIZE_KB; DC_FP_START(); - dcn31x_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn31x_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); DC_FP_END(); for (i = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { @@ -1720,7 +1720,9 @@ static struct resource_funcs dcn316_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn316_get_panel_config_defaults, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn316_resource_construct( @@ -2008,6 +2010,8 @@ static bool dcn316_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = dcn3_16_ip.max_num_dpp; diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index bb0dae0be5b8..9917b366f00c 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1742,7 +1742,7 @@ void dcn32_add_phantom_pipes(struct dc *dc, struct dc_state *context, } } -static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_validate) +static bool dml1_validate(struct dc *dc, struct dc_state *context, enum dc_validate_mode validate_mode) { bool out = false; @@ -1767,7 +1767,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val goto validate_fail; DC_FP_START(); - out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + out = dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, validate_mode); DC_FP_END(); if (pipe_cnt == 0) @@ -1778,7 +1778,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - if (fast_validate) { + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) { BW_VAL_TRACE_SKIP(fast); goto validate_out; } @@ -1809,7 +1809,7 @@ validate_out: enum dc_status dcn32_validate_bandwidth(struct dc *dc, struct dc_state *context, - 
bool fast_validate) + enum dc_validate_mode validate_mode) { unsigned int i; enum dc_status status; @@ -1827,11 +1827,11 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc, if (dc->debug.using_dml2) status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; else - status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; - if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) { /* check new stream configuration still supports cursor if subvp used */ for (i = 0; i < context->stream_count; i++) { stream = context->streams[i]; @@ -1846,14 +1846,14 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc, }; } - if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { /* attempt to validate again with subvp disabled due to cursor */ if (dc->debug.using_dml2) status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; else - status = dml1_validate(dc, context, fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + status = dml1_validate(dc, context, validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } return status; @@ -1862,7 +1862,7 @@ enum dc_status dcn32_validate_bandwidth(struct dc *dc, int dcn32_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate) + enum dc_validate_mode validate_mode) { int i, pipe_cnt; struct resource_context *res_ctx = &context->res_ctx; @@ -1878,7 +1878,7 @@ int dcn32_populate_dml_pipes_from_context( int num_subvp_none = 0; int odm_slice_count; - dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); + dcn20_populate_dml_pipes_from_context(dc, context, pipes, validate_mode); /* For single display subvp, look for subvp main so if we have phantom * pipe, we can set odm policy to match main pipe @@ -1960,7 +1960,7 @@ int dcn32_populate_dml_pipes_from_context( /* Only populate DML input with subvp info for full updates. * This is just a workaround -- needs a proper fix. 
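dcn32_validate_bandwidth() retries once with SubVP disabled when the first full-programming pass fails only on HW cursor support. A compilable model of that two-pass flow with stand-in types; validate_once() is a stub for dml2_validate()/dml1_validate(), not a real DC call:

#include <stdbool.h>
#include <stdio.h>

enum dc_status { DC_OK, DC_FAIL_BANDWIDTH_VALIDATE, DC_FAIL_HW_CURSOR_SUPPORT };
enum dc_validate_mode { DC_VALIDATE_MODE_ONLY, DC_VALIDATE_MODE_AND_PROGRAMMING = 2 };

/* Stub validator: pretends cursor support fails while SubVP is active. */
static enum dc_status validate_once(bool subvp_enabled)
{
	return subvp_enabled ? DC_FAIL_HW_CURSOR_SUPPORT : DC_OK;
}

static enum dc_status validate_bandwidth(enum dc_validate_mode mode)
{
	bool subvp = true;
	enum dc_status status = validate_once(subvp);

	/* Only the full-programming pass retries with SubVP disabled. */
	if (mode == DC_VALIDATE_MODE_AND_PROGRAMMING &&
	    status == DC_FAIL_HW_CURSOR_SUPPORT)
		status = validate_once(false);
	return status;
}

int main(void)
{
	printf("fast: %d, full: %d\n",
	       validate_bandwidth(DC_VALIDATE_MODE_ONLY),
	       validate_bandwidth(DC_VALIDATE_MODE_AND_PROGRAMMING));
	return 0;
}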
*/ - if (!fast_validate) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING) { switch (dc_state_get_pipe_subvp_type(context, pipe)) { case SUBVP_MAIN: pipes[pipe_cnt].pipe.src.use_mall_for_pstate_change = dm_use_mall_pstate_change_sub_viewport; @@ -2061,21 +2061,15 @@ void dcn32_calculate_wm_and_dlg(struct dc *dc, struct dc_state *context, static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); - DC_FP_START(); dcn32_update_bw_bounding_box_fpu(dc, bw_params); - dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } @@ -2257,7 +2251,7 @@ static bool dcn32_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN2 and later ASICs dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -2276,6 +2270,7 @@ static bool dcn32_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; @@ -2505,6 +2500,8 @@ static bool dcn32_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { @@ -2519,7 +2516,6 @@ static bool dcn32_resource_construct( } dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; @@ -2551,6 +2547,10 @@ static bool dcn32_resource_construct( if (ASICREV_IS_GC_11_0_3(dc->ctx->asic_id.hw_internal_rev) && (dc->config.sdpif_request_limit_words_per_umc == 0)) dc->config.sdpif_request_limit_words_per_umc = 16; + /* init DC limited DML2 options */ + memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options)); + dc->dml2_dc_power_options.use_clock_dc_limits = true; + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index d60ed77eda80..82f966cf4ed2 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -100,12 +100,12 @@ void dcn32_add_phantom_pipes(struct dc *dc, enum dc_status dcn32_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); int dcn32_populate_dml_pipes_from_context( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, - bool fast_validate); + enum dc_validate_mode 
validate_mode); void dcn32_calculate_wm_and_dlg( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index 7db1f7a5613f..061c0907d802 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1580,21 +1580,15 @@ static struct dc_cap_funcs cap_funcs = { static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); - DC_FP_START(); dcn321_update_bw_bounding_box_fpu(dc, bw_params); - dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } @@ -1761,8 +1755,8 @@ static bool dcn321_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; - dc->caps.color.dpp.ogam_ram = 1; + dc->caps.color.dpp.hw_3d_lut = 0; + dc->caps.color.dpp.ogam_ram = 0; // no OGAM ROM on DCN2 and later ASICs dc->caps.color.dpp.ogam_rom_caps.srgb = 0; dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0; @@ -1780,6 +1774,7 @@ static bool dcn321_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; /* Use pipe context based otg sync logic */ dc->config.use_pipe_ctx_sync_logic = true; @@ -2004,6 +1999,8 @@ static bool dcn321_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { @@ -2018,7 +2015,6 @@ static bool dcn321_resource_construct( } dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; @@ -2046,6 +2042,10 @@ static bool dcn321_resource_construct( dc->dml2_options.max_segments_per_hubp = 18; dc->dml2_options.det_segment_size = DCN3_2_DET_SEG_SIZE; + /* init DC limited DML2 options */ + memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options)); + dc->dml2_dc_power_options.use_clock_dc_limits = true; + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c index 72c6cf047db0..8475c6eec547 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c @@ -1734,15 +1734,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config static enum dc_status dcn35_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { 
bool out = false; out = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode); - if (fast_validate) + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) return out ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); @@ -1786,7 +1786,9 @@ static struct resource_funcs dcn35_res_pool_funcs = { .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn35_get_preferred_eng_id_dpia, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn35_resource_construct( @@ -1874,7 +1876,7 @@ static bool dcn35_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -1893,6 +1895,10 @@ static bool dcn35_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; + + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order * to provide some margin. @@ -2148,12 +2154,13 @@ static bool dcn35_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 989a270f7dea..0971c0f74186 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -1714,15 +1714,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config static enum dc_status dcn351_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; out = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode); - if (fast_validate) + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) return out ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); @@ -1758,7 +1758,9 @@ static struct resource_funcs dcn351_res_pool_funcs = { .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn351_get_preferred_eng_id_dpia, .get_det_buffer_size = dcn31_get_det_buffer_size, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn351_resource_construct( @@ -1846,7 +1848,7 @@ static bool dcn351_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -1865,6 +1867,10 @@ static bool dcn351_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; + + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order * to provide some margin. @@ -2119,13 +2125,14 @@ static bool dcn351_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c index 48e1f234185f..8bae7fcedc22 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c @@ -1715,15 +1715,15 @@ static void dcn35_get_panel_config_defaults(struct dc_panel_config *panel_config static enum dc_status dcn35_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { bool out = false; out = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate); + validate_mode); - if (fast_validate) + if (validate_mode != DC_VALIDATE_MODE_AND_PROGRAMMING) return out ? 
DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; DC_FP_START(); @@ -1759,7 +1759,9 @@ static struct resource_funcs dcn36_res_pool_funcs = { .patch_unknown_plane_state = dcn20_patch_unknown_plane_state, .get_panel_config_defaults = dcn35_get_panel_config_defaults, .get_preferred_eng_id_dpia = dcn36_get_preferred_eng_id_dpia, - .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe + .get_vstartup_for_pipe = dcn10_get_vstartup_for_pipe, + .update_dc_state_for_encoder_switch = dcn31_update_dc_state_for_encoder_switch, + .build_pipe_pix_clk_params = dcn20_build_pipe_pix_clk_params }; static bool dcn36_resource_construct( @@ -1847,7 +1849,7 @@ static bool dcn36_resource_construct( dc->caps.color.dpp.gamma_corr = 1; dc->caps.color.dpp.dgam_rom_for_yuv = 0; - dc->caps.color.dpp.hw_3d_lut = 1; + dc->caps.color.dpp.hw_3d_lut = 0; dc->caps.color.dpp.ogam_ram = 0; // no OGAM in DPP since DCN1 // no OGAM ROM on DCN301 dc->caps.color.dpp.ogam_rom_caps.srgb = 0; @@ -1866,6 +1868,10 @@ static bool dcn36_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; + + dc->caps.num_of_host_routers = 2; + dc->caps.num_of_dpias_per_host_router = 2; /* max_disp_clock_khz_at_vmin is slightly lower than the STA value in order * to provide some margin. @@ -2121,12 +2127,13 @@ static bool dcn36_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; dc->dcn_ip->max_num_dpp = pool->base.pipe_count; dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = true; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = false; if (dc->config.EnableMinDispClkODM) diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c index f420c4dafa03..b3988e38d0a6 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c @@ -70,7 +70,6 @@ #include "dml/dcn30/display_mode_vba_30.h" #include "vm_helper.h" #include "dcn20/dcn20_vmid.h" -#include "dml/dcn401/dcn401_fpu.h" #include "dc_state_priv.h" @@ -1608,10 +1607,6 @@ static struct dc_cap_funcs cap_funcs = { static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - struct dml2_configuration_options *dml2_opt = &dc->dml2_tmp; - - memcpy(dml2_opt, &dc->dml2_options, sizeof(dc->dml2_options)); - /* re-calculate the available MALL size if required */ if (bw_params->num_channels > 0) { dc->caps.max_cab_allocation_bytes = dcn401_calc_num_avail_chans_for_mall( @@ -1622,15 +1617,11 @@ static void dcn401_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b DC_FP_START(); - dcn401_update_bw_bounding_box_fpu(dc, bw_params); - - dml2_opt->use_clock_dc_limits = false; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2); + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); - dml2_opt->use_clock_dc_limits = true; if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2_dc_power_source) - dml2_reinit(dc, dml2_opt, &dc->current_state->bw_ctx.dml2_dc_power_source); + 
dml2_reinit(dc, &dc->dml2_dc_power_options, &dc->current_state->bw_ctx.dml2_dc_power_source); DC_FP_END(); } @@ -1644,7 +1635,7 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta enum dc_status dcn401_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate) + enum dc_validate_mode validate_mode) { unsigned int i; enum dc_status status = DC_OK; @@ -1662,9 +1653,9 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc, if (dc->debug.using_dml2) status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; - if (!fast_validate && status == DC_OK && dc_state_is_subvp_in_use(context)) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_OK && dc_state_is_subvp_in_use(context)) { /* check new stream configuration still supports cursor if subvp used */ for (i = 0; i < context->stream_count; i++) { stream = context->streams[i]; @@ -1679,12 +1670,12 @@ enum dc_status dcn401_validate_bandwidth(struct dc *dc, }; } - if (!fast_validate && status == DC_FAIL_HW_CURSOR_SUPPORT) { + if (validate_mode == DC_VALIDATE_MODE_AND_PROGRAMMING && status == DC_FAIL_HW_CURSOR_SUPPORT) { /* attempt to validate again with subvp disabled due to cursor */ if (dc->debug.using_dml2) status = dml2_validate(dc, context, context->power_source == DC_POWER_SOURCE_DC ? context->bw_ctx.dml2_dc_power_source : context->bw_ctx.dml2, - fast_validate) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; + validate_mode) ? DC_OK : DC_FAIL_BANDWIDTH_VALIDATE; } return status; @@ -1957,8 +1948,30 @@ static bool dcn401_resource_construct( dc->caps.color.mpc.ogam_rom_caps.pq = 0; dc->caps.color.mpc.ogam_rom_caps.hlg = 0; dc->caps.color.mpc.ocsc = 1; + dc->caps.color.mpc.preblend = true; dc->config.use_spl = true; dc->config.prefer_easf = true; + + dc->config.dcn_sharpness_range.sdr_rgb_min = 0; + dc->config.dcn_sharpness_range.sdr_rgb_max = 1750; + dc->config.dcn_sharpness_range.sdr_rgb_mid = 750; + dc->config.dcn_sharpness_range.sdr_yuv_min = 0; + dc->config.dcn_sharpness_range.sdr_yuv_max = 3500; + dc->config.dcn_sharpness_range.sdr_yuv_mid = 1500; + dc->config.dcn_sharpness_range.hdr_rgb_min = 0; + dc->config.dcn_sharpness_range.hdr_rgb_max = 2750; + dc->config.dcn_sharpness_range.hdr_rgb_mid = 1500; + + dc->config.dcn_override_sharpness_range.sdr_rgb_min = 0; + dc->config.dcn_override_sharpness_range.sdr_rgb_max = 3250; + dc->config.dcn_override_sharpness_range.sdr_rgb_mid = 1250; + dc->config.dcn_override_sharpness_range.sdr_yuv_min = 0; + dc->config.dcn_override_sharpness_range.sdr_yuv_max = 3500; + dc->config.dcn_override_sharpness_range.sdr_yuv_mid = 1500; + dc->config.dcn_override_sharpness_range.hdr_rgb_min = 0; + dc->config.dcn_override_sharpness_range.hdr_rgb_max = 2750; + dc->config.dcn_override_sharpness_range.hdr_rgb_mid = 1500; + dc->config.dc_mode_clk_limit_support = true; dc->config.enable_windowed_mpo_odm = true; dc->config.set_pipe_unlock_order = true; /* Need to ensure DET gets freed before allocating */ @@ -2177,6 +2190,8 @@ static bool dcn401_resource_construct( for (i = 0; i < dc->caps.max_planes; ++i) dc->caps.planes[i] = plane_cap; + dc->caps.max_odm_combine_factor = 4; + dc->cap_funcs = cap_funcs; if (dc->ctx->dc_bios->fw_info.oem_i2c_present) { @@ -2195,7 +2210,6 @@ static bool dcn401_resource_construct( dc->config.sdpif_request_limit_words_per_umc = 16; 
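Rather than memcpy-ing dc->dml2_options into the dc->dml2_tmp scratch copy and flipping use_clock_dc_limits on every bounding-box update, each constructor now bakes a DC-limited preset once and dml2_reinit() is handed whichever preset matches the power source. A sketch of the pattern, with a stand-in struct that keeps only the one field that differs:

#include <stdbool.h>
#include <string.h>

struct dml2_configuration_options {
	bool use_clock_dc_limits;
	int dcn_pipe_count; /* placeholder for the many remaining fields */
};

struct dc_sketch {
	struct dml2_configuration_options dml2_options;
	struct dml2_configuration_options dml2_dc_power_options;
};

/* Done once at resource construction, not on every bw-bounding-box update. */
static void init_dc_limited_options(struct dc_sketch *dc)
{
	memcpy(&dc->dml2_dc_power_options, &dc->dml2_options,
	       sizeof(struct dml2_configuration_options));
	dc->dml2_dc_power_options.use_clock_dc_limits = true;
}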
dc->dml2_options.dcn_pipe_count = pool->base.pipe_count; - dc->dml2_options.use_native_pstate_optimization = false; dc->dml2_options.use_native_soc_bb_construction = true; dc->dml2_options.minimize_dispclk_using_odm = true; dc->dml2_options.map_dc_pipes_with_callbacks = true; @@ -2228,6 +2242,10 @@ static bool dcn401_resource_construct( /* SPL */ dc->caps.scl_caps.sharpener_support = true; + /* init DC limited DML2 options */ + memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options)); + dc->dml2_dc_power_options.use_clock_dc_limits = true; + return true; create_fail: diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h index dc52a30991af..2ae6831c31ef 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.h @@ -24,7 +24,7 @@ enum dc_status dcn401_patch_unknown_plane_state(struct dc_plane_state *plane_sta enum dc_status dcn401_validate_bandwidth(struct dc *dc, struct dc_state *context, - bool fast_validate); + enum dc_validate_mode validate_mode); void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c index e0008c5f08ad..55b929ca7982 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl.c @@ -196,7 +196,12 @@ static struct spl_rect calculate_mpc_slice_in_timing_active( int epimo = mpc_slice_count - plane_clip_rec->width % mpc_slice_count - 1; struct spl_rect mpc_rec; - if (use_recout_width_aligned) { + if (spl_in->basic_in.custom_width != 0) { + mpc_rec.width = spl_in->basic_in.custom_width; + mpc_rec.x = spl_in->basic_in.custom_x; + mpc_rec.height = plane_clip_rec->height; + mpc_rec.y = plane_clip_rec->y; + } else if (use_recout_width_aligned) { mpc_rec.width = recout_width_align; if ((mpc_rec.width * (mpc_slice_idx + 1)) > plane_clip_rec->width) { mpc_rec.width = plane_clip_rec->width % recout_width_align; @@ -219,7 +224,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active( /* extra pixels in the division remainder need to go to pipes after * the extra pixel index minus one(epimo) defined here as: */ - if (mpc_slice_idx > epimo) { + if (mpc_slice_idx > epimo && spl_in->basic_in.custom_width == 0) { mpc_rec.x += mpc_slice_idx - epimo - 1; mpc_rec.width += 1; } @@ -252,10 +257,10 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i odm_rec.x = odm_slice_width * odm_slice_idx; odm_rec.width = is_last_odm_slice ? 
- /* last slice width is the reminder of h_active */ - h_active - odm_slice_width * (odm_slice_count - 1) : - /* odm slice width is the floor of h_active / count */ - odm_slice_width; + /* last slice width is the reminder of h_active */ + h_active - odm_slice_width * (odm_slice_count - 1) : + /* odm slice width is the floor of h_active / count */ + odm_slice_width; odm_rec.y = 0; odm_rec.height = v_active; @@ -884,7 +889,9 @@ static bool spl_get_isharp_en(struct spl_in *spl_in, /* Calculate number of tap with adaptive scaling off */ static void spl_get_taps_non_adaptive_scaler( - struct spl_scratch *spl_scratch, const struct spl_taps *in_taps, bool always_scale) + struct spl_scratch *spl_scratch, + const struct spl_taps *in_taps, + bool is_subsampled) { bool check_max_downscale = false; @@ -945,14 +952,15 @@ static void spl_get_taps_non_adaptive_scaler( SPL_ASSERT(check_max_downscale); - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz) && !always_scale) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)) spl_scratch->scl_data.taps.h_taps = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert) && !always_scale) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert)) spl_scratch->scl_data.taps.v_taps = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !always_scale) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled) spl_scratch->scl_data.taps.h_taps_c = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !always_scale) + if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c) && !is_subsampled) spl_scratch->scl_data.taps.v_taps_c = 1; + } /* Calculate optimal number of taps */ @@ -965,15 +973,13 @@ static bool spl_get_optimal_number_of_taps( unsigned int max_taps_y, max_taps_c; unsigned int min_taps_y, min_taps_c; enum lb_memory_config lb_config; - bool skip_easf = false; - bool always_scale = spl_in->basic_out.always_scale; + bool skip_easf = false; bool is_subsampled = spl_is_subsampled_format(spl_in->basic_in.format); - if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active && max_downscale_src_width != 0 && spl_scratch->scl_data.viewport.width > max_downscale_src_width) { - spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale); + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled); *enable_easf_v = false; *enable_easf_h = false; *enable_isharp = false; @@ -982,7 +988,7 @@ static bool spl_get_optimal_number_of_taps( /* Disable adaptive scaler and sharpener when integer scaling is enabled */ if (spl_in->scaling_quality.integer_scaling) { - spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale); + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled); *enable_easf_v = false; *enable_easf_h = false; *enable_isharp = false; @@ -997,8 +1003,9 @@ static bool spl_get_optimal_number_of_taps( * From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling * taps = 4 for upscaling */ - if (skip_easf) - spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, always_scale); + if (skip_easf) { + spl_get_taps_non_adaptive_scaler(spl_scratch, in_taps, is_subsampled); + } else { if (spl_is_video_format(spl_in->basic_in.format)) { spl_scratch->scl_data.taps.h_taps = 6; @@ -1124,7 +1131,6 @@ static bool spl_get_optimal_number_of_taps( (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) { spl_scratch->scl_data.taps.h_taps = 1; spl_scratch->scl_data.taps.v_taps = 1; - if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c) && !is_subsampled) 
spl_scratch->scl_data.taps.h_taps_c = 1; @@ -1149,6 +1155,7 @@ static bool spl_get_optimal_number_of_taps( if ((!*enable_easf_v) && !is_subsampled && (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))) spl_scratch->scl_data.taps.v_taps_c = 1; + } } return true; diff --git a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h index 36a284305a70..23d254dea18f 100644 --- a/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h +++ b/drivers/gpu/drm/amd/display/dc/sspl/dc_spl_types.h @@ -460,6 +460,8 @@ struct basic_in { enum spl_color_space color_space; // Color Space unsigned int max_luminance; // Max Luminance TODO: Is determined in dc_hw_sequencer.c is_sdr bool film_grain_applied; // Film Grain Applied // TODO: To check from where to get this? + int custom_width; // Width for non-standard segmentation - used when != 0 + int custom_x; // Start x for non-standard segmentation - used when custom_width != 0 }; // Basic output information diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h index 3f3fa1b6a69e..0bafb6710761 100644 --- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h +++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h @@ -129,7 +129,9 @@ enum dmub_window_id { DMUB_WINDOW_5_TRACEBUFF, DMUB_WINDOW_6_FW_STATE, DMUB_WINDOW_7_SCRATCH_MEM, + DMUB_WINDOW_IB_MEM, DMUB_WINDOW_SHARED_STATE, + DMUB_WINDOW_LSDMA_BUFFER, DMUB_WINDOW_TOTAL, }; @@ -355,6 +357,7 @@ struct dmub_diagnostic_data { uint8_t is_traceport_en : 1; uint8_t is_cw0_enabled : 1; uint8_t is_cw6_enabled : 1; + uint8_t is_pwait : 1; }; struct dmub_srv_inbox { @@ -539,6 +542,7 @@ struct dmub_srv { uint32_t fw_version; bool is_virtual; struct dmub_fb scratch_mem_fb; + struct dmub_fb ib_mem_gart; volatile struct dmub_shared_state_feature_block *shared_state; volatile const struct dmub_fw_state *fw_state; @@ -576,6 +580,7 @@ struct dmub_srv { enum dmub_srv_power_state_type power_state; struct dmub_diagnostic_data debug; + struct dmub_fb lsdma_rb_fb; }; /** @@ -602,14 +607,6 @@ struct dmub_notification { }; }; -/* enum dmub_ips_mode - IPS mode identifier */ -enum dmub_ips_mode { - DMUB_IPS_MODE_IPS1_MAX = 0, - DMUB_IPS_MODE_IPS2, - DMUB_IPS_MODE_IPS1_RCG, - DMUB_IPS_MODE_IPS1_ONO2_ON -}; - /** * DMUB firmware version helper macro - useful for checking if the version * of a firmware to know if feature or functionality is supported or present. diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index b66bd10cdc9b..6a69a788abe8 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -104,6 +104,14 @@ */ #define DMUB_MAX_FPO_STREAMS 4 +/* Define to ensure that the "common" members always appear in the same + * order in different structs for back compat purposes + */ +#define COMMON_STREAM_STATIC_SUB_STATE \ + struct dmub_fams2_cmd_legacy_stream_static_state legacy; \ + struct dmub_fams2_cmd_subvp_stream_static_state subvp; \ + struct dmub_fams2_cmd_drr_stream_static_state drr; + /* Maximum number of streams on any ASIC. */ #define DMUB_MAX_STREAMS 6 @@ -291,6 +299,31 @@ union dmub_addr { } u; /*<< Low/high bit access */ uint64_t quad_part; /*<< 64 bit address */ }; + +/* Flattened structure containing SOC BB parameters stored in the VBIOS + * It is not practical to store the entire bounding box in VBIOS since the bounding box struct can gain new parameters. 
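In calculate_mpc_slice_in_timing_active() above, a non-zero basic_in.custom_width now bypasses both the equal split and the remainder (epimo) redistribution. A self-contained model of the slice-width selection; plane offsets and the recout-aligned path are omitted:

#include <stdio.h>

struct spl_rect { int x, y, width, height; };

/* Model of the MPC split: custom_width != 0 overrides the equal split. */
static struct spl_rect mpc_slice(int custom_width, int custom_x,
				 int clip_width, int slice_idx, int slice_count)
{
	struct spl_rect r = { 0, 0, 0, 0 }; /* y/height tracked elsewhere */
	int epimo = slice_count - clip_width % slice_count - 1;

	if (custom_width != 0) {
		r.width = custom_width;
		r.x = custom_x;
		return r;
	}
	r.width = clip_width / slice_count;
	r.x = r.width * slice_idx;
	/* extra remainder pixels go to the pipes after epimo */
	if (slice_idx > epimo) {
		r.x += slice_idx - epimo - 1;
		r.width += 1;
	}
	return r;
}

int main(void)
{
	struct spl_rect r = mpc_slice(0, 0, 1923, 3, 4); /* 1923 % 4 = 3 */
	printf("slice 3: x=%d w=%d\n", r.x, r.width);
	return 0;
}

For a 1923-pixel clip split four ways this prints slice 3: x=1442 w=481, i.e. the trailing slices absorb the division remainder unless a custom width pins them.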
+ * This also prevents alighment issues when new parameters are added to the SoC BB. + * The following parameters should be added since these values can't be obtained elsewhere: + * -dml2_soc_power_management_parameters + * -dml2_soc_vmin_clock_limits + */ +struct dmub_soc_bb_params { + uint32_t dram_clk_change_blackout_ns; + uint32_t dram_clk_change_read_only_ns; + uint32_t dram_clk_change_write_only_ns; + uint32_t fclk_change_blackout_ns; + uint32_t g7_ppt_blackout_ns; + uint32_t stutter_enter_plus_exit_latency_ns; + uint32_t stutter_exit_latency_ns; + uint32_t z8_stutter_enter_plus_exit_latency_ns; + uint32_t z8_stutter_exit_latency_ns; + uint32_t z8_min_idle_time_ns; + uint32_t type_b_dram_clk_change_blackout_ns; + uint32_t type_b_ppt_blackout_ns; + uint32_t vmin_limit_dispclk_khz; + uint32_t vmin_limit_dcfclk_khz; + uint32_t g7_temperature_read_blackout_ns; +}; #pragma pack(pop) /** @@ -757,11 +790,29 @@ enum dmub_ips_rcg_disable_type { DMUB_IPS_RCG_DISABLE = 3 }; +enum dmub_ips_in_vpb_disable_type { + DMUB_IPS_VPB_RCG_ONLY = 0, // Legacy behaviour + DMUB_IPS_VPB_DISABLE_ALL = 1, + DMUB_IPS_VPB_ENABLE_IPS1_AND_RCG = 2, + DMUB_IPS_VPB_ENABLE_ALL = 3 // Enable IPS1 Z8, IPS1 and RCG +}; + #define DMUB_IPS1_ALLOW_MASK 0x00000001 #define DMUB_IPS2_ALLOW_MASK 0x00000002 #define DMUB_IPS1_COMMIT_MASK 0x00000004 #define DMUB_IPS2_COMMIT_MASK 0x00000008 +enum dmub_ips_comand_type { + /** + * Start/stop IPS residency measurements for a given IPS mode + */ + DMUB_CMD__IPS_RESIDENCY_CNTL = 0, + /** + * Query IPS residency information for a given IPS mode + */ + DMUB_CMD__IPS_QUERY_RESIDENCY_INFO = 1, +}; + /** * union dmub_fw_boot_options - Boot option definitions for SCRATCH14 */ @@ -831,7 +882,7 @@ enum dmub_shared_state_feature_id { /** * struct dmub_shared_state_ips_fw - Firmware signals for IPS. */ -union dmub_shared_state_ips_fw_signals { + union dmub_shared_state_ips_fw_signals { struct { uint32_t ips1_commit : 1; /**< 1 if in IPS1 or IPS0 RCG */ uint32_t ips2_commit : 1; /**< 1 if in IPS2 */ @@ -846,7 +897,7 @@ union dmub_shared_state_ips_fw_signals { /** * struct dmub_shared_state_ips_signals - Firmware signals for IPS. */ -union dmub_shared_state_ips_driver_signals { + union dmub_shared_state_ips_driver_signals { struct { uint32_t allow_pg : 1; /**< 1 if PG is allowed */ uint32_t allow_ips1 : 1; /**< 1 is IPS1 is allowed */ @@ -856,7 +907,9 @@ union dmub_shared_state_ips_driver_signals { uint32_t allow_ips0_rcg : 1; /**< 1 is IPS0 RCG is allowed */ uint32_t allow_ips1_rcg : 1; /**< 1 is IPS1 RCG is allowed */ uint32_t allow_ips1z8 : 1; /**< 1 is IPS1 Z8 Retention is allowed */ - uint32_t reserved_bits : 24; /**< Reversed bits */ + uint32_t allow_dynamic_ips1 : 1; /**< 1 if IPS1 is allowed in dynamic use cases such as VPB */ + uint32_t allow_dynamic_ips1_z8: 1; /**< 1 if IPS1 z8 ret is allowed in dynamic use cases such as VPB */ + uint32_t reserved_bits : 22; /**< Reversed bits */ } bits; uint32_t all; }; @@ -1508,6 +1561,16 @@ enum dmub_cmd_type { */ DMUB_CMD__FUSED_IO = 89, + /** + * Command type used for all LSDMA commands. + */ + DMUB_CMD__LSDMA = 90, + + /** + * Command type use for all IPS commands. 
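Since dmub_soc_bb_params sits inside the #pragma pack region and carries only fixed-width uint32_t members, its layout cannot drift between compilers, which is what makes it safe to park in VBIOS. That property can be pinned down at compile time (field list copied from the struct above; the assert itself is illustrative, not in the diff):

#include <stdint.h>

#pragma pack(push, 1)
struct dmub_soc_bb_params {
	uint32_t dram_clk_change_blackout_ns;
	uint32_t dram_clk_change_read_only_ns;
	uint32_t dram_clk_change_write_only_ns;
	uint32_t fclk_change_blackout_ns;
	uint32_t g7_ppt_blackout_ns;
	uint32_t stutter_enter_plus_exit_latency_ns;
	uint32_t stutter_exit_latency_ns;
	uint32_t z8_stutter_enter_plus_exit_latency_ns;
	uint32_t z8_stutter_exit_latency_ns;
	uint32_t z8_min_idle_time_ns;
	uint32_t type_b_dram_clk_change_blackout_ns;
	uint32_t type_b_ppt_blackout_ns;
	uint32_t vmin_limit_dispclk_khz;
	uint32_t vmin_limit_dcfclk_khz;
	uint32_t g7_temperature_read_blackout_ns;
};
#pragma pack(pop)

/* 15 fixed-width fields, no padding: the same size on every ABI. */
_Static_assert(sizeof(struct dmub_soc_bb_params) == 15 * sizeof(uint32_t),
	       "VBIOS-resident struct must keep a fixed size");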
+ */ + DMUB_CMD__IPS = 91, + DMUB_CMD__VBIOS = 128, }; @@ -1918,6 +1981,121 @@ struct dmub_rb_cmd_fams2_flip { struct dmub_fams2_flip_info flip_info; }; +struct dmub_cmd_lsdma_data { + union { + struct lsdma_init_data { + union dmub_addr gpu_addr_base; + uint32_t ring_size; + } init_data; + struct lsdma_tiled_copy_data { + uint32_t src_addr_lo; + uint32_t src_addr_hi; + uint32_t dst_addr_lo; + uint32_t dst_addr_hi; + + uint32_t src_x : 16; + uint32_t src_y : 16; + + uint32_t src_width : 16; + uint32_t src_height : 16; + + uint32_t dst_x : 16; + uint32_t dst_y : 16; + + uint32_t dst_width : 16; + uint32_t dst_height : 16; + + uint32_t rect_x : 16; + uint32_t rect_y : 16; + + uint32_t src_swizzle_mode : 5; + uint32_t src_mip_max : 5; + uint32_t src_mip_id : 5; + uint32_t dst_mip_max : 5; + uint32_t dst_swizzle_mode : 5; + uint32_t dst_mip_id : 5; + uint32_t tmz : 1; + uint32_t dcc : 1; + + uint32_t data_format : 6; + uint32_t padding1 : 4; + uint32_t dst_element_size : 3; + uint32_t num_type : 3; + uint32_t src_element_size : 3; + uint32_t write_compress : 2; + uint32_t cache_policy_dst : 2; + uint32_t cache_policy_src : 2; + uint32_t read_compress : 2; + uint32_t src_dim : 2; + uint32_t dst_dim : 2; + uint32_t max_uncom : 1; + + uint32_t max_com : 2; + uint32_t padding : 30; + } tiled_copy_data; + struct lsdma_linear_copy_data { + uint32_t count : 30; + uint32_t cache_policy_dst : 2; + + uint32_t tmz : 1; + uint32_t cache_policy_src : 2; + uint32_t padding : 29; + + uint32_t src_lo; + uint32_t src_hi; + uint32_t dst_lo; + uint32_t dst_hi; + } linear_copy_data; + struct lsdma_reg_write_data { + uint32_t reg_addr; + uint32_t reg_data; + } reg_write_data; + struct lsdma_pio_copy_data { + union { + struct { + uint32_t byte_count : 26; + uint32_t src_loc : 1; + uint32_t dst_loc : 1; + uint32_t src_addr_inc : 1; + uint32_t dst_addr_inc : 1; + uint32_t overlap_disable : 1; + uint32_t constant_fill : 1; + } fields; + uint32_t raw; + } packet; + uint32_t src_lo; + uint32_t src_hi; + uint32_t dst_lo; + uint32_t dst_hi; + } pio_copy_data; + struct lsdma_pio_constfill_data { + union { + struct { + uint32_t byte_count : 26; + uint32_t src_loc : 1; + uint32_t dst_loc : 1; + uint32_t src_addr_inc : 1; + uint32_t dst_addr_inc : 1; + uint32_t overlap_disable : 1; + uint32_t constant_fill : 1; + } fields; + uint32_t raw; + } packet; + uint32_t dst_lo; + uint32_t dst_hi; + uint32_t data; + } pio_constfill_data; + + uint32_t all[14]; + } u; + +}; + +struct dmub_rb_cmd_lsdma { + struct dmub_cmd_header header; + struct dmub_cmd_lsdma_data lsdma_data; +}; + struct dmub_optc_state_v2 { uint32_t v_total_min; uint32_t v_total_max; @@ -1949,6 +2127,28 @@ enum fams2_stream_type { FAMS2_STREAM_TYPE_SUBVP = 4, }; +struct dmub_rect16 { + /** + * Dirty rect x offset. + */ + uint16_t x; + + /** + * Dirty rect y offset. + */ + uint16_t y; + + /** + * Dirty rect width. + */ + uint16_t width; + + /** + * Dirty rect height. 
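The LSDMA payloads above pass 64-bit GPU addresses as lo/hi pairs and squeeze control bits into bitfields; linear_copy_data's count, for example, only has 30 bits. A hedged helper for filling that payload (the helper and its overflow check are illustrative, not part of the diff):

#include <stdint.h>

struct lsdma_linear_copy_data {
	uint32_t count : 30;
	uint32_t cache_policy_dst : 2;
	uint32_t tmz : 1;
	uint32_t cache_policy_src : 2;
	uint32_t padding : 29;
	uint32_t src_lo, src_hi, dst_lo, dst_hi;
};

/* Hypothetical fill helper: 0 on success, -1 if count overflows 30 bits. */
static int fill_linear_copy(struct lsdma_linear_copy_data *d,
			    uint64_t src, uint64_t dst, uint64_t count)
{
	if (count >> 30)
		return -1;
	d->count = (uint32_t)count;
	d->src_lo = (uint32_t)src;
	d->src_hi = (uint32_t)(src >> 32);
	d->dst_lo = (uint32_t)dst;
	d->dst_hi = (uint32_t)(dst >> 32);
	d->tmz = 0; /* non-secure copy in this sketch */
	return 0;
}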
+ */ + uint16_t height; +}; + /* static stream state */ struct dmub_fams2_legacy_stream_static_state { uint8_t vactive_det_fill_delay_otg_vlines; @@ -2021,11 +2221,13 @@ union dmub_fams2_stream_static_sub_state { }; //v0 union dmub_fams2_cmd_stream_static_sub_state { - struct dmub_fams2_cmd_legacy_stream_static_state legacy; - struct dmub_fams2_cmd_subvp_stream_static_state subvp; - struct dmub_fams2_cmd_drr_stream_static_state drr; + COMMON_STREAM_STATIC_SUB_STATE }; //v1 +union dmub_fams2_stream_static_sub_state_v2 { + COMMON_STREAM_STATIC_SUB_STATE +}; //v2 + struct dmub_fams2_stream_static_state { enum fams2_stream_type type; uint32_t otg_vline_time_ns; @@ -2091,7 +2293,7 @@ struct dmub_fams2_cmd_stream_static_base_state { struct dmub_fams2_stream_static_state_v1 { struct dmub_fams2_cmd_stream_static_base_state base; - union dmub_fams2_cmd_stream_static_sub_state sub_state; + union dmub_fams2_stream_static_sub_state_v2 sub_state; }; //v1 /** @@ -2139,6 +2341,11 @@ union dmub_cmd_fams2_config { } stream_v1; //v1 }; +struct dmub_fams2_config_v2 { + struct dmub_cmd_fams2_global_config global; + struct dmub_fams2_stream_static_state_v1 stream_v1[DMUB_MAX_STREAMS]; //v1 +}; + /** * DMUB rb command definition for FAMS2 (merged SubVP, FPO, Legacy) */ @@ -2148,6 +2355,22 @@ struct dmub_rb_cmd_fams2 { }; /** + * Indirect buffer descriptor + */ +struct dmub_ib_data { + union dmub_addr src; // location of indirect buffer in memory + uint16_t size; // indirect buffer size in bytes +}; + +/** + * DMUB rb command definition for commands passed over indirect buffer + */ +struct dmub_rb_cmd_ib { + struct dmub_cmd_header header; + struct dmub_ib_data ib_data; +}; + +/** * enum dmub_cmd_idle_opt_type - Idle optimization command type. */ enum dmub_cmd_idle_opt_type { @@ -2170,6 +2393,11 @@ enum dmub_cmd_idle_opt_type { * DCN hardware notify power state. */ DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3, + + /** + * DCN notify to release HW. + */ + DMUB_CMD__IDLE_OPT_RELEASE_HW = 4, }; /** @@ -2315,7 +2543,8 @@ struct dmub_dig_transmitter_control_data_v1_7 { uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */ uint8_t HPO_instance; /**< HPO instance (0: inst0, 1: inst1) */ uint8_t reserved1; /**< For future use */ - uint8_t reserved2[3]; /**< For future use */ + uint8_t skip_phy_ssc_reduction; + uint8_t reserved2[2]; /**< For future use */ uint32_t reserved3[11]; /**< For future use */ }; @@ -2933,6 +3162,7 @@ enum dmub_cmd_fams_type { DMUB_CMD__FAMS2_CONFIG = 4, DMUB_CMD__FAMS2_DRR_UPDATE = 5, DMUB_CMD__FAMS2_FLIP = 6, + DMUB_CMD__FAMS2_IB_CONFIG = 7, }; /** @@ -3861,6 +4091,11 @@ struct dmub_cmd_replay_copy_settings_data { * Use for AUX-less ALPM LFPS wake operation */ struct dmub_alpm_auxless_data auxless_alpm_data; + + /** + * @pad: Align structure to 4 byte boundary. + */ + uint8_t pad[2]; }; /** @@ -4416,6 +4651,37 @@ enum dmub_cmd_abm_type { DMUB_CMD__ABM_GET_HISTOGRAM_DATA = 11, }; +/** + * LSDMA command sub-types. + */ +enum dmub_cmd_lsdma_type { + /** + * Initialize parameters for LSDMA. 
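COMMON_STREAM_STATIC_SUB_STATE exists purely so the v1 and v2 sub-state unions stay member-for-member identical for back compat. The same trick, reduced to a compile-time check with stand-in member structs:

/* Stand-ins for the real dmub_fams2_cmd_*_stream_static_state structs. */
struct legacy_state { unsigned int a; };
struct subvp_state { unsigned int b[2]; };
struct drr_state { unsigned int c; };

#define COMMON_STREAM_STATIC_SUB_STATE \
	struct legacy_state legacy; \
	struct subvp_state subvp; \
	struct drr_state drr;

union sub_state_v1 { COMMON_STREAM_STATIC_SUB_STATE };
union sub_state_v2 { COMMON_STREAM_STATIC_SUB_STATE };

/* Both versions must stay layout-compatible for older consumers. */
_Static_assert(sizeof(union sub_state_v1) == sizeof(union sub_state_v2),
	       "v1/v2 sub-states diverged");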
+ * Ring buffer is mapped to the ring buffer + */ + DMUB_CMD__LSDMA_INIT_CONFIG = 0, + /** + * LSDMA copies data from source to destination linearly + */ + DMUB_CMD__LSDMA_LINEAR_COPY = 1, + /** + * Send the tiled-to-tiled copy command + */ + DMUB_CMD__LSDMA_TILED_TO_TILED_COPY = 2, + /** + * Send the poll reg write command + */ + DMUB_CMD__LSDMA_POLL_REG_WRITE = 3, + /** + * Send the pio copy command + */ + DMUB_CMD__LSDMA_PIO_COPY = 4, + /** + * Send the pio constfill command + */ + DMUB_CMD__LSDMA_PIO_CONSTFILL = 5, +}; + struct abm_ace_curve { /** * @offsets: ACE curve offsets. @@ -5621,6 +5887,59 @@ struct dmub_rb_cmd_assr_enable { }; /** + * Current definition of "ips_mode" from driver + */ +enum ips_residency_mode { + IPS_RESIDENCY__IPS1_MAX, + IPS_RESIDENCY__IPS2, + IPS_RESIDENCY__IPS1_RCG, + IPS_RESIDENCY__IPS1_ONO2_ON, +}; + +#define NUM_IPS_HISTOGRAM_BUCKETS 16 + +/** + * IPS residency statistics to be sent to driver - subset of struct dmub_ips_residency_stats + */ +struct dmub_ips_residency_info { + uint32_t residency_millipercent; + uint32_t entry_counter; + uint32_t histogram[NUM_IPS_HISTOGRAM_BUCKETS]; + uint64_t total_time_us; + uint64_t total_inactive_time_us; +}; + +/** + * Data passed from driver to FW in a DMUB_CMD__IPS_RESIDENCY_CNTL command. + */ +struct dmub_cmd_ips_residency_cntl_data { + uint8_t panel_inst; + uint8_t start_measurement; + uint8_t padding[2]; // align to 4-byte boundary +}; + +struct dmub_rb_cmd_ips_residency_cntl { + struct dmub_cmd_header header; + struct dmub_cmd_ips_residency_cntl_data cntl_data; +}; + +/** + * Data passed from FW to driver in a DMUB_CMD__IPS_QUERY_RESIDENCY_INFO command. + */ +struct dmub_cmd_ips_query_residency_info_data { + union dmub_addr dest; + uint32_t size; + uint32_t ips_mode; + uint8_t panel_inst; + uint8_t padding[3]; // align to 4-byte boundary +}; + +struct dmub_rb_cmd_ips_query_residency_info { + struct dmub_cmd_header header; + struct dmub_cmd_ips_query_residency_info_data info_data; +}; + +/** * union dmub_rb_cmd - DMUB inbox command. */ union dmub_rb_cmd { @@ -5926,13 +6245,25 @@ union dmub_rb_cmd { * Definition of a DMUB_CMD__PSP_ASSR_ENABLE command. */ struct dmub_rb_cmd_assr_enable assr_enable; + struct dmub_rb_cmd_fams2 fams2_config; + struct dmub_rb_cmd_ib ib_fams2_config; + struct dmub_rb_cmd_fams2_drr_update fams2_drr_update; struct dmub_rb_cmd_fams2_flip fams2_flip; struct dmub_rb_cmd_fused_io fused_io; + + /** + * Definition of a DMUB_CMD__LSDMA command. 
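The residency query is indirect: the driver points the firmware at a buffer sized for dmub_ips_residency_info and names the ips_residency_mode it wants sampled. A sketch of building that payload; build_query() and the simplified dmub_addr are stand-ins for driver code this diff does not show:

#include <stdint.h>
#include <string.h>

union dmub_addr_sketch { struct { uint32_t low, high; } u; uint64_t quad_part; };

struct dmub_cmd_ips_query_residency_info_data {
	union dmub_addr_sketch dest;
	uint32_t size;
	uint32_t ips_mode;   /* one of enum ips_residency_mode */
	uint8_t panel_inst;
	uint8_t padding[3];  /* align to 4-byte boundary */
};

static void build_query(struct dmub_cmd_ips_query_residency_info_data *q,
			uint64_t gpu_dest, uint32_t info_size,
			uint32_t mode, uint8_t panel)
{
	memset(q, 0, sizeof(*q));
	q->dest.quad_part = gpu_dest; /* FW writes dmub_ips_residency_info here */
	q->size = info_size;
	q->ips_mode = mode;
	q->panel_inst = panel;
}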
+ */ + struct dmub_rb_cmd_lsdma lsdma; + + struct dmub_rb_cmd_ips_residency_cntl ips_residency_cntl; + + struct dmub_rb_cmd_ips_query_residency_info ips_query_residency_info; }; /** diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index a308bd604677..3f38db752b84 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -416,7 +416,7 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub) void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub) { - uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; + uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait; uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; struct dmub_timeout_info timeout = {0}; @@ -466,6 +466,9 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub) REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); dmub->debug.is_dmcub_enabled = is_dmub_enabled; + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); + dmub->debug.is_pwait = is_pwait; + REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); dmub->debug.is_dmcub_soft_reset = is_soft_reset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c index 72a0f078cd1a..2228d62adc7e 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn35.c @@ -92,19 +92,15 @@ void dmub_dcn35_reset(struct dmub_srv *dmub) uint32_t in_reset, is_enabled, scratch, i, pwait_mode; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); - if (in_reset == 0) { + if (in_reset == 0 && is_enabled != 0) { cmd.bits.status = 1; cmd.bits.command_code = DMUB_GPINT__STOP_FW; cmd.bits.param = 0; dmub->hw_funcs.set_gpint(dmub, cmd); - /** - * Timeout covers both the ACK and the wait - * for remaining work to finish. - */ - for (i = 0; i < timeout; ++i) { if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) break; @@ -130,11 +126,9 @@ void dmub_dcn35_reset(struct dmub_srv *dmub) /* Force reset in case we timed out, DMCUB is likely hung. 
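dmub_dcn35_reset() now samples DMCUB_ENABLE up front and only sends the STOP_FW GPINT when the controller is both out of soft reset and actually enabled, and a udelay(1) replaces the MMHUBBUB soft-reset pulse before disable. A stub-register model of just the gating logic (the fake register variables stand in for REG_GET/REG_UPDATE):

#include <stdbool.h>
#include <stdint.h>

static uint32_t dmcub_soft_reset, dmcub_enable; /* fake register state */

static void dcn35_reset_model(void)
{
	bool in_reset = dmcub_soft_reset != 0;
	bool is_enabled = dmcub_enable != 0;

	if (!in_reset && is_enabled) {
		/* send DMUB_GPINT__STOP_FW and poll for the ack here */
	}
	if (is_enabled) {
		dmcub_soft_reset = 1;
		/* udelay(1) in the real sequence, then disable */
		dmcub_enable = 0;
	}
}

int main(void)
{
	dmcub_enable = 1;
	dcn35_reset_model();
	return (int)dmcub_enable; /* 0: controller disabled after reset */
}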
*/ } - REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); - if (is_enabled) { REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); + udelay(1); REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); } @@ -160,11 +154,7 @@ void dmub_dcn35_reset_release(struct dmub_srv *dmub) LONO_SOCCLK_GATE_DISABLE, 1, LONO_DMCUBCLK_GATE_DISABLE, 1); - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); - udelay(1); REG_UPDATE_2(DMCUB_CNTL, DMCUB_ENABLE, 1, DMCUB_TRACEPORT_EN, 1); - REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); - udelay(1); REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 0); REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 0); } @@ -464,7 +454,7 @@ uint32_t dmub_dcn35_get_current_time(struct dmub_srv *dmub) void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub) { - uint32_t is_dmub_enabled, is_soft_reset; + uint32_t is_dmub_enabled, is_soft_reset, is_pwait; uint32_t is_traceport_enabled, is_cw6_enabled; struct dmub_timeout_info timeout = {0}; @@ -515,6 +505,9 @@ void dmub_dcn35_get_diagnostic_data(struct dmub_srv *dmub) REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); dmub->debug.is_dmcub_enabled = is_dmub_enabled; + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); + dmub->debug.is_pwait = is_pwait; + REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); dmub->debug.is_dmcub_soft_reset = is_soft_reset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c index 2575dbc448f7..b31adbd0d685 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn401.c @@ -413,7 +413,7 @@ uint32_t dmub_dcn401_get_current_time(struct dmub_srv *dmub) void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub) { - uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; + uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset, is_pwait; uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; struct dmub_timeout_info timeout = {0}; @@ -464,6 +464,9 @@ void dmub_dcn401_get_diagnostic_data(struct dmub_srv *dmub) REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); dmub->debug.is_dmcub_enabled = is_dmub_enabled; + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); + dmub->debug.is_pwait = is_pwait; + REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); dmub->debug.is_dmcub_soft_reset = is_soft_reset; diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c index acca7943a8c8..b17a19400c06 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c @@ -65,6 +65,12 @@ /* Default scratch mem size. */ #define DMUB_SCRATCH_MEM_SIZE (1024) +/* Default indirect buffer size. */ +#define DMUB_IB_MEM_SIZE (1280) + +/* Default LSDMA ring buffer size. */ +#define DMUB_LSDMA_RB_SIZE (64 * 1024) + /* Number of windows in use. */ #define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL) /* Base addresses. 
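Two windows join the firmware memory map here: a 1280-byte indirect-buffer region and a 64 KiB LSDMA ring. A sketch of how the per-window size table is filled before the region calculator runs; the enum mirrors the dmub_window_id order added earlier, with the index of the scratch window assumed:

#include <stdint.h>

enum dmub_window_id_sketch {
	DMUB_WINDOW_7_SCRATCH_MEM = 7, /* index assumed from the name */
	DMUB_WINDOW_IB_MEM,
	DMUB_WINDOW_SHARED_STATE,
	DMUB_WINDOW_LSDMA_BUFFER,
	DMUB_WINDOW_TOTAL,
};

#define DMUB_SCRATCH_MEM_SIZE (1024)
#define DMUB_IB_MEM_SIZE      (1280)
#define DMUB_LSDMA_RB_SIZE    (64 * 1024)

static uint32_t window_sizes[DMUB_WINDOW_TOTAL];

static void fill_window_sizes(uint32_t shared_state_size)
{
	window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
	window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE;
	window_sizes[DMUB_WINDOW_SHARED_STATE] = shared_state_size;
	window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE;
	/* dmub_srv_calc_regions_for_memory_type() consumes this table */
}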
*/ @@ -559,7 +565,9 @@ enum dmub_status window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size; window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size; window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE; + window_sizes[DMUB_WINDOW_IB_MEM] = DMUB_IB_MEM_SIZE; window_sizes[DMUB_WINDOW_SHARED_STATE] = max(DMUB_FW_HEADER_SHARED_STATE_SIZE, shared_state_size); + window_sizes[DMUB_WINDOW_LSDMA_BUFFER] = DMUB_LSDMA_RB_SIZE; out->fb_size = dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB); @@ -645,6 +653,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF]; struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE]; struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM]; + struct dmub_fb *ib_mem_gart = params->fb[DMUB_WINDOW_IB_MEM]; struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE]; struct dmub_rb_init_params rb_params, outbox0_rb_params; @@ -655,7 +664,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, return DMUB_STATUS_INVALID; if (!inst_fb || !stack_fb || !data_fb || !bios_fb || !mail_fb || - !tracebuff_fb || !fw_state_fb || !scratch_mem_fb) { + !tracebuff_fb || !fw_state_fb || !scratch_mem_fb || !ib_mem_gart) { ASSERT(0); return DMUB_STATUS_INVALID; } @@ -741,6 +750,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, dmub->scratch_mem_fb = *scratch_mem_fb; + dmub->ib_mem_gart = *ib_mem_gart; + if (dmub->hw_funcs.setup_windows) dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, ®ion6); diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index 3ba9b62ba70b..71efd2770c99 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -147,7 +147,7 @@ unsigned int mod_freesync_calc_v_total_from_refresh( ((unsigned int)(div64_u64((1000000000ULL * 1000000), refresh_in_uhz))); - if (MICRO_HZ_TO_HZ(refresh_in_uhz) <= stream->timing.min_refresh_in_uhz) { + if (refresh_in_uhz <= stream->timing.min_refresh_in_uhz) { /* When the target refresh rate is the minimum panel refresh rate, * round down the vtotal value to avoid stretching vblank over * panel's vtotal boundary. 
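mod_freesync_calc_v_total_from_refresh() floors the division at the panel minimum (above) and, in the hunk that follows, adds a +(1000000 - 1) term to ceil it at the panel maximum so v_total_min cannot land below the panel's lower bound. A worked model of the floor/ceiling arithmetic; the sample mode is illustrative, and the kernel performs each division with div64_u64:

#include <stdint.h>
#include <stdio.h>

/* v_total = frame_ns * pix_clk / h_total / 1e6, floored or ceiled. */
static uint64_t v_total_floor(uint64_t frame_ns, uint64_t pix_clk_100hz,
			      uint64_t h_total)
{
	return frame_ns * (pix_clk_100hz / 10) / h_total / 1000000;
}

static uint64_t v_total_ceil(uint64_t frame_ns, uint64_t pix_clk_100hz,
			     uint64_t h_total)
{
	return (frame_ns * (pix_clk_100hz / 10) / h_total + (1000000 - 1))
		/ 1000000;
}

int main(void)
{
	/* 148.5 MHz pixel clock (1485000 in 100 Hz units), h_total 2200,
	 * 60 Hz frame = 16666666 ns */
	printf("floor=%llu ceil=%llu\n",
	       (unsigned long long)v_total_floor(16666666ULL, 1485000ULL, 2200ULL),
	       (unsigned long long)v_total_ceil(16666666ULL, 1485000ULL, 2200ULL));
	return 0;
}

For this mode the program prints floor=1124 ceil=1125; that single line of headroom is exactly the off-by-one the new comment describes.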
@@ -155,6 +155,14 @@ unsigned int mod_freesync_calc_v_total_from_refresh( v_total = div64_u64(div64_u64(((unsigned long long)( frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), stream->timing.h_total), 1000000); + } else if (refresh_in_uhz >= stream->timing.max_refresh_in_uhz) { + /* When the target refresh rate is the maximum panel refresh rate + * round up the vtotal value to prevent off-by-one error causing + * v_total_min to be below the panel's lower bound + */ + v_total = div64_u64(div64_u64(((unsigned long long)( + frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), + stream->timing.h_total) + (1000000 - 1), 1000000); } else { v_total = div64_u64(div64_u64(((unsigned long long)( frame_duration_in_ns) * (stream->timing.pix_clk_100hz / 10)), diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index e58e7b93810b..6b7db8ec9a53 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -260,6 +260,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) return MOD_HDCP_STATUS_FAILURE; } + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; mutex_lock(&psp->hdcp_context.mutex); diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index 758a8aa31fbe..391209a3bf29 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -79,4 +79,6 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, bool fill_custom_backlight_caps(unsigned int config_no, struct dm_acpi_atif_backlight_caps *caps); void reset_replay_dsync_error_count(struct dc_link *link); +void change_replay_to_psr(struct dc_link *link); +void change_psr_to_replay(struct dc_link *link); #endif /* MODULES_POWER_POWER_HELPERS_H_ */ diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 11374a2cbab8..bfb446736ca8 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -396,6 +396,7 @@ enum amd_dpm_forced_level; * (such as allocating any required memory) * @suspend: handles IP specific hw/sw changes for suspend * @resume: handles IP specific hw/sw changes for resume + * @complete: handles IP specific changes after resume * @is_idle: returns current IP block idle status * @wait_for_idle: poll for idle * @check_soft_reset: check soft reset the IP block @@ -427,6 +428,7 @@ struct amd_ip_funcs { int (*prepare_suspend)(struct amdgpu_ip_block *ip_block); int (*suspend)(struct amdgpu_ip_block *ip_block); int (*resume)(struct amdgpu_ip_block *ip_block); + void (*complete)(struct amdgpu_ip_block *ip_block); bool (*is_idle)(struct amdgpu_ip_block *ip_block); int (*wait_for_idle)(struct amdgpu_ip_block *ip_block); bool (*check_soft_reset)(struct amdgpu_ip_block *ip_block); diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index f4d914dc731f..e2b1ea7467b0 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -108,6 +108,8 @@ enum pp_clock_type { PP_VCLK1, PP_DCLK, PP_DCLK1, + PP_ISPICLK, + PP_ISPXCLK, OD_SCLK, OD_MCLK, OD_VDDC_CURVE, diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c 
b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 5c1cbdc122d2..71d986dd7a6e 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -98,6 +98,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, case AMD_IP_BLOCK_TYPE_GMC: case AMD_IP_BLOCK_TYPE_ACP: case AMD_IP_BLOCK_TYPE_VPE: + case AMD_IP_BLOCK_TYPE_ISP: if (pp_funcs && pp_funcs->set_powergating_by_smu) ret = (pp_funcs->set_powergating_by_smu( (adev)->powerplay.pp_handle, block_type, gate, 0)); @@ -852,22 +853,16 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, uint32_t max) { struct smu_context *smu = adev->powerplay.pp_handle; - int ret = 0; - - if (type != PP_SCLK) - return -EINVAL; if (!is_support_sw_smu(adev)) return -EOPNOTSUPP; - mutex_lock(&adev->pm.mutex); - ret = smu_set_soft_freq_range(smu, - SMU_SCLK, + guard(mutex)(&adev->pm.mutex); + + return smu_set_soft_freq_range(smu, + type, min, max); - mutex_unlock(&adev->pm.mutex); - - return ret; } int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index edd9895b46c0..5fbfe7333b54 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -1398,6 +1398,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, if (ret) return -EINVAL; parameter_size++; + if (!tmp_str) + break; while (isspace(*tmp_str)) tmp_str++; } @@ -1890,7 +1892,7 @@ out: static int ss_power_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr, uint32_t mask, enum amdgpu_device_attr_states *states) { - if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev))) + if (!amdgpu_device_supports_smart_shift(adev)) *states = ATTR_STATE_UNSUPPORTED; return 0; @@ -1901,7 +1903,7 @@ static int ss_bias_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ { uint32_t ss_power; - if (!amdgpu_device_supports_smart_shift(adev_to_drm(adev))) + if (!amdgpu_device_supports_smart_shift(adev)) *states = ATTR_STATE_UNSUPPORTED; else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, (void *)&ss_power)) @@ -3456,14 +3458,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, effective_mode &= ~S_IWUSR; /* not implemented yet for APUs other than GC 10.3.1 (vangogh) and 9.4.3 */ - if (((adev->family == AMDGPU_FAMILY_SI) || - ((adev->flags & AMD_IS_APU) && (gc_ver != IP_VERSION(10, 3, 1)) && - (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4)))) && - (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || - attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr || - attr == &sensor_dev_attr_power1_cap.dev_attr.attr || - attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr)) - return 0; + if (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || + attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr || + attr == &sensor_dev_attr_power1_cap.dev_attr.attr || + attr == &sensor_dev_attr_power1_cap_default.dev_attr.attr) { + if (adev->family == AMDGPU_FAMILY_SI || + ((adev->flags & AMD_IS_APU) && gc_ver != IP_VERSION(10, 3, 1) && + (gc_ver != IP_VERSION(9, 4, 3) && gc_ver != IP_VERSION(9, 4, 4))) || + (amdgpu_sriov_vf(adev) && gc_ver == IP_VERSION(11, 0, 3))) + return 0; + } /* not implemented yet for APUs having < GC 9.3.0 (Renoir) */ if (((adev->family == AMDGPU_FAMILY_SI) || @@ -3645,6 +3649,9 @@ static int parse_input_od_command_lines(const char *buf, return -EINVAL; parameter_size++; + if (!tmp_str) + break; + while (isspace(*tmp_str)) 
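/* Both token loops in this file walk strings split with strsep(), which
 * sets the cursor to NULL after the last token; the "if (!tmp_str) break;"
 * checks added here stop this trailing isspace() skip from dereferencing
 * that NULL. */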
tmp_str++; } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index 34e71727b27d..307ebf7e3226 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -1242,7 +1242,7 @@ static void kv_dpm_enable_bapm(void *handle, bool enable) if (pi->bapm_enable) { ret = amdgpu_kv_smc_bapm_enable(adev, enable); if (ret) - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); } } @@ -1266,40 +1266,40 @@ static int kv_dpm_enable(struct amdgpu_device *adev) ret = kv_process_firmware_header(adev); if (ret) { - DRM_ERROR("kv_process_firmware_header failed\n"); + drm_err(adev_to_drm(adev), "kv_process_firmware_header failed\n"); return ret; } kv_init_fps_limits(adev); kv_init_graphics_levels(adev); ret = kv_program_bootup_state(adev); if (ret) { - DRM_ERROR("kv_program_bootup_state failed\n"); + drm_err(adev_to_drm(adev), "kv_program_bootup_state failed\n"); return ret; } kv_calculate_dfs_bypass_settings(adev); ret = kv_upload_dpm_settings(adev); if (ret) { - DRM_ERROR("kv_upload_dpm_settings failed\n"); + drm_err(adev_to_drm(adev), "kv_upload_dpm_settings failed\n"); return ret; } ret = kv_populate_uvd_table(adev); if (ret) { - DRM_ERROR("kv_populate_uvd_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_uvd_table failed\n"); return ret; } ret = kv_populate_vce_table(adev); if (ret) { - DRM_ERROR("kv_populate_vce_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_vce_table failed\n"); return ret; } ret = kv_populate_samu_table(adev); if (ret) { - DRM_ERROR("kv_populate_samu_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_samu_table failed\n"); return ret; } ret = kv_populate_acp_table(adev); if (ret) { - DRM_ERROR("kv_populate_acp_table failed\n"); + drm_err(adev_to_drm(adev), "kv_populate_acp_table failed\n"); return ret; } kv_program_vc(adev); @@ -1310,39 +1310,39 @@ static int kv_dpm_enable(struct amdgpu_device *adev) if (pi->enable_auto_thermal_throttling) { ret = kv_enable_auto_thermal_throttling(adev); if (ret) { - DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_auto_thermal_throttling failed\n"); return ret; } } ret = kv_enable_dpm_voltage_scaling(adev); if (ret) { - DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_dpm_voltage_scaling failed\n"); return ret; } ret = kv_set_dpm_interval(adev); if (ret) { - DRM_ERROR("kv_set_dpm_interval failed\n"); + drm_err(adev_to_drm(adev), "kv_set_dpm_interval failed\n"); return ret; } ret = kv_set_dpm_boot_state(adev); if (ret) { - DRM_ERROR("kv_set_dpm_boot_state failed\n"); + drm_err(adev_to_drm(adev), "kv_set_dpm_boot_state failed\n"); return ret; } ret = kv_enable_ulv(adev, true); if (ret) { - DRM_ERROR("kv_enable_ulv failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_ulv failed\n"); return ret; } kv_start_dpm(adev); ret = kv_enable_didt(adev, true); if (ret) { - DRM_ERROR("kv_enable_didt failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_didt failed\n"); return ret; } ret = kv_enable_smc_cac(adev, true); if (ret) { - DRM_ERROR("kv_enable_smc_cac failed\n"); + drm_err(adev_to_drm(adev), "kv_enable_smc_cac failed\n"); return ret; } @@ -1350,7 +1350,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev) ret = amdgpu_kv_smc_bapm_enable(adev, false); if (ret) { - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable 
failed\n"); return ret; } @@ -1358,7 +1358,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev) kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); if (ret) { - DRM_ERROR("kv_set_thermal_temperature_range failed\n"); + drm_err(adev_to_drm(adev), "kv_set_thermal_temperature_range failed\n"); return ret; } amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, @@ -1382,7 +1382,7 @@ static void kv_dpm_disable(struct amdgpu_device *adev) err = amdgpu_kv_smc_bapm_enable(adev, false); if (err) - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); if (adev->asic_type == CHIP_MULLINS) kv_enable_nb_dpm(adev, false); @@ -1920,7 +1920,7 @@ static int kv_dpm_set_power_state(void *handle) if (pi->bapm_enable) { ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power); if (ret) { - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + drm_err(adev_to_drm(adev), "amdgpu_kv_smc_bapm_enable failed\n"); return ret; } } @@ -1931,7 +1931,7 @@ static int kv_dpm_set_power_state(void *handle) kv_update_dfs_bypass_settings(adev, new_ps); ret = kv_calculate_ds_divider(adev); if (ret) { - DRM_ERROR("kv_calculate_ds_divider failed\n"); + drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n"); return ret; } kv_calculate_nbps_level_settings(adev); @@ -1947,7 +1947,7 @@ static int kv_dpm_set_power_state(void *handle) ret = kv_update_vce_dpm(adev, new_ps, old_ps); if (ret) { - DRM_ERROR("kv_update_vce_dpm failed\n"); + drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n"); return ret; } kv_update_sclk_t(adev); @@ -1960,7 +1960,7 @@ static int kv_dpm_set_power_state(void *handle) kv_update_dfs_bypass_settings(adev, new_ps); ret = kv_calculate_ds_divider(adev); if (ret) { - DRM_ERROR("kv_calculate_ds_divider failed\n"); + drm_err(adev_to_drm(adev), "kv_calculate_ds_divider failed\n"); return ret; } kv_calculate_nbps_level_settings(adev); @@ -1972,7 +1972,7 @@ static int kv_dpm_set_power_state(void *handle) kv_set_enabled_levels(adev); ret = kv_update_vce_dpm(adev, new_ps, old_ps); if (ret) { - DRM_ERROR("kv_update_vce_dpm failed\n"); + drm_err(adev_to_drm(adev), "kv_update_vce_dpm failed\n"); return ret; } kv_update_acp_boot_level(adev); @@ -2521,7 +2521,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, if (high_temp > max_temp) high_temp = max_temp; if (high_temp < low_temp) { - DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); + drm_err(adev_to_drm(adev), "invalid thermal range: %d - %d\n", low_temp, high_temp); return -EINVAL; } @@ -2563,7 +2563,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev) data_offset); if (crev != 8) { - DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); + drm_err(adev_to_drm(adev), "Unsupported IGP table: %d %d\n", frev, crev); return -EINVAL; } pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); @@ -2579,7 +2579,7 @@ static int kv_parse_sys_info_table(struct amdgpu_device *adev) else pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { - DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); + drm_err(adev_to_drm(adev), "The htcTmpLmt should be larger than htcHystLmt.\n"); } if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) @@ -2886,16 +2886,18 @@ kv_dpm_print_power_state(void *handle, void *request_ps) struct kv_ps *ps = kv_get_ps(rps); struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2); + amdgpu_dpm_dbg_print_cap_info(adev, rps->caps); + drm_dbg(adev_to_drm(adev), "vclk: %d, dclk: %d\n", + rps->vclk, rps->dclk); for (i = 0; i < ps->num_levels; i++) { struct kv_pl *pl = &ps->levels[i]; - printk("\t\tpower level %d sclk: %u vddc: %u\n", - i, pl->sclk, - kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); + drm_dbg(adev_to_drm(adev), + "power level %d sclk: %u vddc: %u\n", + i, pl->sclk, + kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); } - amdgpu_dpm_print_ps_status(adev, rps); + amdgpu_dpm_dbg_print_ps_status(adev, rps); } static void kv_dpm_fini(struct amdgpu_device *adev) @@ -3013,13 +3015,13 @@ static int kv_dpm_sw_init(struct amdgpu_ip_block *ip_block) adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - DRM_INFO("amdgpu: dpm initialized\n"); + drm_info(adev_to_drm(adev), "dpm initialized\n"); return 0; dpm_failed: kv_dpm_fini(adev); - DRM_ERROR("amdgpu: dpm initialization failed\n"); + drm_err(adev_to_drm(adev), "dpm initialization failed: %d\n", ret); return ret; } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index c7518b13e787..ea3ace882a10 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -47,7 +47,7 @@ #define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) -void amdgpu_dpm_print_class_info(u32 class, u32 class2) +void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2) { const char *s; @@ -66,71 +66,45 @@ void amdgpu_dpm_print_class_info(u32 class, u32 class2) s = "performance"; break; } - printk("\tui class: %s\n", s); - printk("\tinternal class:"); + drm_dbg(adev_to_drm(adev), "\tui class: %s\n", s); if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && (class2 == 0)) - pr_cont(" none"); - else { - if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) - pr_cont(" boot"); - if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) - pr_cont(" thermal"); - if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) - pr_cont(" limited_pwr"); - if (class & ATOM_PPLIB_CLASSIFICATION_REST) - pr_cont(" rest"); - if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) - pr_cont(" forced"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - pr_cont(" 3d_perf"); - if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) - pr_cont(" ovrdrv"); - if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - pr_cont(" uvd"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) - pr_cont(" 3d_low"); - if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) - pr_cont(" acpi"); - if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - pr_cont(" uvd_hd2"); - if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - pr_cont(" uvd_hd"); - if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - pr_cont(" uvd_sd"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) - pr_cont(" limited_pwr2"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) - pr_cont(" ulv"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - pr_cont(" uvd_mvc"); - } - pr_cont("\n"); + drm_dbg(adev_to_drm(adev), "\tinternal class: none\n"); + else + 
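/* Folding the pr_cont() chain into one drm_dbg() keeps the whole flag
 * decode in a single log record (pr_cont() lines can interleave under
 * concurrent logging), at the cost of empty "%s" expansions for bits that
 * are not set. */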
drm_dbg(adev_to_drm(adev), "\tinternal class: %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", + (class & ATOM_PPLIB_CLASSIFICATION_BOOT) ? " boot" : "", + (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) ? " thermal" : "", + (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) ? " limited_pwr" : "", + (class & ATOM_PPLIB_CLASSIFICATION_REST) ? " rest" : "", + (class & ATOM_PPLIB_CLASSIFICATION_FORCED) ? " forced" : "", + (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) ? " 3d_perf" : "", + (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) ? " ovrdrv" : "", + (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ? " uvd" : "", + (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) ? " 3d_low" : "", + (class & ATOM_PPLIB_CLASSIFICATION_ACPI) ? " acpi" : "", + (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) ? " uvd_hd2" : "", + (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) ? " uvd_hd" : "", + (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ? " uvd_sd" : "", + (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) ? " limited_pwr2" : "", + (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) ? " ulv" : "", + (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) ? " uvd_mvc" : ""); } -void amdgpu_dpm_print_cap_info(u32 caps) +void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps) { - printk("\tcaps:"); - if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) - pr_cont(" single_disp"); - if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) - pr_cont(" video"); - if (caps & ATOM_PPLIB_DISALLOW_ON_DC) - pr_cont(" no_dc"); - pr_cont("\n"); + drm_dbg(adev_to_drm(adev), "\tcaps: %s%s%s\n", + (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) ? " single_disp" : "", + (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) ? " video" : "", + (caps & ATOM_PPLIB_DISALLOW_ON_DC) ? " no_dc" : ""); } -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, +void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps) { - printk("\tstatus:"); - if (rps == adev->pm.dpm.current_ps) - pr_cont(" c"); - if (rps == adev->pm.dpm.requested_ps) - pr_cont(" r"); - if (rps == adev->pm.dpm.boot_ps) - pr_cont(" b"); - pr_cont("\n"); + drm_dbg(adev_to_drm(adev), "\tstatus:%s%s%s\n", + rps == adev->pm.dpm.current_ps ? " c" : "", + rps == adev->pm.dpm.requested_ps ? " r" : "", + rps == adev->pm.dpm.boot_ps ? " b" : ""); } void amdgpu_pm_print_power_states(struct amdgpu_device *adev) @@ -699,64 +673,64 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev) adev->pm.fan_max_rpm = controller->ucFanMaxRPM; } if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_RV770; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_SUMO; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_NI; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_SI; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_CI; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { - DRM_INFO("Internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_KV; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { - DRM_INFO("External GPIO thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "External GPIO thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { - DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "ADT7473 with internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { - DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", + drm_info(adev_to_drm(adev), "EMC2103 with internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { - DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", + drm_info(adev_to_drm(adev), "Possible %s thermal controller at 0x%02x %s fan control\n", pp_lib_thermal_controller_names[controller->ucType], controller->ucI2cAddress >> 1, (controller->ucFanParameters & @@ -772,7 +746,7 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev) i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info); } } else { - DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", + drm_info(adev_to_drm(adev), "Unknown thermal controller type %d at 0x%02x %s fan control\n", controller->ucType, controller->ucI2cAddress >> 1, (controller->ucFanParameters & @@ -943,9 +917,9 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) return -EINVAL; if (amdgpu_dpm == 1 && pp_funcs->print_power_state) { - printk("switching from power state:\n"); + drm_dbg(adev_to_drm(adev), "switching from power state\n"); amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); - printk("switching to power state:\n"); + drm_dbg(adev_to_drm(adev), "switching to power state\n"); amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h index 93bd3973330c..7120eef30509 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h @@ -23,10 +23,9 @@ #ifndef __LEGACY_DPM_H__ #define __LEGACY_DPM_H__ -void amdgpu_dpm_print_class_info(u32 class, u32 class2); -void amdgpu_dpm_print_cap_info(u32 caps); -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, - struct amdgpu_ps *rps); +void amdgpu_dpm_dbg_print_class_info(struct amdgpu_device *adev, u32 class, u32 class2); +void amdgpu_dpm_dbg_print_cap_info(struct amdgpu_device *adev, u32 caps); +void amdgpu_dpm_dbg_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps); int amdgpu_get_platform_caps(struct amdgpu_device *adev); int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); void amdgpu_free_extended_power_table(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index 4c0e976004ba..52e732be59e3 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -7951,15 +7951,15 @@ static void si_dpm_print_power_state(void *handle, struct rv7xx_pl *pl; int i; - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + amdgpu_dpm_dbg_print_class_info(adev, rps->class, rps->class2); + amdgpu_dpm_dbg_print_cap_info(adev, rps->caps); + drm_dbg(adev_to_drm(adev), "\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); for (i = 0; i < ps->performance_level_count; i++) { pl = &ps->performance_levels[i]; - DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", + drm_dbg(adev_to_drm(adev), "\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); } - amdgpu_dpm_print_ps_status(adev, rps); + amdgpu_dpm_dbg_print_ps_status(adev, rps); } static int si_dpm_early_init(struct amdgpu_ip_block *ip_block) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c 
b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c index 79a566f3564a..c305ea4ec17d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu_helper.c @@ -149,7 +149,7 @@ int phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, } cgs_write_register(hwmgr->device, indirect_port, index); - return phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); + return phm_wait_on_register(hwmgr, indirect_port + 1, value, mask); } int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index d79a1d94661a..b47cb4a5f488 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -76,6 +76,10 @@ static void smu_power_profile_mode_get(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile_mode); static void smu_power_profile_mode_put(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile_mode); +static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type); +static int smu_od_edit_dpm_table(void *handle, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size); static int smu_sys_get_pp_feature_mask(void *handle, char *buf) @@ -134,12 +138,17 @@ int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) } int smu_set_soft_freq_range(struct smu_context *smu, - enum smu_clk_type clk_type, + enum pp_clock_type type, uint32_t min, uint32_t max) { + enum smu_clk_type clk_type; int ret = 0; + clk_type = smu_convert_to_smuclk(type); + if (clk_type == SMU_CLK_COUNT) + return -EINVAL; + if (smu->ppt_funcs->set_soft_freq_limited_range) ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, clk_type, @@ -307,6 +316,26 @@ static int smu_dpm_set_vpe_enable(struct smu_context *smu, return ret; } +static int smu_dpm_set_isp_enable(struct smu_context *smu, + bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret; + + if (!smu->ppt_funcs->dpm_set_isp_enable) + return 0; + + if (atomic_read(&power_gate->isp_gated) ^ enable) + return 0; + + ret = smu->ppt_funcs->dpm_set_isp_enable(smu, enable); + if (!ret) + atomic_set(&power_gate->isp_gated, !enable); + + return ret; +} + static int smu_dpm_set_umsch_mm_enable(struct smu_context *smu, bool enable) { @@ -408,6 +437,12 @@ static int smu_dpm_set_power_gate(void *handle, dev_err(smu->adev->dev, "Failed to power %s VPE!\n", gate ? "gate" : "ungate"); break; + case AMD_IP_BLOCK_TYPE_ISP: + ret = smu_dpm_set_isp_enable(smu, !gate); + if (ret) + dev_err(smu->adev->dev, "Failed to power %s ISP!\n", + gate ? 
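/* AMD_IP_BLOCK_TYPE_ISP is now accepted by amdgpu_dpm_set_powergating_by_smu()
 * (previous hunk) and dispatched here through smu_dpm_set_isp_enable(), whose
 * isp_gated atomic skips requests that would not change the current state. */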
"gate" : "ungate"); + break; default: dev_err(smu->adev->dev, "Unsupported block type!\n"); return -EINVAL; @@ -1004,6 +1039,21 @@ static int smu_fini_fb_allocations(struct smu_context *smu) return 0; } +static void smu_update_gpu_addresses(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *pm_status_table = smu_table->tables + SMU_TABLE_PMSTATUSLOG; + struct smu_table *driver_table = &(smu_table->driver_table); + struct smu_table *dummy_read_1_table = &smu_table->dummy_read_1_table; + + if (pm_status_table->bo) + pm_status_table->mc_address = amdgpu_bo_fb_aper_addr(pm_status_table->bo); + if (driver_table->bo) + driver_table->mc_address = amdgpu_bo_fb_aper_addr(driver_table->bo); + if (dummy_read_1_table->bo) + dummy_read_1_table->mc_address = amdgpu_bo_fb_aper_addr(dummy_read_1_table->bo); +} + /** * smu_alloc_memory_pool - allocate memory pool in the system memory * @@ -1285,6 +1335,7 @@ static int smu_sw_init(struct amdgpu_ip_block *ip_block) atomic_set(&smu->smu_power.power_gate.vcn_gated[i], 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); atomic_set(&smu->smu_power.power_gate.vpe_gated, 1); + atomic_set(&smu->smu_power.power_gate.isp_gated, 1); atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1); smu_init_power_profile(smu); @@ -1672,37 +1723,6 @@ static int smu_smc_hw_setup(struct smu_context *smu) } } - ret = smu_system_features_control(smu, true); - if (ret) { - dev_err(adev->dev, "Failed to enable requested dpm features!\n"); - return ret; - } - - smu_init_xgmi_plpd_mode(smu); - - ret = smu_feature_get_enabled_mask(smu, &features_supported); - if (ret) { - dev_err(adev->dev, "Failed to retrieve supported dpm features!\n"); - return ret; - } - bitmap_copy(feature->supported, - (unsigned long *)&features_supported, - feature->feature_num); - - if (!smu_is_dpm_running(smu)) - dev_info(adev->dev, "dpm has been disabled\n"); - - /* - * Set initialized values (get from vbios) to dpm tables context such as - * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each - * type of clks. - */ - ret = smu_set_default_dpm_table(smu); - if (ret) { - dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); - return ret; - } - if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5) pcie_gen = 4; else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) @@ -1738,6 +1758,37 @@ static int smu_smc_hw_setup(struct smu_context *smu) return ret; } + ret = smu_system_features_control(smu, true); + if (ret) { + dev_err(adev->dev, "Failed to enable requested dpm features!\n"); + return ret; + } + + smu_init_xgmi_plpd_mode(smu); + + ret = smu_feature_get_enabled_mask(smu, &features_supported); + if (ret) { + dev_err(adev->dev, "Failed to retrieve supported dpm features!\n"); + return ret; + } + bitmap_copy(feature->supported, + (unsigned long *)&features_supported, + feature->feature_num); + + if (!smu_is_dpm_running(smu)) + dev_info(adev->dev, "dpm has been disabled\n"); + + /* + * Set initialized values (get from vbios) to dpm tables context such as + * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each + * type of clks. 
+ */ + ret = smu_set_default_dpm_table(smu); + if (ret) { + dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); + return ret; + } + ret = smu_get_thermal_temperature_range(smu); if (ret) { dev_err(adev->dev, "Failed to get thermal temperature ranges!\n"); @@ -1780,6 +1831,9 @@ static int smu_start_smc_engine(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret = 0; + if (amdgpu_virt_xgmi_migrate_enabled(adev)) + smu_update_gpu_addresses(smu); + smu->smc_fw_state = SMU_FW_INIT; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { @@ -2144,6 +2198,7 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) int ret; struct amdgpu_device *adev = ip_block->adev; struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); if (amdgpu_sriov_multi_vf_mode(adev)) return 0; @@ -2175,6 +2230,18 @@ static int smu_resume(struct amdgpu_ip_block *ip_block) adev->pm.dpm_enabled = true; + if (smu->current_power_limit) { + ret = smu_set_power_limit(smu, smu->current_power_limit); + if (ret && ret != -EOPNOTSUPP) + return ret; + } + + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { + ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0); + if (ret) + return ret; + } + dev_info(adev->dev, "SMU is resumed successfully!\n"); return 0; @@ -2935,6 +3002,12 @@ static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type) clk_type = SMU_DCLK; break; case PP_DCLK1: clk_type = SMU_DCLK1; break; + case PP_ISPICLK: + clk_type = SMU_ISPICLK; + break; + case PP_ISPXCLK: + clk_type = SMU_ISPXCLK; + break; case OD_SCLK: clk_type = SMU_OD_SCLK; break; case OD_MCLK: diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 9aacc7bc1c69..b52e194397e2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -402,6 +402,7 @@ struct smu_power_gate { atomic_t vcn_gated[AMDGPU_MAX_VCN_INSTANCES]; atomic_t jpeg_gated; atomic_t vpe_gated; + atomic_t isp_gated; atomic_t umsch_mm_gated; }; @@ -1436,6 +1437,12 @@ struct pptable_funcs { int (*dpm_set_vpe_enable)(struct smu_context *smu, bool enable); /** + * @dpm_set_isp_enable: Enable/disable ISP engine dynamic power + * management. + */ + int (*dpm_set_isp_enable)(struct smu_context *smu, bool enable); + + /** * @dpm_set_umsch_mm_enable: Enable/disable UMSCH engine dynamic power * management. 
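 * (The @dpm_set_isp_enable hook added above follows the same pattern; see
 * smu_dpm_set_isp_enable() in the amdgpu_smu.c hunk earlier for the
 * isp_gated bookkeeping around it.)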
*/ @@ -1635,7 +1642,7 @@ int smu_write_watermarks_table(struct smu_context *smu); int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); -int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, +int smu_set_soft_freq_range(struct smu_context *smu, enum pp_clock_type clk_type, uint32_t min, uint32_t max); int smu_set_gfx_power_up_by_imu(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h index 1bc30db22f9c..cd44f4254134 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu14_driver_if_v14_0_0.h @@ -106,6 +106,7 @@ typedef struct { #define NUM_FCLK_DPM_LEVELS 8 #define NUM_MEM_PSTATE_LEVELS 4 +#define ISP_ALL_TILES_MASK 0x7FF typedef struct { uint32_t UClk; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h index d7505cfc433a..0a2ca544f4e3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h @@ -86,8 +86,10 @@ typedef enum { /*36*/ FEATURE_PIT = 36, /*37*/ FEATURE_DVO = 37, /*38*/ FEATURE_XVMINORPSM_CLKSTOP_DS = 38, +/*39*/ FEATURE_GLOBAL_DPM = 39, +/*40*/ FEATURE_NODE_POWER_MANAGER = 40, -/*39*/ NUM_FEATURES = 39 +/*41*/ NUM_FEATURES = 41 } FEATURE_LIST_e; //enum for MPIO PCIe gen speed msgs @@ -133,7 +135,7 @@ typedef enum { GFX_DVM_MARGIN_COUNT } GFX_DVM_MARGIN_e; -#define SMU_METRICS_TABLE_VERSION 0x12 +#define SMU_METRICS_TABLE_VERSION 0x13 typedef struct __attribute__((packed, aligned(4))) { uint64_t AccumulationCounter; @@ -275,6 +277,16 @@ typedef struct { //PSNs uint64_t PublicSerialNumber_AID[4]; uint64_t PublicSerialNumber_XCD[8]; + + //XGMI + uint32_t MaxXgmiWidth; + uint32_t MaxXgmiBitrate; + + // Telemetry + uint32_t InputTelemetryVoltageInmV; + + // General info + uint32_t pldmVersion[2]; } StaticMetricsTable_t; #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h index eefdaa0b5df6..d7a9e41820fa 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -305,6 +305,8 @@ enum smu_clk_type { SMU_MCLK, SMU_PCIE, SMU_LCLK, + SMU_ISPICLK, + SMU_ISPXCLK, SMU_OD_CCLK, SMU_OD_SCLK, SMU_OD_MCLK, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 7fad5dfb39c4..aac202d0c30e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2444,7 +2444,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu, struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; PPTable_t *pptable = smu->smu_table.driver_pptable; uint32_t smu_pcie_arg; - int ret, i; + int ret = 0; + int i; /* lclk dpm table setup */ for (i = 0; i < MAX_PCIE_CONF; i++) { @@ -2453,25 +2454,27 @@ static int navi10_update_pcie_parameters(struct smu_context *smu, } for (i = 0; i < NUM_LINK_LEVELS; i++) { - smu_pcie_arg = (i << 16) | - ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) : - (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ? 
- pptable->PcieLaneCount[i] : pcie_width_cap); - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - - if (ret) - return ret; - - if (pptable->PcieGenSpeed[i] > pcie_gen_cap) - dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap; - if (pptable->PcieLaneCount[i] > pcie_width_cap) - dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap; + if (pptable->PcieGenSpeed[i] > pcie_gen_cap || + pptable->PcieLaneCount[i] > pcie_width_cap) { + dpm_context->dpm_tables.pcie_table.pcie_gen[i] = + pptable->PcieGenSpeed[i] > pcie_gen_cap ? + pcie_gen_cap : pptable->PcieGenSpeed[i]; + dpm_context->dpm_tables.pcie_table.pcie_lane[i] = + pptable->PcieLaneCount[i] > pcie_width_cap ? + pcie_width_cap : pptable->PcieLaneCount[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_gen_cap << 8; + smu_pcie_arg |= pcie_width_cap; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } } - return 0; + return ret; } static inline void navi10_dump_od_table(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 115e3fa456bc..d57591509aed 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2145,7 +2145,8 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, uint8_t min_gen_speed, max_gen_speed; uint8_t min_lane_width, max_lane_width; uint32_t smu_pcie_arg; - int ret, i; + int ret = 0; + int i; GET_PPTABLE_MEMBER(PcieGenSpeed, &table_member1); GET_PPTABLE_MEMBER(PcieLaneCount, &table_member2); @@ -2170,19 +2171,22 @@ static int sienna_cichlid_update_pcie_parameters(struct smu_context *smu, pcie_table->pcie_lane[1] = max_lane_width; for (i = 0; i < NUM_LINK_LEVELS; i++) { - smu_pcie_arg = (i << 16 | + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK) || + table_member1[i] > pcie_gen_cap || table_member2[i] > pcie_width_cap) { + smu_pcie_arg = (i << 16 | pcie_table->pcie_gen[i] << 8 | pcie_table->pcie_lane[i]); - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - if (ret) - return ret; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } } - return 0; + return ret; } static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index a55ea76d7399..2c9869feba61 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -666,7 +666,6 @@ static int vangogh_print_clk_levels(struct smu_context *smu, { DpmClocks_t *clk_table = smu->smu_table.clocks_table; SmuMetrics_t metrics; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; bool cur_value_match_level = false; @@ -682,31 +681,25 @@ static int vangogh_print_clk_levels(struct smu_context *smu, switch (clk_type) { case SMU_OD_SCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", - (smu->gfx_actual_hard_min_freq > 0) ? 
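/* Dropping the AMD_DPM_FORCED_LEVEL_MANUAL gates means OD_SCLK/OD_CCLK/
 * OD_RANGE are now reported regardless of the forced performance level,
 * falling back to the default min/max whenever no user override is set. */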
smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", - (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK"); + size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq); + size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq); break; case SMU_OD_CCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); - size += sysfs_emit_at(buf, size, "0: %10uMhz\n", - (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); - size += sysfs_emit_at(buf, size, "1: %10uMhz\n", - (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select); + size += sysfs_emit_at(buf, size, "0: %10uMhz\n", + (smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq); + size += sysfs_emit_at(buf, size, "1: %10uMhz\n", + (smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq); break; case SMU_OD_RANGE: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); - size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", - smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); - size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", - smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); - } + size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE"); + size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n", + smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq); + size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n", + smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq); break; case SMU_SOCCLK: /* the level 3 ~ 6 of socclk use the same frequency for vangogh */ diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 9481f897432d..e97b0cf19197 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -497,7 +497,6 @@ static int renoir_print_clk_levels(struct smu_context *smu, int i, idx, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; SmuMetrics_t metrics; - struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); bool cur_value_match_level = false; memset(&metrics, 0, sizeof(metrics)); @@ -510,28 +509,24 @@ static int renoir_print_clk_levels(struct smu_context *smu, switch (clk_type) { case SMU_OD_RANGE: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GetMinGfxclkFrequency, - 0, &min); - if (ret) - return ret; - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_GetMaxGfxclkFrequency, - 0, &max); - if (ret) - return ret; - size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max); - } + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_GetMinGfxclkFrequency, + 0, &min); + if (ret) + return ret; + ret = smu_cmn_send_smc_msg_with_param(smu, + 
SMU_MSG_GetMaxGfxclkFrequency, + 0, &max); + if (ret) + return ret; + size += sysfs_emit_at(buf, size, "OD_RANGE\nSCLK: %10uMhz %10uMhz\n", min, max); break; case SMU_OD_SCLK: - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { - min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; - max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; - size += sysfs_emit_at(buf, size, "OD_SCLK\n"); - size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min); - size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max); - } + min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq; + max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq; + size += sysfs_emit_at(buf, size, "OD_SCLK\n"); + size += sysfs_emit_at(buf, size, "0:%10uMhz\n", min); + size += sysfs_emit_at(buf, size, "1:%10uMhz\n", max); break; case SMU_GFXCLK: case SMU_SCLK: diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 6de653d2ed62..c63d2e28954d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -342,6 +342,61 @@ static int aldebaran_get_allowed_feature_mask(struct smu_context *smu, return 0; } +static int aldebaran_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_dpm_table *dpm_table; + uint32_t min_clk, max_clk; + + if (amdgpu_sriov_vf(smu->adev)) { + switch (clk_type) { + case SMU_MCLK: + case SMU_UCLK: + dpm_table = &dpm_context->dpm_tables.uclk_table; + break; + case SMU_GFXCLK: + case SMU_SCLK: + dpm_table = &dpm_context->dpm_tables.gfx_table; + break; + case SMU_SOCCLK: + dpm_table = &dpm_context->dpm_tables.soc_table; + break; + case SMU_FCLK: + dpm_table = &dpm_context->dpm_tables.fclk_table; + break; + case SMU_VCLK: + dpm_table = &dpm_context->dpm_tables.vclk_table; + break; + case SMU_DCLK: + dpm_table = &dpm_context->dpm_tables.dclk_table; + break; + default: + return -EINVAL; + } + + min_clk = dpm_table->min; + max_clk = dpm_table->max; + + if (min) { + if (!min_clk) + return -ENODATA; + *min = min_clk; + } + if (max) { + if (!max_clk) + return -ENODATA; + *max = max_clk; + } + + } else { + return smu_v13_0_get_dpm_ultimate_freq(smu, clk_type, min, max); + } + + return 0; +} + static int aldebaran_set_default_dpm_table(struct smu_context *smu) { struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; @@ -2081,7 +2136,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = { .set_azalia_d3_pme = smu_v13_0_set_azalia_d3_pme, .get_max_sustainable_clocks_by_dc = smu_v13_0_get_max_sustainable_clocks_by_dc, .get_bamaco_support = aldebaran_get_bamaco_support, - .get_dpm_ultimate_freq = smu_v13_0_get_dpm_ultimate_freq, + .get_dpm_ultimate_freq = aldebaran_get_dpm_ultimate_freq, .set_soft_freq_limited_range = aldebaran_set_soft_freq_limited_range, .od_edit_dpm_table = aldebaran_usr_edit_dpm_table, .set_df_cstate = aldebaran_set_df_cstate, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index a7167668d189..1a1f2a6b2e52 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -58,6 +58,7 @@ 
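Every update_pcie_parameters() variant touched in these hunks (navi10, sienna cichlid, smu_v13_0 and the new smu_v13_0_0 copy) packs the OverridePcieParameters argument the same way: link level in bits 16 and up, gen speed in bits 8-15, lane count in bits 0-7. A standalone sketch of the encoding (the cap values are made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t level = 1, pcie_gen = 3, pcie_lanes = 16; /* hypothetical caps */

	uint32_t smu_pcie_arg = level << 16;   /* which link level to override */
	smu_pcie_arg |= pcie_gen << 8;         /* gen speed for that level     */
	smu_pcie_arg |= pcie_lanes;            /* lane count for that level    */

	printf("0x%06x\n", smu_pcie_arg);      /* prints 0x010310 */
	return 0;
}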
MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin"); +MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); @@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16}; int smu_v13_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char ucode_prefix[15]; + char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -103,8 +104,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s.bin", ucode_prefix); + + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (err) goto out; @@ -2380,7 +2386,8 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, &dpm_context->dpm_tables.pcie_table; int num_of_levels = pcie_table->num_of_link_levels; uint32_t smu_pcie_arg; - int ret, i; + int ret = 0; + int i; if (!num_of_levels) return 0; @@ -2396,30 +2403,38 @@ int smu_v13_0_update_pcie_parameters(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { pcie_table->pcie_gen[i] = pcie_gen_cap; pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; } } else { for (i = 0; i < num_of_levels; i++) { - if (pcie_table->pcie_gen[i] > pcie_gen_cap) + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { pcie_table->pcie_gen[i] = pcie_gen_cap; - if (pcie_table->pcie_lane[i] > pcie_width_cap) pcie_table->pcie_lane[i] = pcie_width_cap; - } - } - - for (i = 0; i < num_of_levels; i++) { - smu_pcie_arg = i << 16; - smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; - smu_pcie_arg |= pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg, - NULL); - if (ret) - return ret; + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } } - return 0; + return ret; } int smu_v13_0_disable_pmfw_state(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 5a9711e8cf68..e084ed99ec0e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -572,8 +572,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) PPTable_t *pptable = table_context->driver_pptable; SkuTable_t *skutable = &pptable->SkuTable; struct smu_13_0_dpm_table *dpm_table; - struct smu_13_0_pcie_table *pcie_table; - uint32_t link_level; int ret = 0; /* socclk dpm table setup */ @@ -689,24 +687,6 @@ static int smu_v13_0_0_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* lclk dpm table 
setup */ - pcie_table = &dpm_context->dpm_tables.pcie_table; - pcie_table->num_of_link_levels = 0; - for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { - if (!skutable->PcieGenSpeed[link_level] && - !skutable->PcieLaneCount[link_level] && - !skutable->LclkFreq[link_level]) - continue; - - pcie_table->pcie_gen[pcie_table->num_of_link_levels] = - skutable->PcieGenSpeed[link_level]; - pcie_table->pcie_lane[pcie_table->num_of_link_levels] = - skutable->PcieLaneCount[link_level]; - pcie_table->clk_freq[pcie_table->num_of_link_levels] = - skutable->LclkFreq[link_level]; - pcie_table->num_of_link_levels++; - } - /* dcefclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.dcef_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { @@ -3150,6 +3130,90 @@ static int smu_v13_0_0_set_power_limit(struct smu_context *smu, return 0; } +static int smu_v13_0_0_update_pcie_parameters(struct smu_context *smu, + uint8_t pcie_gen_cap, + uint8_t pcie_width_cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_pcie_table *pcie_table = + &dpm_context->dpm_tables.pcie_table; + int num_of_levels; + uint32_t smu_pcie_arg; + uint32_t link_level; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + int ret = 0; + int i; + + pcie_table->num_of_link_levels = 0; + + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + + num_of_levels = pcie_table->num_of_link_levels; + if (!num_of_levels) + return 0; + + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) + pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; + + if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) + pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; + + /* Force all levels to use the same settings */ + for (i = 0; i < num_of_levels; i++) { + pcie_table->pcie_gen[i] = pcie_gen_cap; + pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } else { + for (i = 0; i < num_of_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? 
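/* Unlike the !PP_PCIE_DPM_MASK branch above, which forces every level to
 * the capped values, this path clamps gen and lane count independently and
 * resends SMU_MSG_OverridePcieParameters only for levels that actually
 * exceeded the caps. */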
+ pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } + } + + return ret; +} + static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table, @@ -3179,7 +3243,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .feature_is_enabled = smu_cmn_feature_is_enabled, .print_clk_levels = smu_v13_0_0_print_clk_levels, .force_clk_levels = smu_v13_0_0_force_clk_levels, - .update_pcie_parameters = smu_v13_0_update_pcie_parameters, + .update_pcie_parameters = smu_v13_0_0_update_pcie_parameters, .get_thermal_temperature_range = smu_v13_0_0_get_thermal_temperature_range, .register_irq_handler = smu_v13_0_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c index e0d356f93ab0..02a455a31c25 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c @@ -187,8 +187,34 @@ int smu_v13_0_12_get_max_metrics_size(void) return max(sizeof(StaticMetricsTable_t), sizeof(MetricsTable_t)); } +static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu, + StaticMetricsTable_t *static_metrics) +{ + struct smu_table_context *smu_table = &smu->smu_table; + uint16_t max_speed; + uint8_t max_width; + int ret; + + if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) { + max_width = (uint8_t)static_metrics->MaxXgmiWidth; + max_speed = (uint16_t)static_metrics->MaxXgmiBitrate; + ret = 0; + } else { + MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; + + ret = smu_v13_0_6_get_metrics_table(smu, NULL, true); + if (!ret) { + max_width = (uint8_t)metrics->XgmiWidth; + max_speed = (uint16_t)metrics->XgmiBitrate; + } + } + if (!ret) + amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width); +} + int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_table_context *smu_table = &smu->smu_table; StaticMetricsTable_t *static_metrics = (StaticMetricsTable_t *)smu_table->metrics_table; struct PPTable_t *pptable = @@ -237,6 +263,18 @@ int smu_v13_0_12_setup_driver_pptable(struct smu_context *smu) if (ret) return ret; + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(BOARD_VOLTAGE))) { + if (!static_metrics->InputTelemetryVoltageInmV) { + dev_warn(smu->adev->dev, "Invalid board voltage %d\n", + static_metrics->InputTelemetryVoltageInmV); + } + dpm_context->board_volt = static_metrics->InputTelemetryVoltageInmV; + } + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(PLDM_VERSION)) && + static_metrics->pldmVersion[0] != 0xFFFFFFFF) + smu->adev->firmware.pldm_version = + static_metrics->pldmVersion[0]; + smu_v13_0_12_init_xgmi_data(smu, static_metrics); pptable->Init = true; } @@ -263,7 +301,6 @@ int smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, struct smu_table_context *smu_table = &smu->smu_table; MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table; struct amdgpu_device *adev = smu->adev; - int ret = 0; int xcc_id; /* For clocks with multiple instances, only report the first one */ @@ -319,7 +356,7 @@ int 
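/* smu_v13_0_12_init_xgmi_data() above reads the XGMI speed/width caps from
 * the static metrics table when its version is >= 0x13 and only falls back
 * to a full metrics-table fetch on older firmware before handing them to
 * amgpu_xgmi_set_max_speed_width(). */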
smu_v13_0_12_get_smu_metrics_data(struct smu_context *smu, break; } - return ret; + return 0; } ssize_t smu_v13_0_12_get_xcp_metrics(struct smu_context *smu, struct amdgpu_xcp *xcp, void *table, void *smu_metrics) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c index f00ef7f3f355..9cc294f4708b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c @@ -345,6 +345,11 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu) if (fw_ver >= 0x00562500) smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); + + if (fw_ver >= 0x04560100) { + smu_v13_0_6_cap_set(smu, SMU_CAP(BOARD_VOLTAGE)); + smu_v13_0_6_cap_set(smu, SMU_CAP(PLDM_VERSION)); + } } static void smu_v13_0_6_init_caps(struct smu_context *smu) @@ -685,8 +690,8 @@ static int smu_v13_0_6_get_allowed_feature_mask(struct smu_context *smu, return 0; } -static int smu_v13_0_6_get_metrics_table(struct smu_context *smu, - void *metrics_table, bool bypass_cache) +int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, + bool bypass_cache) { struct smu_table_context *smu_table = &smu->smu_table; uint32_t table_size = smu_table->tables[SMU_TABLE_SMU_METRICS].size; @@ -800,6 +805,8 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) int version = smu_v13_0_6_get_metrics_version(smu); int ret, i, retry = 100; uint32_t table_version; + uint16_t max_speed; + uint8_t max_width; if (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 12) && smu_v13_0_6_cap_supported(smu, SMU_CAP(STATIC_METRICS))) @@ -835,6 +842,9 @@ static int smu_v13_0_6_setup_driver_pptable(struct smu_context *smu) SMUQ10_ROUND(GET_METRIC_FIELD(MaxGfxclkFrequency, version)); pptable->MinGfxclkFrequency = SMUQ10_ROUND(GET_METRIC_FIELD(MinGfxclkFrequency, version)); + max_width = (uint8_t)GET_METRIC_FIELD(XgmiWidth, version); + max_speed = (uint16_t)GET_METRIC_FIELD(XgmiBitrate, version); + amgpu_xgmi_set_max_speed_width(smu->adev, max_speed, max_width); for (i = 0; i < 4; ++i) { pptable->FclkFrequencyTable[i] = @@ -871,51 +881,51 @@ static int smu_v13_0_6_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_table_context *smu_table = &smu->smu_table; struct PPTable_t *pptable = (struct PPTable_t *)smu_table->driver_pptable; - uint32_t clock_limit = 0, param; + struct smu_13_0_dpm_table *dpm_table; + uint32_t min_clk, max_clk, param; int ret = 0, clk_id = 0; - if (!smu_cmn_clk_dpm_is_enabled(smu, clk_type)) { + /* Use dpm tables, if data is already fetched */ + if (pptable->Init) { switch (clk_type) { case SMU_MCLK: case SMU_UCLK: - if (pptable->Init) - clock_limit = pptable->UclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.uclk_table; break; case SMU_GFXCLK: case SMU_SCLK: - if (pptable->Init) - clock_limit = pptable->MinGfxclkFrequency; + dpm_table = &dpm_context->dpm_tables.gfx_table; break; case SMU_SOCCLK: - if (pptable->Init) - clock_limit = pptable->SocclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.soc_table; break; case SMU_FCLK: - if (pptable->Init) - clock_limit = pptable->FclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.fclk_table; break; case SMU_VCLK: - if (pptable->Init) - clock_limit = pptable->VclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.vclk_table; break; case SMU_DCLK: - if 
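/*
 * Sketch of the rework in progress here: once pptable->Init is set,
 * smu_v13_0_6_get_dpm_ultimate_freq() answers min/max queries from the
 * cached per-clock dpm tables instead of issuing firmware requests on
 * every call. Illustrative fragment for SMU_UCLK, assuming the tables
 * were filled during pptable setup:
 *
 *   dpm_table = &dpm_context->dpm_tables.uclk_table;
 *   if (min)
 *           *min = dpm_table->min;
 *   if (max)
 *           *max = dpm_table->max;
 *
 * The function only falls through to the message-based path when one
 * of the cached bounds is still zero.
 */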
(pptable->Init) - clock_limit = pptable->DclkFrequencyTable[0]; + dpm_table = &dpm_context->dpm_tables.dclk_table; break; default: - break; + return -EINVAL; } - if (min) - *min = clock_limit; + min_clk = dpm_table->min; + max_clk = dpm_table->max; + if (min) + *min = min_clk; if (max) - *max = clock_limit; + *max = max_clk; - return 0; + if (min_clk && max_clk) + return 0; } if (!(clk_type == SMU_GFXCLK || clk_type == SMU_SCLK)) { @@ -1377,8 +1387,9 @@ static int smu_v13_0_6_print_clk_levels(struct smu_context *smu, return ret; } - min_clk = pstate_table->gfxclk_pstate.curr.min; - max_clk = pstate_table->gfxclk_pstate.curr.max; + single_dpm_table = &(dpm_context->dpm_tables.gfx_table); + min_clk = single_dpm_table->min; + max_clk = single_dpm_table->max; if (now < SMU_13_0_6_DSCLK_THRESHOLD) { size += sysfs_emit_at(buf, size, "S: %uMhz *\n", @@ -2682,7 +2693,7 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table bool per_inst; metrics_v0 = kzalloc(METRICS_TABLE_SIZE, GFP_KERNEL); - ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, true); + ret = smu_v13_0_6_get_metrics_table(smu, metrics_v0, false); if (ret) { kfree(metrics_v0); return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h index d38d6d76b1e7..67b30674fd31 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h @@ -74,6 +74,8 @@ enum smu_v13_0_6_caps { extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); +int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, + bool bypass_cache); bool smu_v13_0_12_is_dpm_running(struct smu_context *smu); int smu_v13_0_12_get_max_metrics_size(void); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index c8f4f6fb4083..c96fa5e49ed6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -579,8 +579,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu) PPTable_t *driver_ppt = smu->smu_table.driver_pptable; SkuTable_t *skutable = &driver_ppt->SkuTable; struct smu_13_0_dpm_table *dpm_table; - struct smu_13_0_pcie_table *pcie_table; - uint32_t link_level; int ret = 0; /* socclk dpm table setup */ @@ -687,24 +685,6 @@ static int smu_v13_0_7_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* lclk dpm table setup */ - pcie_table = &dpm_context->dpm_tables.pcie_table; - pcie_table->num_of_link_levels = 0; - for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { - if (!skutable->PcieGenSpeed[link_level] && - !skutable->PcieLaneCount[link_level] && - !skutable->LclkFreq[link_level]) - continue; - - pcie_table->pcie_gen[pcie_table->num_of_link_levels] = - skutable->PcieGenSpeed[link_level]; - pcie_table->pcie_lane[pcie_table->num_of_link_levels] = - skutable->PcieLaneCount[link_level]; - pcie_table->clk_freq[pcie_table->num_of_link_levels] = - skutable->LclkFreq[link_level]; - pcie_table->num_of_link_levels++; - } - /* dcefclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.dcef_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { @@ -2739,6 +2719,89 @@ static int smu_v13_0_7_set_power_limit(struct 
smu_context *smu, return 0; } +static int smu_v13_0_7_update_pcie_parameters(struct smu_context *smu, + uint8_t pcie_gen_cap, + uint8_t pcie_width_cap) +{ + struct smu_13_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; + struct smu_13_0_pcie_table *pcie_table = + &dpm_context->dpm_tables.pcie_table; + int num_of_levels; + int link_level; + uint32_t smu_pcie_arg; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + int ret = 0; + int i; + + pcie_table->num_of_link_levels = 0; + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + + pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + + num_of_levels = pcie_table->num_of_link_levels; + if (!num_of_levels) + return 0; + + if (!(smu->adev->pm.pp_feature & PP_PCIE_DPM_MASK)) { + if (pcie_table->pcie_gen[num_of_levels - 1] < pcie_gen_cap) + pcie_gen_cap = pcie_table->pcie_gen[num_of_levels - 1]; + + if (pcie_table->pcie_lane[num_of_levels - 1] < pcie_width_cap) + pcie_width_cap = pcie_table->pcie_lane[num_of_levels - 1]; + + /* Force all levels to use the same settings */ + for (i = 0; i < num_of_levels; i++) { + pcie_table->pcie_gen[i] = pcie_gen_cap; + pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } else { + for (i = 0; i < num_of_levels; i++) { + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? 
+ pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; + } + } + } + + return ret; +} + static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, @@ -2768,7 +2831,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .feature_is_enabled = smu_cmn_feature_is_enabled, .print_clk_levels = smu_v13_0_7_print_clk_levels, .force_clk_levels = smu_v13_0_7_force_clk_levels, - .update_pcie_parameters = smu_v13_0_update_pcie_parameters, + .update_pcie_parameters = smu_v13_0_7_update_pcie_parameters, .get_thermal_temperature_range = smu_v13_0_7_get_thermal_temperature_range, .register_irq_handler = smu_v13_0_register_irq_handler, .enable_thermal_alert = smu_v13_0_enable_thermal_alert, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index 76c1adda83db..f9b0938c57ea 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -62,13 +62,14 @@ const int decoded_link_width[8] = {0, 1, 2, 4, 8, 12, 16, 32}; MODULE_FIRMWARE("amdgpu/smu_14_0_2.bin"); MODULE_FIRMWARE("amdgpu/smu_14_0_3.bin"); +MODULE_FIRMWARE("amdgpu/smu_14_0_3_kicker.bin"); #define ENABLE_IMU_ARG_GFXOFF_ENABLE 1 int smu_v14_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char ucode_prefix[15]; + char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -79,8 +80,12 @@ int smu_v14_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, - "amdgpu/%s.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->pm.fw, AMDGPU_UCODE_REQUIRED, + "amdgpu/%s.bin", ucode_prefix); if (err) goto out; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c index 84f9b007b59f..fe00c84b1cc6 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_0_ppt.c @@ -1207,11 +1207,13 @@ static int smu_v14_0_0_print_clk_levels(struct smu_context *smu, static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, - uint32_t max) + u32 min, + u32 max, + bool __always_unused automatic) { - enum smu_message_type msg_set_min, msg_set_max; - int ret = 0; + enum smu_message_type msg_set_min = SMU_MSG_MAX_COUNT; + enum smu_message_type msg_set_max = SMU_MSG_MAX_COUNT; + int ret = -EINVAL; if (!smu_v14_0_0_clk_dpm_is_enabled(smu, clk_type)) return -EINVAL; @@ -1240,16 +1242,23 @@ static int smu_v14_0_0_set_soft_freq_limited_range(struct smu_context *smu, msg_set_min = SMU_MSG_SetHardMinVcn1; msg_set_max = SMU_MSG_SetSoftMaxVcn1; break; + case SMU_ISPICLK: + msg_set_min = SMU_MSG_SetHardMinIspiclkByFreq; + break; + case SMU_ISPXCLK: + msg_set_min = SMU_MSG_SetHardMinIspxclkByFreq; + break; default: return 
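/*
 * Sketch of the sentinel pattern introduced in this hunk: both message
 * IDs start out as SMU_MSG_MAX_COUNT, and a request is only sent when
 * the switch above mapped one for the clock type and the caller passed
 * a non-zero bound. This lets the new ISP clocks, which define only a
 * hard-min message, skip the soft-max write instead of erroring out:
 *
 *   if (min && msg_set_min != SMU_MSG_MAX_COUNT)
 *           ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min,
 *                                                 min, NULL);
 */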
-EINVAL; } - ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); - if (ret) - return ret; + if (min && msg_set_min != SMU_MSG_MAX_COUNT) + ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL); + + if (max && msg_set_max != SMU_MSG_MAX_COUNT) + ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL); - return smu_cmn_send_smc_msg_with_param(smu, msg_set_max, - max, NULL); + return ret; } static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, @@ -1278,7 +1287,7 @@ static int smu_v14_0_0_force_clk_levels(struct smu_context *smu, if (ret) break; - ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq); + ret = smu_v14_0_0_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq, false); break; default: ret = -EINVAL; @@ -1426,7 +1435,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_SCLK, sclk_min, - sclk_max); + sclk_max, + false); if (ret) return ret; @@ -1438,7 +1448,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_min, - fclk_max); + fclk_max, + false); if (ret) return ret; } @@ -1447,7 +1458,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_min, - socclk_max); + socclk_max, + false); if (ret) return ret; } @@ -1456,7 +1468,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_min, - vclk_max); + vclk_max, + false); if (ret) return ret; } @@ -1465,7 +1478,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_VCLK1, vclk1_min, - vclk1_max); + vclk1_max, + false); if (ret) return ret; } @@ -1474,7 +1488,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_min, - dclk_max); + dclk_max, + false); if (ret) return ret; } @@ -1483,7 +1498,8 @@ static int smu_v14_0_common_set_performance_level(struct smu_context *smu, ret = smu_v14_0_0_set_soft_freq_limited_range(smu, SMU_DCLK1, dclk1_min, - dclk1_max); + dclk1_max, + false); if (ret) return ret; } @@ -1533,6 +1549,14 @@ static int smu_v14_0_0_set_vpe_enable(struct smu_context *smu, 0, NULL); } +static int smu_v14_0_0_set_isp_enable(struct smu_context *smu, + bool enable) +{ + return smu_cmn_send_smc_msg_with_param(smu, enable ? 
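/*
 * The new smu_v14_0_0_set_isp_enable() below collapses ISP power
 * control into one SMU message, selecting power-up or power-down for
 * every tile at once via ISP_ALL_TILES_MASK. A hedged usage sketch
 * (the caller shape is assumed, not shown in this patch):
 *
 *   ret = smu->ppt_funcs->dpm_set_isp_enable(smu, true); // all tiles up
 */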
+ SMU_MSG_PowerUpIspByTile : SMU_MSG_PowerDownIspByTile, + ISP_ALL_TILES_MASK, NULL); +} + static int smu_v14_0_0_set_umsch_mm_enable(struct smu_context *smu, bool enable) { @@ -1662,6 +1686,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .gfx_off_control = smu_v14_0_gfx_off_control, .mode2_reset = smu_v14_0_0_mode2_reset, .get_dpm_ultimate_freq = smu_v14_0_common_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = smu_v14_0_0_set_soft_freq_limited_range, .od_edit_dpm_table = smu_v14_0_od_edit_dpm_table, .print_clk_levels = smu_v14_0_0_print_clk_levels, .force_clk_levels = smu_v14_0_0_force_clk_levels, @@ -1669,6 +1694,7 @@ static const struct pptable_funcs smu_v14_0_0_ppt_funcs = { .set_fine_grain_gfx_freq_parameters = smu_v14_0_common_set_fine_grain_gfx_freq_parameters, .set_gfx_power_up_by_imu = smu_v14_0_set_gfx_power_up_by_imu, .dpm_set_vpe_enable = smu_v14_0_0_set_vpe_enable, + .dpm_set_isp_enable = smu_v14_0_0_set_isp_enable, .dpm_set_umsch_mm_enable = smu_v14_0_0_set_umsch_mm_enable, .get_dpm_clock_table = smu_v14_0_common_get_dpm_table, .set_mall_enable = smu_v14_0_common_set_mall_enable, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c index 82c2db972491..f32474af90b3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c @@ -502,8 +502,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) PPTable_t *pptable = table_context->driver_pptable; SkuTable_t *skutable = &pptable->SkuTable; struct smu_14_0_dpm_table *dpm_table; - struct smu_14_0_pcie_table *pcie_table; - uint32_t link_level; int ret = 0; /* socclk dpm table setup */ @@ -619,27 +617,6 @@ static int smu_v14_0_2_set_default_dpm_table(struct smu_context *smu) dpm_table->max = dpm_table->dpm_levels[0].value; } - /* lclk dpm table setup */ - pcie_table = &dpm_context->dpm_tables.pcie_table; - pcie_table->num_of_link_levels = 0; - for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { - if (!skutable->PcieGenSpeed[link_level] && - !skutable->PcieLaneCount[link_level] && - !skutable->LclkFreq[link_level]) - continue; - - pcie_table->pcie_gen[pcie_table->num_of_link_levels] = - skutable->PcieGenSpeed[link_level]; - pcie_table->pcie_lane[pcie_table->num_of_link_levels] = - skutable->PcieLaneCount[link_level]; - pcie_table->clk_freq[pcie_table->num_of_link_levels] = - skutable->LclkFreq[link_level]; - pcie_table->num_of_link_levels++; - - if (link_level == 0) - link_level++; - } - /* dcefclk dpm table setup */ dpm_table = &dpm_context->dpm_tables.dcef_table; if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCN_BIT)) { @@ -1487,10 +1464,31 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, struct smu_14_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context; struct smu_14_0_pcie_table *pcie_table = &dpm_context->dpm_tables.pcie_table; - int num_of_levels = pcie_table->num_of_link_levels; + int num_of_levels; uint32_t smu_pcie_arg; - int ret, i; + uint32_t link_level; + struct smu_table_context *table_context = &smu->smu_table; + PPTable_t *pptable = table_context->driver_pptable; + SkuTable_t *skutable = &pptable->SkuTable; + int ret = 0; + int i; + + pcie_table->num_of_link_levels = 0; + for (link_level = 0; link_level < NUM_LINK_LEVELS; link_level++) { + if (!skutable->PcieGenSpeed[link_level] && + !skutable->PcieLaneCount[link_level] && + !skutable->LclkFreq[link_level]) + continue; + 
pcie_table->pcie_gen[pcie_table->num_of_link_levels] = + skutable->PcieGenSpeed[link_level]; + pcie_table->pcie_lane[pcie_table->num_of_link_levels] = + skutable->PcieLaneCount[link_level]; + pcie_table->clk_freq[pcie_table->num_of_link_levels] = + skutable->LclkFreq[link_level]; + pcie_table->num_of_link_levels++; + } + num_of_levels = pcie_table->num_of_link_levels; if (!num_of_levels) return 0; @@ -1505,30 +1503,40 @@ static int smu_v14_0_2_update_pcie_parameters(struct smu_context *smu, for (i = 0; i < num_of_levels; i++) { pcie_table->pcie_gen[i] = pcie_gen_cap; pcie_table->pcie_lane[i] = pcie_width_cap; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; + + ret = smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg, + NULL); + if (ret) + break; } } else { for (i = 0; i < num_of_levels; i++) { - if (pcie_table->pcie_gen[i] > pcie_gen_cap) - pcie_table->pcie_gen[i] = pcie_gen_cap; - if (pcie_table->pcie_lane[i] > pcie_width_cap) - pcie_table->pcie_lane[i] = pcie_width_cap; - } - } + if (pcie_table->pcie_gen[i] > pcie_gen_cap || + pcie_table->pcie_lane[i] > pcie_width_cap) { + pcie_table->pcie_gen[i] = pcie_table->pcie_gen[i] > pcie_gen_cap ? + pcie_gen_cap : pcie_table->pcie_gen[i]; + pcie_table->pcie_lane[i] = pcie_table->pcie_lane[i] > pcie_width_cap ? + pcie_width_cap : pcie_table->pcie_lane[i]; + smu_pcie_arg = i << 16; + smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; + smu_pcie_arg |= pcie_table->pcie_lane[i]; - for (i = 0; i < num_of_levels; i++) { - smu_pcie_arg = i << 16; - smu_pcie_arg |= pcie_table->pcie_gen[i] << 8; - smu_pcie_arg |= pcie_table->pcie_lane[i]; - - ret = smu_cmn_send_smc_msg_with_param(smu, + ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_OverridePcieParameters, smu_pcie_arg, NULL); - if (ret) - return ret; + if (ret) + break; + } + } } - return 0; + return ret; } static const struct smu_temperature_range smu14_thermal_policy[] = { @@ -1689,9 +1697,11 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu, uint32_t *min_power_limit) { struct smu_table_context *table_context = &smu->smu_table; + struct smu_14_0_2_powerplay_table *powerplay_table = + table_context->power_play_table; PPTable_t *pptable = table_context->driver_pptable; CustomSkuTable_t *skutable = &pptable->CustomSkuTable; - uint32_t power_limit; + uint32_t power_limit, od_percent_upper = 0, od_percent_lower = 0; uint32_t msg_limit = pptable->SkuTable.MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC]; if (smu_v14_0_get_current_power_limit(smu, &power_limit)) @@ -1704,11 +1714,29 @@ static int smu_v14_0_2_get_power_limit(struct smu_context *smu, if (default_power_limit) *default_power_limit = power_limit; - if (max_power_limit) - *max_power_limit = msg_limit; + if (powerplay_table) { + if (smu->od_enabled && + smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = pptable->SkuTable.OverDriveLimitsBasicMax.Ppt; + od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt; + } else if (smu_v14_0_2_is_od_feature_supported(smu, PP_OD_FEATURE_PPT_BIT)) { + od_percent_upper = 0; + od_percent_lower = pptable->SkuTable.OverDriveLimitsBasicMin.Ppt; + } + } + + dev_dbg(smu->adev->dev, "od percent upper:%d, od percent lower:%d (default power: %d)\n", + od_percent_upper, od_percent_lower, power_limit); - if (min_power_limit) - *min_power_limit = 0; + if (max_power_limit) { + *max_power_limit = msg_limit * (100 + od_percent_upper); + *max_power_limit /= 100; 
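/*
 * Worked example of the overdrive scaling applied here, assuming a
 * msg_limit of 220 W and OverDriveLimitsBasicMax.Ppt = 10 percent
 * (both values illustrative):
 *
 *   *max_power_limit = 220 * (100 + 10) / 100 = 242 W
 *
 * OverDriveLimitsBasicMin.Ppt is typically negative, so the minimum
 * reported below drops under the default limit by the same rule.
 */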
+ } + + if (min_power_limit) { + *min_power_limit = power_limit * (100 + od_percent_lower); + *min_power_limit /= 100; + } return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 7eaf58fd7f9a..59f9abd0f7b8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -86,6 +86,7 @@ static void smu_cmn_read_arg(struct smu_context *smu, #define SMU_RESP_BUSY_OTHER 0xFC #define SMU_RESP_DEBUG_END 0xFB +#define SMU_RESP_UNEXP (~0U) /** * __smu_cmn_poll_stat -- poll for a status from the SMU * @smu: a pointer to SMU context @@ -171,6 +172,15 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu, dev_err_ratelimited(adev->dev, "SMU: I'm debugging!"); break; + case SMU_RESP_UNEXP: + if (amdgpu_device_bus_status_check(smu->adev)) { + /* print error immediately if device is off the bus */ + dev_err(adev->dev, + "SMU: response:0x%08X for index:%d param:0x%08X message:%s?", + reg_c2pmsg_90, msg_index, param, message); + break; + } + fallthrough; default: dev_err_ratelimited(adev->dev, "SMU: response:0x%08X for index:%d param:0x%08X message:%s?", diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 7473672abd2a..a608cdbdada4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -40,28 +40,29 @@ #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 #define SMU_IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 -#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \ - do { \ - typecheck(struct gpu_metrics_v##frev##_##crev, \ - typeof(*(ptr))); \ - struct metrics_table_header *header = \ - (struct metrics_table_header *)(ptr); \ - memset(header, 0xFF, sizeof(*(ptr))); \ - header->format_revision = frev; \ - header->content_revision = crev; \ - header->structure_size = sizeof(*(ptr)); \ +#define smu_cmn_init_soft_gpu_metrics(ptr, frev, crev) \ + do { \ + typecheck(struct gpu_metrics_v##frev##_##crev *, (ptr)); \ + struct gpu_metrics_v##frev##_##crev *tmp = (ptr); \ + struct metrics_table_header *header = \ + (struct metrics_table_header *)tmp; \ + memset(header, 0xFF, sizeof(*tmp)); \ + header->format_revision = frev; \ + header->content_revision = crev; \ + header->structure_size = sizeof(*tmp); \ } while (0) -#define smu_cmn_init_partition_metrics(ptr, frev, crev) \ - do { \ - typecheck(struct amdgpu_partition_metrics_v##frev##_##crev, \ - typeof(*(ptr))); \ - struct metrics_table_header *header = \ - (struct metrics_table_header *)(ptr); \ - memset(header, 0xFF, sizeof(*(ptr))); \ - header->format_revision = frev; \ - header->content_revision = crev; \ - header->structure_size = sizeof(*(ptr)); \ +#define smu_cmn_init_partition_metrics(ptr, fr, cr) \ + do { \ + typecheck(struct amdgpu_partition_metrics_v##fr##_##cr *, \ + (ptr)); \ + struct amdgpu_partition_metrics_v##fr##_##cr *tmp = (ptr); \ + struct metrics_table_header *header = \ + (struct metrics_table_header *)tmp; \ + memset(header, 0xFF, sizeof(*tmp)); \ + header->format_revision = fr; \ + header->content_revision = cr; \ + header->structure_size = sizeof(*tmp); \ } while (0) extern const int link_speed[]; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c index df5da5a44755..901f938aefe0 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.c @@ -157,6 +157,7 @@ komeda_fb_none_afbc_size_check(struct komeda_dev 
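/*
 * This and the following framebuffer hunks thread a pre-resolved
 * const struct drm_format_info *info through the .fb_create path, so
 * drivers no longer look it up themselves with drm_get_format_info().
 * Minimal sketch of the updated callback shape (my_fb_create is a
 * hypothetical driver callback, assuming no extra validation):
 *
 *   static struct drm_framebuffer *
 *   my_fb_create(struct drm_device *dev, struct drm_file *file,
 *                const struct drm_format_info *info,
 *                const struct drm_mode_fb_cmd2 *mode_cmd)
 *   {
 *           return drm_gem_fb_create(dev, file, info, mode_cmd);
 *   }
 */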
*mdev, struct komeda_fb *kfb, struct drm_framebuffer * komeda_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct komeda_dev *mdev = dev->dev_private; @@ -177,7 +178,7 @@ komeda_fb_create(struct drm_device *dev, struct drm_file *file, return ERR_PTR(-EINVAL); } - drm_helper_mode_fill_fb_struct(dev, &kfb->base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &kfb->base, info, mode_cmd); if (kfb->base.modifier) ret = komeda_fb_afbc_size_check(kfb, file, mode_cmd); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h index c61ca98a3a63..02b2b8ae482a 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_framebuffer.h @@ -37,6 +37,7 @@ struct komeda_fb { struct drm_framebuffer * komeda_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd); int komeda_fb_check_src_coords(const struct komeda_fb *kfb, u32 src_x, u32 src_y, u32 src_w, u32 src_h); diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index e083021e9e99..bc5f5e9798c3 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -306,10 +306,10 @@ malidp_verify_afbc_framebuffer_caps(struct drm_device *dev, static bool malidp_verify_afbc_framebuffer_size(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { int n_superblocks = 0; - const struct drm_format_info *info; struct drm_gem_object *objs = NULL; u32 afbc_superblock_size = 0, afbc_superblock_height = 0; u32 afbc_superblock_width = 0, afbc_size = 0; @@ -325,8 +325,6 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev, return false; } - info = drm_get_format_info(dev, mode_cmd); - n_superblocks = (mode_cmd->width / afbc_superblock_width) * (mode_cmd->height / afbc_superblock_height); @@ -366,24 +364,26 @@ malidp_verify_afbc_framebuffer_size(struct drm_device *dev, static bool malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd)) - return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd); + return malidp_verify_afbc_framebuffer_size(dev, file, info, mode_cmd); return false; } static struct drm_framebuffer * malidp_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { if (mode_cmd->modifier[0]) { - if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd)) + if (!malidp_verify_afbc_framebuffer(dev, file, info, mode_cmd)) return ERR_PTR(-EINVAL); } - return drm_gem_fb_create(dev, file, mode_cmd); + return drm_gem_fb_create(dev, file, info, mode_cmd); } static const struct drm_mode_config_funcs malidp_mode_config_funcs = { diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c index 34547edf1ee3..87f2e5ee8790 100644 --- a/drivers/gpu/drm/arm/malidp_planes.c +++ b/drivers/gpu/drm/arm/malidp_planes.c @@ -159,7 +159,7 @@ bool malidp_format_mod_supported(struct drm_device *drm, } if (!fourcc_mod_is_vendor(modifier, ARM)) { - DRM_ERROR("Unknown modifier (not Arm)\n"); + DRM_DEBUG_KMS("Unknown modifier (not Arm)\n"); return false; } diff --git 
a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c index cf2e88218dc0..aa4289127086 100644 --- a/drivers/gpu/drm/armada/armada_fb.c +++ b/drivers/gpu/drm/armada/armada_fb.c @@ -18,7 +18,9 @@ static const struct drm_framebuffer_funcs armada_fb_funcs = { }; struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev, - const struct drm_mode_fb_cmd2 *mode, struct armada_gem_object *obj) + const struct drm_format_info *info, + const struct drm_mode_fb_cmd2 *mode, + struct armada_gem_object *obj) { struct armada_framebuffer *dfb; uint8_t format, config; @@ -64,7 +66,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev, dfb->mod = config; dfb->fb.obj[0] = &obj->obj; - drm_helper_mode_fill_fb_struct(dev, &dfb->fb, mode); + drm_helper_mode_fill_fb_struct(dev, &dfb->fb, info, mode); ret = drm_framebuffer_init(dev, &dfb->fb, &armada_fb_funcs); if (ret) { @@ -84,9 +86,9 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev, } struct drm_framebuffer *armada_fb_create(struct drm_device *dev, - struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode) + struct drm_file *dfile, const struct drm_format_info *info, + const struct drm_mode_fb_cmd2 *mode) { - const struct drm_format_info *info = drm_get_format_info(dev, mode); struct armada_gem_object *obj; struct armada_framebuffer *dfb; int ret; @@ -122,7 +124,7 @@ struct drm_framebuffer *armada_fb_create(struct drm_device *dev, goto err_unref; } - dfb = armada_framebuffer_create(dev, mode, obj); + dfb = armada_framebuffer_create(dev, info, mode, obj); if (IS_ERR(dfb)) { ret = PTR_ERR(dfb); goto err; diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h index c5bc53d7e0c4..f2b990f055a2 100644 --- a/drivers/gpu/drm/armada/armada_fb.h +++ b/drivers/gpu/drm/armada/armada_fb.h @@ -17,7 +17,9 @@ struct armada_framebuffer { #define drm_fb_obj(fb) drm_to_armada_gem((fb)->obj[0]) struct armada_framebuffer *armada_framebuffer_create(struct drm_device *, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *, struct armada_gem_object *); struct drm_framebuffer *armada_fb_create(struct drm_device *dev, - struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode); + struct drm_file *dfile, const struct drm_format_info *info, + const struct drm_mode_fb_cmd2 *mode); #endif diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c index 6ee7ce04ee71..cb53cc91bafb 100644 --- a/drivers/gpu/drm/armada/armada_fbdev.c +++ b/drivers/gpu/drm/armada/armada_fbdev.c @@ -78,7 +78,10 @@ int armada_fbdev_driver_fbdev_probe(struct drm_fb_helper *fbh, return -ENOMEM; } - dfb = armada_framebuffer_create(dev, &mode, obj); + dfb = armada_framebuffer_create(dev, + drm_get_format_info(dev, mode.pixel_format, + mode.modifier[0]), + &mode, obj); /* * A reference is now held by the framebuffer object if diff --git a/drivers/gpu/drm/ast/Makefile b/drivers/gpu/drm/ast/Makefile index 8d09ba5d5889..2547613155da 100644 --- a/drivers/gpu/drm/ast/Makefile +++ b/drivers/gpu/drm/ast/Makefile @@ -4,6 +4,11 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
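# The ast POST code is being split out per chip generation: each of the
# ast_2000.o through ast_2600.o objects listed below carries the DRAM
# init and POST sequence for one ASIC family, all still linked into the
# single ast module.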
ast-y := \ + ast_2000.o \ + ast_2100.o \ + ast_2300.o \ + ast_2500.o \ + ast_2600.o \ ast_cursor.o \ ast_ddc.o \ ast_dp501.o \ diff --git a/drivers/gpu/drm/ast/ast_2000.c b/drivers/gpu/drm/ast/ast_2000.c new file mode 100644 index 000000000000..41c2aa1e425a --- /dev/null +++ b/drivers/gpu/drm/ast/ast_2000.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + */ +/* + * Authors: Dave Airlie <[email protected]> + */ + +#include <linux/delay.h> + +#include "ast_drv.h" +#include "ast_post.h" + +/* + * POST + */ + +void ast_2000_set_def_ext_reg(struct ast_device *ast) +{ + static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; + u8 i, index, reg; + const u8 *ext_reg_info; + + /* reset scratch */ + for (i = 0x81; i <= 0x9f; i++) + ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00); + + ext_reg_info = extreginfo; + index = 0xa0; + while (*ext_reg_info != 0xff) { + ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info); + index++; + ext_reg_info++; + } + + /* disable standard IO/MEM decode if secondary */ + /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */ + + /* Set Ext. 
Default */ + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01); + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00); + + /* Enable RAMDAC for A1 */ + reg = 0x04; + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg); +} + +static const struct ast_dramstruct ast2000_dram_table_data[] = { + { 0x0108, 0x00000000 }, + { 0x0120, 0x00004a21 }, + AST_DRAMSTRUCT_UDELAY(67u), + { 0x0000, 0xFFFFFFFF }, + AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000089), + { 0x0008, 0x22331353 }, + { 0x000C, 0x0d07000b }, + { 0x0010, 0x11113333 }, + { 0x0020, 0x00110350 }, + { 0x0028, 0x1e0828f0 }, + { 0x0024, 0x00000001 }, + { 0x001C, 0x00000000 }, + { 0x0014, 0x00000003 }, + AST_DRAMSTRUCT_UDELAY(67u), + { 0x0018, 0x00000131 }, + { 0x0014, 0x00000001 }, + AST_DRAMSTRUCT_UDELAY(67u), + { 0x0018, 0x00000031 }, + { 0x0014, 0x00000001 }, + AST_DRAMSTRUCT_UDELAY(67u), + { 0x0028, 0x1e0828f1 }, + { 0x0024, 0x00000003 }, + { 0x002C, 0x1f0f28fb }, + { 0x0030, 0xFFFFFE01 }, + AST_DRAMSTRUCT_INVALID, +}; + +static void ast_post_chip_2000(struct ast_device *ast) +{ + u8 j; + u32 temp, i; + const struct ast_dramstruct *dram_reg_info; + + j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); + + if ((j & 0x80) == 0) { /* VGA only */ + dram_reg_info = ast2000_dram_table_data; + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x10100, 0xa8); + + do { + ; + } while (ast_read32(ast, 0x10100) != 0xa8); + + while (!AST_DRAMSTRUCT_IS(dram_reg_info, INVALID)) { + if (AST_DRAMSTRUCT_IS(dram_reg_info, UDELAY)) { + for (i = 0; i < 15; i++) + udelay(dram_reg_info->data); + } else { + ast_write32(ast, 0x10000 + dram_reg_info->index, + dram_reg_info->data); + } + dram_reg_info++; + } + + temp = ast_read32(ast, 0x10140); + ast_write32(ast, 0x10140, temp | 0x40); + } + + /* wait ready */ + do { + j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); + } while ((j & 0x40) == 0); +} + +int ast_2000_post(struct ast_device *ast) +{ + ast_2000_set_def_ext_reg(ast); + + if (ast->config_mode == ast_use_p2a) { + ast_post_chip_2000(ast); + } else { + if (ast->tx_chip == AST_TX_SIL164) { + /* Enable DVO */ + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); + } + } + + return 0; +} diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c new file mode 100644 index 000000000000..477ee15eff5d --- /dev/null +++ b/drivers/gpu/drm/ast/ast_2100.c @@ -0,0 +1,348 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + */ +/* + * Authors: Dave Airlie <[email protected]> + */ + +#include <linux/delay.h> + +#include "ast_drv.h" +#include "ast_post.h" + +/* + * POST + */ + +static const struct ast_dramstruct ast1100_dram_table_data[] = { + { 0x2000, 0x1688a8a8 }, + { 0x2020, 0x000041f0 }, + AST_DRAMSTRUCT_UDELAY(67u), + { 0x0000, 0xfc600309 }, + { 0x006C, 0x00909090 }, + { 0x0064, 0x00050000 }, + AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000585), + { 0x0008, 0x0011030f }, + { 0x0010, 0x22201724 }, + { 0x0018, 0x1e29011a }, + { 0x0020, 0x00c82222 }, + { 0x0014, 0x01001523 }, + { 0x001C, 0x1024010d }, + { 0x0024, 0x00cb2522 }, + { 0x0038, 0xffffff82 }, + { 0x003C, 0x00000000 }, + { 0x0040, 0x00000000 }, + { 0x0044, 0x00000000 }, + { 0x0048, 0x00000000 }, + { 0x004C, 0x00000000 }, + { 0x0050, 0x00000000 }, + { 0x0054, 0x00000000 }, + { 0x0058, 0x00000000 }, + { 0x005C, 0x00000000 }, + { 0x0060, 0x032aa02a }, + { 0x0064, 0x002d3000 }, + { 0x0068, 0x00000000 }, + { 0x0070, 0x00000000 }, + { 0x0074, 0x00000000 }, + { 0x0078, 0x00000000 }, + { 0x007C, 0x00000000 }, + { 0x0034, 0x00000001 }, + AST_DRAMSTRUCT_UDELAY(67u), + { 0x002C, 0x00000732 }, + { 0x0030, 0x00000040 }, + { 0x0028, 0x00000005 }, + { 0x0028, 0x00000007 }, + { 0x0028, 0x00000003 }, + { 0x0028, 0x00000001 }, + { 0x000C, 0x00005a08 }, + { 0x002C, 0x00000632 }, + { 0x0028, 0x00000001 }, + { 0x0030, 0x000003c0 }, + { 0x0028, 0x00000003 }, + { 0x0030, 0x00000040 }, + { 0x0028, 0x00000003 }, + { 0x000C, 0x00005a21 }, + { 0x0034, 0x00007c03 }, + { 0x0120, 0x00004c41 }, + AST_DRAMSTRUCT_INVALID, +}; + +static const struct ast_dramstruct ast2100_dram_table_data[] = { + { 0x2000, 0x1688a8a8 }, + { 0x2020, 0x00004120 }, + AST_DRAMSTRUCT_UDELAY(67u), + { 0x0000, 0xfc600309 }, + { 0x006C, 0x00909090 }, + { 0x0064, 0x00070000 }, + AST_DRAMSTRUCT_INIT(DRAM_TYPE, 0x00000489), + { 0x0008, 0x0011030f }, + { 0x0010, 0x32302926 }, + { 0x0018, 0x274c0122 }, + { 0x0020, 0x00ce2222 }, + { 0x0014, 0x01001523 }, + { 0x001C, 0x1024010d }, + { 0x0024, 0x00cb2522 }, + { 0x0038, 0xffffff82 }, + { 0x003C, 0x00000000 }, + { 0x0040, 0x00000000 }, + { 0x0044, 0x00000000 }, + { 0x0048, 0x00000000 }, + { 0x004C, 0x00000000 }, + { 0x0050, 0x00000000 }, + { 0x0054, 0x00000000 }, + { 0x0058, 0x00000000 }, + { 0x005C, 0x00000000 }, + { 0x0060, 0x0f2aa02a }, + { 0x0064, 0x003f3005 }, + { 0x0068, 0x02020202 }, + { 0x0070, 0x00000000 }, + { 0x0074, 0x00000000 }, + { 0x0078, 0x00000000 }, + { 0x007C, 0x00000000 }, + { 0x0034, 0x00000001 }, + AST_DRAMSTRUCT_UDELAY(67u), + { 0x002C, 0x00000942 }, + { 0x0030, 0x00000040 }, + { 0x0028, 0x00000005 }, + { 0x0028, 0x00000007 }, + { 0x0028, 0x00000003 }, + { 0x0028, 0x00000001 }, + { 0x000C, 0x00005a08 }, + { 0x002C, 0x00000842 }, + { 0x0028, 0x00000001 }, + { 0x0030, 0x000003c0 }, + { 0x0028, 0x00000003 }, + { 0x0030, 0x00000040 }, + { 0x0028, 0x00000003 }, + { 0x000C, 0x00005a21 }, + { 0x0034, 0x00007c03 }, + { 0x0120, 0x00005061 }, + AST_DRAMSTRUCT_INVALID, +}; + +/* + * AST2100/2150 DLL CBR Setting + */ +#define CBR_SIZE_AST2150 ((16 << 10) - 1) +#define CBR_PASSNUM_AST2150 5 +#define CBR_THRESHOLD_AST2150 10 +#define CBR_THRESHOLD2_AST2150 10 +#define TIMEOUT_AST2150 5000000 + +#define CBR_PATNUM_AST2150 8 + +static const u32 pattern_AST2150[14] = { + 0xFF00FF00, + 0xCC33CC33, + 0xAA55AA55, + 0xFFFE0001, + 0x683501FE, + 0x0F1929B0, + 0x2D0B4346, + 0x60767F02, + 0x6FBE36A6, + 
0x3A253035, + 0x3019686D, + 0x41C6167E, + 0x620152BF, + 0x20F050E0 +}; + +static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x40; + if (++timeout > TIMEOUT_AST2150) { + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return 0xffffffff; + } + } while (!data); + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3)); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x40; + if (++timeout > TIMEOUT_AST2150) { + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return 0xffffffff; + } + } while (!data); + data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7; + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return data; +} + +static int cbrtest_ast2150(struct ast_device *ast) +{ + int i; + + for (i = 0; i < 8; i++) + if (mmctestburst2_ast2150(ast, i)) + return 0; + return 1; +} + +static int cbrscan_ast2150(struct ast_device *ast, int busw) +{ + u32 patcnt, loop; + + for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]); + for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) { + if (cbrtest_ast2150(ast)) + break; + } + if (loop == CBR_PASSNUM_AST2150) + return 0; + } + return 1; +} + +static void cbrdlli_ast2150(struct ast_device *ast, int busw) +{ + u32 dll_min[4], dll_max[4], dlli, data, passcnt; + +cbr_start: + dll_min[0] = 0xff; + dll_min[1] = 0xff; + dll_min[2] = 0xff; + dll_min[3] = 0xff; + dll_max[0] = 0x00; + dll_max[1] = 0x00; + dll_max[2] = 0x00; + dll_max[3] = 0x00; + passcnt = 0; + + for (dlli = 0; dlli < 100; dlli++) { + ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24)); + data = cbrscan_ast2150(ast, busw); + if (data != 0) { + if (data & 0x1) { + if (dll_min[0] > dlli) + dll_min[0] = dlli; + if (dll_max[0] < dlli) + dll_max[0] = dlli; + } + passcnt++; + } else if (passcnt >= CBR_THRESHOLD_AST2150) { + goto cbr_start; + } + } + if (dll_max[0] == 0 || (dll_max[0] - dll_min[0]) < CBR_THRESHOLD_AST2150) + goto cbr_start; + + dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4); + ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24)); +} + +static void ast_post_chip_2100(struct ast_device *ast) +{ + u8 j; + u32 data, temp, i; + const struct ast_dramstruct *dram_reg_info; + + j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); + + if ((j & 0x80) == 0) { /* VGA only */ + if (ast->chip == AST2100 || ast->chip == AST2200) + dram_reg_info = ast2100_dram_table_data; + else + dram_reg_info = ast1100_dram_table_data; + + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x12000, 0x1688A8A8); + do { + ; + } while (ast_read32(ast, 0x12000) != 0x01); + + ast_write32(ast, 0x10000, 0xfc600309); + do { + ; + } while (ast_read32(ast, 0x10000) != 0x01); + + while (!AST_DRAMSTRUCT_IS(dram_reg_info, INVALID)) { + if (AST_DRAMSTRUCT_IS(dram_reg_info, UDELAY)) { + for (i = 0; i < 15; i++) + udelay(dram_reg_info->data); + } else if (AST_DRAMSTRUCT_IS(dram_reg_info, DRAM_TYPE)) { + data = dram_reg_info->data; + if (ast->dram_type == AST_DRAM_1Gx16) + data = 0x00000d89; + else if (ast->dram_type == AST_DRAM_1Gx32) + data = 0x00000c8d; + + temp = ast_read32(ast, 0x12070); + temp &= 0xc; + temp <<= 2; + ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp); + } else { + ast_write32(ast, 
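/*
 * The table walk around this write mirrors ast_post_chip_2000(): each
 * ast_dramstruct entry is either a plain MMIO offset/value pair, a
 * UDELAY marker whose data field is a delay replayed 15 times, or a
 * DRAM_TYPE marker whose value is patched for ast->dram_type (and,
 * here, mixed with the strap bits read back from 0x12070) before
 * being written.
 */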
0x10000 + dram_reg_info->index, + dram_reg_info->data); + } + dram_reg_info++; + } + + /* AST 2100/2150 DRAM calibration */ + data = ast_read32(ast, 0x10120); + if (data == 0x5061) { /* 266Mhz */ + data = ast_read32(ast, 0x10004); + if (data & 0x40) + cbrdlli_ast2150(ast, 16); /* 16 bits */ + else + cbrdlli_ast2150(ast, 32); /* 32 bits */ + } + + temp = ast_read32(ast, 0x1200c); + ast_write32(ast, 0x1200c, temp & 0xfffffffd); + temp = ast_read32(ast, 0x12040); + ast_write32(ast, 0x12040, temp | 0x40); + } + + /* wait ready */ + do { + j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); + } while ((j & 0x40) == 0); +} + +int ast_2100_post(struct ast_device *ast) +{ + ast_2000_set_def_ext_reg(ast); + + if (ast->config_mode == ast_use_p2a) { + ast_post_chip_2100(ast); + } else { + if (ast->tx_chip == AST_TX_SIL164) { + /* Enable DVO */ + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); + } + } + + return 0; +} diff --git a/drivers/gpu/drm/ast/ast_2300.c b/drivers/gpu/drm/ast/ast_2300.c new file mode 100644 index 000000000000..dc2a32244689 --- /dev/null +++ b/drivers/gpu/drm/ast/ast_2300.c @@ -0,0 +1,1328 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + */ +/* + * Authors: Dave Airlie <[email protected]> + */ + +#include <linux/delay.h> + +#include "ast_drv.h" +#include "ast_post.h" + +/* + * POST + */ + +void ast_2300_set_def_ext_reg(struct ast_device *ast) +{ + static const u8 extreginfo[] = { 0x0f, 0x04, 0x1f, 0xff }; + u8 i, index, reg; + const u8 *ext_reg_info; + + /* reset scratch */ + for (i = 0x81; i <= 0x9f; i++) + ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00); + + ext_reg_info = extreginfo; + index = 0xa0; + while (*ext_reg_info != 0xff) { + ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info); + index++; + ext_reg_info++; + } + + /* disable standard IO/MEM decode if secondary */ + /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */ + + /* Set Ext. 
Default */ + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01); + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00); + + /* Enable RAMDAC for A1 */ + reg = 0x04; + reg |= 0x20; + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg); +} + +/* AST 2300 DRAM settings */ +#define AST_DDR3 0 +#define AST_DDR2 1 + +struct ast2300_dram_param { + u32 dram_type; + u32 dram_chipid; + u32 dram_freq; + u32 vram_size; + u32 odt; + u32 wodt; + u32 rodt; + u32 dram_config; + u32 reg_PERIOD; + u32 reg_MADJ; + u32 reg_SADJ; + u32 reg_MRS; + u32 reg_EMRS; + u32 reg_AC1; + u32 reg_AC2; + u32 reg_DQSIC; + u32 reg_DRV; + u32 reg_IOZ; + u32 reg_DQIDLY; + u32 reg_FREQ; + u32 madj_max; + u32 dll2_finetune_step; +}; + +/* + * DQSI DLL CBR Setting + */ +#define CBR_SIZE0 ((1 << 10) - 1) +#define CBR_SIZE1 ((4 << 10) - 1) +#define CBR_SIZE2 ((64 << 10) - 1) +#define CBR_PASSNUM 5 +#define CBR_PASSNUM2 5 +#define CBR_THRESHOLD 10 +#define CBR_THRESHOLD2 10 +#define TIMEOUT 5000000 +#define CBR_PATNUM 8 + +static const u32 pattern[8] = { + 0xFF00FF00, + 0xCC33CC33, + 0xAA55AA55, + 0x88778877, + 0x92CC4D6E, + 0x543D3CDE, + 0xF1E843C7, + 0x7C61D253 +}; + +static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl) +{ + u32 data, timeout; + + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl); + timeout = 0; + do { + data = ast_mindwm(ast, 0x1e6e0070) & 0x1000; + if (++timeout > TIMEOUT) { + ast_moutdwm(ast, 0x1e6e0070, 0x0); + return 0xffffffff; + } + } while (!data); + data = ast_mindwm(ast, 0x1e6e0078); + data = (data | (data >> 16)) & 0xffff; + ast_moutdwm(ast, 0x1e6e0070, 0x00000000); + return data; +} + +static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen) +{ + return mmc_test2(ast, datagen, 0x41); +} + +static bool mmc_test_single(struct ast_device *ast, u32 datagen) +{ + return mmc_test(ast, datagen, 0xc5); +} + +static u32 mmc_test_single2(struct ast_device *ast, u32 datagen) +{ + return mmc_test2(ast, datagen, 0x05); +} + +static int cbr_test(struct ast_device *ast) +{ + u32 data; + int i; + + data = mmc_test_single2(ast, 0); + if ((data & 0xff) && (data & 0xff00)) + return 0; + for (i = 0; i < 8; i++) { + data = mmc_test_burst2(ast, i); + if ((data & 0xff) && (data & 0xff00)) + return 0; + } + if (!data) + return 3; + else if (data & 0xff) + return 2; + return 1; +} + +static int cbr_scan(struct ast_device *ast) +{ + u32 data, data2, patcnt, loop; + + data2 = 3; + for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); + for (loop = 0; loop < CBR_PASSNUM2; loop++) { + data = cbr_test(ast); + if (data != 0) { + data2 &= data; + if (!data2) + return 0; + break; + } + } + if (loop == CBR_PASSNUM2) + return 0; + } + return data2; +} + +static u32 cbr_test2(struct ast_device *ast) +{ + u32 data; + + data = mmc_test_burst2(ast, 0); + if (data == 0xffff) + return 0; + data |= mmc_test_single2(ast, 0); + if (data == 0xffff) + return 0; + + return ~data & 0xffff; +} + +static u32 cbr_scan2(struct ast_device *ast) +{ + u32 data, data2, patcnt, loop; + + data2 = 0xffff; + for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); + for (loop = 0; loop < CBR_PASSNUM2; loop++) { + data = cbr_test2(ast); + if (data != 0) { + data2 &= data; + if (!data2) + return 0; + break; + } + } + if (loop == CBR_PASSNUM2) + return 0; + } + return data2; +} + +static bool cbr_test3(struct ast_device *ast) +{ + if (!mmc_test_burst(ast, 0)) + return false; + 
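/*
 * All mmc_test* helpers in this file drive the same handshake with the
 * memory test engine at 0x1e6e0070: arm it, poll a completion bit with
 * a bounded loop, then disarm. Generic sketch of the idiom, with ctl
 * and bit standing in for the per-test constants:
 *
 *   ast_moutdwm(ast, 0x1e6e0070, 0x00000000);           // reset engine
 *   ast_moutdwm(ast, 0x1e6e0070, ctl | (datagen << 3)); // start test
 *   do {
 *           data = ast_mindwm(ast, 0x1e6e0070) & bit;
 *           if (++timeout > TIMEOUT)     // give up after 5,000,000 polls
 *                   return 0xffffffff;
 *   } while (!data);
 */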
if (!mmc_test_single(ast, 0)) + return false; + return true; +} + +static bool cbr_scan3(struct ast_device *ast) +{ + u32 patcnt, loop; + + for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { + ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); + for (loop = 0; loop < 2; loop++) { + if (cbr_test3(ast)) + break; + } + if (loop == 2) + return false; + } + return true; +} + +static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param) +{ + u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0; + bool status = false; +FINETUNE_START: + for (cnt = 0; cnt < 16; cnt++) { + dllmin[cnt] = 0xff; + dllmax[cnt] = 0x0; + } + passcnt = 0; + for (dlli = 0; dlli < 76; dlli++) { + ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24)); + ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1); + data = cbr_scan2(ast); + if (data != 0) { + mask = 0x00010001; + for (cnt = 0; cnt < 16; cnt++) { + if (data & mask) { + if (dllmin[cnt] > dlli) + dllmin[cnt] = dlli; + if (dllmax[cnt] < dlli) + dllmax[cnt] = dlli; + } + mask <<= 1; + } + passcnt++; + } else if (passcnt >= CBR_THRESHOLD2) { + break; + } + } + gold_sadj[0] = 0x0; + passcnt = 0; + for (cnt = 0; cnt < 16; cnt++) { + if ((dllmax[cnt] > dllmin[cnt]) && + ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { + gold_sadj[0] += dllmin[cnt]; + passcnt++; + } + } + if (retry++ > 10) + goto FINETUNE_DONE; + if (passcnt != 16) + goto FINETUNE_START; + status = true; +FINETUNE_DONE: + gold_sadj[0] = gold_sadj[0] >> 4; + gold_sadj[1] = gold_sadj[0]; + + data = 0; + for (cnt = 0; cnt < 8; cnt++) { + data >>= 3; + if ((dllmax[cnt] > dllmin[cnt]) && + ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { + dlli = dllmin[cnt]; + if (gold_sadj[0] >= dlli) { + dlli = ((gold_sadj[0] - dlli) * 19) >> 5; + if (dlli > 3) + dlli = 3; + } else { + dlli = ((dlli - gold_sadj[0]) * 19) >> 5; + if (dlli > 4) + dlli = 4; + dlli = (8 - dlli) & 0x7; + } + data |= dlli << 21; + } + } + ast_moutdwm(ast, 0x1E6E0080, data); + + data = 0; + for (cnt = 8; cnt < 16; cnt++) { + data >>= 3; + if ((dllmax[cnt] > dllmin[cnt]) && + ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { + dlli = dllmin[cnt]; + if (gold_sadj[1] >= dlli) { + dlli = ((gold_sadj[1] - dlli) * 19) >> 5; + if (dlli > 3) + dlli = 3; + else + dlli = (dlli - 1) & 0x7; + } else { + dlli = ((dlli - gold_sadj[1]) * 19) >> 5; + dlli += 1; + if (dlli > 4) + dlli = 4; + dlli = (8 - dlli) & 0x7; + } + data |= dlli << 21; + } + } + ast_moutdwm(ast, 0x1E6E0084, data); + return status; +} /* finetuneDQI_L */ + +static void finetuneDQSI(struct ast_device *ast) +{ + u32 dlli, dqsip, dqidly; + u32 reg_mcr18, reg_mcr0c, passcnt[2], diff; + u32 g_dqidly, g_dqsip, g_margin, g_side; + u16 pass[32][2][2]; + char tag[2][76]; + + /* Disable DQI CBR */ + reg_mcr0c = ast_mindwm(ast, 0x1E6E000C); + reg_mcr18 = ast_mindwm(ast, 0x1E6E0018); + reg_mcr18 &= 0x0000ffff; + ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); + + for (dlli = 0; dlli < 76; dlli++) { + tag[0][dlli] = 0x0; + tag[1][dlli] = 0x0; + } + for (dqidly = 0; dqidly < 32; dqidly++) { + pass[dqidly][0][0] = 0xff; + pass[dqidly][0][1] = 0x0; + pass[dqidly][1][0] = 0xff; + pass[dqidly][1][1] = 0x0; + } + for (dqidly = 0; dqidly < 32; dqidly++) { + passcnt[0] = 0; + passcnt[1] = 0; + for (dqsip = 0; dqsip < 2; dqsip++) { + ast_moutdwm(ast, 0x1E6E000C, 0); + ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23)); + ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c); + for (dlli = 0; dlli < 76; dlli++) { + ast_moutdwm(ast, 0x1E6E0068, + 0x00001300 | 
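/*
 * finetuneDQSI() sweeps all 32 DQ input-delay steps against both DQS
 * input phases; for each (dqidly, dqsip) pair it scans the 76 DLL
 * settings programmed here, tags passing DLL values and records the
 * pass window in pass[][][]. The margin search further down then picks
 * the combination with the widest window.
 */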
(dlli << 16) | (dlli << 24)); + ast_moutdwm(ast, 0x1E6E0070, 0); + ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0); + if (cbr_scan3(ast)) { + if (dlli == 0) + break; + passcnt[dqsip]++; + tag[dqsip][dlli] = 'P'; + if (dlli < pass[dqidly][dqsip][0]) + pass[dqidly][dqsip][0] = (u16)dlli; + if (dlli > pass[dqidly][dqsip][1]) + pass[dqidly][dqsip][1] = (u16)dlli; + } else if (passcnt[dqsip] >= 5) { + break; + } else { + pass[dqidly][dqsip][0] = 0xff; + pass[dqidly][dqsip][1] = 0x0; + } + } + } + if (passcnt[0] == 0 && passcnt[1] == 0) + dqidly++; + } + /* Search margin */ + g_dqidly = 0; + g_dqsip = 0; + g_margin = 0; + g_side = 0; + + for (dqidly = 0; dqidly < 32; dqidly++) { + for (dqsip = 0; dqsip < 2; dqsip++) { + if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1]) + continue; + diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0]; + if ((diff + 2) < g_margin) + continue; + passcnt[0] = 0; + passcnt[1] = 0; + for (dlli = pass[dqidly][dqsip][0]; + dlli > 0 && tag[dqsip][dlli] != 0; + dlli--, passcnt[0]++) { + } + for (dlli = pass[dqidly][dqsip][1]; + dlli < 76 && tag[dqsip][dlli] != 0; + dlli++, passcnt[1]++) { + } + if (passcnt[0] > passcnt[1]) + passcnt[0] = passcnt[1]; + passcnt[1] = 0; + if (passcnt[0] > g_side) + passcnt[1] = passcnt[0] - g_side; + if (diff > (g_margin + 1) && (passcnt[1] > 0 || passcnt[0] > 8)) { + g_margin = diff; + g_dqidly = dqidly; + g_dqsip = dqsip; + g_side = passcnt[0]; + } else if (passcnt[1] > 1 && g_side < 8) { + if (diff > g_margin) + g_margin = diff; + g_dqidly = dqidly; + g_dqsip = dqsip; + g_side = passcnt[0]; + } + } + } + reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23); + ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); +} + +static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param) +{ + u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0; + bool status = false; + + finetuneDQSI(ast); + if (finetuneDQI_L(ast, param) == false) + return status; + +CBR_START2: + dllmin[0] = 0xff; + dllmin[1] = 0xff; + dllmax[0] = 0x0; + dllmax[1] = 0x0; + passcnt = 0; + for (dlli = 0; dlli < 76; dlli++) { + ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24)); + ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2); + data = cbr_scan(ast); + if (data != 0) { + if (data & 0x1) { + if (dllmin[0] > dlli) + dllmin[0] = dlli; + if (dllmax[0] < dlli) + dllmax[0] = dlli; + } + if (data & 0x2) { + if (dllmin[1] > dlli) + dllmin[1] = dlli; + if (dllmax[1] < dlli) + dllmax[1] = dlli; + } + passcnt++; + } else if (passcnt >= CBR_THRESHOLD) { + break; + } + } + if (retry++ > 10) + goto CBR_DONE2; + if (dllmax[0] == 0 || (dllmax[0] - dllmin[0]) < CBR_THRESHOLD) + goto CBR_START2; + if (dllmax[1] == 0 || (dllmax[1] - dllmin[1]) < CBR_THRESHOLD) + goto CBR_START2; + status = true; +CBR_DONE2: + dlli = (dllmin[1] + dllmax[1]) >> 1; + dlli <<= 8; + dlli += (dllmin[0] + dllmax[0]) >> 1; + ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16)); + return status; +} /* CBRDLL2 */ + +static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param) +{ + u32 trap, trap_AC2, trap_MRS; + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + + /* Ger trap info */ + trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3; + trap_AC2 = 0x00020000 + (trap << 16); + trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19); + trap_MRS = 0x00000010 + (trap << 4); + trap_MRS |= ((trap & 0x2) << 18); + + param->reg_MADJ = 0x00034C4C; + param->reg_SADJ = 0x00001800; + param->reg_DRV = 0x000000F0; + param->reg_PERIOD = param->dram_freq; + param->rodt = 0; + + switch 
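/*
 * The switch entered here picks a complete DDR3 timing/drive parameter
 * set per supported memory frequency (336, 396, 408, 456, 504, 528 and
 * 576 MHz); where a nested dram_chipid switch exists, it only adjusts
 * reg_AC2 for the larger chip densities.
 */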
(param->dram_freq) { + case 336: + ast_moutdwm(ast, 0x1E6E2020, 0x0190); + param->wodt = 0; + param->reg_AC1 = 0x22202725; + param->reg_AC2 = 0xAA007613 | trap_AC2; + param->reg_DQSIC = 0x000000BA; + param->reg_MRS = 0x04001400 | trap_MRS; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000074; + param->reg_FREQ = 0x00004DC0; + param->madj_max = 96; + param->dll2_finetune_step = 3; + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xAA007613 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xAA00761C | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xAA007636 | trap_AC2; + break; + } + break; + default: + case 396: + ast_moutdwm(ast, 0x1E6E2020, 0x03F1); + param->wodt = 1; + param->reg_AC1 = 0x33302825; + param->reg_AC2 = 0xCC009617 | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x04001600 | trap_MRS; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000034; + param->reg_DRV = 0x000000FA; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x00005040; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC009617 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC009622 | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00963F | trap_AC2; + break; + } + break; + + case 408: + ast_moutdwm(ast, 0x1E6E2020, 0x01F0); + param->wodt = 1; + param->reg_AC1 = 0x33302825; + param->reg_AC2 = 0xCC009617 | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x04001600 | trap_MRS; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000023; + param->reg_DRV = 0x000000FA; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x000050C0; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC009617 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC009622 | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00963F | trap_AC2; + break; + } + + break; + case 456: + ast_moutdwm(ast, 0x1E6E2020, 0x0230); + param->wodt = 0; + param->reg_AC1 = 0x33302926; + param->reg_AC2 = 0xCD44961A; + param->reg_DQSIC = 0x000000FC; + param->reg_MRS = 0x00081830; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x00000097; + param->reg_FREQ = 0x000052C0; + param->madj_max = 88; + param->dll2_finetune_step = 4; + break; + case 504: + ast_moutdwm(ast, 0x1E6E2020, 0x0270); + param->wodt = 1; + param->reg_AC1 = 0x33302926; + param->reg_AC2 = 0xDE44A61D; + param->reg_DQSIC = 0x00000117; + param->reg_MRS = 0x00081A30; + param->reg_EMRS = 0x00000000; + param->reg_IOZ = 0x070000BB; + param->reg_DQIDLY = 0x000000A0; + param->reg_FREQ = 0x000054C0; + param->madj_max = 79; + param->dll2_finetune_step = 4; + break; + case 528: + ast_moutdwm(ast, 0x1E6E2020, 0x0290); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302926; + param->reg_AC2 = 0xEF44B61E; + param->reg_DQSIC = 0x00000125; + param->reg_MRS = 0x00081A30; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000088; + param->reg_FREQ = 0x000055C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + case 576: + ast_moutdwm(ast, 0x1E6E2020, 0x0140); + param->reg_MADJ = 0x00136868; + param->reg_SADJ 
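The per-frequency blocks in this switch differ only in data; the same selection could be phrased as a lookup table. A minimal sketch, with a hypothetical struct holding only a few of the fields, values copied from the 336 MHz case above:

struct ast2300_ddr3_freq_cfg {
	u32 freq;	/* matches the case labels above, in MHz */
	u32 mpll;	/* value written to 0x1E6E2020 */
	u32 ac1;	/* param->reg_AC1 */
	u32 dqsic;	/* param->reg_DQSIC */
	u32 madj_max;
};

static const struct ast2300_ddr3_freq_cfg ddr3_freq_cfgs[] = {
	{ 336, 0x0190, 0x22202725, 0x000000BA, 96 },
	/* ... one entry per case label above ... */
};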
= 0x00004534; + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302A37; + param->reg_AC2 = 0xEF56B61E; + param->reg_DQSIC = 0x0000013F; + param->reg_MRS = 0x00101A50; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000078; + param->reg_FREQ = 0x000057C0; + param->madj_max = 136; + param->dll2_finetune_step = 3; + break; + case 600: + ast_moutdwm(ast, 0x1E6E2020, 0x02E1); + param->reg_MADJ = 0x00136868; + param->reg_SADJ = 0x00004534; + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x32302A37; + param->reg_AC2 = 0xDF56B61F; + param->reg_DQSIC = 0x0000014D; + param->reg_MRS = 0x00101A50; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000023; + param->reg_DQIDLY = 0x00000078; + param->reg_FREQ = 0x000058C0; + param->madj_max = 132; + param->dll2_finetune_step = 3; + break; + case 624: + ast_moutdwm(ast, 0x1E6E2020, 0x0160); + param->reg_MADJ = 0x00136868; + param->reg_SADJ = 0x00004534; + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x32302A37; + param->reg_AC2 = 0xEF56B621; + param->reg_DQSIC = 0x0000015A; + param->reg_MRS = 0x02101A50; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000078; + param->reg_FREQ = 0x000059C0; + param->madj_max = 128; + param->dll2_finetune_step = 3; + break; + } /* switch freq */ + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->dram_config = 0x130; + break; + default: + case AST_DRAM_1Gx16: + param->dram_config = 0x131; + break; + case AST_DRAM_2Gx16: + param->dram_config = 0x132; + break; + case AST_DRAM_4Gx16: + param->dram_config = 0x133; + break; + } /* switch size */ + + switch (param->vram_size) { + default: + case SZ_8M: + param->dram_config |= 0x00; + break; + case SZ_16M: + param->dram_config |= 0x04; + break; + case SZ_32M: + param->dram_config |= 0x08; + break; + case SZ_64M: + param->dram_config |= 0x0c; + break; + } +} + +static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param) +{ + u32 data, data2, retry = 0; + +ddr3_init_start: + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0018, 0x00000100); + ast_moutdwm(ast, 0x1E6E0024, 0x00000000); + ast_moutdwm(ast, 0x1E6E0034, 0x00000000); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ); + ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); + udelay(10); + + ast_moutdwm(ast, 0x1E6E0004, param->dram_config); + ast_moutdwm(ast, 0x1E6E0008, 0x90040f); + ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1); + ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2); + ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); + ast_moutdwm(ast, 0x1E6E0080, 0x00000000); + ast_moutdwm(ast, 0x1E6E0084, 0x00000000); + ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); + ast_moutdwm(ast, 0x1E6E0018, 0x4000A170); + ast_moutdwm(ast, 0x1E6E0018, 0x00002370); + ast_moutdwm(ast, 0x1E6E0038, 0x00000000); + ast_moutdwm(ast, 0x1E6E0040, 0xFF444444); + ast_moutdwm(ast, 0x1E6E0044, 0x22222222); + ast_moutdwm(ast, 0x1E6E0048, 0x22222222); + ast_moutdwm(ast, 0x1E6E004C, 0x00000002); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); + ast_moutdwm(ast, 0x1E6E0054, 0); + ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV); + ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0074, 0x00000000); + ast_moutdwm(ast, 
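For orientation, dram_config as assembled above carries the chip id in its low bits and the VGA memory size in bits [3:2]. A hedged decode of the size field (field meaning taken from the vram_size switch above; the helper is illustrative only):

static u32 dram_config_to_vram_size(u32 dram_config)
{
	switch (dram_config & 0x0c) {	/* size field set by the switch above */
	case 0x04:
		return SZ_16M;
	case 0x08:
		return SZ_32M;
	case 0x0c:
		return SZ_64M;
	default:
		return SZ_8M;		/* 0x00 */
	}
}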
0x1E6E0078, 0x00000000); + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + /* Wait MCLK2X lock to MCLK */ + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { + data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; + if ((data2 & 0xff) > param->madj_max) + break; + ast_moutdwm(ast, 0x1E6E0064, data2); + if (data2 & 0x00100000) + data2 = ((data2 & 0xff) >> 3) + 3; + else + data2 = ((data2 & 0xff) >> 2) + 5; + data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff; + data2 += data & 0xff; + data = data | (data2 << 8); + ast_moutdwm(ast, 0x1E6E0068, data); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000); + udelay(10); + data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff; + ast_moutdwm(ast, 0x1E6E0018, data); + data = data | 0x200; + ast_moutdwm(ast, 0x1E6E0018, data); + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + } + ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff); + data = ast_mindwm(ast, 0x1E6E0018) | 0xC00; + ast_moutdwm(ast, 0x1E6E0018, data); + + ast_moutdwm(ast, 0x1E6E0034, 0x00000001); + ast_moutdwm(ast, 0x1E6E000C, 0x00000040); + udelay(50); + /* Mode Register Setting */ + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000005); + ast_moutdwm(ast, 0x1E6E0028, 0x00000007); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C08); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + data = 0; + if (param->wodt) + data = 0x300; + if (param->rodt) + data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); + ast_moutdwm(ast, 0x1E6E0034, data | 0x3); + + /* Calibrate the DQSI delay */ + if ((cbr_dll2(ast, param) == false) && (retry++ < 10)) + goto ddr3_init_start; + + ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ); + /* ECC Memory Initialization */ +#ifdef ECC + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0070, 0x221); + do { + data = ast_mindwm(ast, 0x1E6E0070); + } while (!(data & 0x00001000)); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); +#endif +} + +static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param) +{ + u32 trap, trap_AC2, trap_MRS; + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + + /* Get trap info */ + trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3; + trap_AC2 = (trap << 20) | (trap << 16); + trap_AC2 += 0x00110000; + trap_MRS = 0x00000040 | (trap << 4); + + param->reg_MADJ = 0x00034C4C; + param->reg_SADJ = 0x00001800; + param->reg_DRV = 0x000000F0; + param->reg_PERIOD = param->dram_freq; + param->rodt = 0; + + switch (param->dram_freq) { + case 264: + ast_moutdwm(ast, 0x1E6E2020, 0x0130); + param->wodt = 0; + param->reg_AC1 = 0x11101513; + param->reg_AC2 = 0x78117011; + param->reg_DQSIC = 0x00000092; + param->reg_MRS = 0x00000842; + param->reg_EMRS = 0x00000000; + param->reg_DRV = 0x000000F0; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x0000005A; + param->reg_FREQ = 0x00004AC0; + param->madj_max = 138; + param->dll2_finetune_step = 3; + break; + case 336: + ast_moutdwm(ast, 0x1E6E2020,
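One robustness note on ddr3_init() above: the MCLK2X-lock polls on bit 27 of MCR1C are unbounded, unlike the mmc_test() paths, which honour TIMEOUT. If a bound were ever wanted, a minimal hedged variant (same register and bit, hypothetical helper) could look like:

/* Sketch: poll MCR1C[27] (MCLK2X locked to MCLK) with an iteration cap
 * instead of spinning forever; returns false on timeout. */
static bool wait_mclk2x_lock(struct ast_device *ast)
{
	u32 timeout = 0;

	while (!(ast_mindwm(ast, 0x1E6E001C) & 0x08000000)) {
		if (++timeout > TIMEOUT)
			return false;
	}
	return true;
}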
0x0190); + param->wodt = 1; + param->reg_AC1 = 0x22202613; + param->reg_AC2 = 0xAA009016 | trap_AC2; + param->reg_DQSIC = 0x000000BA; + param->reg_MRS = 0x00000A02 | trap_MRS; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000074; + param->reg_FREQ = 0x00004DC0; + param->madj_max = 96; + param->dll2_finetune_step = 3; + switch (param->dram_chipid) { + default: + case AST_DRAM_512Mx16: + param->reg_AC2 = 0xAA009012 | trap_AC2; + break; + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xAA009016 | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xAA009023 | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xAA00903B | trap_AC2; + break; + } + break; + default: + case 396: + ast_moutdwm(ast, 0x1E6E2020, 0x03F1); + param->wodt = 1; + param->rodt = 0; + param->reg_AC1 = 0x33302714; + param->reg_AC2 = 0xCC00B01B | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x00000C02 | trap_MRS; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x00005040; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->reg_AC2 = 0xCC00B016 | trap_AC2; + break; + default: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC00B01B | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC00B02B | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00B03F | trap_AC2; + break; + } + + break; + + case 408: + ast_moutdwm(ast, 0x1E6E2020, 0x01F0); + param->wodt = 1; + param->rodt = 0; + param->reg_AC1 = 0x33302714; + param->reg_AC2 = 0xCC00B01B | trap_AC2; + param->reg_DQSIC = 0x000000E2; + param->reg_MRS = 0x00000C02 | trap_MRS; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x000000FA; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000089; + param->reg_FREQ = 0x000050C0; + param->madj_max = 96; + param->dll2_finetune_step = 4; + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->reg_AC2 = 0xCC00B016 | trap_AC2; + break; + default: + case AST_DRAM_1Gx16: + param->reg_AC2 = 0xCC00B01B | trap_AC2; + break; + case AST_DRAM_2Gx16: + param->reg_AC2 = 0xCC00B02B | trap_AC2; + break; + case AST_DRAM_4Gx16: + param->reg_AC2 = 0xCC00B03F | trap_AC2; + break; + } + + break; + case 456: + ast_moutdwm(ast, 0x1E6E2020, 0x0230); + param->wodt = 0; + param->reg_AC1 = 0x33302815; + param->reg_AC2 = 0xCD44B01E; + param->reg_DQSIC = 0x000000FC; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000000; + param->reg_DRV = 0x00000000; + param->reg_IOZ = 0x00000034; + param->reg_DQIDLY = 0x00000097; + param->reg_FREQ = 0x000052C0; + param->madj_max = 88; + param->dll2_finetune_step = 3; + break; + case 504: + ast_moutdwm(ast, 0x1E6E2020, 0x0261); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302815; + param->reg_AC2 = 0xDE44C022; + param->reg_DQSIC = 0x00000117; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x0000000A; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000A0; + param->reg_FREQ = 0x000054C0; + param->madj_max = 79; + param->dll2_finetune_step = 3; + break; + case 528: + ast_moutdwm(ast, 0x1E6E2020, 0x0120); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x33302815; + param->reg_AC2 = 0xEF44D024; + param->reg_DQSIC = 0x00000125; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F9; + param->reg_IOZ = 
0x00000045; + param->reg_DQIDLY = 0x000000A7; + param->reg_FREQ = 0x000055C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + case 552: + ast_moutdwm(ast, 0x1E6E2020, 0x02A1); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x43402915; + param->reg_AC2 = 0xFF44E025; + param->reg_DQSIC = 0x00000132; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000040; + param->reg_DRV = 0x0000000A; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000AD; + param->reg_FREQ = 0x000056C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + case 576: + ast_moutdwm(ast, 0x1E6E2020, 0x0140); + param->wodt = 1; + param->rodt = 1; + param->reg_AC1 = 0x43402915; + param->reg_AC2 = 0xFF44E027; + param->reg_DQSIC = 0x0000013F; + param->reg_MRS = 0x00000E72; + param->reg_EMRS = 0x00000004; + param->reg_DRV = 0x000000F5; + param->reg_IOZ = 0x00000045; + param->reg_DQIDLY = 0x000000B3; + param->reg_FREQ = 0x000057C0; + param->madj_max = 76; + param->dll2_finetune_step = 3; + break; + } + + switch (param->dram_chipid) { + case AST_DRAM_512Mx16: + param->dram_config = 0x100; + break; + default: + case AST_DRAM_1Gx16: + param->dram_config = 0x121; + break; + case AST_DRAM_2Gx16: + param->dram_config = 0x122; + break; + case AST_DRAM_4Gx16: + param->dram_config = 0x123; + break; + } /* switch size */ + + switch (param->vram_size) { + default: + case SZ_8M: + param->dram_config |= 0x00; + break; + case SZ_16M: + param->dram_config |= 0x04; + break; + case SZ_32M: + param->dram_config |= 0x08; + break; + case SZ_64M: + param->dram_config |= 0x0c; + break; + } +} + +static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param) +{ + u32 data, data2, retry = 0; + +ddr2_init_start: + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0018, 0x00000100); + ast_moutdwm(ast, 0x1E6E0024, 0x00000000); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ); + ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); + udelay(10); + + ast_moutdwm(ast, 0x1E6E0004, param->dram_config); + ast_moutdwm(ast, 0x1E6E0008, 0x90040f); + ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1); + ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2); + ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); + ast_moutdwm(ast, 0x1E6E0080, 0x00000000); + ast_moutdwm(ast, 0x1E6E0084, 0x00000000); + ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); + ast_moutdwm(ast, 0x1E6E0018, 0x4000A130); + ast_moutdwm(ast, 0x1E6E0018, 0x00002330); + ast_moutdwm(ast, 0x1E6E0038, 0x00000000); + ast_moutdwm(ast, 0x1E6E0040, 0xFF808000); + ast_moutdwm(ast, 0x1E6E0044, 0x88848466); + ast_moutdwm(ast, 0x1E6E0048, 0x44440008); + ast_moutdwm(ast, 0x1E6E004C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); + ast_moutdwm(ast, 0x1E6E0054, 0); + ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV); + ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0074, 0x00000000); + ast_moutdwm(ast, 0x1E6E0078, 0x00000000); + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + + /* Wait MCLK2X lock to MCLK */ + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { + data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; + if ((data2 & 0xff) > param->madj_max) + break; + ast_moutdwm(ast, 0x1E6E0064, data2); + if (data2 & 
0x00100000) + data2 = ((data2 & 0xff) >> 3) + 3; + else + data2 = ((data2 & 0xff) >> 2) + 5; + data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff; + data2 += data & 0xff; + data = data | (data2 << 8); + ast_moutdwm(ast, 0x1E6E0068, data); + udelay(10); + ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000); + udelay(10); + data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff; + ast_moutdwm(ast, 0x1E6E0018, data); + data = data | 0x200; + ast_moutdwm(ast, 0x1E6E0018, data); + do { + data = ast_mindwm(ast, 0x1E6E001C); + } while (!(data & 0x08000000)); + + data = ast_mindwm(ast, 0x1E6E001C); + data = (data >> 8) & 0xff; + } + ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff); + data = ast_mindwm(ast, 0x1E6E0018) | 0xC00; + ast_moutdwm(ast, 0x1E6E0018, data); + + ast_moutdwm(ast, 0x1E6E0034, 0x00000001); + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + udelay(50); + /* Mode Register Setting */ + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000005); + ast_moutdwm(ast, 0x1E6E0028, 0x00000007); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + + ast_moutdwm(ast, 0x1E6E000C, 0x00005C08); + ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000001); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); + ast_moutdwm(ast, 0x1E6E0028, 0x00000003); + + ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01); + data = 0; + if (param->wodt) + data = 0x500; + if (param->rodt) + data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); + ast_moutdwm(ast, 0x1E6E0034, data | 0x3); + ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ); + + /* Calibrate the DQSI delay */ + if ((cbr_dll2(ast, param) == false) && (retry++ < 10)) + goto ddr2_init_start; + + /* ECC Memory Initialization */ +#ifdef ECC + ast_moutdwm(ast, 0x1E6E007C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0070, 0x221); + do { + data = ast_mindwm(ast, 0x1E6E0070); + } while (!(data & 0x00001000)); + ast_moutdwm(ast, 0x1E6E0070, 0x00000000); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0050, 0x00000000); +#endif +} + +static void ast_post_chip_2300(struct ast_device *ast) +{ + struct ast2300_dram_param param; + u32 temp; + u8 reg; + + reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); + if ((reg & 0x80) == 0) {/* vga only */ + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + ast_write32(ast, 0x12000, 0x1688a8a8); + do { + ; + } while (ast_read32(ast, 0x12000) != 0x1); + + ast_write32(ast, 0x10000, 0xfc600309); + do { + ; + } while (ast_read32(ast, 0x10000) != 0x1); + + /* Slow down CPU/AHB CLK in VGA only mode */ + temp = ast_read32(ast, 0x12008); + temp |= 0x73; + ast_write32(ast, 0x12008, temp); + + param.dram_freq = 396; + param.dram_type = AST_DDR3; + temp = ast_mindwm(ast, 0x1e6e2070); + if (temp & 0x01000000) + param.dram_type = AST_DDR2; + switch (temp & 0x18000000) { + case 0: + param.dram_chipid = AST_DRAM_512Mx16; + break; + default: + case 0x08000000: + param.dram_chipid = AST_DRAM_1Gx16; + break; + case 0x10000000: + param.dram_chipid = AST_DRAM_2Gx16; + break; + case 0x18000000: + param.dram_chipid = AST_DRAM_4Gx16; + break; + } + switch (temp & 0x0c) { + default: + case 0x00: + param.vram_size = SZ_8M; + break; + case 0x04: + param.vram_size = SZ_16M; + break; + case 0x08: + param.vram_size = SZ_32M; + break; + case 0x0c: + 
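For clarity, the on-die-termination word that ddr2_init() above (and ddr3_init() before it) writes to MCR34 is built from the per-frequency wodt/rodt flags, with the read-ODT strength pulled out of reg_AC2 bits [18:17]. Condensed into one hypothetical helper ('ddr2' selects the 0x500-versus-0x300 write-ODT pattern seen above; assumes struct ast2300_dram_param stays visible here):

static u32 build_odt_word(const struct ast2300_dram_param *param, bool ddr2)
{
	u32 data = 0;

	if (param->wodt)
		data = ddr2 ? 0x500 : 0x300;	/* write-ODT enable */
	if (param->rodt)
		data |= 0x3000 | ((param->reg_AC2 & 0x60000) >> 3);
	return data | 0x3;	/* low bits as in both callers above */
}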
param.vram_size = SZ_64M; + break; + } + + if (param.dram_type == AST_DDR3) { + get_ddr3_info(ast, &param); + ddr3_init(ast, &param); + } else { + get_ddr2_info(ast, &param); + ddr2_init(ast, &param); + } + + temp = ast_mindwm(ast, 0x1e6e2040); + ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); + } + + /* wait ready */ + do { + reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); + } while ((reg & 0x40) == 0); +} + +int ast_2300_post(struct ast_device *ast) +{ + ast_2300_set_def_ext_reg(ast); + + if (ast->config_mode == ast_use_p2a) { + ast_post_chip_2300(ast); + ast_init_3rdtx(ast); + } else { + if (ast->tx_chip == AST_TX_SIL164) { + /* Enable DVO */ + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); + } + } + + return 0; +} diff --git a/drivers/gpu/drm/ast/ast_2500.c b/drivers/gpu/drm/ast/ast_2500.c new file mode 100644 index 000000000000..1e541498ea67 --- /dev/null +++ b/drivers/gpu/drm/ast/ast_2500.c @@ -0,0 +1,569 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software.
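Stepping back to ast_post_chip_2300() above: the whole DRAM personality is derived from one strap register (0x1e6e2070), read once and decoded field by field. The same decode, condensed into a hypothetical helper using the masks shown above:

static void ast2300_decode_straps(u32 strap, struct ast2300_dram_param *p)
{
	/* bit 24: DDR2 fitted, otherwise DDR3 */
	p->dram_type = (strap & 0x01000000) ? AST_DDR2 : AST_DDR3;

	switch (strap & 0x18000000) {	/* DRAM chip id */
	case 0x00000000:
		p->dram_chipid = AST_DRAM_512Mx16;
		break;
	case 0x10000000:
		p->dram_chipid = AST_DRAM_2Gx16;
		break;
	case 0x18000000:
		p->dram_chipid = AST_DRAM_4Gx16;
		break;
	default:
		p->dram_chipid = AST_DRAM_1Gx16;
		break;
	}

	switch (strap & 0x0c) {		/* VGA memory size */
	case 0x04:
		p->vram_size = SZ_16M;
		break;
	case 0x08:
		p->vram_size = SZ_32M;
		break;
	case 0x0c:
		p->vram_size = SZ_64M;
		break;
	default:
		p->vram_size = SZ_8M;
		break;
	}
}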
+ */ +/* + * Authors: Dave Airlie <[email protected]> + */ + +#include <linux/delay.h> + +#include <drm/drm_print.h> + +#include "ast_drv.h" +#include "ast_post.h" + +/* + * POST + */ + +/* + * AST2500 DRAM settings modules + */ + +#define REGTBL_NUM 17 +#define REGIDX_010 0 +#define REGIDX_014 1 +#define REGIDX_018 2 +#define REGIDX_020 3 +#define REGIDX_024 4 +#define REGIDX_02C 5 +#define REGIDX_030 6 +#define REGIDX_214 7 +#define REGIDX_2E0 8 +#define REGIDX_2E4 9 +#define REGIDX_2E8 10 +#define REGIDX_2EC 11 +#define REGIDX_2F0 12 +#define REGIDX_2F4 13 +#define REGIDX_2F8 14 +#define REGIDX_RFC 15 +#define REGIDX_PLL 16 + +static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = { + 0x64604D38, /* 0x010 */ + 0x29690599, /* 0x014 */ + 0x00000300, /* 0x018 */ + 0x00000000, /* 0x020 */ + 0x00000000, /* 0x024 */ + 0x02181E70, /* 0x02C */ + 0x00000040, /* 0x030 */ + 0x00000024, /* 0x214 */ + 0x02001300, /* 0x2E0 */ + 0x0E0000A0, /* 0x2E4 */ + 0x000E001B, /* 0x2E8 */ + 0x35B8C105, /* 0x2EC */ + 0x08090408, /* 0x2F0 */ + 0x9B000800, /* 0x2F4 */ + 0x0E400A00, /* 0x2F8 */ + 0x9971452F, /* tRFC */ + 0x000071C1 /* PLL */ +}; + +static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = { + 0x63604E37, /* 0x010 */ + 0xE97AFA99, /* 0x014 */ + 0x00019000, /* 0x018 */ + 0x08000000, /* 0x020 */ + 0x00000400, /* 0x024 */ + 0x00000410, /* 0x02C */ + 0x00000101, /* 0x030 */ + 0x00000024, /* 0x214 */ + 0x03002900, /* 0x2E0 */ + 0x0E0000A0, /* 0x2E4 */ + 0x000E001C, /* 0x2E8 */ + 0x35B8C106, /* 0x2EC */ + 0x08080607, /* 0x2F0 */ + 0x9B000900, /* 0x2F4 */ + 0x0E400A00, /* 0x2F8 */ + 0x99714545, /* tRFC */ + 0x000071C1 /* PLL */ +}; + +#define TIMEOUT 5000000 + +void ast_2500_patch_ahb(void __iomem *regs) +{ + u32 data; + + /* Clear bus lock condition */ + __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03); + __ast_moutdwm(regs, 0x1e600084, 0x00010000); + __ast_moutdwm(regs, 0x1e600088, 0x00000000); + __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); + + data = __ast_mindwm(regs, 0x1e6e2070); + if (data & 0x08000000) { /* check fast reset */ + /* + * If "fast reset" is enabled for the ARM-ICE debugger, + * the WDT must also be enabled: + * WDT04 is the WDT#1 reload reg.
+ * WDT08 is the WDT#1 counter restart reg (avoids a system deadlock) + * WDT0C is the WDT#1 control reg + * [6:5]:= 01:Full chip + * [4]:= 1:1MHz clock source + * [1]:= 1:WDT will be cleared and disabled after timeout occurs + * [0]:= 1:WDT enable + */ + __ast_moutdwm(regs, 0x1E785004, 0x00000010); + __ast_moutdwm(regs, 0x1E785008, 0x00004755); + __ast_moutdwm(regs, 0x1E78500c, 0x00000033); + udelay(1000); + } + + do { + __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); + data = __ast_mindwm(regs, 0x1e6e2000); + } while (data != 1); + + __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */ +} + +static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen) +{ + return mmc_test(ast, datagen, 0x85); +} + +static bool cbr_test_2500(struct ast_device *ast) +{ + ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); + ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); + if (!mmc_test_burst(ast, 0)) + return false; + if (!mmc_test_single_2500(ast, 0)) + return false; + return true; +} + +static bool ddr_test_2500(struct ast_device *ast) +{ + ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); + ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); + if (!mmc_test_burst(ast, 0)) + return false; + if (!mmc_test_burst(ast, 1)) + return false; + if (!mmc_test_burst(ast, 2)) + return false; + if (!mmc_test_burst(ast, 3)) + return false; + if (!mmc_test_single_2500(ast, 0)) + return false; + return true; +} + +static void ddr_init_common_2500(struct ast_device *ast) +{ + ast_moutdwm(ast, 0x1E6E0034, 0x00020080); + ast_moutdwm(ast, 0x1E6E0008, 0x2003000F); + ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF); + ast_moutdwm(ast, 0x1E6E0040, 0x88448844); + ast_moutdwm(ast, 0x1E6E0044, 0x24422288); + ast_moutdwm(ast, 0x1E6E0048, 0x22222222); + ast_moutdwm(ast, 0x1E6E004C, 0x22222222); + ast_moutdwm(ast, 0x1E6E0050, 0x80000000); + ast_moutdwm(ast, 0x1E6E0208, 0x00000000); + ast_moutdwm(ast, 0x1E6E0218, 0x00000000); + ast_moutdwm(ast, 0x1E6E0220, 0x00000000); + ast_moutdwm(ast, 0x1E6E0228, 0x00000000); + ast_moutdwm(ast, 0x1E6E0230, 0x00000000); + ast_moutdwm(ast, 0x1E6E02A8, 0x00000000); + ast_moutdwm(ast, 0x1E6E02B0, 0x00000000); + ast_moutdwm(ast, 0x1E6E0240, 0x86000000); + ast_moutdwm(ast, 0x1E6E0244, 0x00008600); + ast_moutdwm(ast, 0x1E6E0248, 0x80000000); + ast_moutdwm(ast, 0x1E6E024C, 0x80808080); +} + +static void ddr_phy_init_2500(struct ast_device *ast) +{ + u32 data, pass, timecnt; + + pass = 0; + ast_moutdwm(ast, 0x1E6E0060, 0x00000005); + while (!pass) { + for (timecnt = 0; timecnt < TIMEOUT; timecnt++) { + data = ast_mindwm(ast, 0x1E6E0060) & 0x1; + if (!data) + break; + } + if (timecnt != TIMEOUT) { + data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000; + if (!data) + pass = 1; + } + if (!pass) { + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + udelay(10); /* delay 10 us */ + ast_moutdwm(ast, 0x1E6E0060, 0x00000005); + } + } + + ast_moutdwm(ast, 0x1E6E0060, 0x00000006); +} + +/* + * Check DRAM Size + * 1Gb : 0x80000000 ~ 0x87FFFFFF + * 2Gb : 0x80000000 ~ 0x8FFFFFFF + * 4Gb : 0x80000000 ~ 0x9FFFFFFF + * 8Gb : 0x80000000 ~ 0xBFFFFFFF + */ +static void check_dram_size_2500(struct ast_device *ast, u32 tRFC) +{ + u32 reg_04, reg_14; + + reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc; + reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00; + + ast_moutdwm(ast, 0xA0100000, 0x41424344); + ast_moutdwm(ast, 0x90100000, 0x35363738); + ast_moutdwm(ast, 0x88100000, 0x292A2B2C); + ast_moutdwm(ast, 0x80100000, 0x1D1E1F10); + + /* Check 8Gbit */ + if (ast_mindwm(ast, 0xA0100000) == 0x41424344) { + reg_04 |= 0x03; + reg_14 |= (tRFC >> 24) & 0xFF; + /* Check
4Gbit */ + } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) { + reg_04 |= 0x02; + reg_14 |= (tRFC >> 16) & 0xFF; + /* Check 2Gbit */ + } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) { + reg_04 |= 0x01; + reg_14 |= (tRFC >> 8) & 0xFF; + } else { + reg_14 |= tRFC & 0xFF; + } + ast_moutdwm(ast, 0x1E6E0004, reg_04); + ast_moutdwm(ast, 0x1E6E0014, reg_14); +} + +static void enable_cache_2500(struct ast_device *ast) +{ + u32 reg_04, data; + + reg_04 = ast_mindwm(ast, 0x1E6E0004); + ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000); + + do + data = ast_mindwm(ast, 0x1E6E0004); + while (!(data & 0x80000)); + ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400); +} + +static void set_mpll_2500(struct ast_device *ast) +{ + u32 addr, data, param; + + /* Reset MMC */ + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); + ast_moutdwm(ast, 0x1E6E0034, 0x00020080); + for (addr = 0x1e6e0004; addr < 0x1e6e0090;) { + ast_moutdwm(ast, addr, 0x0); + addr += 4; + } + ast_moutdwm(ast, 0x1E6E0034, 0x00020000); + + ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); + data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000; + if (data) { + /* CLKIN = 25MHz */ + param = 0x930023E0; + ast_moutdwm(ast, 0x1E6E2160, 0x00011320); + } else { + /* CLKIN = 24MHz */ + param = 0x93002400; + } + ast_moutdwm(ast, 0x1E6E2020, param); + udelay(100); +} + +static void reset_mmc_2500(struct ast_device *ast) +{ + ast_moutdwm(ast, 0x1E78505C, 0x00000004); + ast_moutdwm(ast, 0x1E785044, 0x00000001); + ast_moutdwm(ast, 0x1E785048, 0x00004755); + ast_moutdwm(ast, 0x1E78504C, 0x00000013); + mdelay(100); + ast_moutdwm(ast, 0x1E785054, 0x00000077); + ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); +} + +static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table) +{ + ast_moutdwm(ast, 0x1E6E0004, 0x00000303); + ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); + ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); + ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); + ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ + ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ + ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ + ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ + + /* DDR PHY Setting */ + ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE); + ast_moutdwm(ast, 0x1E6E0204, 0x00001001); + ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); + ast_moutdwm(ast, 0x1E6E0210, 0x20000000); + ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); + ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); + ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); + ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); + ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); + ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); + ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); + ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); + ast_moutdwm(ast, 0x1E6E0290, 0x00100008); + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006); + + /* Controller Setting */ + ast_moutdwm(ast, 0x1E6E0034, 0x00020091); + + /* Wait DDR PHY init done */ + ddr_phy_init_2500(ast); + + ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); + ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); + ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); + + check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); + enable_cache_2500(ast); + ast_moutdwm(ast, 0x1E6E001C, 0x00000008); + ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); +} + +static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table) +{ + u32 data, data2, pass, retrycnt; + u32 ddr_vref, phy_vref; + u32 
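The probe in check_dram_size_2500() above deserves a gloss: with the controller temporarily configured for the largest window, writes past the end of the fitted DRAM wrap (alias) onto lower addresses, so distinct markers written at the 8/4/2/1 Gbit boundaries and read back top-down reveal the density without any SPD-style metadata. The same idea in isolation (hypothetical helper; addresses and markers as above):

/* Sketch of the aliasing probe; returns the detected density in Gbit. */
static int probe_dram_gbit(struct ast_device *ast)
{
	ast_moutdwm(ast, 0xA0100000, 0x41424344);	/* 8 Gbit marker */
	ast_moutdwm(ast, 0x90100000, 0x35363738);	/* 4 Gbit marker */
	ast_moutdwm(ast, 0x88100000, 0x292A2B2C);	/* 2 Gbit marker */
	ast_moutdwm(ast, 0x80100000, 0x1D1E1F10);	/* 1 Gbit marker */

	if (ast_mindwm(ast, 0xA0100000) == 0x41424344)
		return 8;	/* marker survived: no aliasing below 8 Gbit */
	if (ast_mindwm(ast, 0x90100000) == 0x35363738)
		return 4;
	if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C)
		return 2;
	return 1;
}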
min_ddr_vref = 0, min_phy_vref = 0; + u32 max_ddr_vref = 0, max_phy_vref = 0; + + ast_moutdwm(ast, 0x1E6E0004, 0x00000313); + ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); + ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); + ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); + ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ + ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ + ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ + ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ + + /* DDR PHY Setting */ + ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE); + ast_moutdwm(ast, 0x1E6E0204, 0x09002000); + ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); + ast_moutdwm(ast, 0x1E6E0210, 0x20000000); + ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); + ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); + ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); + ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); + ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); + ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); + ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); + ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); + ast_moutdwm(ast, 0x1E6E0290, 0x00100008); + ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C); + ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E); + + /* Controller Setting */ + ast_moutdwm(ast, 0x1E6E0034, 0x0001A991); + + /* Train PHY Vref first */ + pass = 0; + + for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { + max_phy_vref = 0x0; + pass = 0; + ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06); + for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) { + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8)); + /* Fire DFI Init */ + ddr_phy_init_2500(ast); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + if (cbr_test_2500(ast)) { + pass++; + data = ast_mindwm(ast, 0x1E6E03D0); + data2 = data >> 8; + data = data & 0xff; + if (data > data2) + data = data2; + if (max_phy_vref < data) { + max_phy_vref = data; + min_phy_vref = phy_vref; + } + } else if (pass > 0) { + break; + } + } + } + ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8)); + + /* Train DDR Vref next */ + pass = 0; + + for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { + min_ddr_vref = 0xFF; + max_ddr_vref = 0x0; + pass = 0; + for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) { + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); + /* Fire DFI Init */ + ddr_phy_init_2500(ast); + ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); + if (cbr_test_2500(ast)) { + pass++; + if (min_ddr_vref > ddr_vref) + min_ddr_vref = ddr_vref; + if (max_ddr_vref < ddr_vref) + max_ddr_vref = ddr_vref; + } else if (pass != 0) { + break; + } + } + } + + ast_moutdwm(ast, 0x1E6E000C, 0x00000000); + ast_moutdwm(ast, 0x1E6E0060, 0x00000000); + ddr_vref = (min_ddr_vref + max_ddr_vref + 1) >> 1; + ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); + + /* Wait DDR PHY init done */ + ddr_phy_init_2500(ast); + + ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); + ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); + ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); + + check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); + enable_cache_2500(ast); + ast_moutdwm(ast, 0x1E6E001C, 0x00000008); + ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); +} + +static bool ast_dram_init_2500(struct ast_device 
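The DDR4 path above trains two reference voltages: the PHY-side Vref keeps the setting with the widest data eye reported back in MCR3D0, while the DRAM-side Vref simply centres on the passing range. A condensed sketch of the second sweep (it drops the retry and early-exit bookkeeping of ddr4_init_2500(); hypothetical helper, same registers):

static u32 train_ddr_vref(struct ast_device *ast)
{
	u32 vref, min_pass = 0xff, max_pass = 0;

	for (vref = 0; vref < 0x40; vref++) {
		ast_moutdwm(ast, 0x1E6E000C, 0x00000000);
		ast_moutdwm(ast, 0x1E6E0060, 0x00000000);
		ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (vref << 8));
		ddr_phy_init_2500(ast);		/* re-fire DFI init per trial */
		ast_moutdwm(ast, 0x1E6E000C, 0x00005C01);
		if (cbr_test_2500(ast)) {
			if (vref < min_pass)
				min_pass = vref;
			if (vref > max_pass)
				max_pass = vref;
		}
	}
	return (min_pass + max_pass + 1) >> 1;	/* centre of the eye */
}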
*ast) +{ + u32 data; + u32 max_tries = 5; + + do { + if (max_tries-- == 0) + return false; + set_mpll_2500(ast); + reset_mmc_2500(ast); + ddr_init_common_2500(ast); + + data = ast_mindwm(ast, 0x1E6E2070); + if (data & 0x01000000) + ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table); + else + ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table); + } while (!ddr_test_2500(ast)); + + ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41); + + /* Patch code */ + data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF; + ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000); + + return true; +} + +static void ast_post_chip_2500(struct ast_device *ast) +{ + struct drm_device *dev = &ast->base; + u32 temp; + u8 reg; + + reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); + if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */ + /* Clear bus lock condition */ + ast_2500_patch_ahb(ast->regs); + + /* Disable watchdog */ + ast_moutdwm(ast, 0x1E78502C, 0x00000000); + ast_moutdwm(ast, 0x1E78504C, 0x00000000); + + /* + * Reset the USB port to work around the USB unknown-device issue + * SCU90 is Multi-function Pin Control #5 + * [29]:= 1:Enable USB2.0 Host port#1 (the mutually shared USB2.0 hub + * port). + * SCU94 is Multi-function Pin Control #6 + * [14:13]:= 1x:USB2.0 Host2 controller + * SCU70 is Hardware Strap reg + * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by + * [18]: 0(24)/1(48) MHz) + * SCU7C is the write-1-to-clear reg for SCU70 + * [23]:= write 1 and SCU70[23] will be cleared to 0. + */ + ast_moutdwm(ast, 0x1E6E2090, 0x20000000); + ast_moutdwm(ast, 0x1E6E2094, 0x00004000); + if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) { + ast_moutdwm(ast, 0x1E6E207C, 0x00800000); + mdelay(100); + ast_moutdwm(ast, 0x1E6E2070, 0x00800000); + } + /* Modify eSPI reset pin */ + temp = ast_mindwm(ast, 0x1E6E2070); + if (temp & 0x02000000) + ast_moutdwm(ast, 0x1E6E207C, 0x00004000); + + /* Slow down CPU/AHB CLK in VGA only mode */ + temp = ast_read32(ast, 0x12008); + temp |= 0x73; + ast_write32(ast, 0x12008, temp); + + if (!ast_dram_init_2500(ast)) + drm_err(dev, "DRAM init failed!\n"); + + temp = ast_mindwm(ast, 0x1e6e2040); + ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); + } + + /* wait ready */ + do { + reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); + } while ((reg & 0x40) == 0); +} + +int ast_2500_post(struct ast_device *ast) +{ + ast_2300_set_def_ext_reg(ast); + + if (ast->config_mode == ast_use_p2a) { + ast_post_chip_2500(ast); + } else { + if (ast->tx_chip == AST_TX_SIL164) { + /* Enable DVO */ + ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); + } + } + + return 0; +} diff --git a/drivers/gpu/drm/ast/ast_2600.c b/drivers/gpu/drm/ast/ast_2600.c new file mode 100644 index 000000000000..8d75a47444f5 --- /dev/null +++ b/drivers/gpu/drm/ast/ast_2600.c @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2012 Red Hat Inc.
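For reference, the REGIDX_* slots of the two timing tables above map almost one-to-one onto MMC register offsets in ddr3_init_2500()/ddr4_init_2500(); only the last two entries are special (tRFC feeds check_dram_size_2500(), PLL goes to MCR120), and the 0x02C value is first written with 0x100 ORed in. A hypothetical mapping helper, for illustration only:

static const u16 regidx_to_mcr[REGTBL_NUM - 2] = {
	[REGIDX_010] = 0x010, [REGIDX_014] = 0x014, [REGIDX_018] = 0x018,
	[REGIDX_020] = 0x020, [REGIDX_024] = 0x024, [REGIDX_02C] = 0x02C,
	[REGIDX_030] = 0x030, [REGIDX_214] = 0x214, [REGIDX_2E0] = 0x2E0,
	[REGIDX_2E4] = 0x2E4, [REGIDX_2E8] = 0x2E8, [REGIDX_2EC] = 0x2EC,
	[REGIDX_2F0] = 0x2F0, [REGIDX_2F4] = 0x2F4, [REGIDX_2F8] = 0x2F8,
};

static void apply_timing_table(struct ast_device *ast, const u32 *tbl)
{
	int i;

	/* write the 15 directly-mapped slots; tRFC and PLL are consumed
	 * elsewhere, as noted above */
	for (i = 0; i < REGTBL_NUM - 2; i++)
		ast_moutdwm(ast, 0x1E6E0000 + regidx_to_mcr[i], tbl[i]);
}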
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + */ +/* + * Authors: Dave Airlie <[email protected]> + */ + +#include "ast_drv.h" +#include "ast_post.h" + +/* + * POST + */ + +int ast_2600_post(struct ast_device *ast) +{ + ast_2300_set_def_ext_reg(ast); + + if (ast->tx_chip == AST_TX_ASTDP) + return ast_dp_launch(ast); + + return 0; +} diff --git a/drivers/gpu/drm/ast/ast_dram_tables.h b/drivers/gpu/drm/ast/ast_dram_tables.h deleted file mode 100644 index 1e9ac9d6d26c..000000000000 --- a/drivers/gpu/drm/ast/ast_dram_tables.h +++ /dev/null @@ -1,207 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef AST_DRAM_TABLES_H -#define AST_DRAM_TABLES_H - -/* DRAM timing tables */ -struct ast_dramstruct { - u16 index; - u32 data; -}; - -static const struct ast_dramstruct ast2000_dram_table_data[] = { - { 0x0108, 0x00000000 }, - { 0x0120, 0x00004a21 }, - { 0xFF00, 0x00000043 }, - { 0x0000, 0xFFFFFFFF }, - { 0x0004, 0x00000089 }, - { 0x0008, 0x22331353 }, - { 0x000C, 0x0d07000b }, - { 0x0010, 0x11113333 }, - { 0x0020, 0x00110350 }, - { 0x0028, 0x1e0828f0 }, - { 0x0024, 0x00000001 }, - { 0x001C, 0x00000000 }, - { 0x0014, 0x00000003 }, - { 0xFF00, 0x00000043 }, - { 0x0018, 0x00000131 }, - { 0x0014, 0x00000001 }, - { 0xFF00, 0x00000043 }, - { 0x0018, 0x00000031 }, - { 0x0014, 0x00000001 }, - { 0xFF00, 0x00000043 }, - { 0x0028, 0x1e0828f1 }, - { 0x0024, 0x00000003 }, - { 0x002C, 0x1f0f28fb }, - { 0x0030, 0xFFFFFE01 }, - { 0xFFFF, 0xFFFFFFFF } -}; - -static const struct ast_dramstruct ast1100_dram_table_data[] = { - { 0x2000, 0x1688a8a8 }, - { 0x2020, 0x000041f0 }, - { 0xFF00, 0x00000043 }, - { 0x0000, 0xfc600309 }, - { 0x006C, 0x00909090 }, - { 0x0064, 0x00050000 }, - { 0x0004, 0x00000585 }, - { 0x0008, 0x0011030f }, - { 0x0010, 0x22201724 }, - { 0x0018, 0x1e29011a }, - { 0x0020, 0x00c82222 }, - { 0x0014, 0x01001523 }, - { 0x001C, 0x1024010d }, - { 0x0024, 0x00cb2522 }, - { 0x0038, 0xffffff82 }, - { 0x003C, 0x00000000 }, - { 0x0040, 0x00000000 }, - { 0x0044, 0x00000000 }, - { 0x0048, 0x00000000 }, - { 0x004C, 0x00000000 }, - { 0x0050, 0x00000000 }, - { 0x0054, 0x00000000 }, - { 0x0058, 0x00000000 }, - { 0x005C, 0x00000000 }, - { 0x0060, 0x032aa02a }, - { 0x0064, 0x002d3000 }, - { 0x0068, 0x00000000 }, - { 0x0070, 0x00000000 }, - { 0x0074, 0x00000000 }, - { 0x0078, 0x00000000 }, - { 0x007C, 0x00000000 }, - { 0x0034, 0x00000001 }, - { 0xFF00, 0x00000043 }, - { 0x002C, 0x00000732 }, - { 0x0030, 0x00000040 }, - { 
0x0028, 0x00000005 }, - { 0x0028, 0x00000007 }, - { 0x0028, 0x00000003 }, - { 0x0028, 0x00000001 }, - { 0x000C, 0x00005a08 }, - { 0x002C, 0x00000632 }, - { 0x0028, 0x00000001 }, - { 0x0030, 0x000003c0 }, - { 0x0028, 0x00000003 }, - { 0x0030, 0x00000040 }, - { 0x0028, 0x00000003 }, - { 0x000C, 0x00005a21 }, - { 0x0034, 0x00007c03 }, - { 0x0120, 0x00004c41 }, - { 0xffff, 0xffffffff }, -}; - -static const struct ast_dramstruct ast2100_dram_table_data[] = { - { 0x2000, 0x1688a8a8 }, - { 0x2020, 0x00004120 }, - { 0xFF00, 0x00000043 }, - { 0x0000, 0xfc600309 }, - { 0x006C, 0x00909090 }, - { 0x0064, 0x00070000 }, - { 0x0004, 0x00000489 }, - { 0x0008, 0x0011030f }, - { 0x0010, 0x32302926 }, - { 0x0018, 0x274c0122 }, - { 0x0020, 0x00ce2222 }, - { 0x0014, 0x01001523 }, - { 0x001C, 0x1024010d }, - { 0x0024, 0x00cb2522 }, - { 0x0038, 0xffffff82 }, - { 0x003C, 0x00000000 }, - { 0x0040, 0x00000000 }, - { 0x0044, 0x00000000 }, - { 0x0048, 0x00000000 }, - { 0x004C, 0x00000000 }, - { 0x0050, 0x00000000 }, - { 0x0054, 0x00000000 }, - { 0x0058, 0x00000000 }, - { 0x005C, 0x00000000 }, - { 0x0060, 0x0f2aa02a }, - { 0x0064, 0x003f3005 }, - { 0x0068, 0x02020202 }, - { 0x0070, 0x00000000 }, - { 0x0074, 0x00000000 }, - { 0x0078, 0x00000000 }, - { 0x007C, 0x00000000 }, - { 0x0034, 0x00000001 }, - { 0xFF00, 0x00000043 }, - { 0x002C, 0x00000942 }, - { 0x0030, 0x00000040 }, - { 0x0028, 0x00000005 }, - { 0x0028, 0x00000007 }, - { 0x0028, 0x00000003 }, - { 0x0028, 0x00000001 }, - { 0x000C, 0x00005a08 }, - { 0x002C, 0x00000842 }, - { 0x0028, 0x00000001 }, - { 0x0030, 0x000003c0 }, - { 0x0028, 0x00000003 }, - { 0x0030, 0x00000040 }, - { 0x0028, 0x00000003 }, - { 0x000C, 0x00005a21 }, - { 0x0034, 0x00007c03 }, - { 0x0120, 0x00005061 }, - { 0xffff, 0xffffffff }, -}; - -/* - * AST2500 DRAM settings modules - */ -#define REGTBL_NUM 17 -#define REGIDX_010 0 -#define REGIDX_014 1 -#define REGIDX_018 2 -#define REGIDX_020 3 -#define REGIDX_024 4 -#define REGIDX_02C 5 -#define REGIDX_030 6 -#define REGIDX_214 7 -#define REGIDX_2E0 8 -#define REGIDX_2E4 9 -#define REGIDX_2E8 10 -#define REGIDX_2EC 11 -#define REGIDX_2F0 12 -#define REGIDX_2F4 13 -#define REGIDX_2F8 14 -#define REGIDX_RFC 15 -#define REGIDX_PLL 16 - -static const u32 ast2500_ddr3_1600_timing_table[REGTBL_NUM] = { - 0x64604D38, /* 0x010 */ - 0x29690599, /* 0x014 */ - 0x00000300, /* 0x018 */ - 0x00000000, /* 0x020 */ - 0x00000000, /* 0x024 */ - 0x02181E70, /* 0x02C */ - 0x00000040, /* 0x030 */ - 0x00000024, /* 0x214 */ - 0x02001300, /* 0x2E0 */ - 0x0E0000A0, /* 0x2E4 */ - 0x000E001B, /* 0x2E8 */ - 0x35B8C105, /* 0x2EC */ - 0x08090408, /* 0x2F0 */ - 0x9B000800, /* 0x2F4 */ - 0x0E400A00, /* 0x2F8 */ - 0x9971452F, /* tRFC */ - 0x000071C1 /* PLL */ -}; - -static const u32 ast2500_ddr4_1600_timing_table[REGTBL_NUM] = { - 0x63604E37, /* 0x010 */ - 0xE97AFA99, /* 0x014 */ - 0x00019000, /* 0x018 */ - 0x08000000, /* 0x020 */ - 0x00000400, /* 0x024 */ - 0x00000410, /* 0x02C */ - 0x00000101, /* 0x030 */ - 0x00000024, /* 0x214 */ - 0x03002900, /* 0x2E0 */ - 0x0E0000A0, /* 0x2E4 */ - 0x000E001C, /* 0x2E8 */ - 0x35B8C106, /* 0x2EC */ - 0x08080607, /* 0x2F0 */ - 0x9B000900, /* 0x2F4 */ - 0x0E400A00, /* 0x2F8 */ - 0x99714545, /* tRFC */ - 0x000071C1 /* PLL */ -}; - -#endif diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 054acda41909..473faa92d08c 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -64,7 +64,7 @@ static const struct drm_driver ast_driver = { .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - 
DRM_GEM_SHMEM_DRIVER_OPS_NO_MAP_SGT, + DRM_GEM_SHMEM_DRIVER_OPS, DRM_FBDEV_SHMEM_DRIVER_OPS, }; @@ -171,7 +171,7 @@ static int ast_detect_chip(struct pci_dev *pdev, /* Patch AST2500/AST2510 */ if ((pdev->revision & 0xf0) == 0x40) { if (!(vgacrd0 & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK)) - ast_patch_ahb_2500(regs); + ast_2500_patch_ahb(regs); } /* Double check that it's actually working */ diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 2ee402096cd9..e37a55295ed7 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -417,11 +417,26 @@ struct ast_crtc_state { int ast_mm_init(struct ast_device *ast); +/* ast_2000.c */ +int ast_2000_post(struct ast_device *ast); + +/* ast_2100.c */ +int ast_2100_post(struct ast_device *ast); + +/* ast_2300.c */ +int ast_2300_post(struct ast_device *ast); + +/* ast_2500.c */ +void ast_2500_patch_ahb(void __iomem *regs); +int ast_2500_post(struct ast_device *ast); + +/* ast_2600.c */ +int ast_2600_post(struct ast_device *ast); + /* ast post */ int ast_post_gpu(struct ast_device *ast); u32 ast_mindwm(struct ast_device *ast, u32 r); void ast_moutdwm(struct ast_device *ast, u32 r, u32 v); -void ast_patch_ahb_2500(void __iomem *regs); int ast_vga_output_init(struct ast_device *ast); int ast_sil164_output_init(struct ast_device *ast); diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 7908087bcb5a..b4e8edc7c767 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -29,7 +29,6 @@ */ #include <linux/delay.h> -#include <linux/export.h> #include <linux/pci.h> #include <drm/drm_atomic.h> diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 37568cf3822c..b72914dbed38 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -31,51 +31,10 @@ #include <drm/drm_print.h> -#include "ast_dram_tables.h" #include "ast_drv.h" +#include "ast_post.h" -static void ast_post_chip_2300(struct ast_device *ast); -static void ast_post_chip_2500(struct ast_device *ast); - -static const u8 extreginfo[] = { 0x0f, 0x04, 0x1c, 0xff }; -static const u8 extreginfo_ast2300[] = { 0x0f, 0x04, 0x1f, 0xff }; - -static void ast_set_def_ext_reg(struct ast_device *ast) -{ - u8 i, index, reg; - const u8 *ext_reg_info; - - /* reset scratch */ - for (i = 0x81; i <= 0x9f; i++) - ast_set_index_reg(ast, AST_IO_VGACRI, i, 0x00); - - if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) - ext_reg_info = extreginfo_ast2300; - else - ext_reg_info = extreginfo; - - index = 0xa0; - while (*ext_reg_info != 0xff) { - ast_set_index_reg_mask(ast, AST_IO_VGACRI, index, 0x00, *ext_reg_info); - index++; - ext_reg_info++; - } - - /* disable standard IO/MEM decode if secondary */ - /* ast_set_index_reg-mask(ast, AST_IO_VGACRI, 0xa1, 0xff, 0x3); */ - - /* Set Ext. 
Default */ - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0x8c, 0x00, 0x01); - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb7, 0x00, 0x00); - - /* Enable RAMDAC for A1 */ - reg = 0x04; - if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast) || IS_AST_GEN6(ast)) - reg |= 0x20; - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xb6, 0xff, reg); -} - -static u32 __ast_mindwm(void __iomem *regs, u32 r) +u32 __ast_mindwm(void __iomem *regs, u32 r) { u32 data; @@ -89,7 +48,7 @@ static u32 __ast_mindwm(void __iomem *regs, u32 r) return __ast_read32(regs, 0x10000 + (r & 0x0000ffff)); } -static void __ast_moutdwm(void __iomem *regs, u32 r, u32 v) +void __ast_moutdwm(void __iomem *regs, u32 r, u32 v) { u32 data; @@ -113,332 +72,38 @@ void ast_moutdwm(struct ast_device *ast, u32 r, u32 v) __ast_moutdwm(ast->regs, r, v); } -/* - * AST2100/2150 DLL CBR Setting - */ -#define CBR_SIZE_AST2150 ((16 << 10) - 1) -#define CBR_PASSNUM_AST2150 5 -#define CBR_THRESHOLD_AST2150 10 -#define CBR_THRESHOLD2_AST2150 10 -#define TIMEOUT_AST2150 5000000 - -#define CBR_PATNUM_AST2150 8 - -static const u32 pattern_AST2150[14] = { - 0xFF00FF00, - 0xCC33CC33, - 0xAA55AA55, - 0xFFFE0001, - 0x683501FE, - 0x0F1929B0, - 0x2D0B4346, - 0x60767F02, - 0x6FBE36A6, - 0x3A253035, - 0x3019686D, - 0x41C6167E, - 0x620152BF, - 0x20F050E0 -}; - -static u32 mmctestburst2_ast2150(struct ast_device *ast, u32 datagen) -{ - u32 data, timeout; - - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - ast_moutdwm(ast, 0x1e6e0070, 0x00000001 | (datagen << 3)); - timeout = 0; - do { - data = ast_mindwm(ast, 0x1e6e0070) & 0x40; - if (++timeout > TIMEOUT_AST2150) { - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - return 0xffffffff; - } - } while (!data); - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - ast_moutdwm(ast, 0x1e6e0070, 0x00000003 | (datagen << 3)); - timeout = 0; - do { - data = ast_mindwm(ast, 0x1e6e0070) & 0x40; - if (++timeout > TIMEOUT_AST2150) { - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - return 0xffffffff; - } - } while (!data); - data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7; - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - return data; -} - -#if 0 /* unused in DDX driver - here for completeness */ -static u32 mmctestsingle2_ast2150(struct ast_device *ast, u32 datagen) -{ - u32 data, timeout; - - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - ast_moutdwm(ast, 0x1e6e0070, 0x00000005 | (datagen << 3)); - timeout = 0; - do { - data = ast_mindwm(ast, 0x1e6e0070) & 0x40; - if (++timeout > TIMEOUT_AST2150) { - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - return 0xffffffff; - } - } while (!data); - data = (ast_mindwm(ast, 0x1e6e0070) & 0x80) >> 7; - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - return data; -} -#endif - -static int cbrtest_ast2150(struct ast_device *ast) -{ - int i; - - for (i = 0; i < 8; i++) - if (mmctestburst2_ast2150(ast, i)) - return 0; - return 1; -} - -static int cbrscan_ast2150(struct ast_device *ast, int busw) -{ - u32 patcnt, loop; - - for (patcnt = 0; patcnt < CBR_PATNUM_AST2150; patcnt++) { - ast_moutdwm(ast, 0x1e6e007c, pattern_AST2150[patcnt]); - for (loop = 0; loop < CBR_PASSNUM_AST2150; loop++) { - if (cbrtest_ast2150(ast)) - break; - } - if (loop == CBR_PASSNUM_AST2150) - return 0; - } - return 1; -} - - -static void cbrdlli_ast2150(struct ast_device *ast, int busw) -{ - u32 dll_min[4], dll_max[4], dlli, data, passcnt; - -cbr_start: - dll_min[0] = dll_min[1] = dll_min[2] = dll_min[3] = 0xff; - dll_max[0] = dll_max[1] = dll_max[2] = dll_max[3] = 0x0; - passcnt = 0; - - for (dlli = 0; dlli < 100; dlli++) { - ast_moutdwm(ast, 0x1e6e0068, dlli | 
(dlli << 8) | (dlli << 16) | (dlli << 24)); - data = cbrscan_ast2150(ast, busw); - if (data != 0) { - if (data & 0x1) { - if (dll_min[0] > dlli) - dll_min[0] = dlli; - if (dll_max[0] < dlli) - dll_max[0] = dlli; - } - passcnt++; - } else if (passcnt >= CBR_THRESHOLD_AST2150) - goto cbr_start; - } - if (dll_max[0] == 0 || (dll_max[0]-dll_min[0]) < CBR_THRESHOLD_AST2150) - goto cbr_start; - - dlli = dll_min[0] + (((dll_max[0] - dll_min[0]) * 7) >> 4); - ast_moutdwm(ast, 0x1e6e0068, dlli | (dlli << 8) | (dlli << 16) | (dlli << 24)); -} - - - -static void ast_init_dram_reg(struct ast_device *ast) -{ - u8 j; - u32 data, temp, i; - const struct ast_dramstruct *dram_reg_info; - - j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); - - if ((j & 0x80) == 0) { /* VGA only */ - if (IS_AST_GEN1(ast)) { - dram_reg_info = ast2000_dram_table_data; - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - ast_write32(ast, 0x10100, 0xa8); - - do { - ; - } while (ast_read32(ast, 0x10100) != 0xa8); - } else { /* GEN2/GEN3 */ - if (ast->chip == AST2100 || ast->chip == AST2200) - dram_reg_info = ast2100_dram_table_data; - else - dram_reg_info = ast1100_dram_table_data; - - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - ast_write32(ast, 0x12000, 0x1688A8A8); - do { - ; - } while (ast_read32(ast, 0x12000) != 0x01); - - ast_write32(ast, 0x10000, 0xfc600309); - do { - ; - } while (ast_read32(ast, 0x10000) != 0x01); - } - - while (dram_reg_info->index != 0xffff) { - if (dram_reg_info->index == 0xff00) {/* delay fn */ - for (i = 0; i < 15; i++) - udelay(dram_reg_info->data); - } else if (dram_reg_info->index == 0x4 && !IS_AST_GEN1(ast)) { - data = dram_reg_info->data; - if (ast->dram_type == AST_DRAM_1Gx16) - data = 0x00000d89; - else if (ast->dram_type == AST_DRAM_1Gx32) - data = 0x00000c8d; - - temp = ast_read32(ast, 0x12070); - temp &= 0xc; - temp <<= 2; - ast_write32(ast, 0x10000 + dram_reg_info->index, data | temp); - } else - ast_write32(ast, 0x10000 + dram_reg_info->index, dram_reg_info->data); - dram_reg_info++; - } - - /* AST 2100/2150 DRAM calibration */ - data = ast_read32(ast, 0x10120); - if (data == 0x5061) { /* 266Mhz */ - data = ast_read32(ast, 0x10004); - if (data & 0x40) - cbrdlli_ast2150(ast, 16); /* 16 bits */ - else - cbrdlli_ast2150(ast, 32); /* 32 bits */ - } - - switch (AST_GEN(ast)) { - case 1: - temp = ast_read32(ast, 0x10140); - ast_write32(ast, 0x10140, temp | 0x40); - break; - case 2: - case 3: - temp = ast_read32(ast, 0x1200c); - ast_write32(ast, 0x1200c, temp & 0xfffffffd); - temp = ast_read32(ast, 0x12040); - ast_write32(ast, 0x12040, temp | 0x40); - break; - default: - break; - } - } - - /* wait ready */ - do { - j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); - } while ((j & 0x40) == 0); -} - int ast_post_gpu(struct ast_device *ast) { int ret; - ast_set_def_ext_reg(ast); - if (AST_GEN(ast) >= 7) { - if (ast->tx_chip == AST_TX_ASTDP) { - ret = ast_dp_launch(ast); - if (ret) - return ret; - } + ret = ast_2600_post(ast); + if (ret) + return ret; } else if (AST_GEN(ast) >= 6) { - if (ast->config_mode == ast_use_p2a) { - ast_post_chip_2500(ast); - } else { - if (ast->tx_chip == AST_TX_SIL164) { - /* Enable DVO */ - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); - } - } + ret = ast_2500_post(ast); + if (ret) + return ret; } else if (AST_GEN(ast) >= 4) { - if (ast->config_mode == ast_use_p2a) { - ast_post_chip_2300(ast); - ast_init_3rdtx(ast); - } else { - if (ast->tx_chip == AST_TX_SIL164) { - /* Enable DVO */ - 
ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); - } - } + ret = ast_2300_post(ast); + if (ret) + return ret; + } else if (AST_GEN(ast) >= 2) { + ret = ast_2100_post(ast); + if (ret) + return ret; } else { - if (ast->config_mode == ast_use_p2a) { - ast_init_dram_reg(ast); - } else { - if (ast->tx_chip == AST_TX_SIL164) { - /* Enable DVO */ - ast_set_index_reg_mask(ast, AST_IO_VGACRI, 0xa3, 0xcf, 0x80); - } - } + ret = ast_2000_post(ast); + if (ret) + return ret; } return 0; } -/* AST 2300 DRAM settings */ -#define AST_DDR3 0 -#define AST_DDR2 1 - -struct ast2300_dram_param { - u32 dram_type; - u32 dram_chipid; - u32 dram_freq; - u32 vram_size; - u32 odt; - u32 wodt; - u32 rodt; - u32 dram_config; - u32 reg_PERIOD; - u32 reg_MADJ; - u32 reg_SADJ; - u32 reg_MRS; - u32 reg_EMRS; - u32 reg_AC1; - u32 reg_AC2; - u32 reg_DQSIC; - u32 reg_DRV; - u32 reg_IOZ; - u32 reg_DQIDLY; - u32 reg_FREQ; - u32 madj_max; - u32 dll2_finetune_step; -}; - -/* - * DQSI DLL CBR Setting - */ -#define CBR_SIZE0 ((1 << 10) - 1) -#define CBR_SIZE1 ((4 << 10) - 1) -#define CBR_SIZE2 ((64 << 10) - 1) -#define CBR_PASSNUM 5 -#define CBR_PASSNUM2 5 -#define CBR_THRESHOLD 10 -#define CBR_THRESHOLD2 10 #define TIMEOUT 5000000 -#define CBR_PATNUM 8 -static const u32 pattern[8] = { - 0xFF00FF00, - 0xCC33CC33, - 0xAA55AA55, - 0x88778877, - 0x92CC4D6E, - 0x543D3CDE, - 0xF1E843C7, - 0x7C61D253 -}; - -static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl) +bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl) { u32 data, timeout; @@ -458,1657 +123,7 @@ static bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl) return true; } -static u32 mmc_test2(struct ast_device *ast, u32 datagen, u8 test_ctl) -{ - u32 data, timeout; - - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - ast_moutdwm(ast, 0x1e6e0070, (datagen << 3) | test_ctl); - timeout = 0; - do { - data = ast_mindwm(ast, 0x1e6e0070) & 0x1000; - if (++timeout > TIMEOUT) { - ast_moutdwm(ast, 0x1e6e0070, 0x0); - return 0xffffffff; - } - } while (!data); - data = ast_mindwm(ast, 0x1e6e0078); - data = (data | (data >> 16)) & 0xffff; - ast_moutdwm(ast, 0x1e6e0070, 0x00000000); - return data; -} - - -static bool mmc_test_burst(struct ast_device *ast, u32 datagen) +bool mmc_test_burst(struct ast_device *ast, u32 datagen) { return mmc_test(ast, datagen, 0xc1); } - -static u32 mmc_test_burst2(struct ast_device *ast, u32 datagen) -{ - return mmc_test2(ast, datagen, 0x41); -} - -static bool mmc_test_single(struct ast_device *ast, u32 datagen) -{ - return mmc_test(ast, datagen, 0xc5); -} - -static u32 mmc_test_single2(struct ast_device *ast, u32 datagen) -{ - return mmc_test2(ast, datagen, 0x05); -} - -static bool mmc_test_single_2500(struct ast_device *ast, u32 datagen) -{ - return mmc_test(ast, datagen, 0x85); -} - -static int cbr_test(struct ast_device *ast) -{ - u32 data; - int i; - data = mmc_test_single2(ast, 0); - if ((data & 0xff) && (data & 0xff00)) - return 0; - for (i = 0; i < 8; i++) { - data = mmc_test_burst2(ast, i); - if ((data & 0xff) && (data & 0xff00)) - return 0; - } - if (!data) - return 3; - else if (data & 0xff) - return 2; - return 1; -} - -static int cbr_scan(struct ast_device *ast) -{ - u32 data, data2, patcnt, loop; - - data2 = 3; - for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { - ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); - for (loop = 0; loop < CBR_PASSNUM2; loop++) { - if ((data = cbr_test(ast)) != 0) { - data2 &= data; - if (!data2) - return 0; - break; - } - } - if (loop == CBR_PASSNUM2) - return 0; - } - 
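The rewritten ast_post_gpu() above reduces to a plain generation ladder over the new per-chip entry points. An equivalent table form, purely illustrative should more generations accrue (the ast_2x00_post() names are the ones this patch declares in ast_drv.h):

struct ast_post_ent {
	unsigned int min_gen;
	int (*post)(struct ast_device *ast);
};

static const struct ast_post_ent ast_post_table[] = {
	{ 7, ast_2600_post },
	{ 6, ast_2500_post },
	{ 4, ast_2300_post },
	{ 2, ast_2100_post },
	{ 0, ast_2000_post },	/* Gen1 fallback */
};

static int ast_post_gpu_table(struct ast_device *ast)
{
	size_t i;

	/* first entry whose minimum generation the device meets wins */
	for (i = 0; i < ARRAY_SIZE(ast_post_table); i++)
		if (AST_GEN(ast) >= ast_post_table[i].min_gen)
			return ast_post_table[i].post(ast);
	return 0;
}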
return data2; -} - -static u32 cbr_test2(struct ast_device *ast) -{ - u32 data; - - data = mmc_test_burst2(ast, 0); - if (data == 0xffff) - return 0; - data |= mmc_test_single2(ast, 0); - if (data == 0xffff) - return 0; - - return ~data & 0xffff; -} - -static u32 cbr_scan2(struct ast_device *ast) -{ - u32 data, data2, patcnt, loop; - - data2 = 0xffff; - for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { - ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); - for (loop = 0; loop < CBR_PASSNUM2; loop++) { - if ((data = cbr_test2(ast)) != 0) { - data2 &= data; - if (!data2) - return 0; - break; - } - } - if (loop == CBR_PASSNUM2) - return 0; - } - return data2; -} - -static bool cbr_test3(struct ast_device *ast) -{ - if (!mmc_test_burst(ast, 0)) - return false; - if (!mmc_test_single(ast, 0)) - return false; - return true; -} - -static bool cbr_scan3(struct ast_device *ast) -{ - u32 patcnt, loop; - - for (patcnt = 0; patcnt < CBR_PATNUM; patcnt++) { - ast_moutdwm(ast, 0x1e6e007c, pattern[patcnt]); - for (loop = 0; loop < 2; loop++) { - if (cbr_test3(ast)) - break; - } - if (loop == 2) - return false; - } - return true; -} - -static bool finetuneDQI_L(struct ast_device *ast, struct ast2300_dram_param *param) -{ - u32 gold_sadj[2], dllmin[16], dllmax[16], dlli, data, cnt, mask, passcnt, retry = 0; - bool status = false; -FINETUNE_START: - for (cnt = 0; cnt < 16; cnt++) { - dllmin[cnt] = 0xff; - dllmax[cnt] = 0x0; - } - passcnt = 0; - for (dlli = 0; dlli < 76; dlli++) { - ast_moutdwm(ast, 0x1E6E0068, 0x00001400 | (dlli << 16) | (dlli << 24)); - ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE1); - data = cbr_scan2(ast); - if (data != 0) { - mask = 0x00010001; - for (cnt = 0; cnt < 16; cnt++) { - if (data & mask) { - if (dllmin[cnt] > dlli) { - dllmin[cnt] = dlli; - } - if (dllmax[cnt] < dlli) { - dllmax[cnt] = dlli; - } - } - mask <<= 1; - } - passcnt++; - } else if (passcnt >= CBR_THRESHOLD2) { - break; - } - } - gold_sadj[0] = 0x0; - passcnt = 0; - for (cnt = 0; cnt < 16; cnt++) { - if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { - gold_sadj[0] += dllmin[cnt]; - passcnt++; - } - } - if (retry++ > 10) - goto FINETUNE_DONE; - if (passcnt != 16) { - goto FINETUNE_START; - } - status = true; -FINETUNE_DONE: - gold_sadj[0] = gold_sadj[0] >> 4; - gold_sadj[1] = gold_sadj[0]; - - data = 0; - for (cnt = 0; cnt < 8; cnt++) { - data >>= 3; - if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { - dlli = dllmin[cnt]; - if (gold_sadj[0] >= dlli) { - dlli = ((gold_sadj[0] - dlli) * 19) >> 5; - if (dlli > 3) { - dlli = 3; - } - } else { - dlli = ((dlli - gold_sadj[0]) * 19) >> 5; - if (dlli > 4) { - dlli = 4; - } - dlli = (8 - dlli) & 0x7; - } - data |= dlli << 21; - } - } - ast_moutdwm(ast, 0x1E6E0080, data); - - data = 0; - for (cnt = 8; cnt < 16; cnt++) { - data >>= 3; - if ((dllmax[cnt] > dllmin[cnt]) && ((dllmax[cnt] - dllmin[cnt]) >= CBR_THRESHOLD2)) { - dlli = dllmin[cnt]; - if (gold_sadj[1] >= dlli) { - dlli = ((gold_sadj[1] - dlli) * 19) >> 5; - if (dlli > 3) { - dlli = 3; - } else { - dlli = (dlli - 1) & 0x7; - } - } else { - dlli = ((dlli - gold_sadj[1]) * 19) >> 5; - dlli += 1; - if (dlli > 4) { - dlli = 4; - } - dlli = (8 - dlli) & 0x7; - } - data |= dlli << 21; - } - } - ast_moutdwm(ast, 0x1E6E0084, data); - return status; -} /* finetuneDQI_L */ - -static void finetuneDQSI(struct ast_device *ast) -{ - u32 dlli, dqsip, dqidly; - u32 reg_mcr18, reg_mcr0c, passcnt[2], diff; - u32 g_dqidly, g_dqsip, g_margin, g_side; - u16 pass[32][2][2]; - char 
tag[2][76]; - - /* Disable DQI CBR */ - reg_mcr0c = ast_mindwm(ast, 0x1E6E000C); - reg_mcr18 = ast_mindwm(ast, 0x1E6E0018); - reg_mcr18 &= 0x0000ffff; - ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); - - for (dlli = 0; dlli < 76; dlli++) { - tag[0][dlli] = 0x0; - tag[1][dlli] = 0x0; - } - for (dqidly = 0; dqidly < 32; dqidly++) { - pass[dqidly][0][0] = 0xff; - pass[dqidly][0][1] = 0x0; - pass[dqidly][1][0] = 0xff; - pass[dqidly][1][1] = 0x0; - } - for (dqidly = 0; dqidly < 32; dqidly++) { - passcnt[0] = passcnt[1] = 0; - for (dqsip = 0; dqsip < 2; dqsip++) { - ast_moutdwm(ast, 0x1E6E000C, 0); - ast_moutdwm(ast, 0x1E6E0018, reg_mcr18 | (dqidly << 16) | (dqsip << 23)); - ast_moutdwm(ast, 0x1E6E000C, reg_mcr0c); - for (dlli = 0; dlli < 76; dlli++) { - ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24)); - ast_moutdwm(ast, 0x1E6E0070, 0); - ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE0); - if (cbr_scan3(ast)) { - if (dlli == 0) - break; - passcnt[dqsip]++; - tag[dqsip][dlli] = 'P'; - if (dlli < pass[dqidly][dqsip][0]) - pass[dqidly][dqsip][0] = (u16) dlli; - if (dlli > pass[dqidly][dqsip][1]) - pass[dqidly][dqsip][1] = (u16) dlli; - } else if (passcnt[dqsip] >= 5) - break; - else { - pass[dqidly][dqsip][0] = 0xff; - pass[dqidly][dqsip][1] = 0x0; - } - } - } - if (passcnt[0] == 0 && passcnt[1] == 0) - dqidly++; - } - /* Search margin */ - g_dqidly = g_dqsip = g_margin = g_side = 0; - - for (dqidly = 0; dqidly < 32; dqidly++) { - for (dqsip = 0; dqsip < 2; dqsip++) { - if (pass[dqidly][dqsip][0] > pass[dqidly][dqsip][1]) - continue; - diff = pass[dqidly][dqsip][1] - pass[dqidly][dqsip][0]; - if ((diff+2) < g_margin) - continue; - passcnt[0] = passcnt[1] = 0; - for (dlli = pass[dqidly][dqsip][0]; dlli > 0 && tag[dqsip][dlli] != 0; dlli--, passcnt[0]++); - for (dlli = pass[dqidly][dqsip][1]; dlli < 76 && tag[dqsip][dlli] != 0; dlli++, passcnt[1]++); - if (passcnt[0] > passcnt[1]) - passcnt[0] = passcnt[1]; - passcnt[1] = 0; - if (passcnt[0] > g_side) - passcnt[1] = passcnt[0] - g_side; - if (diff > (g_margin+1) && (passcnt[1] > 0 || passcnt[0] > 8)) { - g_margin = diff; - g_dqidly = dqidly; - g_dqsip = dqsip; - g_side = passcnt[0]; - } else if (passcnt[1] > 1 && g_side < 8) { - if (diff > g_margin) - g_margin = diff; - g_dqidly = dqidly; - g_dqsip = dqsip; - g_side = passcnt[0]; - } - } - } - reg_mcr18 = reg_mcr18 | (g_dqidly << 16) | (g_dqsip << 23); - ast_moutdwm(ast, 0x1E6E0018, reg_mcr18); - -} -static bool cbr_dll2(struct ast_device *ast, struct ast2300_dram_param *param) -{ - u32 dllmin[2], dllmax[2], dlli, data, passcnt, retry = 0; - bool status = false; - - finetuneDQSI(ast); - if (finetuneDQI_L(ast, param) == false) - return status; - -CBR_START2: - dllmin[0] = dllmin[1] = 0xff; - dllmax[0] = dllmax[1] = 0x0; - passcnt = 0; - for (dlli = 0; dlli < 76; dlli++) { - ast_moutdwm(ast, 0x1E6E0068, 0x00001300 | (dlli << 16) | (dlli << 24)); - ast_moutdwm(ast, 0x1E6E0074, CBR_SIZE2); - data = cbr_scan(ast); - if (data != 0) { - if (data & 0x1) { - if (dllmin[0] > dlli) { - dllmin[0] = dlli; - } - if (dllmax[0] < dlli) { - dllmax[0] = dlli; - } - } - if (data & 0x2) { - if (dllmin[1] > dlli) { - dllmin[1] = dlli; - } - if (dllmax[1] < dlli) { - dllmax[1] = dlli; - } - } - passcnt++; - } else if (passcnt >= CBR_THRESHOLD) { - break; - } - } - if (retry++ > 10) - goto CBR_DONE2; - if (dllmax[0] == 0 || (dllmax[0]-dllmin[0]) < CBR_THRESHOLD) { - goto CBR_START2; - } - if (dllmax[1] == 0 || (dllmax[1]-dllmin[1]) < CBR_THRESHOLD) { - goto CBR_START2; - } - status = true; -CBR_DONE2: - dlli = 
(dllmin[1] + dllmax[1]) >> 1; - dlli <<= 8; - dlli += (dllmin[0] + dllmax[0]) >> 1; - ast_moutdwm(ast, 0x1E6E0068, ast_mindwm(ast, 0x1E720058) | (dlli << 16)); - return status; -} /* CBRDLL2 */ - -static void get_ddr3_info(struct ast_device *ast, struct ast2300_dram_param *param) -{ - u32 trap, trap_AC2, trap_MRS; - - ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); - - /* Ger trap info */ - trap = (ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3; - trap_AC2 = 0x00020000 + (trap << 16); - trap_AC2 |= 0x00300000 + ((trap & 0x2) << 19); - trap_MRS = 0x00000010 + (trap << 4); - trap_MRS |= ((trap & 0x2) << 18); - - param->reg_MADJ = 0x00034C4C; - param->reg_SADJ = 0x00001800; - param->reg_DRV = 0x000000F0; - param->reg_PERIOD = param->dram_freq; - param->rodt = 0; - - switch (param->dram_freq) { - case 336: - ast_moutdwm(ast, 0x1E6E2020, 0x0190); - param->wodt = 0; - param->reg_AC1 = 0x22202725; - param->reg_AC2 = 0xAA007613 | trap_AC2; - param->reg_DQSIC = 0x000000BA; - param->reg_MRS = 0x04001400 | trap_MRS; - param->reg_EMRS = 0x00000000; - param->reg_IOZ = 0x00000023; - param->reg_DQIDLY = 0x00000074; - param->reg_FREQ = 0x00004DC0; - param->madj_max = 96; - param->dll2_finetune_step = 3; - switch (param->dram_chipid) { - default: - case AST_DRAM_512Mx16: - case AST_DRAM_1Gx16: - param->reg_AC2 = 0xAA007613 | trap_AC2; - break; - case AST_DRAM_2Gx16: - param->reg_AC2 = 0xAA00761C | trap_AC2; - break; - case AST_DRAM_4Gx16: - param->reg_AC2 = 0xAA007636 | trap_AC2; - break; - } - break; - default: - case 396: - ast_moutdwm(ast, 0x1E6E2020, 0x03F1); - param->wodt = 1; - param->reg_AC1 = 0x33302825; - param->reg_AC2 = 0xCC009617 | trap_AC2; - param->reg_DQSIC = 0x000000E2; - param->reg_MRS = 0x04001600 | trap_MRS; - param->reg_EMRS = 0x00000000; - param->reg_IOZ = 0x00000034; - param->reg_DRV = 0x000000FA; - param->reg_DQIDLY = 0x00000089; - param->reg_FREQ = 0x00005040; - param->madj_max = 96; - param->dll2_finetune_step = 4; - - switch (param->dram_chipid) { - default: - case AST_DRAM_512Mx16: - case AST_DRAM_1Gx16: - param->reg_AC2 = 0xCC009617 | trap_AC2; - break; - case AST_DRAM_2Gx16: - param->reg_AC2 = 0xCC009622 | trap_AC2; - break; - case AST_DRAM_4Gx16: - param->reg_AC2 = 0xCC00963F | trap_AC2; - break; - } - break; - - case 408: - ast_moutdwm(ast, 0x1E6E2020, 0x01F0); - param->wodt = 1; - param->reg_AC1 = 0x33302825; - param->reg_AC2 = 0xCC009617 | trap_AC2; - param->reg_DQSIC = 0x000000E2; - param->reg_MRS = 0x04001600 | trap_MRS; - param->reg_EMRS = 0x00000000; - param->reg_IOZ = 0x00000023; - param->reg_DRV = 0x000000FA; - param->reg_DQIDLY = 0x00000089; - param->reg_FREQ = 0x000050C0; - param->madj_max = 96; - param->dll2_finetune_step = 4; - - switch (param->dram_chipid) { - default: - case AST_DRAM_512Mx16: - case AST_DRAM_1Gx16: - param->reg_AC2 = 0xCC009617 | trap_AC2; - break; - case AST_DRAM_2Gx16: - param->reg_AC2 = 0xCC009622 | trap_AC2; - break; - case AST_DRAM_4Gx16: - param->reg_AC2 = 0xCC00963F | trap_AC2; - break; - } - - break; - case 456: - ast_moutdwm(ast, 0x1E6E2020, 0x0230); - param->wodt = 0; - param->reg_AC1 = 0x33302926; - param->reg_AC2 = 0xCD44961A; - param->reg_DQSIC = 0x000000FC; - param->reg_MRS = 0x00081830; - param->reg_EMRS = 0x00000000; - param->reg_IOZ = 0x00000045; - param->reg_DQIDLY = 0x00000097; - param->reg_FREQ = 0x000052C0; - param->madj_max = 88; - param->dll2_finetune_step = 4; - break; - case 504: - ast_moutdwm(ast, 0x1E6E2020, 0x0270); - param->wodt = 1; - param->reg_AC1 = 0x33302926; - param->reg_AC2 = 0xDE44A61D; - param->reg_DQSIC = 0x00000117; - 
param->reg_MRS = 0x00081A30; - param->reg_EMRS = 0x00000000; - param->reg_IOZ = 0x070000BB; - param->reg_DQIDLY = 0x000000A0; - param->reg_FREQ = 0x000054C0; - param->madj_max = 79; - param->dll2_finetune_step = 4; - break; - case 528: - ast_moutdwm(ast, 0x1E6E2020, 0x0290); - param->wodt = 1; - param->rodt = 1; - param->reg_AC1 = 0x33302926; - param->reg_AC2 = 0xEF44B61E; - param->reg_DQSIC = 0x00000125; - param->reg_MRS = 0x00081A30; - param->reg_EMRS = 0x00000040; - param->reg_DRV = 0x000000F5; - param->reg_IOZ = 0x00000023; - param->reg_DQIDLY = 0x00000088; - param->reg_FREQ = 0x000055C0; - param->madj_max = 76; - param->dll2_finetune_step = 3; - break; - case 576: - ast_moutdwm(ast, 0x1E6E2020, 0x0140); - param->reg_MADJ = 0x00136868; - param->reg_SADJ = 0x00004534; - param->wodt = 1; - param->rodt = 1; - param->reg_AC1 = 0x33302A37; - param->reg_AC2 = 0xEF56B61E; - param->reg_DQSIC = 0x0000013F; - param->reg_MRS = 0x00101A50; - param->reg_EMRS = 0x00000040; - param->reg_DRV = 0x000000FA; - param->reg_IOZ = 0x00000023; - param->reg_DQIDLY = 0x00000078; - param->reg_FREQ = 0x000057C0; - param->madj_max = 136; - param->dll2_finetune_step = 3; - break; - case 600: - ast_moutdwm(ast, 0x1E6E2020, 0x02E1); - param->reg_MADJ = 0x00136868; - param->reg_SADJ = 0x00004534; - param->wodt = 1; - param->rodt = 1; - param->reg_AC1 = 0x32302A37; - param->reg_AC2 = 0xDF56B61F; - param->reg_DQSIC = 0x0000014D; - param->reg_MRS = 0x00101A50; - param->reg_EMRS = 0x00000004; - param->reg_DRV = 0x000000F5; - param->reg_IOZ = 0x00000023; - param->reg_DQIDLY = 0x00000078; - param->reg_FREQ = 0x000058C0; - param->madj_max = 132; - param->dll2_finetune_step = 3; - break; - case 624: - ast_moutdwm(ast, 0x1E6E2020, 0x0160); - param->reg_MADJ = 0x00136868; - param->reg_SADJ = 0x00004534; - param->wodt = 1; - param->rodt = 1; - param->reg_AC1 = 0x32302A37; - param->reg_AC2 = 0xEF56B621; - param->reg_DQSIC = 0x0000015A; - param->reg_MRS = 0x02101A50; - param->reg_EMRS = 0x00000004; - param->reg_DRV = 0x000000F5; - param->reg_IOZ = 0x00000034; - param->reg_DQIDLY = 0x00000078; - param->reg_FREQ = 0x000059C0; - param->madj_max = 128; - param->dll2_finetune_step = 3; - break; - } /* switch freq */ - - switch (param->dram_chipid) { - case AST_DRAM_512Mx16: - param->dram_config = 0x130; - break; - default: - case AST_DRAM_1Gx16: - param->dram_config = 0x131; - break; - case AST_DRAM_2Gx16: - param->dram_config = 0x132; - break; - case AST_DRAM_4Gx16: - param->dram_config = 0x133; - break; - } /* switch size */ - - switch (param->vram_size) { - default: - case SZ_8M: - param->dram_config |= 0x00; - break; - case SZ_16M: - param->dram_config |= 0x04; - break; - case SZ_32M: - param->dram_config |= 0x08; - break; - case SZ_64M: - param->dram_config |= 0x0c; - break; - } - -} - -static void ddr3_init(struct ast_device *ast, struct ast2300_dram_param *param) -{ - u32 data, data2, retry = 0; - -ddr3_init_start: - ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); - ast_moutdwm(ast, 0x1E6E0018, 0x00000100); - ast_moutdwm(ast, 0x1E6E0024, 0x00000000); - ast_moutdwm(ast, 0x1E6E0034, 0x00000000); - udelay(10); - ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ); - ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ); - udelay(10); - ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); - udelay(10); - - ast_moutdwm(ast, 0x1E6E0004, param->dram_config); - ast_moutdwm(ast, 0x1E6E0008, 0x90040f); - ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1); - ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2); - ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); - 
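/*
 * [Editorial note, not part of the patch] For readers tracing ddr3_init():
 * the writes above reset the controller behind what looks like its
 * protection key (0xFC600309), program the master/slave clock adjustments
 * (MCR64/MCR68 from reg_MADJ/reg_SADJ), and load the per-frequency AC
 * timing and DQS input-control values that get_ddr3_info() selected. The
 * writes that follow zero the DQ fine-tune delays (MCR80/MCR84), busy-wait
 * for MCLK2X to lock to MCLK, issue the DDR3 mode-register-set commands,
 * and finally hand over to cbr_dll2() for the DQS/DQI delay calibration
 * implemented by the helpers above.
 */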
ast_moutdwm(ast, 0x1E6E0080, 0x00000000); - ast_moutdwm(ast, 0x1E6E0084, 0x00000000); - ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); - ast_moutdwm(ast, 0x1E6E0018, 0x4000A170); - ast_moutdwm(ast, 0x1E6E0018, 0x00002370); - ast_moutdwm(ast, 0x1E6E0038, 0x00000000); - ast_moutdwm(ast, 0x1E6E0040, 0xFF444444); - ast_moutdwm(ast, 0x1E6E0044, 0x22222222); - ast_moutdwm(ast, 0x1E6E0048, 0x22222222); - ast_moutdwm(ast, 0x1E6E004C, 0x00000002); - ast_moutdwm(ast, 0x1E6E0050, 0x80000000); - ast_moutdwm(ast, 0x1E6E0050, 0x00000000); - ast_moutdwm(ast, 0x1E6E0054, 0); - ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV); - ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ); - ast_moutdwm(ast, 0x1E6E0070, 0x00000000); - ast_moutdwm(ast, 0x1E6E0074, 0x00000000); - ast_moutdwm(ast, 0x1E6E0078, 0x00000000); - ast_moutdwm(ast, 0x1E6E007C, 0x00000000); - /* Wait MCLK2X lock to MCLK */ - do { - data = ast_mindwm(ast, 0x1E6E001C); - } while (!(data & 0x08000000)); - data = ast_mindwm(ast, 0x1E6E001C); - data = (data >> 8) & 0xff; - while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { - data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; - if ((data2 & 0xff) > param->madj_max) { - break; - } - ast_moutdwm(ast, 0x1E6E0064, data2); - if (data2 & 0x00100000) { - data2 = ((data2 & 0xff) >> 3) + 3; - } else { - data2 = ((data2 & 0xff) >> 2) + 5; - } - data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff; - data2 += data & 0xff; - data = data | (data2 << 8); - ast_moutdwm(ast, 0x1E6E0068, data); - udelay(10); - ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000); - udelay(10); - data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff; - ast_moutdwm(ast, 0x1E6E0018, data); - data = data | 0x200; - ast_moutdwm(ast, 0x1E6E0018, data); - do { - data = ast_mindwm(ast, 0x1E6E001C); - } while (!(data & 0x08000000)); - - data = ast_mindwm(ast, 0x1E6E001C); - data = (data >> 8) & 0xff; - } - ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0068) & 0xffff); - data = ast_mindwm(ast, 0x1E6E0018) | 0xC00; - ast_moutdwm(ast, 0x1E6E0018, data); - - ast_moutdwm(ast, 0x1E6E0034, 0x00000001); - ast_moutdwm(ast, 0x1E6E000C, 0x00000040); - udelay(50); - /* Mode Register Setting */ - ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); - ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); - ast_moutdwm(ast, 0x1E6E0028, 0x00000005); - ast_moutdwm(ast, 0x1E6E0028, 0x00000007); - ast_moutdwm(ast, 0x1E6E0028, 0x00000003); - ast_moutdwm(ast, 0x1E6E0028, 0x00000001); - ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS); - ast_moutdwm(ast, 0x1E6E000C, 0x00005C08); - ast_moutdwm(ast, 0x1E6E0028, 0x00000001); - - ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); - data = 0; - if (param->wodt) { - data = 0x300; - } - if (param->rodt) { - data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); - } - ast_moutdwm(ast, 0x1E6E0034, data | 0x3); - - /* Calibrate the DQSI delay */ - if ((cbr_dll2(ast, param) == false) && (retry++ < 10)) - goto ddr3_init_start; - - ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ); - /* ECC Memory Initialization */ -#ifdef ECC - ast_moutdwm(ast, 0x1E6E007C, 0x00000000); - ast_moutdwm(ast, 0x1E6E0070, 0x221); - do { - data = ast_mindwm(ast, 0x1E6E0070); - } while (!(data & 0x00001000)); - ast_moutdwm(ast, 0x1E6E0070, 0x00000000); - ast_moutdwm(ast, 0x1E6E0050, 0x80000000); - ast_moutdwm(ast, 0x1E6E0050, 0x00000000); -#endif - - -} - -static void get_ddr2_info(struct ast_device *ast, struct ast2300_dram_param *param) -{ - u32 trap, trap_AC2, trap_MRS; - - ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); - - /* Ger trap info */ - trap = 
(ast_mindwm(ast, 0x1E6E2070) >> 25) & 0x3; - trap_AC2 = (trap << 20) | (trap << 16); - trap_AC2 += 0x00110000; - trap_MRS = 0x00000040 | (trap << 4); - - - param->reg_MADJ = 0x00034C4C; - param->reg_SADJ = 0x00001800; - param->reg_DRV = 0x000000F0; - param->reg_PERIOD = param->dram_freq; - param->rodt = 0; - - switch (param->dram_freq) { - case 264: - ast_moutdwm(ast, 0x1E6E2020, 0x0130); - param->wodt = 0; - param->reg_AC1 = 0x11101513; - param->reg_AC2 = 0x78117011; - param->reg_DQSIC = 0x00000092; - param->reg_MRS = 0x00000842; - param->reg_EMRS = 0x00000000; - param->reg_DRV = 0x000000F0; - param->reg_IOZ = 0x00000034; - param->reg_DQIDLY = 0x0000005A; - param->reg_FREQ = 0x00004AC0; - param->madj_max = 138; - param->dll2_finetune_step = 3; - break; - case 336: - ast_moutdwm(ast, 0x1E6E2020, 0x0190); - param->wodt = 1; - param->reg_AC1 = 0x22202613; - param->reg_AC2 = 0xAA009016 | trap_AC2; - param->reg_DQSIC = 0x000000BA; - param->reg_MRS = 0x00000A02 | trap_MRS; - param->reg_EMRS = 0x00000040; - param->reg_DRV = 0x000000FA; - param->reg_IOZ = 0x00000034; - param->reg_DQIDLY = 0x00000074; - param->reg_FREQ = 0x00004DC0; - param->madj_max = 96; - param->dll2_finetune_step = 3; - switch (param->dram_chipid) { - default: - case AST_DRAM_512Mx16: - param->reg_AC2 = 0xAA009012 | trap_AC2; - break; - case AST_DRAM_1Gx16: - param->reg_AC2 = 0xAA009016 | trap_AC2; - break; - case AST_DRAM_2Gx16: - param->reg_AC2 = 0xAA009023 | trap_AC2; - break; - case AST_DRAM_4Gx16: - param->reg_AC2 = 0xAA00903B | trap_AC2; - break; - } - break; - default: - case 396: - ast_moutdwm(ast, 0x1E6E2020, 0x03F1); - param->wodt = 1; - param->rodt = 0; - param->reg_AC1 = 0x33302714; - param->reg_AC2 = 0xCC00B01B | trap_AC2; - param->reg_DQSIC = 0x000000E2; - param->reg_MRS = 0x00000C02 | trap_MRS; - param->reg_EMRS = 0x00000040; - param->reg_DRV = 0x000000FA; - param->reg_IOZ = 0x00000034; - param->reg_DQIDLY = 0x00000089; - param->reg_FREQ = 0x00005040; - param->madj_max = 96; - param->dll2_finetune_step = 4; - - switch (param->dram_chipid) { - case AST_DRAM_512Mx16: - param->reg_AC2 = 0xCC00B016 | trap_AC2; - break; - default: - case AST_DRAM_1Gx16: - param->reg_AC2 = 0xCC00B01B | trap_AC2; - break; - case AST_DRAM_2Gx16: - param->reg_AC2 = 0xCC00B02B | trap_AC2; - break; - case AST_DRAM_4Gx16: - param->reg_AC2 = 0xCC00B03F | trap_AC2; - break; - } - - break; - - case 408: - ast_moutdwm(ast, 0x1E6E2020, 0x01F0); - param->wodt = 1; - param->rodt = 0; - param->reg_AC1 = 0x33302714; - param->reg_AC2 = 0xCC00B01B | trap_AC2; - param->reg_DQSIC = 0x000000E2; - param->reg_MRS = 0x00000C02 | trap_MRS; - param->reg_EMRS = 0x00000040; - param->reg_DRV = 0x000000FA; - param->reg_IOZ = 0x00000034; - param->reg_DQIDLY = 0x00000089; - param->reg_FREQ = 0x000050C0; - param->madj_max = 96; - param->dll2_finetune_step = 4; - - switch (param->dram_chipid) { - case AST_DRAM_512Mx16: - param->reg_AC2 = 0xCC00B016 | trap_AC2; - break; - default: - case AST_DRAM_1Gx16: - param->reg_AC2 = 0xCC00B01B | trap_AC2; - break; - case AST_DRAM_2Gx16: - param->reg_AC2 = 0xCC00B02B | trap_AC2; - break; - case AST_DRAM_4Gx16: - param->reg_AC2 = 0xCC00B03F | trap_AC2; - break; - } - - break; - case 456: - ast_moutdwm(ast, 0x1E6E2020, 0x0230); - param->wodt = 0; - param->reg_AC1 = 0x33302815; - param->reg_AC2 = 0xCD44B01E; - param->reg_DQSIC = 0x000000FC; - param->reg_MRS = 0x00000E72; - param->reg_EMRS = 0x00000000; - param->reg_DRV = 0x00000000; - param->reg_IOZ = 0x00000034; - param->reg_DQIDLY = 0x00000097; - param->reg_FREQ = 0x000052C0; - 
param->madj_max = 88; - param->dll2_finetune_step = 3; - break; - case 504: - ast_moutdwm(ast, 0x1E6E2020, 0x0261); - param->wodt = 1; - param->rodt = 1; - param->reg_AC1 = 0x33302815; - param->reg_AC2 = 0xDE44C022; - param->reg_DQSIC = 0x00000117; - param->reg_MRS = 0x00000E72; - param->reg_EMRS = 0x00000040; - param->reg_DRV = 0x0000000A; - param->reg_IOZ = 0x00000045; - param->reg_DQIDLY = 0x000000A0; - param->reg_FREQ = 0x000054C0; - param->madj_max = 79; - param->dll2_finetune_step = 3; - break; - case 528: - ast_moutdwm(ast, 0x1E6E2020, 0x0120); - param->wodt = 1; - param->rodt = 1; - param->reg_AC1 = 0x33302815; - param->reg_AC2 = 0xEF44D024; - param->reg_DQSIC = 0x00000125; - param->reg_MRS = 0x00000E72; - param->reg_EMRS = 0x00000004; - param->reg_DRV = 0x000000F9; - param->reg_IOZ = 0x00000045; - param->reg_DQIDLY = 0x000000A7; - param->reg_FREQ = 0x000055C0; - param->madj_max = 76; - param->dll2_finetune_step = 3; - break; - case 552: - ast_moutdwm(ast, 0x1E6E2020, 0x02A1); - param->wodt = 1; - param->rodt = 1; - param->reg_AC1 = 0x43402915; - param->reg_AC2 = 0xFF44E025; - param->reg_DQSIC = 0x00000132; - param->reg_MRS = 0x00000E72; - param->reg_EMRS = 0x00000040; - param->reg_DRV = 0x0000000A; - param->reg_IOZ = 0x00000045; - param->reg_DQIDLY = 0x000000AD; - param->reg_FREQ = 0x000056C0; - param->madj_max = 76; - param->dll2_finetune_step = 3; - break; - case 576: - ast_moutdwm(ast, 0x1E6E2020, 0x0140); - param->wodt = 1; - param->rodt = 1; - param->reg_AC1 = 0x43402915; - param->reg_AC2 = 0xFF44E027; - param->reg_DQSIC = 0x0000013F; - param->reg_MRS = 0x00000E72; - param->reg_EMRS = 0x00000004; - param->reg_DRV = 0x000000F5; - param->reg_IOZ = 0x00000045; - param->reg_DQIDLY = 0x000000B3; - param->reg_FREQ = 0x000057C0; - param->madj_max = 76; - param->dll2_finetune_step = 3; - break; - } - - switch (param->dram_chipid) { - case AST_DRAM_512Mx16: - param->dram_config = 0x100; - break; - default: - case AST_DRAM_1Gx16: - param->dram_config = 0x121; - break; - case AST_DRAM_2Gx16: - param->dram_config = 0x122; - break; - case AST_DRAM_4Gx16: - param->dram_config = 0x123; - break; - } /* switch size */ - - switch (param->vram_size) { - default: - case SZ_8M: - param->dram_config |= 0x00; - break; - case SZ_16M: - param->dram_config |= 0x04; - break; - case SZ_32M: - param->dram_config |= 0x08; - break; - case SZ_64M: - param->dram_config |= 0x0c; - break; - } -} - -static void ddr2_init(struct ast_device *ast, struct ast2300_dram_param *param) -{ - u32 data, data2, retry = 0; - -ddr2_init_start: - ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); - ast_moutdwm(ast, 0x1E6E0018, 0x00000100); - ast_moutdwm(ast, 0x1E6E0024, 0x00000000); - ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ); - ast_moutdwm(ast, 0x1E6E0068, param->reg_SADJ); - udelay(10); - ast_moutdwm(ast, 0x1E6E0064, param->reg_MADJ | 0xC0000); - udelay(10); - - ast_moutdwm(ast, 0x1E6E0004, param->dram_config); - ast_moutdwm(ast, 0x1E6E0008, 0x90040f); - ast_moutdwm(ast, 0x1E6E0010, param->reg_AC1); - ast_moutdwm(ast, 0x1E6E0014, param->reg_AC2); - ast_moutdwm(ast, 0x1E6E0020, param->reg_DQSIC); - ast_moutdwm(ast, 0x1E6E0080, 0x00000000); - ast_moutdwm(ast, 0x1E6E0084, 0x00000000); - ast_moutdwm(ast, 0x1E6E0088, param->reg_DQIDLY); - ast_moutdwm(ast, 0x1E6E0018, 0x4000A130); - ast_moutdwm(ast, 0x1E6E0018, 0x00002330); - ast_moutdwm(ast, 0x1E6E0038, 0x00000000); - ast_moutdwm(ast, 0x1E6E0040, 0xFF808000); - ast_moutdwm(ast, 0x1E6E0044, 0x88848466); - ast_moutdwm(ast, 0x1E6E0048, 0x44440008); - ast_moutdwm(ast, 0x1E6E004C, 
0x00000000); - ast_moutdwm(ast, 0x1E6E0050, 0x80000000); - ast_moutdwm(ast, 0x1E6E0050, 0x00000000); - ast_moutdwm(ast, 0x1E6E0054, 0); - ast_moutdwm(ast, 0x1E6E0060, param->reg_DRV); - ast_moutdwm(ast, 0x1E6E006C, param->reg_IOZ); - ast_moutdwm(ast, 0x1E6E0070, 0x00000000); - ast_moutdwm(ast, 0x1E6E0074, 0x00000000); - ast_moutdwm(ast, 0x1E6E0078, 0x00000000); - ast_moutdwm(ast, 0x1E6E007C, 0x00000000); - - /* Wait MCLK2X lock to MCLK */ - do { - data = ast_mindwm(ast, 0x1E6E001C); - } while (!(data & 0x08000000)); - data = ast_mindwm(ast, 0x1E6E001C); - data = (data >> 8) & 0xff; - while ((data & 0x08) || ((data & 0x7) < 2) || (data < 4)) { - data2 = (ast_mindwm(ast, 0x1E6E0064) & 0xfff3ffff) + 4; - if ((data2 & 0xff) > param->madj_max) { - break; - } - ast_moutdwm(ast, 0x1E6E0064, data2); - if (data2 & 0x00100000) { - data2 = ((data2 & 0xff) >> 3) + 3; - } else { - data2 = ((data2 & 0xff) >> 2) + 5; - } - data = ast_mindwm(ast, 0x1E6E0068) & 0xffff00ff; - data2 += data & 0xff; - data = data | (data2 << 8); - ast_moutdwm(ast, 0x1E6E0068, data); - udelay(10); - ast_moutdwm(ast, 0x1E6E0064, ast_mindwm(ast, 0x1E6E0064) | 0xC0000); - udelay(10); - data = ast_mindwm(ast, 0x1E6E0018) & 0xfffff1ff; - ast_moutdwm(ast, 0x1E6E0018, data); - data = data | 0x200; - ast_moutdwm(ast, 0x1E6E0018, data); - do { - data = ast_mindwm(ast, 0x1E6E001C); - } while (!(data & 0x08000000)); - - data = ast_mindwm(ast, 0x1E6E001C); - data = (data >> 8) & 0xff; - } - ast_moutdwm(ast, 0x1E720058, ast_mindwm(ast, 0x1E6E0008) & 0xffff); - data = ast_mindwm(ast, 0x1E6E0018) | 0xC00; - ast_moutdwm(ast, 0x1E6E0018, data); - - ast_moutdwm(ast, 0x1E6E0034, 0x00000001); - ast_moutdwm(ast, 0x1E6E000C, 0x00000000); - udelay(50); - /* Mode Register Setting */ - ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS | 0x100); - ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); - ast_moutdwm(ast, 0x1E6E0028, 0x00000005); - ast_moutdwm(ast, 0x1E6E0028, 0x00000007); - ast_moutdwm(ast, 0x1E6E0028, 0x00000003); - ast_moutdwm(ast, 0x1E6E0028, 0x00000001); - - ast_moutdwm(ast, 0x1E6E000C, 0x00005C08); - ast_moutdwm(ast, 0x1E6E002C, param->reg_MRS); - ast_moutdwm(ast, 0x1E6E0028, 0x00000001); - ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS | 0x380); - ast_moutdwm(ast, 0x1E6E0028, 0x00000003); - ast_moutdwm(ast, 0x1E6E0030, param->reg_EMRS); - ast_moutdwm(ast, 0x1E6E0028, 0x00000003); - - ast_moutdwm(ast, 0x1E6E000C, 0x7FFF5C01); - data = 0; - if (param->wodt) { - data = 0x500; - } - if (param->rodt) { - data = data | 0x3000 | ((param->reg_AC2 & 0x60000) >> 3); - } - ast_moutdwm(ast, 0x1E6E0034, data | 0x3); - ast_moutdwm(ast, 0x1E6E0120, param->reg_FREQ); - - /* Calibrate the DQSI delay */ - if ((cbr_dll2(ast, param) == false) && (retry++ < 10)) - goto ddr2_init_start; - - /* ECC Memory Initialization */ -#ifdef ECC - ast_moutdwm(ast, 0x1E6E007C, 0x00000000); - ast_moutdwm(ast, 0x1E6E0070, 0x221); - do { - data = ast_mindwm(ast, 0x1E6E0070); - } while (!(data & 0x00001000)); - ast_moutdwm(ast, 0x1E6E0070, 0x00000000); - ast_moutdwm(ast, 0x1E6E0050, 0x80000000); - ast_moutdwm(ast, 0x1E6E0050, 0x00000000); -#endif - -} - -static void ast_post_chip_2300(struct ast_device *ast) -{ - struct ast2300_dram_param param; - u32 temp; - u8 reg; - - reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); - if ((reg & 0x80) == 0) {/* vga only */ - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - ast_write32(ast, 0x12000, 0x1688a8a8); - do { - ; - } while (ast_read32(ast, 0x12000) != 0x1); - - ast_write32(ast, 0x10000, 0xfc600309); - 
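/*
 * [Editorial note, not part of the patch] This block is the same unlock
 * handshake that ast_init_dram_reg() used earlier: the 0xf004/0xf000
 * writes appear to open the P2A window so that MMIO offset 0x10000
 * aliases BMC address 0x1e6e0000 (hence 0x12000 reaches the SCU at
 * 0x1e6e2000). 0x1688a8a8 is the SCU unlock key and 0xfc600309 the
 * memory-controller key; each key write is followed by a busy-wait, as in
 * the do/while poll just below, until the block reports itself unlocked.
 */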
do { - ; - } while (ast_read32(ast, 0x10000) != 0x1); - - /* Slow down CPU/AHB CLK in VGA only mode */ - temp = ast_read32(ast, 0x12008); - temp |= 0x73; - ast_write32(ast, 0x12008, temp); - - param.dram_freq = 396; - param.dram_type = AST_DDR3; - temp = ast_mindwm(ast, 0x1e6e2070); - if (temp & 0x01000000) - param.dram_type = AST_DDR2; - switch (temp & 0x18000000) { - case 0: - param.dram_chipid = AST_DRAM_512Mx16; - break; - default: - case 0x08000000: - param.dram_chipid = AST_DRAM_1Gx16; - break; - case 0x10000000: - param.dram_chipid = AST_DRAM_2Gx16; - break; - case 0x18000000: - param.dram_chipid = AST_DRAM_4Gx16; - break; - } - switch (temp & 0x0c) { - default: - case 0x00: - param.vram_size = SZ_8M; - break; - - case 0x04: - param.vram_size = SZ_16M; - break; - - case 0x08: - param.vram_size = SZ_32M; - break; - - case 0x0c: - param.vram_size = SZ_64M; - break; - } - - if (param.dram_type == AST_DDR3) { - get_ddr3_info(ast, ¶m); - ddr3_init(ast, ¶m); - } else { - get_ddr2_info(ast, ¶m); - ddr2_init(ast, ¶m); - } - - temp = ast_mindwm(ast, 0x1e6e2040); - ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); - } - - /* wait ready */ - do { - reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); - } while ((reg & 0x40) == 0); -} - -static bool cbr_test_2500(struct ast_device *ast) -{ - ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); - ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); - if (!mmc_test_burst(ast, 0)) - return false; - if (!mmc_test_single_2500(ast, 0)) - return false; - return true; -} - -static bool ddr_test_2500(struct ast_device *ast) -{ - ast_moutdwm(ast, 0x1E6E0074, 0x0000FFFF); - ast_moutdwm(ast, 0x1E6E007C, 0xFF00FF00); - if (!mmc_test_burst(ast, 0)) - return false; - if (!mmc_test_burst(ast, 1)) - return false; - if (!mmc_test_burst(ast, 2)) - return false; - if (!mmc_test_burst(ast, 3)) - return false; - if (!mmc_test_single_2500(ast, 0)) - return false; - return true; -} - -static void ddr_init_common_2500(struct ast_device *ast) -{ - ast_moutdwm(ast, 0x1E6E0034, 0x00020080); - ast_moutdwm(ast, 0x1E6E0008, 0x2003000F); - ast_moutdwm(ast, 0x1E6E0038, 0x00000FFF); - ast_moutdwm(ast, 0x1E6E0040, 0x88448844); - ast_moutdwm(ast, 0x1E6E0044, 0x24422288); - ast_moutdwm(ast, 0x1E6E0048, 0x22222222); - ast_moutdwm(ast, 0x1E6E004C, 0x22222222); - ast_moutdwm(ast, 0x1E6E0050, 0x80000000); - ast_moutdwm(ast, 0x1E6E0208, 0x00000000); - ast_moutdwm(ast, 0x1E6E0218, 0x00000000); - ast_moutdwm(ast, 0x1E6E0220, 0x00000000); - ast_moutdwm(ast, 0x1E6E0228, 0x00000000); - ast_moutdwm(ast, 0x1E6E0230, 0x00000000); - ast_moutdwm(ast, 0x1E6E02A8, 0x00000000); - ast_moutdwm(ast, 0x1E6E02B0, 0x00000000); - ast_moutdwm(ast, 0x1E6E0240, 0x86000000); - ast_moutdwm(ast, 0x1E6E0244, 0x00008600); - ast_moutdwm(ast, 0x1E6E0248, 0x80000000); - ast_moutdwm(ast, 0x1E6E024C, 0x80808080); -} - -static void ddr_phy_init_2500(struct ast_device *ast) -{ - u32 data, pass, timecnt; - - pass = 0; - ast_moutdwm(ast, 0x1E6E0060, 0x00000005); - while (!pass) { - for (timecnt = 0; timecnt < TIMEOUT; timecnt++) { - data = ast_mindwm(ast, 0x1E6E0060) & 0x1; - if (!data) - break; - } - if (timecnt != TIMEOUT) { - data = ast_mindwm(ast, 0x1E6E0300) & 0x000A0000; - if (!data) - pass = 1; - } - if (!pass) { - ast_moutdwm(ast, 0x1E6E0060, 0x00000000); - udelay(10); /* delay 10 us */ - ast_moutdwm(ast, 0x1E6E0060, 0x00000005); - } - } - - ast_moutdwm(ast, 0x1E6E0060, 0x00000006); -} - -/* - * Check DRAM Size - * 1Gb : 0x80000000 ~ 0x87FFFFFF - * 2Gb : 0x80000000 ~ 0x8FFFFFFF - * 4Gb : 0x80000000 ~ 0x9FFFFFFF - * 8Gb : 0x80000000 ~ 
0xBFFFFFFF - */ -static void check_dram_size_2500(struct ast_device *ast, u32 tRFC) -{ - u32 reg_04, reg_14; - - reg_04 = ast_mindwm(ast, 0x1E6E0004) & 0xfffffffc; - reg_14 = ast_mindwm(ast, 0x1E6E0014) & 0xffffff00; - - ast_moutdwm(ast, 0xA0100000, 0x41424344); - ast_moutdwm(ast, 0x90100000, 0x35363738); - ast_moutdwm(ast, 0x88100000, 0x292A2B2C); - ast_moutdwm(ast, 0x80100000, 0x1D1E1F10); - - /* Check 8Gbit */ - if (ast_mindwm(ast, 0xA0100000) == 0x41424344) { - reg_04 |= 0x03; - reg_14 |= (tRFC >> 24) & 0xFF; - /* Check 4Gbit */ - } else if (ast_mindwm(ast, 0x90100000) == 0x35363738) { - reg_04 |= 0x02; - reg_14 |= (tRFC >> 16) & 0xFF; - /* Check 2Gbit */ - } else if (ast_mindwm(ast, 0x88100000) == 0x292A2B2C) { - reg_04 |= 0x01; - reg_14 |= (tRFC >> 8) & 0xFF; - } else { - reg_14 |= tRFC & 0xFF; - } - ast_moutdwm(ast, 0x1E6E0004, reg_04); - ast_moutdwm(ast, 0x1E6E0014, reg_14); -} - -static void enable_cache_2500(struct ast_device *ast) -{ - u32 reg_04, data; - - reg_04 = ast_mindwm(ast, 0x1E6E0004); - ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x1000); - - do - data = ast_mindwm(ast, 0x1E6E0004); - while (!(data & 0x80000)); - ast_moutdwm(ast, 0x1E6E0004, reg_04 | 0x400); -} - -static void set_mpll_2500(struct ast_device *ast) -{ - u32 addr, data, param; - - /* Reset MMC */ - ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); - ast_moutdwm(ast, 0x1E6E0034, 0x00020080); - for (addr = 0x1e6e0004; addr < 0x1e6e0090;) { - ast_moutdwm(ast, addr, 0x0); - addr += 4; - } - ast_moutdwm(ast, 0x1E6E0034, 0x00020000); - - ast_moutdwm(ast, 0x1E6E2000, 0x1688A8A8); - data = ast_mindwm(ast, 0x1E6E2070) & 0x00800000; - if (data) { - /* CLKIN = 25MHz */ - param = 0x930023E0; - ast_moutdwm(ast, 0x1E6E2160, 0x00011320); - } else { - /* CLKIN = 24MHz */ - param = 0x93002400; - } - ast_moutdwm(ast, 0x1E6E2020, param); - udelay(100); -} - -static void reset_mmc_2500(struct ast_device *ast) -{ - ast_moutdwm(ast, 0x1E78505C, 0x00000004); - ast_moutdwm(ast, 0x1E785044, 0x00000001); - ast_moutdwm(ast, 0x1E785048, 0x00004755); - ast_moutdwm(ast, 0x1E78504C, 0x00000013); - mdelay(100); - ast_moutdwm(ast, 0x1E785054, 0x00000077); - ast_moutdwm(ast, 0x1E6E0000, 0xFC600309); -} - -static void ddr3_init_2500(struct ast_device *ast, const u32 *ddr_table) -{ - - ast_moutdwm(ast, 0x1E6E0004, 0x00000303); - ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); - ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); - ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); - ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ - ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ - ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ - ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ - - /* DDR PHY Setting */ - ast_moutdwm(ast, 0x1E6E0200, 0x02492AAE); - ast_moutdwm(ast, 0x1E6E0204, 0x00001001); - ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); - ast_moutdwm(ast, 0x1E6E0210, 0x20000000); - ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); - ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); - ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); - ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); - ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); - ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); - ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); - ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); - ast_moutdwm(ast, 0x1E6E0290, 0x00100008); - ast_moutdwm(ast, 0x1E6E02C0, 0x00000006); - - /* Controller Setting */ - ast_moutdwm(ast, 0x1E6E0034, 
0x00020091); - - /* Wait DDR PHY init done */ - ddr_phy_init_2500(ast); - - ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); - ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); - ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); - - check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); - enable_cache_2500(ast); - ast_moutdwm(ast, 0x1E6E001C, 0x00000008); - ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); -} - -static void ddr4_init_2500(struct ast_device *ast, const u32 *ddr_table) -{ - u32 data, data2, pass, retrycnt; - u32 ddr_vref, phy_vref; - u32 min_ddr_vref = 0, min_phy_vref = 0; - u32 max_ddr_vref = 0, max_phy_vref = 0; - - ast_moutdwm(ast, 0x1E6E0004, 0x00000313); - ast_moutdwm(ast, 0x1E6E0010, ddr_table[REGIDX_010]); - ast_moutdwm(ast, 0x1E6E0014, ddr_table[REGIDX_014]); - ast_moutdwm(ast, 0x1E6E0018, ddr_table[REGIDX_018]); - ast_moutdwm(ast, 0x1E6E0020, ddr_table[REGIDX_020]); /* MODEREG4/6 */ - ast_moutdwm(ast, 0x1E6E0024, ddr_table[REGIDX_024]); /* MODEREG5 */ - ast_moutdwm(ast, 0x1E6E002C, ddr_table[REGIDX_02C] | 0x100); /* MODEREG0/2 */ - ast_moutdwm(ast, 0x1E6E0030, ddr_table[REGIDX_030]); /* MODEREG1/3 */ - - /* DDR PHY Setting */ - ast_moutdwm(ast, 0x1E6E0200, 0x42492AAE); - ast_moutdwm(ast, 0x1E6E0204, 0x09002000); - ast_moutdwm(ast, 0x1E6E020C, 0x55E00B0B); - ast_moutdwm(ast, 0x1E6E0210, 0x20000000); - ast_moutdwm(ast, 0x1E6E0214, ddr_table[REGIDX_214]); - ast_moutdwm(ast, 0x1E6E02E0, ddr_table[REGIDX_2E0]); - ast_moutdwm(ast, 0x1E6E02E4, ddr_table[REGIDX_2E4]); - ast_moutdwm(ast, 0x1E6E02E8, ddr_table[REGIDX_2E8]); - ast_moutdwm(ast, 0x1E6E02EC, ddr_table[REGIDX_2EC]); - ast_moutdwm(ast, 0x1E6E02F0, ddr_table[REGIDX_2F0]); - ast_moutdwm(ast, 0x1E6E02F4, ddr_table[REGIDX_2F4]); - ast_moutdwm(ast, 0x1E6E02F8, ddr_table[REGIDX_2F8]); - ast_moutdwm(ast, 0x1E6E0290, 0x00100008); - ast_moutdwm(ast, 0x1E6E02C4, 0x3C183C3C); - ast_moutdwm(ast, 0x1E6E02C8, 0x00631E0E); - - /* Controller Setting */ - ast_moutdwm(ast, 0x1E6E0034, 0x0001A991); - - /* Train PHY Vref first */ - pass = 0; - - for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { - max_phy_vref = 0x0; - pass = 0; - ast_moutdwm(ast, 0x1E6E02C0, 0x00001C06); - for (phy_vref = 0x40; phy_vref < 0x80; phy_vref++) { - ast_moutdwm(ast, 0x1E6E000C, 0x00000000); - ast_moutdwm(ast, 0x1E6E0060, 0x00000000); - ast_moutdwm(ast, 0x1E6E02CC, phy_vref | (phy_vref << 8)); - /* Fire DFI Init */ - ddr_phy_init_2500(ast); - ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); - if (cbr_test_2500(ast)) { - pass++; - data = ast_mindwm(ast, 0x1E6E03D0); - data2 = data >> 8; - data = data & 0xff; - if (data > data2) - data = data2; - if (max_phy_vref < data) { - max_phy_vref = data; - min_phy_vref = phy_vref; - } - } else if (pass > 0) - break; - } - } - ast_moutdwm(ast, 0x1E6E02CC, min_phy_vref | (min_phy_vref << 8)); - - /* Train DDR Vref next */ - pass = 0; - - for (retrycnt = 0; retrycnt < 4 && pass == 0; retrycnt++) { - min_ddr_vref = 0xFF; - max_ddr_vref = 0x0; - pass = 0; - for (ddr_vref = 0x00; ddr_vref < 0x40; ddr_vref++) { - ast_moutdwm(ast, 0x1E6E000C, 0x00000000); - ast_moutdwm(ast, 0x1E6E0060, 0x00000000); - ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); - /* Fire DFI Init */ - ddr_phy_init_2500(ast); - ast_moutdwm(ast, 0x1E6E000C, 0x00005C01); - if (cbr_test_2500(ast)) { - pass++; - if (min_ddr_vref > ddr_vref) - min_ddr_vref = ddr_vref; - if (max_ddr_vref < ddr_vref) - max_ddr_vref = ddr_vref; - } else if (pass != 0) - break; - } - } - - ast_moutdwm(ast, 0x1E6E000C, 0x00000000); - ast_moutdwm(ast, 0x1E6E0060, 0x00000000); - ddr_vref = 
(min_ddr_vref + max_ddr_vref + 1) >> 1; - ast_moutdwm(ast, 0x1E6E02C0, 0x00000006 | (ddr_vref << 8)); - - /* Wait DDR PHY init done */ - ddr_phy_init_2500(ast); - - ast_moutdwm(ast, 0x1E6E0120, ddr_table[REGIDX_PLL]); - ast_moutdwm(ast, 0x1E6E000C, 0x42AA5C81); - ast_moutdwm(ast, 0x1E6E0034, 0x0001AF93); - - check_dram_size_2500(ast, ddr_table[REGIDX_RFC]); - enable_cache_2500(ast); - ast_moutdwm(ast, 0x1E6E001C, 0x00000008); - ast_moutdwm(ast, 0x1E6E0038, 0xFFFFFF00); -} - -static bool ast_dram_init_2500(struct ast_device *ast) -{ - u32 data; - u32 max_tries = 5; - - do { - if (max_tries-- == 0) - return false; - set_mpll_2500(ast); - reset_mmc_2500(ast); - ddr_init_common_2500(ast); - - data = ast_mindwm(ast, 0x1E6E2070); - if (data & 0x01000000) - ddr4_init_2500(ast, ast2500_ddr4_1600_timing_table); - else - ddr3_init_2500(ast, ast2500_ddr3_1600_timing_table); - } while (!ddr_test_2500(ast)); - - ast_moutdwm(ast, 0x1E6E2040, ast_mindwm(ast, 0x1E6E2040) | 0x41); - - /* Patch code */ - data = ast_mindwm(ast, 0x1E6E200C) & 0xF9FFFFFF; - ast_moutdwm(ast, 0x1E6E200C, data | 0x10000000); - - return true; -} - -void ast_patch_ahb_2500(void __iomem *regs) -{ - u32 data; - - /* Clear bus lock condition */ - __ast_moutdwm(regs, 0x1e600000, 0xAEED1A03); - __ast_moutdwm(regs, 0x1e600084, 0x00010000); - __ast_moutdwm(regs, 0x1e600088, 0x00000000); - __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); - - data = __ast_mindwm(regs, 0x1e6e2070); - if (data & 0x08000000) { /* check fast reset */ - /* - * If "Fast restet" is enabled for ARM-ICE debugger, - * then WDT needs to enable, that - * WDT04 is WDT#1 Reload reg. - * WDT08 is WDT#1 counter restart reg to avoid system deadlock - * WDT0C is WDT#1 control reg - * [6:5]:= 01:Full chip - * [4]:= 1:1MHz clock source - * [1]:= 1:WDT will be cleeared and disabled after timeout occurs - * [0]:= 1:WDT enable - */ - __ast_moutdwm(regs, 0x1E785004, 0x00000010); - __ast_moutdwm(regs, 0x1E785008, 0x00004755); - __ast_moutdwm(regs, 0x1E78500c, 0x00000033); - udelay(1000); - } - - do { - __ast_moutdwm(regs, 0x1e6e2000, 0x1688A8A8); - data = __ast_mindwm(regs, 0x1e6e2000); - } while (data != 1); - - __ast_moutdwm(regs, 0x1e6e207c, 0x08000000); /* clear fast reset */ -} - -void ast_post_chip_2500(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - u32 temp; - u8 reg; - - reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); - if ((reg & AST_IO_VGACRD0_VRAM_INIT_STATUS_MASK) == 0) {/* vga only */ - /* Clear bus lock condition */ - ast_patch_ahb_2500(ast->regs); - - /* Disable watchdog */ - ast_moutdwm(ast, 0x1E78502C, 0x00000000); - ast_moutdwm(ast, 0x1E78504C, 0x00000000); - - /* - * Reset USB port to patch USB unknown device issue - * SCU90 is Multi-function Pin Control #5 - * [29]:= 1:Enable USB2.0 Host port#1 (that the mutually shared USB2.0 Hub - * port). - * SCU94 is Multi-function Pin Control #6 - * [14:13]:= 1x:USB2.0 Host2 controller - * SCU70 is Hardware Strap reg - * [23]:= 1:CLKIN is 25MHz and USBCK1 = 24/48 MHz (determined by - * [18]: 0(24)/1(48) MHz) - * SCU7C is Write clear reg to SCU70 - * [23]:= write 1 and then SCU70[23] will be clear as 0b. 
- */ - ast_moutdwm(ast, 0x1E6E2090, 0x20000000); - ast_moutdwm(ast, 0x1E6E2094, 0x00004000); - if (ast_mindwm(ast, 0x1E6E2070) & 0x00800000) { - ast_moutdwm(ast, 0x1E6E207C, 0x00800000); - mdelay(100); - ast_moutdwm(ast, 0x1E6E2070, 0x00800000); - } - /* Modify eSPI reset pin */ - temp = ast_mindwm(ast, 0x1E6E2070); - if (temp & 0x02000000) - ast_moutdwm(ast, 0x1E6E207C, 0x00004000); - - /* Slow down CPU/AHB CLK in VGA only mode */ - temp = ast_read32(ast, 0x12008); - temp |= 0x73; - ast_write32(ast, 0x12008, temp); - - if (!ast_dram_init_2500(ast)) - drm_err(dev, "DRAM init failed !\n"); - - temp = ast_mindwm(ast, 0x1e6e2040); - ast_moutdwm(ast, 0x1e6e2040, temp | 0x40); - } - - /* wait ready */ - do { - reg = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); - } while ((reg & 0x40) == 0); -} diff --git a/drivers/gpu/drm/ast/ast_post.h b/drivers/gpu/drm/ast/ast_post.h new file mode 100644 index 000000000000..aa5d247bebe8 --- /dev/null +++ b/drivers/gpu/drm/ast/ast_post.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: MIT */ + +#ifndef AST_POST_H +#define AST_POST_H + +#include <linux/limits.h> +#include <linux/types.h> + +struct ast_device; + +/* DRAM timing tables */ +struct ast_dramstruct { + u16 index; + u32 data; +}; + +/* hardware fields */ +#define __AST_DRAMSTRUCT_DRAM_TYPE 0x0004 + +/* control commands */ +#define __AST_DRAMSTRUCT_UDELAY 0xff00 +#define __AST_DRAMSTRUCT_INVALID 0xffff + +#define __AST_DRAMSTRUCT_INDEX(_name) \ + (__AST_DRAMSTRUCT_ ## _name) + +#define AST_DRAMSTRUCT_INIT(_name, _value) \ + { __AST_DRAMSTRUCT_INDEX(_name), (_value) } + +#define AST_DRAMSTRUCT_UDELAY(_usecs) \ + AST_DRAMSTRUCT_INIT(UDELAY, _usecs) +#define AST_DRAMSTRUCT_INVALID \ + AST_DRAMSTRUCT_INIT(INVALID, U32_MAX) + +#define AST_DRAMSTRUCT_IS(_entry, _name) \ + ((_entry)->index == __AST_DRAMSTRUCT_INDEX(_name)) + +u32 __ast_mindwm(void __iomem *regs, u32 r); +void __ast_moutdwm(void __iomem *regs, u32 r, u32 v); + +bool mmc_test(struct ast_device *ast, u32 datagen, u8 test_ctl); +bool mmc_test_burst(struct ast_device *ast, u32 datagen); + +/* ast_2000.c */ +void ast_2000_set_def_ext_reg(struct ast_device *ast); + +/* ast_2300.c */ +void ast_2300_set_def_ext_reg(struct ast_device *ast); + +#endif diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index 71bb64e5f481..85ebead9809c 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -399,8 +399,8 @@ static inline struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge) } #ifdef CONFIG_DRM_I2C_ADV7511_CEC -int adv7511_cec_init(struct drm_connector *connector, - struct drm_bridge *bridge); +int adv7511_cec_init(struct drm_bridge *bridge, + struct drm_connector *connector); int adv7511_cec_enable(struct drm_bridge *bridge, bool enable); int adv7511_cec_log_addr(struct drm_bridge *bridge, u8 addr); int adv7511_cec_transmit(struct drm_bridge *bridge, u8 attempts, @@ -424,12 +424,12 @@ int adv7533_attach_dsi(struct adv7511 *adv); int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv); #ifdef CONFIG_DRM_I2C_ADV7511_AUDIO -int adv7511_hdmi_audio_startup(struct drm_connector *connector, - struct drm_bridge *bridge); -void adv7511_hdmi_audio_shutdown(struct drm_connector *connector, - struct drm_bridge *bridge); -int adv7511_hdmi_audio_prepare(struct drm_connector *connector, - struct drm_bridge *bridge, +int adv7511_hdmi_audio_startup(struct drm_bridge *bridge, + struct drm_connector *connector); +void adv7511_hdmi_audio_shutdown(struct 
drm_bridge *bridge, + struct drm_connector *connector); +int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge, + struct drm_connector *connector, struct hdmi_codec_daifmt *fmt, struct hdmi_codec_params *hparms); #else /*CONFIG_DRM_I2C_ADV7511_AUDIO */ diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c index 915c3b967216..766b1c96bc88 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c @@ -55,8 +55,8 @@ static int adv7511_update_cts_n(struct adv7511 *adv7511) return 0; } -int adv7511_hdmi_audio_prepare(struct drm_connector *connector, - struct drm_bridge *bridge, +int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge, + struct drm_connector *connector, struct hdmi_codec_daifmt *fmt, struct hdmi_codec_params *hparms) { @@ -168,8 +168,8 @@ int adv7511_hdmi_audio_prepare(struct drm_connector *connector, return 0; } -int adv7511_hdmi_audio_startup(struct drm_connector *connector, - struct drm_bridge *bridge) +int adv7511_hdmi_audio_startup(struct drm_bridge *bridge, + struct drm_connector *connector) { struct adv7511 *adv7511 = bridge_to_adv7511(bridge); @@ -206,8 +206,8 @@ int adv7511_hdmi_audio_startup(struct drm_connector *connector, return 0; } -void adv7511_hdmi_audio_shutdown(struct drm_connector *connector, - struct drm_bridge *bridge) +void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge, + struct drm_connector *connector) { struct adv7511 *adv7511 = bridge_to_adv7511(bridge); diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c index 822265426f58..8ecbc25dc647 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c @@ -346,8 +346,8 @@ static int adv7511_cec_parse_dt(struct device *dev, struct adv7511 *adv7511) return 0; } -int adv7511_cec_init(struct drm_connector *connector, - struct drm_bridge *bridge) +int adv7511_cec_init(struct drm_bridge *bridge, + struct drm_connector *connector) { struct adv7511 *adv7511 = bridge_to_adv7511(bridge); struct device *dev = &adv7511->i2c_main->dev; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 9df18a8f2e37..00d6417c177b 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -864,7 +864,8 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge, return ret; } -static enum drm_connector_status adv7511_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +adv7511_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct adv7511 *adv = bridge_to_adv7511(bridge); @@ -1262,9 +1263,7 @@ static int adv7511_probe(struct i2c_client *i2c) adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | - DRM_BRIDGE_OP_HDMI | - DRM_BRIDGE_OP_HDMI_AUDIO | - DRM_BRIDGE_OP_HDMI_CEC_ADAPTER; + DRM_BRIDGE_OP_HDMI; if (adv7511->i2c_main->irq) adv7511->bridge.ops |= DRM_BRIDGE_OP_HPD; @@ -1272,6 +1271,7 @@ static int adv7511_probe(struct i2c_client *i2c) adv7511->bridge.product = adv7511->info->name; #ifdef CONFIG_DRM_I2C_ADV7511_AUDIO + adv7511->bridge.ops |= DRM_BRIDGE_OP_HDMI_AUDIO; adv7511->bridge.hdmi_audio_dev = dev; adv7511->bridge.hdmi_audio_max_i2s_playback_channels = 2; adv7511->bridge.hdmi_audio_i2s_formats = (SNDRV_PCM_FMTBIT_S16_LE | @@ -1284,6 +1284,7 @@ static int adv7511_probe(struct i2c_client *i2c) #endif #ifdef CONFIG_DRM_I2C_ADV7511_CEC + 
adv7511->bridge.ops |= DRM_BRIDGE_OP_HDMI_CEC_ADAPTER; adv7511->bridge.hdmi_cec_dev = dev; adv7511->bridge.hdmi_cec_adapter_name = dev_name(dev); adv7511->bridge.hdmi_cec_available_las = ADV7511_MAX_ADDRS; diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index a1bc3e96dd35..efe534977d12 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1041,7 +1041,7 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge, struct drm_encoder *encoder, enum drm_bridge_attach_flags flags) { - struct analogix_dp_device *dp = bridge->driver_private; + struct analogix_dp_device *dp = to_dp(bridge); struct drm_connector *connector = NULL; int ret = 0; @@ -1125,7 +1125,7 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp, static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge, struct drm_atomic_state *old_state) { - struct analogix_dp_device *dp = bridge->driver_private; + struct analogix_dp_device *dp = to_dp(bridge); struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; @@ -1180,7 +1180,7 @@ out_dp_init: static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_atomic_state *old_state) { - struct analogix_dp_device *dp = bridge->driver_private; + struct analogix_dp_device *dp = to_dp(bridge); struct drm_crtc *crtc; struct drm_crtc_state *old_crtc_state; int timeout_loop = 0; @@ -1217,7 +1217,7 @@ static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge, static void analogix_dp_bridge_disable(struct drm_bridge *bridge) { - struct analogix_dp_device *dp = bridge->driver_private; + struct analogix_dp_device *dp = to_dp(bridge); if (dp->dpms_mode != DRM_MODE_DPMS_ON) return; @@ -1240,7 +1240,7 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge) static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_atomic_state *old_state) { - struct analogix_dp_device *dp = bridge->driver_private; + struct analogix_dp_device *dp = to_dp(bridge); struct drm_crtc *old_crtc, *new_crtc; struct drm_crtc_state *old_crtc_state = NULL; struct drm_crtc_state *new_crtc_state = NULL; @@ -1278,7 +1278,7 @@ out: static void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge, struct drm_atomic_state *old_state) { - struct analogix_dp_device *dp = bridge->driver_private; + struct analogix_dp_device *dp = to_dp(bridge); struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; int ret; @@ -1300,7 +1300,7 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge, const struct drm_display_mode *orig_mode, const struct drm_display_mode *mode) { - struct analogix_dp_device *dp = bridge->driver_private; + struct analogix_dp_device *dp = to_dp(bridge); struct drm_display_info *display_info = &dp->connector.display_info; struct video_info *video = &dp->video_info; struct device_node *dp_node = dp->dev->of_node; @@ -1385,25 +1385,6 @@ static const struct drm_bridge_funcs analogix_dp_bridge_funcs = { .attach = analogix_dp_bridge_attach, }; -static int analogix_dp_create_bridge(struct drm_device *drm_dev, - struct analogix_dp_device *dp) -{ - struct drm_bridge *bridge; - - bridge = devm_kzalloc(drm_dev->dev, sizeof(*bridge), GFP_KERNEL); - if (!bridge) { - DRM_ERROR("failed to allocate for drm bridge\n"); - return -ENOMEM; - } - - dp->bridge = bridge; - - bridge->driver_private = dp; - bridge->funcs = &analogix_dp_bridge_funcs; - - return 
drm_bridge_attach(dp->encoder, bridge, NULL, 0); -} - static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp) { struct device_node *dp_node = dp->dev->of_node; @@ -1491,9 +1472,10 @@ analogix_dp_probe(struct device *dev, struct analogix_dp_plat_data *plat_data) return ERR_PTR(-EINVAL); } - dp = devm_kzalloc(dev, sizeof(struct analogix_dp_device), GFP_KERNEL); - if (!dp) - return ERR_PTR(-ENOMEM); + dp = devm_drm_bridge_alloc(dev, struct analogix_dp_device, bridge, + &analogix_dp_bridge_funcs); + if (IS_ERR(dp)) + return ERR_CAST(dp); dp->dev = &pdev->dev; dp->dpms_mode = DRM_MODE_DPMS_OFF; @@ -1643,7 +1625,7 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev) return ret; } - ret = analogix_dp_create_bridge(drm_dev, dp); + ret = drm_bridge_attach(dp->encoder, &dp->bridge, NULL, 0); if (ret) { DRM_ERROR("failed to create bridge (%d)\n", ret); goto err_unregister_aux; @@ -1660,7 +1642,7 @@ EXPORT_SYMBOL_GPL(analogix_dp_bind); void analogix_dp_unbind(struct analogix_dp_device *dp) { - analogix_dp_bridge_disable(dp->bridge); + analogix_dp_bridge_disable(&dp->bridge); dp->connector.funcs->destroy(&dp->connector); drm_panel_unprepare(dp->plat_data->panel); diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h index 2b54120ba4a3..b86e93f30ed6 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h @@ -11,6 +11,7 @@ #include <drm/display/drm_dp_helper.h> #include <drm/drm_crtc.h> +#include <drm/drm_bridge.h> #define DP_TIMEOUT_LOOP_COUNT 100 #define MAX_CR_LOOP 5 @@ -154,7 +155,7 @@ struct analogix_dp_device { struct device *dev; struct drm_device *drm_dev; struct drm_connector connector; - struct drm_bridge *bridge; + struct drm_bridge bridge; struct drm_dp_aux aux; struct clk *clock; unsigned int irq; diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c index 0ac4a82c5a6e..c0ad8f59e483 100644 --- a/drivers/gpu/drm/bridge/analogix/anx7625.c +++ b/drivers/gpu/drm/bridge/analogix/anx7625.c @@ -2448,7 +2448,7 @@ anx7625_audio_update_connector_status(struct anx7625_data *ctx, enum drm_connector_status status); static enum drm_connector_status -anx7625_bridge_detect(struct drm_bridge *bridge) +anx7625_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct anx7625_data *ctx = bridge_to_anx7625(bridge); struct device *dev = ctx->dev; diff --git a/drivers/gpu/drm/bridge/aux-bridge.c b/drivers/gpu/drm/bridge/aux-bridge.c index b63304d3a80f..b3e4cdff61d6 100644 --- a/drivers/gpu/drm/bridge/aux-bridge.c +++ b/drivers/gpu/drm/bridge/aux-bridge.c @@ -18,6 +18,7 @@ static void drm_aux_bridge_release(struct device *dev) { struct auxiliary_device *adev = to_auxiliary_dev(dev); + of_node_put(dev->of_node); ida_free(&drm_aux_bridge_ida, adev->id); kfree(adev); @@ -65,6 +66,7 @@ int drm_aux_bridge_register(struct device *parent) ret = auxiliary_device_init(adev); if (ret) { + of_node_put(adev->dev.of_node); ida_free(&drm_aux_bridge_ida, adev->id); kfree(adev); return ret; diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c index e579f947e15b..2e9c702c7087 100644 --- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c +++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c @@ -65,10 +65,11 @@ struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, str adev->id = ret; adev->name = "dp_hpd_bridge"; adev->dev.parent = parent; - 
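/*
 * [Editorial note, not part of the patch] In the analogix_dp hunks above,
 * the bridge is now embedded in struct analogix_dp_device and allocated
 * together with its funcs via devm_drm_bridge_alloc(), so the
 * bridge->driver_private indirection and the separate create-bridge helper
 * disappear. The to_dp() helper the callbacks now use is not shown in this
 * excerpt; it is presumably the usual container_of() upcast, roughly:
 */
static struct analogix_dp_device *to_dp(struct drm_bridge *bridge)
{
	return container_of(bridge, struct analogix_dp_device, bridge);
}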
adev->dev.of_node = of_node_get(parent->of_node); adev->dev.release = drm_aux_hpd_bridge_release; adev->dev.platform_data = of_node_get(np); + device_set_of_node_from_dev(&adev->dev, parent); + ret = auxiliary_device_init(adev); if (ret) { of_node_put(adev->dev.platform_data); diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index cb5f5a8c539a..a614d1384f71 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -2143,7 +2143,8 @@ static int cdns_mhdp_atomic_check(struct drm_bridge *bridge, return 0; } -static enum drm_connector_status cdns_mhdp_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +cdns_mhdp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge); diff --git a/drivers/gpu/drm/bridge/chrontel-ch7033.c b/drivers/gpu/drm/bridge/chrontel-ch7033.c index ab9274793356..54d49d4882c8 100644 --- a/drivers/gpu/drm/bridge/chrontel-ch7033.c +++ b/drivers/gpu/drm/bridge/chrontel-ch7033.c @@ -215,7 +215,7 @@ static enum drm_connector_status ch7033_connector_detect( { struct ch7033_priv *priv = conn_to_ch7033_priv(connector); - return drm_bridge_detect(priv->next_bridge); + return drm_bridge_detect(priv->next_bridge, connector); } static const struct drm_connector_funcs ch7033_connector_funcs = { diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index badd2c7f91a1..52b7b5889e6f 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -40,8 +40,7 @@ static int display_connector_attach(struct drm_bridge *bridge, return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 
0 : -EINVAL; } -static enum drm_connector_status -display_connector_detect(struct drm_bridge *bridge) +static enum drm_connector_status display_connector_detect(struct drm_bridge *bridge) { struct display_connector *conn = to_display_connector(bridge); @@ -82,6 +81,12 @@ display_connector_detect(struct drm_bridge *bridge) } } +static enum drm_connector_status +display_connector_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) +{ + return display_connector_detect(bridge); +} + static const struct drm_edid *display_connector_edid_read(struct drm_bridge *bridge, struct drm_connector *connector) { @@ -172,7 +177,7 @@ static u32 *display_connector_get_input_bus_fmts(struct drm_bridge *bridge, static const struct drm_bridge_funcs display_connector_bridge_funcs = { .attach = display_connector_attach, - .detect = display_connector_detect, + .detect = display_connector_bridge_detect, .edid_read = display_connector_edid_read, .atomic_get_output_bus_fmts = display_connector_get_output_bus_fmts, .atomic_get_input_bus_fmts = display_connector_get_input_bus_fmts, diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c index c4eedf643f39..cf813672b4ff 100644 --- a/drivers/gpu/drm/bridge/ite-it6263.c +++ b/drivers/gpu/drm/bridge/ite-it6263.c @@ -693,7 +693,8 @@ static int it6263_bridge_attach(struct drm_bridge *bridge, return 0; } -static enum drm_connector_status it6263_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +it6263_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct it6263 *it = bridge_to_it6263(bridge); diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index b0dc9280d870..89649c17ffad 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -3238,7 +3238,7 @@ static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge, } static enum drm_connector_status -it6505_bridge_detect(struct drm_bridge *bridge) +it6505_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct it6505 *it6505 = bridge_to_it6505(bridge); diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c index 6494f0842793..aa7b1dcc5d70 100644 --- a/drivers/gpu/drm/bridge/ite-it66121.c +++ b/drivers/gpu/drm/bridge/ite-it66121.c @@ -843,7 +843,8 @@ static enum drm_mode_status it66121_bridge_mode_valid(struct drm_bridge *bridge, return MODE_OK; } -static enum drm_connector_status it66121_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +it66121_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge); diff --git a/drivers/gpu/drm/bridge/lontium-lt8912b.c b/drivers/gpu/drm/bridge/lontium-lt8912b.c index bd83228b0f0e..342374cb8fc6 100644 --- a/drivers/gpu/drm/bridge/lontium-lt8912b.c +++ b/drivers/gpu/drm/bridge/lontium-lt8912b.c @@ -408,7 +408,7 @@ lt8912_connector_detect(struct drm_connector *connector, bool force) struct lt8912 *lt = connector_to_lt8912(connector); if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT) - return drm_bridge_detect(lt->hdmi_port); + return drm_bridge_detect(lt->hdmi_port, connector); return lt8912_check_cable_status(lt); } @@ -607,12 +607,12 @@ lt8912_bridge_mode_valid(struct drm_bridge *bridge, } static enum drm_connector_status -lt8912_bridge_detect(struct drm_bridge *bridge) +lt8912_bridge_detect(struct drm_bridge *bridge, struct 
drm_connector *connector) { struct lt8912 *lt = bridge_to_lt8912(bridge); if (lt->hdmi_port->ops & DRM_BRIDGE_OP_DETECT) - return drm_bridge_detect(lt->hdmi_port); + return drm_bridge_detect(lt->hdmi_port, connector); return lt8912_check_cable_status(lt); } diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index d6ee79c1e427..a2d032ee4744 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -543,7 +543,8 @@ static int lt9611_regulator_enable(struct lt9611 *lt9611) return 0; } -static enum drm_connector_status lt9611_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +lt9611_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); unsigned int reg_val = 0; @@ -936,8 +937,8 @@ lt9611_hdmi_tmds_char_rate_valid(const struct drm_bridge *bridge, return MODE_OK; } -static int lt9611_hdmi_audio_startup(struct drm_connector *connector, - struct drm_bridge *bridge) +static int lt9611_hdmi_audio_startup(struct drm_bridge *bridge, + struct drm_connector *connector) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); @@ -952,8 +953,8 @@ static int lt9611_hdmi_audio_startup(struct drm_connector *connector, return 0; } -static int lt9611_hdmi_audio_prepare(struct drm_connector *connector, - struct drm_bridge *bridge, +static int lt9611_hdmi_audio_prepare(struct drm_bridge *bridge, + struct drm_connector *connector, struct hdmi_codec_daifmt *fmt, struct hdmi_codec_params *hparms) { @@ -974,8 +975,8 @@ static int lt9611_hdmi_audio_prepare(struct drm_connector *connector, &hparms->cea); } -static void lt9611_hdmi_audio_shutdown(struct drm_connector *connector, - struct drm_bridge *bridge) +static void lt9611_hdmi_audio_shutdown(struct drm_bridge *bridge, + struct drm_connector *connector) { struct lt9611 *lt9611 = bridge_to_lt9611(bridge); diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 766da2cb45a7..38fb8776c0f4 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -353,7 +353,8 @@ static void lt9611uxc_bridge_mode_set(struct drm_bridge *bridge, lt9611uxc_unlock(lt9611uxc); } -static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +lt9611uxc_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); unsigned int reg_val = 0; diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index 81dde9ed7bcf..c9e6505cbd88 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -120,7 +120,8 @@ drm_connector_helper_funcs ge_b850v3_lvds_connector_helper_funcs = { .get_modes = ge_b850v3_lvds_get_modes, }; -static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +ge_b850v3_lvds_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct i2c_client *stdp4028_i2c = ge_b850v3_lvds_ptr->stdp4028_i2c; @@ -141,7 +142,7 @@ static enum drm_connector_status ge_b850v3_lvds_bridge_detect(struct drm_bridge static enum drm_connector_status ge_b850v3_lvds_detect(struct drm_connector *connector, bool force) { - return 
ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge); + return ge_b850v3_lvds_bridge_detect(&ge_b850v3_lvds_ptr->bridge, connector); } static const struct drm_connector_funcs ge_b850v3_lvds_connector_funcs = { diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 6361a943e213..184a8b7049a7 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -299,6 +299,7 @@ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel, panel_bridge->bridge.of_node = panel->dev->of_node; panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES; panel_bridge->bridge.type = connector_type; + panel_bridge->bridge.pre_enable_prev_first = panel->prepare_prev_first; drm_bridge_add(&panel_bridge->bridge); @@ -414,8 +415,6 @@ struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev, return bridge; } - bridge->pre_enable_prev_first = panel->prepare_prev_first; - *ptr = bridge; devres_add(dev, ptr); @@ -457,8 +456,6 @@ struct drm_bridge *drmm_panel_bridge_add(struct drm_device *drm, if (ret) return ERR_PTR(ret); - bridge->pre_enable_prev_first = panel->prepare_prev_first; - return bridge; } EXPORT_SYMBOL(drmm_panel_bridge_add); diff --git a/drivers/gpu/drm/bridge/samsung-dsim.c b/drivers/gpu/drm/bridge/samsung-dsim.c index f2f666b27d2d..b5dd71f6a990 100644 --- a/drivers/gpu/drm/bridge/samsung-dsim.c +++ b/drivers/gpu/drm/bridge/samsung-dsim.c @@ -20,6 +20,7 @@ #include <linux/of.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/units.h> #include <video/mipi_display.h> @@ -558,10 +559,6 @@ static void samsung_dsim_reset(struct samsung_dsim *dsi) samsung_dsim_write(dsi, DSIM_SWRST_REG, reset_val); } -#ifndef MHZ -#define MHZ (1000 * 1000) -#endif - static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi, unsigned long fin, unsigned long fout, @@ -575,8 +572,8 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi, u16 _m, best_m; u8 _s, best_s; - p_min = DIV_ROUND_UP(fin, (driver_data->pll_fin_max * MHZ)); - p_max = fin / (driver_data->pll_fin_min * MHZ); + p_min = DIV_ROUND_UP(fin, (driver_data->pll_fin_max * HZ_PER_MHZ)); + p_max = fin / (driver_data->pll_fin_min * HZ_PER_MHZ); for (_p = p_min; _p <= p_max; ++_p) { for (_s = 0; _s <= 5; ++_s) { @@ -591,8 +588,8 @@ static unsigned long samsung_dsim_pll_find_pms(struct samsung_dsim *dsi, tmp = (u64)_m * fin; do_div(tmp, _p); - if (tmp < driver_data->min_freq * MHZ || - tmp > driver_data->max_freq * MHZ) + if (tmp < driver_data->min_freq * HZ_PER_MHZ || + tmp > driver_data->max_freq * HZ_PER_MHZ) continue; tmp = (u64)_m * fin; @@ -635,7 +632,7 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi, * limit. 
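The samsung-dsim hunks around here drop the driver-local MHZ macro for HZ_PER_MHZ from <linux/units.h>; the constant is still 1000000, only the shared spelling changes. A reduced sketch of the input-clock clamping shown nearby, assuming the limit is given in MHz and the rate in Hz:

#include <linux/units.h>

static unsigned long clamp_pll_fin(unsigned long fin_hz,
                                   unsigned long pll_fin_max_mhz)
{
        /* Halve the input clock until it is within the PLL maximum. */
        while (fin_hz > pll_fin_max_mhz * HZ_PER_MHZ)
                fin_hz /= 2;

        return fin_hz;
}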
*/ fin = clk_get_rate(clk_get_parent(dsi->pll_clk)); - while (fin > driver_data->pll_fin_max * MHZ) + while (fin > driver_data->pll_fin_max * HZ_PER_MHZ) fin /= 2; clk_set_rate(dsi->pll_clk, fin); @@ -661,10 +658,11 @@ static unsigned long samsung_dsim_set_pll(struct samsung_dsim *dsi, if (driver_data->has_freqband) { static const unsigned long freq_bands[] = { - 100 * MHZ, 120 * MHZ, 160 * MHZ, 200 * MHZ, - 270 * MHZ, 320 * MHZ, 390 * MHZ, 450 * MHZ, - 510 * MHZ, 560 * MHZ, 640 * MHZ, 690 * MHZ, - 770 * MHZ, 870 * MHZ, 950 * MHZ, + 100 * HZ_PER_MHZ, 120 * HZ_PER_MHZ, 160 * HZ_PER_MHZ, + 200 * HZ_PER_MHZ, 270 * HZ_PER_MHZ, 320 * HZ_PER_MHZ, + 390 * HZ_PER_MHZ, 450 * HZ_PER_MHZ, 510 * HZ_PER_MHZ, + 560 * HZ_PER_MHZ, 640 * HZ_PER_MHZ, 690 * HZ_PER_MHZ, + 770 * HZ_PER_MHZ, 870 * HZ_PER_MHZ, 950 * HZ_PER_MHZ, }; int band; @@ -724,7 +722,7 @@ static int samsung_dsim_enable_clock(struct samsung_dsim *dsi) esc_div = DIV_ROUND_UP(byte_clk, dsi->esc_clk_rate); esc_clk = byte_clk / esc_div; - if (esc_clk > 20 * MHZ) { + if (esc_clk > 20 * HZ_PER_MHZ) { ++esc_div; esc_clk = byte_clk / esc_div; } @@ -899,8 +897,6 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi) * The user manual describes that following bits are ignored in * command mode. */ - if (!(dsi->mode_flags & MIPI_DSI_MODE_VSYNC_FLUSH)) - reg |= DSIM_MFLUSH_VS; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) reg |= DSIM_SYNC_INFORM; if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) @@ -1096,7 +1092,7 @@ static void samsung_dsim_send_to_fifo(struct samsung_dsim *dsi, bool first = !xfer->tx_done; u32 reg; - dev_dbg(dev, "< xfer %pK: tx len %u, done %u, rx len %u, done %u\n", + dev_dbg(dev, "< xfer %p: tx len %u, done %u, rx len %u, done %u\n", xfer, length, xfer->tx_done, xfer->rx_len, xfer->rx_done); if (length > DSI_TX_FIFO_SIZE) @@ -1236,43 +1232,34 @@ static void samsung_dsim_transfer_start(struct samsung_dsim *dsi) { unsigned long flags; struct samsung_dsim_transfer *xfer; - bool start = false; -again: spin_lock_irqsave(&dsi->transfer_lock, flags); - if (list_empty(&dsi->transfer_list)) { - spin_unlock_irqrestore(&dsi->transfer_lock, flags); - return; - } + while (!list_empty(&dsi->transfer_list)) { + xfer = list_first_entry(&dsi->transfer_list, + struct samsung_dsim_transfer, list); - xfer = list_first_entry(&dsi->transfer_list, - struct samsung_dsim_transfer, list); - - spin_unlock_irqrestore(&dsi->transfer_lock, flags); + spin_unlock_irqrestore(&dsi->transfer_lock, flags); - if (xfer->packet.payload_length && - xfer->tx_done == xfer->packet.payload_length) - /* waiting for RX */ - return; + if (xfer->packet.payload_length && + xfer->tx_done == xfer->packet.payload_length) + /* waiting for RX */ + return; - samsung_dsim_send_to_fifo(dsi, xfer); + samsung_dsim_send_to_fifo(dsi, xfer); - if (xfer->packet.payload_length || xfer->rx_len) - return; + if (xfer->packet.payload_length || xfer->rx_len) + return; - xfer->result = 0; - complete(&xfer->completed); + xfer->result = 0; + complete(&xfer->completed); - spin_lock_irqsave(&dsi->transfer_lock, flags); + spin_lock_irqsave(&dsi->transfer_lock, flags); - list_del_init(&xfer->list); - start = !list_empty(&dsi->transfer_list); + list_del_init(&xfer->list); + } spin_unlock_irqrestore(&dsi->transfer_lock, flags); - - if (start) - goto again; } static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi) @@ -1294,7 +1281,7 @@ static bool samsung_dsim_transfer_finish(struct samsung_dsim *dsi) spin_unlock_irqrestore(&dsi->transfer_lock, flags); dev_dbg(dsi->dev, - "> xfer 
%pK, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n", + "> xfer %p, tx_len %zu, tx_done %u, rx_len %u, rx_done %u\n", xfer, xfer->packet.payload_length, xfer->tx_done, xfer->rx_len, xfer->rx_done); diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 882973e90062..d537b1d036fb 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -458,7 +458,8 @@ static int sii902x_bridge_attach(struct drm_bridge *bridge, return 0; } -static enum drm_connector_status sii902x_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +sii902x_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct sii902x *sii902x = bridge_to_sii902x(bridge); diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c index c66bd913e33a..3d15ddd39470 100644 --- a/drivers/gpu/drm/bridge/simple-bridge.c +++ b/drivers/gpu/drm/bridge/simple-bridge.c @@ -90,7 +90,7 @@ simple_bridge_connector_detect(struct drm_connector *connector, bool force) { struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector); - return drm_bridge_detect(sbridge->next_bridge); + return drm_bridge_detect(sbridge->next_bridge, connector); } static const struct drm_connector_funcs simple_bridge_con_funcs = { diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c index 7ade80f02a94..39332c57f2c5 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-qp.c @@ -440,8 +440,8 @@ static void dw_hdmi_qp_set_sample_rate(struct dw_hdmi_qp *hdmi, unsigned long lo dw_hdmi_qp_set_cts_n(hdmi, cts, n); } -static int dw_hdmi_qp_audio_enable(struct drm_connector *connector, - struct drm_bridge *bridge) +static int dw_hdmi_qp_audio_enable(struct drm_bridge *bridge, + struct drm_connector *connector) { struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); @@ -451,8 +451,8 @@ static int dw_hdmi_qp_audio_enable(struct drm_connector *connector, return 0; } -static int dw_hdmi_qp_audio_prepare(struct drm_connector *connector, - struct drm_bridge *bridge, +static int dw_hdmi_qp_audio_prepare(struct drm_bridge *bridge, + struct drm_connector *connector, struct hdmi_codec_daifmt *fmt, struct hdmi_codec_params *hparms) { @@ -497,8 +497,8 @@ static void dw_hdmi_qp_audio_disable_regs(struct dw_hdmi_qp *hdmi) AVP_DATAPATH_PACKET_AUDIO_SWDISABLE, GLOBAL_SWDISABLE); } -static void dw_hdmi_qp_audio_disable(struct drm_connector *connector, - struct drm_bridge *bridge) +static void dw_hdmi_qp_audio_disable(struct drm_bridge *bridge, + struct drm_connector *connector) { struct dw_hdmi_qp *hdmi = dw_hdmi_qp_from_bridge(bridge); @@ -876,7 +876,7 @@ static void dw_hdmi_qp_bridge_atomic_disable(struct drm_bridge *bridge, } static enum drm_connector_status -dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge) +dw_hdmi_qp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct dw_hdmi_qp *hdmi = bridge->driver_private; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 76c6570e2a85..206b099a35e9 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2978,7 +2978,8 @@ static void dw_hdmi_bridge_atomic_enable(struct drm_bridge *bridge, mutex_unlock(&hdmi->mutex); } -static enum drm_connector_status dw_hdmi_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +dw_hdmi_bridge_detect(struct 
drm_bridge *bridge, struct drm_connector *connector) { struct dw_hdmi *hdmi = bridge->driver_private; diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 61559467e2d2..4097fef4b86b 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -1760,7 +1760,8 @@ static const struct drm_connector_helper_funcs tc_connector_helper_funcs = { .get_modes = tc_connector_get_modes, }; -static enum drm_connector_status tc_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +tc_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct tc_data *tc = bridge_to_tc(bridge); bool conn; @@ -1785,7 +1786,7 @@ tc_connector_detect(struct drm_connector *connector, bool force) struct tc_data *tc = connector_to_tc(connector); if (tc->hpd_pin >= 0) - return tc_bridge_detect(&tc->bridge); + return tc_bridge_detect(&tc->bridge, connector); if (tc->panel_bridge) return connector_status_connected; @@ -2422,6 +2423,7 @@ static int tc_probe_bridge_endpoint(struct tc_data *tc, enum tc_mode mode) struct device_node *node = NULL; for_each_endpoint_of_node(dev->of_node, node) { + of_graph_parse_endpoint(node, &endpoint); if (endpoint.port == 2) { of_property_read_u8_array(node, "toshiba,pre-emphasis", tc->pre_emphasis, diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 78a50b947a08..464390372b34 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -348,12 +348,18 @@ static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata, * 200 ms. We'll assume that the panel driver will have the hardcoded * delay in its prepare and always disable HPD. * - * If HPD somehow makes sense on some future panel we'll have to - * change this to be conditional on someone specifying that HPD should - * be used. + * For DisplayPort bridge type, we need HPD. So we use the bridge type + * to conditionally disable HPD. + * NOTE: The bridge type is set in ti_sn_bridge_probe() but enable_comms() + * can be called before. So for DisplayPort, HPD will be enabled once + * bridge type is set. We are using bridge type instead of "no-hpd" + * property because it is not used properly in devicetree description + * and hence is unreliable. */ - regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE, - HPD_DISABLE); + + if (pdata->bridge.type != DRM_MODE_CONNECTOR_DisplayPort) + regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE, + HPD_DISABLE); pdata->comms_enabled = true; @@ -1155,14 +1161,20 @@ static void ti_sn_bridge_atomic_post_disable(struct drm_bridge *bridge, pm_runtime_put_sync(pdata->dev); } -static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +ti_sn_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); int val = 0; - pm_runtime_get_sync(pdata->dev); + /* + * Runtime reference is grabbed in ti_sn_bridge_hpd_enable() + * as the chip won't report HPD just after being powered on. + * HPD_DEBOUNCED_STATE reflects correct state only after the + * debounce time (~100-400 ms). + */ + regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val); - pm_runtime_put_autosuspend(pdata->dev); return val & HPD_DEBOUNCED_STATE ? 
connector_status_connected : connector_status_disconnected; @@ -1185,6 +1197,26 @@ static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry * debugfs_create_file("status", 0600, debugfs, pdata, &status_fops); } +static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge) +{ + struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + + /* + * Device needs to be powered on before reading the HPD state + * for reliable hpd detection in ti_sn_bridge_detect() due to + * the high debounce time. + */ + + pm_runtime_get_sync(pdata->dev); +} + +static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge) +{ + struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge); + + pm_runtime_put_autosuspend(pdata->dev); +} + static const struct drm_bridge_funcs ti_sn_bridge_funcs = { .attach = ti_sn_bridge_attach, .detach = ti_sn_bridge_detach, @@ -1199,6 +1231,8 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = { .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, .debugfs_init = ti_sn65dsi86_debugfs_init, + .hpd_enable = ti_sn_bridge_hpd_enable, + .hpd_disable = ti_sn_bridge_hpd_disable, }; static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata, @@ -1286,8 +1320,26 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev, pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort ? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP; - if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) - pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT; + if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) { + pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT | + DRM_BRIDGE_OP_HPD; + /* + * If comms were already enabled they would have been enabled + * with the wrong value of HPD_DISABLE. Update it now. Comms + * could be enabled if anyone is holding a pm_runtime reference + * (like if a GPIO is in use). Note that in most cases nobody + * is doing AUX channel xfers before the bridge is added so + * HPD doesn't _really_ matter then. The only exception is in + * the eDP case where the panel wants to read the EDID before + * the bridge is added. We always consistently have HPD disabled + * for eDP. 
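The net effect of the ti-sn65dsi86 hunks: the runtime-PM reference moves out of .detect and into the new .hpd_enable/.hpd_disable pair, so the chip stays powered across the 100-400 ms debounce window rather than being woken only for the register read. A condensed sketch, also using the two-argument .detect signature seen throughout the bridge hunks above; the my_*/MY_* names and ctx accessors are illustrative:

static void my_hpd_enable(struct drm_bridge *bridge)
{
        struct my_ctx *ctx = bridge_to_ctx(bridge);

        /* Keep the chip powered for as long as HPD must be observable. */
        pm_runtime_get_sync(ctx->dev);
}

static void my_hpd_disable(struct drm_bridge *bridge)
{
        struct my_ctx *ctx = bridge_to_ctx(bridge);

        pm_runtime_put_autosuspend(ctx->dev);
}

static enum drm_connector_status
my_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
        struct my_ctx *ctx = bridge_to_ctx(bridge);
        unsigned int val = 0;

        /* No get/put here: the .hpd_enable reference guarantees the
         * debounced state is valid by the time we read it. */
        regmap_read(ctx->regmap, MY_HPD_STATE_REG, &val);

        return val & MY_HPD_DEBOUNCED ? connector_status_connected :
                                        connector_status_disconnected;
}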
+ */ + mutex_lock(&pdata->comms_mutex); + if (pdata->comms_enabled) + regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, + HPD_DISABLE, 0); + mutex_unlock(&pdata->comms_mutex); + } drm_bridge_add(&pdata->bridge); @@ -1677,11 +1729,6 @@ static int ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset, { struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip); - if (!test_bit(offset, pdata->gchip_output)) { - dev_err(pdata->dev, "Ignoring GPIO set while input\n"); - return -EPERM; - } - val &= 1; return regmap_update_bits(pdata->regmap, SN_GPIO_IO_REG, BIT(SN_GPIO_OUTPUT_SHIFT + offset), @@ -1789,7 +1836,7 @@ static int ti_sn_gpio_probe(struct auxiliary_device *adev, pdata->gchip.direction_input = ti_sn_bridge_gpio_direction_input; pdata->gchip.direction_output = ti_sn_bridge_gpio_direction_output; pdata->gchip.get = ti_sn_bridge_gpio_get; - pdata->gchip.set_rv = ti_sn_bridge_gpio_set; + pdata->gchip.set = ti_sn_bridge_gpio_set; pdata->gchip.can_sleep = true; pdata->gchip.names = ti_sn_bridge_gpio_names; pdata->gchip.ngpio = SN_NUM_GPIOS; diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index 549e8e8edeb4..b80ee089f880 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -89,7 +89,7 @@ tfp410_connector_detect(struct drm_connector *connector, bool force) { struct tfp410 *dvi = drm_connector_to_tfp410(connector); - return drm_bridge_detect(dvi->next_bridge); + return drm_bridge_detect(dvi->next_bridge, connector); } static const struct drm_connector_funcs tfp410_con_funcs = { diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c index 0919364e80d1..dcf686c4e73d 100644 --- a/drivers/gpu/drm/bridge/ti-tpd12s015.c +++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c @@ -77,6 +77,12 @@ static enum drm_connector_status tpd12s015_detect(struct drm_bridge *bridge) return connector_status_disconnected; } +static enum drm_connector_status +tpd12s015_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) +{ + return tpd12s015_detect(bridge); +} + static void tpd12s015_hpd_enable(struct drm_bridge *bridge) { struct tpd12s015_device *tpd = to_tpd12s015(bridge); @@ -94,7 +100,7 @@ static void tpd12s015_hpd_disable(struct drm_bridge *bridge) static const struct drm_bridge_funcs tpd12s015_bridge_funcs = { .attach = tpd12s015_attach, .detach = tpd12s015_detach, - .detect = tpd12s015_detect, + .detect = tpd12s015_bridge_detect, .hpd_enable = tpd12s015_hpd_enable, .hpd_disable = tpd12s015_hpd_disable, }; diff --git a/drivers/gpu/drm/clients/drm_client_setup.c b/drivers/gpu/drm/clients/drm_client_setup.c index aec2fab6d2bf..72480db1f00d 100644 --- a/drivers/gpu/drm/clients/drm_client_setup.c +++ b/drivers/gpu/drm/clients/drm_client_setup.c @@ -4,6 +4,7 @@ #include <drm/clients/drm_client_setup.h> #include <drm/drm_device.h> +#include <drm/drm_drv.h> #include <drm/drm_fourcc.h> #include <drm/drm_print.h> @@ -33,6 +34,10 @@ MODULE_PARM_DESC(active, */ void drm_client_setup(struct drm_device *dev, const struct drm_format_info *format) { + if (!drm_core_check_feature(dev, DRIVER_MODESET)) { + drm_dbg(dev, "driver does not support mode-setting, skipping DRM clients\n"); + return; + } #ifdef CONFIG_DRM_FBDEV_EMULATION if (!strcmp(drm_client_default, "fbdev")) { diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index 6cdb432dbc30..5eb7e9bfe361 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ 
b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -210,7 +210,7 @@ drm_bridge_connector_detect(struct drm_connector *connector, bool force) enum drm_connector_status status; if (detect) { - status = detect->funcs->detect(detect); + status = detect->funcs->detect(detect, connector); if (hdmi) drm_atomic_helper_connector_hdmi_hotplug(connector, status); @@ -463,7 +463,7 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector) if (!bridge->funcs->hdmi_audio_startup) return 0; - return bridge->funcs->hdmi_audio_startup(connector, bridge); + return bridge->funcs->hdmi_audio_startup(bridge, connector); } if (bridge_connector->bridge_dp_audio) { @@ -472,7 +472,7 @@ static int drm_bridge_connector_audio_startup(struct drm_connector *connector) if (!bridge->funcs->dp_audio_startup) return 0; - return bridge->funcs->dp_audio_startup(connector, bridge); + return bridge->funcs->dp_audio_startup(bridge, connector); } return -EINVAL; @@ -489,13 +489,13 @@ static int drm_bridge_connector_audio_prepare(struct drm_connector *connector, if (bridge_connector->bridge_hdmi_audio) { bridge = bridge_connector->bridge_hdmi_audio; - return bridge->funcs->hdmi_audio_prepare(connector, bridge, fmt, hparms); + return bridge->funcs->hdmi_audio_prepare(bridge, connector, fmt, hparms); } if (bridge_connector->bridge_dp_audio) { bridge = bridge_connector->bridge_dp_audio; - return bridge->funcs->dp_audio_prepare(connector, bridge, fmt, hparms); + return bridge->funcs->dp_audio_prepare(bridge, connector, fmt, hparms); } return -EINVAL; @@ -509,12 +509,12 @@ static void drm_bridge_connector_audio_shutdown(struct drm_connector *connector) if (bridge_connector->bridge_hdmi_audio) { bridge = bridge_connector->bridge_hdmi_audio; - bridge->funcs->hdmi_audio_shutdown(connector, bridge); + bridge->funcs->hdmi_audio_shutdown(bridge, connector); } if (bridge_connector->bridge_dp_audio) { bridge = bridge_connector->bridge_dp_audio; - bridge->funcs->dp_audio_shutdown(connector, bridge); + bridge->funcs->dp_audio_shutdown(bridge, connector); } } @@ -531,7 +531,7 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connecto if (!bridge->funcs->hdmi_audio_mute_stream) return -ENOTSUPP; - return bridge->funcs->hdmi_audio_mute_stream(connector, bridge, + return bridge->funcs->hdmi_audio_mute_stream(bridge, connector, enable, direction); } @@ -541,7 +541,7 @@ static int drm_bridge_connector_audio_mute_stream(struct drm_connector *connecto if (!bridge->funcs->dp_audio_mute_stream) return -ENOTSUPP; - return bridge->funcs->dp_audio_mute_stream(connector, bridge, + return bridge->funcs->dp_audio_mute_stream(bridge, connector, enable, direction); } @@ -604,7 +604,7 @@ static int drm_bridge_connector_hdmi_cec_init(struct drm_connector *connector) if (!bridge->funcs->hdmi_cec_init) return 0; - return bridge->funcs->hdmi_cec_init(connector, bridge); + return bridge->funcs->hdmi_cec_init(bridge, connector); } static const struct drm_connector_hdmi_cec_funcs drm_bridge_connector_hdmi_cec_funcs = { @@ -795,11 +795,14 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (bridge_connector->bridge_hdmi_audio || bridge_connector->bridge_dp_audio) { struct device *dev; + struct drm_bridge *bridge; if (bridge_connector->bridge_hdmi_audio) - dev = bridge_connector->bridge_hdmi_audio->hdmi_audio_dev; + bridge = bridge_connector->bridge_hdmi_audio; else - dev = bridge_connector->bridge_dp_audio->hdmi_audio_dev; + bridge = bridge_connector->bridge_dp_audio; + + dev = 
bridge->hdmi_audio_dev; ret = drm_connector_hdmi_audio_init(connector, dev, &drm_bridge_connector_hdmi_audio_funcs, diff --git a/drivers/gpu/drm/display/drm_dp_aux_bus.c b/drivers/gpu/drm/display/drm_dp_aux_bus.c index 7b9afcf48836..2d279e82922f 100644 --- a/drivers/gpu/drm/display/drm_dp_aux_bus.c +++ b/drivers/gpu/drm/display/drm_dp_aux_bus.c @@ -58,7 +58,7 @@ static int dp_aux_ep_probe(struct device *dev) container_of(aux_ep, struct dp_aux_ep_device_with_data, aux_ep); int ret; - ret = dev_pm_domain_attach(dev, true); + ret = dev_pm_domain_attach(dev, PD_FLAG_ATTACH_POWER_ON); if (ret) return dev_err_probe(dev, ret, "Failed to attach to PM Domain\n"); diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index 385a1bfdb272..1ecc3df7e316 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -742,7 +742,7 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, int ret; if (dpcd_access_needs_probe(aux)) { - ret = drm_dp_dpcd_probe(aux, DP_LANE0_1_STATUS); + ret = drm_dp_dpcd_probe(aux, DP_TRAINING_PATTERN_SET); if (ret < 0) return ret; } @@ -3957,23 +3957,31 @@ EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr); * Returns: %0 on success, negative error code on failure */ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, - u16 level) + u32 level) { int ret; - u8 buf[2] = { 0 }; + unsigned int offset = DP_EDP_BACKLIGHT_BRIGHTNESS_MSB; + u8 buf[3] = { 0 }; /* The panel uses the PWM for controlling brightness levels */ - if (!bl->aux_set) + if (!(bl->aux_set || bl->luminance_set)) return 0; - if (bl->lsb_reg_used) { + if (bl->luminance_set) { + level = level * 1000; + level &= 0xffffff; + buf[0] = (level & 0x0000ff); + buf[1] = (level & 0x00ff00) >> 8; + buf[2] = (level & 0xff0000) >> 16; + offset = DP_EDP_PANEL_TARGET_LUMINANCE_VALUE; + } else if (bl->lsb_reg_used) { buf[0] = (level & 0xff00) >> 8; buf[1] = (level & 0x00ff); } else { buf[0] = level; } - ret = drm_dp_dpcd_write_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf)); + ret = drm_dp_dpcd_write_data(aux, offset, buf, sizeof(buf)); if (ret < 0) { drm_err(aux->drm_dev, "%s: Failed to write aux backlight level: %d\n", @@ -4036,7 +4044,7 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli * Returns: %0 on success, negative error code on failure. 
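As a worked check of the drm_edp_backlight_set_level() hunk above: the level arrives in nits, is scaled to millinits, masked to 24 bits and written LSB-first. A standalone restatement of just that arithmetic:

/* 300 nits -> 300000 millinits = 0x0493e0 -> buf = { 0xe0, 0x93, 0x04 } */
static void encode_target_luminance(u32 nits, u8 buf[3])
{
        u32 millinits = (nits * 1000) & 0xffffff;

        buf[0] = millinits & 0xff;              /* LSB first on the wire */
        buf[1] = (millinits >> 8) & 0xff;
        buf[2] = (millinits >> 16) & 0xff;
}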
*/ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl, - const u16 level) + const u32 level) { int ret; u8 dpcd_buf; @@ -4046,6 +4054,9 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli else dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM; + if (bl->luminance_set) + dpcd_buf |= DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE; + if (bl->pwmgen_bit_count) { ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count); if (ret < 0) @@ -4209,7 +4220,7 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i u8 *current_mode) { int ret; - u8 buf[2]; + u8 buf[3]; u8 mode_reg; ret = drm_dp_dpcd_read_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg); @@ -4226,17 +4237,37 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { int size = 1 + bl->lsb_reg_used; - ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size); - if (ret < 0) { - drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n", - aux->name, ret); - return ret; - } + if (bl->luminance_set) { + ret = drm_dp_dpcd_read_data(aux, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE, + buf, sizeof(buf)); + if (ret < 0) { + drm_dbg_kms(aux->drm_dev, + "%s: Failed to read backlight level: %d\n", + aux->name, ret); + return ret; + } - if (bl->lsb_reg_used) - return (buf[0] << 8) | buf[1]; - else - return buf[0]; + /* + * In case luminance is set we want to send the value back in nits, but + * since DP_EDP_PANEL_TARGET_LUMINANCE stores values in millinits we + * need to divide by 1000. + */ + return (buf[0] | buf[1] << 8 | buf[2] << 16) / 1000; + } else { + ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + buf, size); + if (ret < 0) { + drm_dbg_kms(aux->drm_dev, + "%s: Failed to read backlight level: %d\n", + aux->name, ret); + return ret; + } + + if (bl->lsb_reg_used) + return (buf[0] << 8) | buf[1]; + else + return buf[0]; + } } /* @@ -4251,10 +4282,12 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i * interface. * @aux: The DP aux device to use for probing * @bl: The &drm_edp_backlight_info struct to fill out with information on the backlight + * @max_luminance: Maximum luminance in nits, used when @need_luminance is true * @driver_pwm_freq_hz: Optional PWM frequency from the driver in hz * @edp_dpcd: A cached copy of the eDP DPCD + * @current_level: Where to store the probed brightness level, if any * @current_mode: Where to store the currently set backlight control mode + * @need_luminance: Tells us if we want to manipulate the backlight using luminance values * * Initializes a &drm_edp_backlight_info struct by probing @aux for it's backlight capabilities, * along with also probing the current and maximum supported brightness levels.
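The probe path above reverses that encoding: three bytes read from DP_EDP_PANEL_TARGET_LUMINANCE_VALUE are reassembled little-endian and divided by 1000, so callers get nits back. Standalone restatement:

/* buf = { 0xe0, 0x93, 0x04 } -> 0x0493e0 = 300000 millinits -> 300 nits */
static u32 decode_target_luminance(const u8 buf[3])
{
        u32 millinits = buf[0] | buf[1] << 8 | buf[2] << 16;

        return millinits / 1000;
}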
@@ -4266,8 +4299,9 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i */ int drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl, + u32 max_luminance, u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE], - u16 *current_level, u8 *current_mode) + u32 *current_level, u8 *current_mode, bool need_luminance) { int ret; @@ -4277,18 +4311,26 @@ drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl bl->aux_set = true; if (edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) bl->lsb_reg_used = true; + if ((edp_dpcd[0] & DP_EDP_15) && edp_dpcd[3] & + (DP_EDP_PANEL_LUMINANCE_CONTROL_CAPABLE) && need_luminance) + bl->luminance_set = true; /* Sanity check caps */ - if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { + if (!bl->aux_set && !(edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP) && + !bl->luminance_set) { drm_dbg_kms(aux->drm_dev, - "%s: Panel supports neither AUX or PWM brightness control? Aborting\n", + "%s: Panel does not support AUX, PWM or luminance-based brightness control. Aborting\n", aux->name); return -EINVAL; } - ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd); - if (ret < 0) - return ret; + if (bl->luminance_set) { + bl->max = max_luminance; + } else { + ret = drm_edp_backlight_probe_max(aux, bl, driver_pwm_freq_hz, edp_dpcd); + if (ret < 0) + return ret; + } ret = drm_edp_backlight_probe_state(aux, bl, current_mode); if (ret < 0) @@ -4367,7 +4409,7 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux) { struct dp_aux_backlight *bl; struct backlight_properties props = { 0 }; - u16 current_level; + u32 current_level; u8 current_mode; u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; int ret; @@ -4391,8 +4433,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux) bl->aux = aux; - ret = drm_edp_backlight_init(aux, &bl->info, 0, edp_dpcd, - ¤t_level, ¤t_mode); + ret = drm_edp_backlight_init(aux, &bl->info, 0, 0, edp_dpcd, + ¤t_level, ¤t_mode, false); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c index 8a4ef5438f35..43f13a7c79b9 100644 --- a/drivers/gpu/drm/display/drm_dp_tunnel.c +++ b/drivers/gpu/drm/display/drm_dp_tunnel.c @@ -1921,7 +1921,7 @@ drm_dp_tunnel_mgr_create(struct drm_device *dev, int max_group_count) } #ifdef CONFIG_DRM_DISPLAY_DP_TUNNEL_STATE_DEBUG - ref_tracker_dir_init(&mgr->ref_tracker, 16, "dptun"); + ref_tracker_dir_init(&mgr->ref_tracker, 16, "drm_dptun"); #endif for (i = 0; i < max_group_count; i++) { diff --git a/drivers/gpu/drm/display/drm_hdmi_cec_helper.c b/drivers/gpu/drm/display/drm_hdmi_cec_helper.c index b4273c3522fa..3651ad0f76e0 100644 --- a/drivers/gpu/drm/display/drm_hdmi_cec_helper.c +++ b/drivers/gpu/drm/display/drm_hdmi_cec_helper.c @@ -69,7 +69,7 @@ static void drm_connector_hdmi_cec_adapter_unregister(struct drm_device *dev, vo struct drm_connector *connector = res; struct drm_connector_hdmi_cec_data *data = connector->cec.data; - cec_delete_adapter(data->adapter); + cec_unregister_adapter(data->adapter); if (data->funcs->uninit) data->funcs->uninit(connector); diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index d6ce7b4c019f..4bde00083047 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -295,6 +295,11 @@ EXPORT_SYMBOL(__devm_drm_bridge_alloc); */ void drm_bridge_add(struct drm_bridge *bridge) { + if 
(!bridge->container) + DRM_WARN("DRM bridge corrupted or not allocated by devm_drm_bridge_alloc()\n"); + + drm_bridge_get(bridge); + mutex_init(&bridge->hpd_mutex); if (bridge->ops & DRM_BRIDGE_OP_HDMI) @@ -342,6 +347,8 @@ void drm_bridge_remove(struct drm_bridge *bridge) mutex_unlock(&bridge_lock); mutex_destroy(&bridge->hpd_mutex); + + drm_bridge_put(bridge); } EXPORT_SYMBOL(drm_bridge_remove); @@ -407,11 +414,17 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge, if (!encoder || !bridge) return -EINVAL; - if (previous && (!previous->dev || previous->encoder != encoder)) - return -EINVAL; + drm_bridge_get(bridge); - if (bridge->dev) - return -EBUSY; + if (previous && (!previous->dev || previous->encoder != encoder)) { + ret = -EINVAL; + goto err_put_bridge; + } + + if (bridge->dev) { + ret = -EBUSY; + goto err_put_bridge; + } bridge->dev = encoder->dev; bridge->encoder = encoder; @@ -460,6 +473,8 @@ err_reset_bridge: "failed to attach bridge %pOF to encoder %s\n", bridge->of_node, encoder->name); +err_put_bridge: + drm_bridge_put(bridge); return ret; } EXPORT_SYMBOL(drm_bridge_attach); @@ -480,6 +495,7 @@ void drm_bridge_detach(struct drm_bridge *bridge) list_del(&bridge->chain_node); bridge->dev = NULL; + drm_bridge_put(bridge); } /** @@ -1211,6 +1227,7 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check); /** * drm_bridge_detect - check if anything is attached to the bridge output * @bridge: bridge control structure + * @connector: attached connector * * If the bridge supports output detection, as reported by the * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the @@ -1221,12 +1238,13 @@ EXPORT_SYMBOL(drm_atomic_bridge_chain_check); * The detection status on success, or connector_status_unknown if the bridge * doesn't support output detection. */ -enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge) +enum drm_connector_status +drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { if (!(bridge->ops & DRM_BRIDGE_OP_DETECT)) return connector_status_unknown; - return bridge->funcs->detect(bridge); + return bridge->funcs->detect(bridge, connector); } EXPORT_SYMBOL_GPL(drm_bridge_detect); diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index a1e652b7631d..a94061f373de 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -406,6 +406,49 @@ drm_get_buddy(struct drm_buddy_block *block) EXPORT_SYMBOL(drm_get_buddy); /** + * drm_buddy_reset_clear - reset blocks clear state + * + * @mm: DRM buddy manager + * @is_clear: blocks clear state + * + * Reset the clear state based on @is_clear value for each block + * in the freelist. 
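The drm_bridge.c hunks above pin a bridge with drm_bridge_get()/drm_bridge_put() for as long as it is registered or attached, so the new err_put_bridge label exists to make every early exit from attach drop exactly the reference it took. The invariant, condensed into a sketch (not the full function):

int my_attach(struct drm_encoder *encoder, struct drm_bridge *bridge)
{
        int ret;

        drm_bridge_get(bridge);         /* taken before any checks */

        if (bridge->dev) {              /* already attached elsewhere */
                ret = -EBUSY;
                goto err_put_bridge;
        }

        bridge->dev = encoder->dev;
        /* ... funcs->attach(), chain insertion ... */

        return 0;

err_put_bridge:
        drm_bridge_put(bridge);         /* each failure path drops the ref once */
        return ret;
}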
+ */ +void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) +{ + u64 root_size, size, start; + unsigned int order; + int i; + + size = mm->size; + for (i = 0; i < mm->n_roots; ++i) { + order = ilog2(size) - ilog2(mm->chunk_size); + start = drm_buddy_block_offset(mm->roots[i]); + __force_merge(mm, start, start + size, order); + + root_size = mm->chunk_size << order; + size -= root_size; + } + + for (i = 0; i <= mm->max_order; ++i) { + struct drm_buddy_block *block; + + list_for_each_entry_reverse(block, &mm->free_list[i], link) { + if (is_clear != drm_buddy_block_is_clear(block)) { + if (is_clear) { + mark_cleared(block); + mm->clear_avail += drm_buddy_block_size(mm, block); + } else { + clear_reset(block); + mm->clear_avail -= drm_buddy_block_size(mm, block); + } + } + } + } +} +EXPORT_SYMBOL(drm_buddy_reset_clear); + +/** * drm_buddy_free_block - free a block * * @mm: DRM buddy manager diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 7051c9c909c2..ea1d2d5d2c66 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -93,8 +93,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages) return; } - if (wbinvd_on_all_cpus()) - pr_err("Timed out waiting for cache flush\n"); + wbinvd_on_all_cpus(); #elif defined(__powerpc__) unsigned long i; @@ -139,8 +138,7 @@ drm_clflush_sg(struct sg_table *st) return; } - if (wbinvd_on_all_cpus()) - pr_err("Timed out waiting for cache flush\n"); + wbinvd_on_all_cpus(); #else WARN_ONCE(1, "Architecture has no drm_cache.c support\n"); #endif @@ -172,8 +170,7 @@ drm_clflush_virt_range(void *addr, unsigned long length) return; } - if (wbinvd_on_all_cpus()) - pr_err("Timed out waiting for cache flush\n"); + wbinvd_on_all_cpus(); #else WARN_ONCE(1, "Architecture has no drm_cache.c support\n"); #endif diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c index abceb28b23fc..365cf337529f 100644 --- a/drivers/gpu/drm/drm_debugfs.c +++ b/drivers/gpu/drm/drm_debugfs.c @@ -44,6 +44,9 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" +static struct dentry *accel_debugfs_root; +static struct dentry *drm_debugfs_root; + /*************************************************** * Initialization, etc. **************************************************/ @@ -287,16 +290,120 @@ int drm_debugfs_remove_files(const struct drm_info_list *files, int count, } EXPORT_SYMBOL(drm_debugfs_remove_files); +void drm_debugfs_bridge_params(void) +{ + drm_bridge_debugfs_params(drm_debugfs_root); +} + +void drm_debugfs_init_root(void) +{ + drm_debugfs_root = debugfs_create_dir("dri", NULL); +#if IS_ENABLED(CONFIG_DRM_ACCEL) + accel_debugfs_root = debugfs_create_dir("accel", NULL); +#endif +} + +void drm_debugfs_remove_root(void) +{ +#if IS_ENABLED(CONFIG_DRM_ACCEL) + debugfs_remove(accel_debugfs_root); +#endif + debugfs_remove(drm_debugfs_root); +} + +static int drm_debugfs_proc_info_show(struct seq_file *m, void *unused) +{ + struct pid *pid; + struct task_struct *task; + struct drm_file *file = m->private; + + if (!file) + return -EINVAL; + + rcu_read_lock(); + pid = rcu_dereference(file->pid); + task = pid_task(pid, PIDTYPE_TGID); + + seq_printf(m, "pid: %d\n", task ? task->pid : 0); + seq_printf(m, "comm: %s\n", task ? 
task->comm : "Unset"); + rcu_read_unlock(); + return 0; +} + +static int drm_debufs_proc_info_open(struct inode *inode, struct file *file) +{ + return single_open(file, drm_debugfs_proc_info_show, inode->i_private); +} + +static const struct file_operations drm_debugfs_proc_info_fops = { + .owner = THIS_MODULE, + .open = drm_debufs_proc_info_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/** + * drm_debugfs_clients_add - Add a per client debugfs directory + * @file: drm_file for a client + * + * Create the debugfs directory for each client. This will be used to populate + * driver specific data for each client. + * + * Also add the process information debugfs file for each client to tag + * which client belongs to which process. + */ +void drm_debugfs_clients_add(struct drm_file *file) +{ + char *client; + + client = kasprintf(GFP_KERNEL, "client-%llu", file->client_id); + if (!client) + return; + + /* Create a debugfs directory for the client in root on drm debugfs */ + file->debugfs_client = debugfs_create_dir(client, drm_debugfs_root); + kfree(client); + + debugfs_create_file("proc_info", 0444, file->debugfs_client, file, + &drm_debugfs_proc_info_fops); + + client = kasprintf(GFP_KERNEL, "../%s", file->minor->dev->unique); + if (!client) + return; + + /* Create a link from client_id to the drm device this client id belongs to */ + debugfs_create_symlink("device", file->debugfs_client, client); + kfree(client); +} + +/** + * drm_debugfs_clients_remove - removes all debugfs directories and files + * @file: drm_file for a client + * + * Removes the debugfs directories recursively from the client directory. + * + * There is also a possibility that debugfs files are open while the drm_file + * is released. + */ +void drm_debugfs_clients_remove(struct drm_file *file) +{ + debugfs_remove_recursive(file->debugfs_client); + file->debugfs_client = NULL; +} + /** * drm_debugfs_dev_init - create debugfs directory for the device * @dev: the device which we want to create the directory for - * @root: the parent directory depending on the device type * * Creates the debugfs directory for the device under the given root directory. 
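With drm_debugfs_clients_add() above, every drm_file of a non-accel device gets /sys/kernel/debug/dri/client-<id>/ holding proc_info and a device symlink. Since the directory exists before the driver's ->open callback runs, a driver could drop its own per-client files into file->debugfs_client; a hedged sketch, where my_stats_fops is illustrative:

#include <linux/debugfs.h>

static int my_driver_open(struct drm_device *dev, struct drm_file *file)
{
        /* Only non-accel devices get the per-client directory. */
        if (file->debugfs_client)
                debugfs_create_file("my_stats", 0444, file->debugfs_client,
                                    file->driver_priv, &my_stats_fops);

        return 0;
}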
*/ -void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root) +void drm_debugfs_dev_init(struct drm_device *dev) { - dev->debugfs_root = debugfs_create_dir(dev->unique, root); + if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) + dev->debugfs_root = debugfs_create_dir(dev->unique, accel_debugfs_root); + else + dev->debugfs_root = debugfs_create_dir(dev->unique, drm_debugfs_root); } /** @@ -323,14 +430,13 @@ void drm_debugfs_dev_register(struct drm_device *dev) drm_atomic_debugfs_init(dev); } -int drm_debugfs_register(struct drm_minor *minor, int minor_id, - struct dentry *root) +int drm_debugfs_register(struct drm_minor *minor, int minor_id) { struct drm_device *dev = minor->dev; char name[64]; sprintf(name, "%d", minor_id); - minor->debugfs_symlink = debugfs_create_symlink(name, root, + minor->debugfs_symlink = debugfs_create_symlink(name, drm_debugfs_root, dev->unique); /* TODO: Only for compatibility with drivers */ diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 02556363e918..cdd591b11488 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -72,8 +72,6 @@ DEFINE_XARRAY_ALLOC(drm_minors_xa); */ static bool drm_core_init_complete; -static struct dentry *drm_debugfs_root; - DEFINE_STATIC_SRCU(drm_unplug_srcu); /* @@ -186,8 +184,7 @@ static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type) return 0; if (minor->type != DRM_MINOR_ACCEL) { - ret = drm_debugfs_register(minor, minor->index, - drm_debugfs_root); + ret = drm_debugfs_register(minor, minor->index); if (ret) { DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n"); goto err_debugfs; @@ -787,10 +784,7 @@ static int drm_dev_init(struct drm_device *dev, goto err; } - if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) - accel_debugfs_init(dev); - else - drm_debugfs_dev_init(dev, drm_debugfs_root); + drm_debugfs_dev_init(dev); return 0; @@ -1230,7 +1224,7 @@ static void drm_core_exit(void) drm_panic_exit(); accel_core_exit(); unregister_chrdev(DRM_MAJOR, "drm"); - debugfs_remove(drm_debugfs_root); + drm_debugfs_remove_root(); drm_sysfs_destroy(); WARN_ON(!xa_empty(&drm_minors_xa)); drm_connector_ida_destroy(); @@ -1249,8 +1243,8 @@ static int __init drm_core_init(void) goto error; } - drm_debugfs_root = debugfs_create_dir("dri", NULL); - drm_bridge_debugfs_params(drm_debugfs_root); + drm_debugfs_init_root(); + drm_debugfs_bridge_params(); ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops); if (ret < 0) diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index 3952e27447ee..eebd1a05ee97 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -46,6 +46,7 @@ #include <drm/drm_file.h> #include <drm/drm_gem.h> #include <drm/drm_print.h> +#include <drm/drm_debugfs.h> #include "drm_crtc_internal.h" #include "drm_internal.h" @@ -168,6 +169,9 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor) drm_prime_init_file_private(&file->prime); + if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) + drm_debugfs_clients_add(file); + if (dev->driver->open) { ret = dev->driver->open(dev, file); if (ret < 0) @@ -182,6 +186,10 @@ out_prime_destroy: drm_syncobj_release(file); if (drm_core_check_feature(dev, DRIVER_GEM)) drm_gem_release(dev, file); + + if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) + drm_debugfs_clients_remove(file); + put_pid(rcu_access_pointer(file->pid)); kfree(file); @@ -236,6 +244,9 @@ void drm_file_free(struct drm_file *file) (long)old_encode_dev(file->minor->kdev->devt), 
atomic_read(&dev->open_count)); + if (!drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) + drm_debugfs_clients_remove(file); + drm_events_release(file); if (drm_core_check_feature(dev, DRIVER_MODESET)) { diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 99d9f7bbc261..8f3daf38ca63 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -559,18 +559,6 @@ static void drm_fb_xrgb8888_to_rgb565_line(void *dbuf, const void *sbuf, unsigne drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565); } -static __always_inline u32 drm_xrgb8888_to_rgb565_swab(u32 pix) -{ - return swab16(drm_pixel_xrgb8888_to_rgb565(pix)); -} - -/* TODO: implement this helper as conversion to RGB565|BIG_ENDIAN */ -static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf, - unsigned int pixels) -{ - drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_xrgb8888_to_rgb565_swab); -} - /** * drm_fb_xrgb8888_to_rgb565 - Convert XRGB8888 to RGB565 clip buffer * @dst: Array of RGB565 destination buffers @@ -580,7 +568,6 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf, * @fb: DRM framebuffer * @clip: Clip rectangle area to copy * @state: Transform and conversion state - * @swab: Swap bytes * * This function copies parts of a framebuffer to display memory and converts the * color format during the process. Destination and framebuffer formats must match. The @@ -595,23 +582,56 @@ static void drm_fb_xrgb8888_to_rgb565_swab_line(void *dbuf, const void *sbuf, */ void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch, const struct iosys_map *src, const struct drm_framebuffer *fb, - const struct drm_rect *clip, struct drm_format_conv_state *state, - bool swab) + const struct drm_rect *clip, struct drm_format_conv_state *state) { static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { 2, }; - void (*xfrm_line)(void *dbuf, const void *sbuf, unsigned int npixels); + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, + drm_fb_xrgb8888_to_rgb565_line); +} +EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565); + +static void drm_fb_xrgb8888_to_rgb565be_line(void *dbuf, const void *sbuf, + unsigned int pixels) +{ + drm_fb_xfrm_line_32to16(dbuf, sbuf, pixels, drm_pixel_xrgb8888_to_rgb565be); +} - if (swab) - xfrm_line = drm_fb_xrgb8888_to_rgb565_swab_line; - else - xfrm_line = drm_fb_xrgb8888_to_rgb565_line; +/** + * drm_fb_xrgb8888_to_rgb565be - Convert XRGB8888 to RGB565|DRM_FORMAT_BIG_ENDIAN clip buffer + * @dst: Array of RGB565BE destination buffers + * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines + * within @dst; can be NULL if scanlines are stored next to each other. + * @src: Array of XRGB8888 source buffer + * @fb: DRM framebuffer + * @clip: Clip rectangle area to copy + * @state: Transform and conversion state + * + * This function copies parts of a framebuffer to display memory and converts the + * color format during the process. Destination and framebuffer formats must match. The + * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at + * least as many entries as there are planes in @fb's format. Each entry stores the + * value for the format's respective color plane at the same index. + * + * This function does not apply clipping on @dst (i.e. the destination is at the + * top-left corner). + * + * Drivers can use this function for RGB565BE devices that don't support XRGB8888 natively. 
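The swab bool is gone from drm_fb_xrgb8888_to_rgb565(); callers that passed swab=true switch to the explicit big-endian helper, which packs to RGB565 and byte-swaps (e.g. XRGB8888 0x00ff8800 packs to 0xfc40 and is emitted as 0x40fc). Sketch of the migration at a hypothetical call site:

#include <drm/drm_format_helper.h>

static void my_blit_rgb565be(struct iosys_map *dst, const struct iosys_map *src,
                             const struct drm_framebuffer *fb,
                             const struct drm_rect *clip,
                             struct drm_format_conv_state *state)
{
        /* was: drm_fb_xrgb8888_to_rgb565(dst, NULL, src, fb, clip, state, true); */
        drm_fb_xrgb8888_to_rgb565be(dst, NULL, src, fb, clip, state);
}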
+ */ +void drm_fb_xrgb8888_to_rgb565be(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state) +{ + static const u8 dst_pixsize[DRM_FORMAT_MAX_PLANES] = { + 2, + }; - drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, xfrm_line); + drm_fb_xfrm(dst, dst_pitch, dst_pixsize, src, fb, clip, false, state, + drm_fb_xrgb8888_to_rgb565be_line); } -EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565); +EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb565be); static void drm_fb_xrgb8888_to_xrgb1555_line(void *dbuf, const void *sbuf, unsigned int pixels) { @@ -1188,7 +1208,7 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d return 0; } else if (fb_format == DRM_FORMAT_XRGB8888) { if (dst_format == DRM_FORMAT_RGB565) { - drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state, false); + drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, state); return 0; } else if (dst_format == DRM_FORMAT_XRGB1555) { drm_fb_xrgb8888_to_xrgb1555(dst, dst_pitch, src, fb, clip, state); diff --git a/drivers/gpu/drm/drm_format_internal.h b/drivers/gpu/drm/drm_format_internal.h index 9428d5cfebc5..ce29dd05bcc5 100644 --- a/drivers/gpu/drm/drm_format_internal.h +++ b/drivers/gpu/drm/drm_format_internal.h @@ -5,6 +5,7 @@ #include <linux/bits.h> #include <linux/types.h> +#include <linux/swab.h> /* * Each pixel-format conversion helper takes a raw pixel in a @@ -59,6 +60,11 @@ static inline u32 drm_pixel_xrgb8888_to_rgb565(u32 pix) ((pix & 0x000000f8) >> 3); } +static inline u32 drm_pixel_xrgb8888_to_rgb565be(u32 pix) +{ + return swab16(drm_pixel_xrgb8888_to_rgb565(pix)); +} + static inline u32 drm_pixel_xrgb8888_to_rgbx5551(u32 pix) { return ((pix & 0x00f80000) >> 8) | diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c index 55ddd99fd7f6..e0d533611040 100644 --- a/drivers/gpu/drm/drm_fourcc.c +++ b/drivers/gpu/drm/drm_fourcc.c @@ -238,6 +238,14 @@ const struct drm_format_info *__drm_format_info(u32 format) { .format = DRM_FORMAT_ABGR2101010, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_RGBA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_BGRA1010102, .depth = 30, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, + { .format = DRM_FORMAT_RGB161616, .depth = 0, + .num_planes = 1, .char_per_block = { 6, 0, 0 }, + .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = false }, + { .format = DRM_FORMAT_BGR161616, .depth = 0, + .num_planes = 1, .char_per_block = { 6, 0, 0 }, + .block_w = { 1, 0, 0 }, .block_h = { 1, 0, 0 }, + .hsub = 1, .vsub = 1, .has_alpha = false }, { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, { .format = DRM_FORMAT_RGBA8888, .depth = 32, .num_planes = 1, .cpp = { 4, 0, 0 }, .hsub = 1, .vsub = 1, .has_alpha = true }, @@ -409,7 +417,8 @@ EXPORT_SYMBOL(drm_format_info); /** * drm_get_format_info - query information for a given framebuffer configuration * @dev: DRM device - * @mode_cmd: metadata from the userspace fb creation request + * @pixel_format: pixel format (DRM_FORMAT_*) + * @modifier: modifier * * Returns: * 
The instance of struct drm_format_info that describes the pixel format, or @@ -417,15 +426,16 @@ EXPORT_SYMBOL(drm_format_info); */ const struct drm_format_info * drm_get_format_info(struct drm_device *dev, - const struct drm_mode_fb_cmd2 *mode_cmd) + u32 pixel_format, u64 modifier) { const struct drm_format_info *info = NULL; if (dev->mode_config.funcs->get_format_info) - info = dev->mode_config.funcs->get_format_info(mode_cmd); + info = dev->mode_config.funcs->get_format_info(pixel_format, + modifier); if (!info) - info = drm_format_info(mode_cmd->pixel_format); + info = drm_format_info(pixel_format); return info; } diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index b781601946db..adbb73f00d68 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -153,18 +153,11 @@ int drm_mode_addfb_ioctl(struct drm_device *dev, } static int framebuffer_check(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *r) { - const struct drm_format_info *info; int i; - /* check if the format is supported at all */ - if (!__drm_format_info(r->pixel_format)) { - drm_dbg_kms(dev, "bad framebuffer format %p4cc\n", - &r->pixel_format); - return -EINVAL; - } - if (r->width == 0) { drm_dbg_kms(dev, "bad framebuffer width %u\n", r->width); return -EINVAL; @@ -175,9 +168,6 @@ static int framebuffer_check(struct drm_device *dev, return -EINVAL; } - /* now let the driver pick its own format info */ - info = drm_get_format_info(dev, r); - for (i = 0; i < info->num_planes; i++) { unsigned int width = drm_format_info_plane_width(info, r->width, i); unsigned int height = drm_format_info_plane_height(info, r->height, i); @@ -272,6 +262,7 @@ drm_internal_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv) { struct drm_mode_config *config = &dev->mode_config; + const struct drm_format_info *info; struct drm_framebuffer *fb; int ret; @@ -297,11 +288,21 @@ drm_internal_framebuffer_create(struct drm_device *dev, return ERR_PTR(-EINVAL); } - ret = framebuffer_check(dev, r); + /* check if the format is supported at all */ + if (!__drm_format_info(r->pixel_format)) { + drm_dbg_kms(dev, "bad framebuffer format %p4cc\n", + &r->pixel_format); + return ERR_PTR(-EINVAL); + } + + /* now let the driver pick its own format info */ + info = drm_get_format_info(dev, r->pixel_format, r->modifier[0]); + + ret = framebuffer_check(dev, info, r); if (ret) return ERR_PTR(ret); - fb = dev->mode_config.funcs->fb_create(dev, file_priv, r); + fb = dev->mode_config.funcs->fb_create(dev, file_priv, info, r); if (IS_ERR(fb)) { drm_dbg_kms(dev, "could not create framebuffer\n"); return fb; @@ -862,11 +863,23 @@ EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_framebuffer_free); int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs) { + unsigned int i; int ret; + bool exists; if (WARN_ON_ONCE(fb->dev != dev || !fb->format)) return -EINVAL; + for (i = 0; i < fb->format->num_planes; i++) { + if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))) + fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + if (fb->obj[i]) { + exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]); + if (exists) + fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + } + } + INIT_LIST_HEAD(&fb->filp_head); fb->funcs = funcs; @@ -875,7 +888,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, ret = __drm_mode_object_add(dev, 
&fb->base, DRM_MODE_OBJECT_FB,
				     false, drm_framebuffer_free);
 	if (ret)
-		goto out;
+		goto err;
 
 	mutex_lock(&dev->mode_config.fb_lock);
 	dev->mode_config.num_fb++;
@@ -883,7 +896,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb,
 	mutex_unlock(&dev->mode_config.fb_lock);
 	drm_mode_object_register(dev, &fb->base);
-out:
+
+	return 0;
+
+err:
+	for (i = 0; i < fb->format->num_planes; i++) {
+		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) {
+			drm_gem_object_handle_put_unlocked(fb->obj[i]);
+			fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
+		}
+	}
 	return ret;
 }
 EXPORT_SYMBOL(drm_framebuffer_init);
@@ -960,6 +982,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
 void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
 {
 	struct drm_device *dev = fb->dev;
+	unsigned int i;
+
+	for (i = 0; i < fb->format->num_planes; i++) {
+		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))
+			drm_gem_object_handle_put_unlocked(fb->obj[i]);
+	}
 
 	mutex_lock(&dev->mode_config.fb_lock);
 	list_del(&fb->head);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 0905ef6786e9..6a44351e58b7 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -213,6 +213,46 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj)
 }
 EXPORT_SYMBOL(drm_gem_private_object_fini);
 
+static void drm_gem_object_handle_get(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock));
+
+	if (obj->handle_count++ == 0)
+		drm_gem_object_get(obj);
+}
+
+/**
+ * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
+ * @obj: GEM object
+ *
+ * Acquires a reference on the GEM buffer object's handle. Required to keep
+ * the GEM object alive. Call drm_gem_object_handle_put_unlocked()
+ * to release the reference. Does nothing if the buffer object has no handle.
+ *
+ * Returns:
+ * True if a handle exists, or false otherwise
+ */
+bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	guard(mutex)(&dev->object_name_lock);
+
+	/*
+	 * First ref taken during GEM object creation, if any. Some
+	 * drivers set up internal framebuffers with GEM objects that
+	 * do not have a GEM handle. Hence, this counter can be zero.
+	 */
+	if (!obj->handle_count)
+		return false;
+
+	drm_gem_object_handle_get(obj);
+
+	return true;
+}
+
 /**
  * drm_gem_object_handle_free - release resources bound to userspace handles
  * @obj: GEM object to clean up.
@@ -243,20 +283,26 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
 	}
 }
 
-static void
-drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
+/**
+ * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
+ * @obj: GEM object
+ *
+ * Releases a reference on the GEM buffer object's handle. Possibly releases
+ * the GEM buffer object and associated dma-buf objects.
+ */ +void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj) { struct drm_device *dev = obj->dev; bool final = false; - if (WARN_ON(READ_ONCE(obj->handle_count) == 0)) + if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0)) return; /* - * Must bump handle count first as this may be the last - * ref, in which case the object would disappear before we - * checked for a name - */ + * Must bump handle count first as this may be the last + * ref, in which case the object would disappear before + * we checked for a name. + */ mutex_lock(&dev->object_name_lock); if (--obj->handle_count == 0) { @@ -280,6 +326,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data) struct drm_file *file_priv = data; struct drm_gem_object *obj = ptr; + if (drm_WARN_ON(obj->dev, !data)) + return 0; + if (obj->funcs->close) obj->funcs->close(obj, file_priv); @@ -390,8 +439,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, int ret; WARN_ON(!mutex_is_locked(&dev->object_name_lock)); - if (obj->handle_count++ == 0) - drm_gem_object_get(obj); + + drm_gem_object_handle_get(obj); /* * Get the user-visible handle using idr. Preload and perform @@ -400,7 +449,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, idr_preload(GFP_KERNEL); spin_lock(&file_priv->table_lock); - ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); + ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT); spin_unlock(&file_priv->table_lock); idr_preload_end(); @@ -421,6 +470,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, goto err_revoke; } + /* mirrors drm_gem_handle_delete to avoid races */ + spin_lock(&file_priv->table_lock); + obj = idr_replace(&file_priv->object_idr, obj, handle); + WARN_ON(obj != NULL); + spin_unlock(&file_priv->table_lock); *handlep = handle; return 0; diff --git a/drivers/gpu/drm/drm_gem_dma_helper.c b/drivers/gpu/drm/drm_gem_dma_helper.c index b7f033d4352a..4f0320df858f 100644 --- a/drivers/gpu/drm/drm_gem_dma_helper.c +++ b/drivers/gpu/drm/drm_gem_dma_helper.c @@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj) if (drm_gem_is_imported(gem_obj)) { if (dma_obj->vaddr) - dma_buf_vunmap_unlocked(gem_obj->dma_buf, &map); + dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map); drm_prime_gem_destroy(gem_obj, dma_obj->sgt); } else if (dma_obj->vaddr) { if (dma_obj->map_noncoherent) diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 618ce725cd75..4bc89d33df59 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -68,6 +68,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_get_obj); static int drm_gem_fb_init(struct drm_device *dev, struct drm_framebuffer *fb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **obj, unsigned int num_planes, const struct drm_framebuffer_funcs *funcs) @@ -75,7 +76,7 @@ drm_gem_fb_init(struct drm_device *dev, unsigned int i; int ret; - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); for (i = 0; i < num_planes; i++) fb->obj[i] = obj[i]; @@ -136,6 +137,7 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle); * @dev: DRM device * @fb: framebuffer object * @file: DRM file that holds the GEM handle(s) backing the framebuffer + * @info: pixel format information * @mode_cmd: Metadata from the userspace framebuffer creation request * @funcs: vtable to be used for the new framebuffer object * @@ -152,20 
+154,14 @@ EXPORT_SYMBOL(drm_gem_fb_create_handle); int drm_gem_fb_init_with_funcs(struct drm_device *dev, struct drm_framebuffer *fb, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, const struct drm_framebuffer_funcs *funcs) { - const struct drm_format_info *info; struct drm_gem_object *objs[DRM_FORMAT_MAX_PLANES]; unsigned int i; int ret; - info = drm_get_format_info(dev, mode_cmd); - if (!info) { - drm_dbg_kms(dev, "Failed to get FB format info\n"); - return -EINVAL; - } - if (drm_drv_uses_atomic_modeset(dev) && !drm_any_plane_has_format(dev, mode_cmd->pixel_format, mode_cmd->modifier[0])) { @@ -200,7 +196,7 @@ int drm_gem_fb_init_with_funcs(struct drm_device *dev, } } - ret = drm_gem_fb_init(dev, fb, mode_cmd, objs, i, funcs); + ret = drm_gem_fb_init(dev, fb, info, mode_cmd, objs, i, funcs); if (ret) goto err_gem_object_put; @@ -221,6 +217,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs); * callback * @dev: DRM device * @file: DRM file that holds the GEM handle(s) backing the framebuffer + * @info: pixel format information * @mode_cmd: Metadata from the userspace framebuffer creation request * @funcs: vtable to be used for the new framebuffer object * @@ -233,6 +230,7 @@ EXPORT_SYMBOL_GPL(drm_gem_fb_init_with_funcs); */ struct drm_framebuffer * drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, const struct drm_framebuffer_funcs *funcs) { @@ -243,7 +241,7 @@ drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file, if (!fb) return ERR_PTR(-ENOMEM); - ret = drm_gem_fb_init_with_funcs(dev, fb, file, mode_cmd, funcs); + ret = drm_gem_fb_init_with_funcs(dev, fb, file, info, mode_cmd, funcs); if (ret) { kfree(fb); return ERR_PTR(ret); @@ -263,6 +261,7 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = { * &drm_mode_config_funcs.fb_create callback * @dev: DRM device * @file: DRM file that holds the GEM handle(s) backing the framebuffer + * @info: pixel format information * @mode_cmd: Metadata from the userspace framebuffer creation request * * This function creates a new framebuffer object described by @@ -282,9 +281,10 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs = { */ struct drm_framebuffer * drm_gem_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { - return drm_gem_fb_create_with_funcs(dev, file, mode_cmd, + return drm_gem_fb_create_with_funcs(dev, file, info, mode_cmd, &drm_gem_fb_funcs); } EXPORT_SYMBOL_GPL(drm_gem_fb_create); @@ -300,6 +300,7 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs_dirtyfb = { * &drm_mode_config_funcs.fb_create callback * @dev: DRM device * @file: DRM file that holds the GEM handle(s) backing the framebuffer + * @info: pixel format information * @mode_cmd: Metadata from the userspace framebuffer creation request * * This function creates a new framebuffer object described by @@ -320,9 +321,10 @@ static const struct drm_framebuffer_funcs drm_gem_fb_funcs_dirtyfb = { */ struct drm_framebuffer * drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { - return drm_gem_fb_create_with_funcs(dev, file, mode_cmd, + return drm_gem_fb_create_with_funcs(dev, file, info, mode_cmd, &drm_gem_fb_funcs_dirtyfb); } EXPORT_SYMBOL_GPL(drm_gem_fb_create_with_dirty); @@ -420,6 
+422,7 @@ EXPORT_SYMBOL(drm_gem_fb_vunmap); static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir, unsigned int num_planes) { + struct dma_buf_attachment *import_attach; struct drm_gem_object *obj; int ret; @@ -428,9 +431,10 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat obj = drm_gem_fb_get_obj(fb, num_planes); if (!obj) continue; + import_attach = obj->import_attach; if (!drm_gem_is_imported(obj)) continue; - ret = dma_buf_end_cpu_access(obj->dma_buf, dir); + ret = dma_buf_end_cpu_access(import_attach->dmabuf, dir); if (ret) drm_err(fb->dev, "dma_buf_end_cpu_access(%u, %d) failed: %d\n", ret, num_planes, dir); @@ -453,6 +457,7 @@ static void __drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_dat */ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direction dir) { + struct dma_buf_attachment *import_attach; struct drm_gem_object *obj; unsigned int i; int ret; @@ -463,9 +468,10 @@ int drm_gem_fb_begin_cpu_access(struct drm_framebuffer *fb, enum dma_data_direct ret = -EINVAL; goto err___drm_gem_fb_end_cpu_access; } + import_attach = obj->import_attach; if (!drm_gem_is_imported(obj)) continue; - ret = dma_buf_begin_cpu_access(obj->dma_buf, dir); + ret = dma_buf_begin_cpu_access(import_attach->dmabuf, dir); if (ret) goto err___drm_gem_fb_end_cpu_access; } @@ -498,12 +504,9 @@ EXPORT_SYMBOL(drm_gem_fb_end_cpu_access); // TODO Drop this function and replace by drm_format_info_bpp() once all // DRM_FORMAT_* provide proper block info in drivers/gpu/drm/drm_fourcc.c static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { - const struct drm_format_info *info; - - info = drm_get_format_info(dev, mode_cmd); - switch (info->format) { case DRM_FORMAT_YUV420_8BIT: return 12; @@ -517,6 +520,7 @@ static __u32 drm_gem_afbc_get_bpp(struct drm_device *dev, } static int drm_gem_afbc_min_size(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_afbc_framebuffer *afbc_fb) { @@ -557,7 +561,7 @@ static int drm_gem_afbc_min_size(struct drm_device *dev, afbc_fb->aligned_height = ALIGN(mode_cmd->height, h_alignment); afbc_fb->offset = mode_cmd->offsets[0]; - bpp = drm_gem_afbc_get_bpp(dev, mode_cmd); + bpp = drm_gem_afbc_get_bpp(dev, info, mode_cmd); if (!bpp) { drm_dbg_kms(dev, "Invalid AFBC bpp value: %d\n", bpp); return -EINVAL; @@ -579,6 +583,7 @@ static int drm_gem_afbc_min_size(struct drm_device *dev, * * @dev: DRM device * @afbc_fb: afbc-specific framebuffer + * @info: pixel format information * @mode_cmd: Metadata from the userspace framebuffer creation request * @afbc_fb: afbc framebuffer * @@ -592,19 +597,16 @@ static int drm_gem_afbc_min_size(struct drm_device *dev, * Zero on success or a negative error value on failure. 
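The begin/end helpers changed above bracket CPU access to framebuffers that may be backed by imported dma-bufs. A minimal sketch of how a driver's damage-handling path might use them, assuming a hypothetical driver_cpu_blit() that touches the buffers with the CPU (the drm_gem_fb_* calls follow the signatures visible in this patch):

	static int driver_fb_dirty(struct drm_framebuffer *fb,
				   const struct drm_rect *clip)
	{
		int ret;

		/* Make imported dma-bufs coherent for CPU access. */
		ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
		if (ret)
			return ret;

		driver_cpu_blit(fb, clip);	/* hypothetical CPU copy */

		/* Hand the buffers back to the device. */
		drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);

		return 0;
	}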
*/ int drm_gem_fb_afbc_init(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_afbc_framebuffer *afbc_fb) { - const struct drm_format_info *info; struct drm_gem_object **objs; int ret; objs = afbc_fb->base.obj; - info = drm_get_format_info(dev, mode_cmd); - if (!info) - return -EINVAL; - ret = drm_gem_afbc_min_size(dev, mode_cmd, afbc_fb); + ret = drm_gem_afbc_min_size(dev, info, mode_cmd, afbc_fb); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 8ac0b1fa5287..5d1349c34afd 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -351,7 +351,7 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, dma_resv_assert_held(obj->resv); if (drm_gem_is_imported(obj)) { - ret = dma_buf_vmap(obj->dma_buf, map); + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); } else { pgprot_t prot = PAGE_KERNEL; @@ -413,7 +413,7 @@ void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, dma_resv_assert_held(obj->resv); if (drm_gem_is_imported(obj)) { - dma_buf_vunmap(obj->dma_buf, map); + dma_buf_vunmap(obj->import_attach->dmabuf, map); } else { dma_resv_assert_held(shmem->base.resv); diff --git a/drivers/gpu/drm/drm_gpusvm.c b/drivers/gpu/drm/drm_gpusvm.c index ab198645d90f..5bb4c77db2c3 100644 --- a/drivers/gpu/drm/drm_gpusvm.c +++ b/drivers/gpu/drm/drm_gpusvm.c @@ -9,10 +9,9 @@ #include <linux/dma-mapping.h> #include <linux/export.h> #include <linux/hmm.h> +#include <linux/hugetlb_inline.h> #include <linux/memremap.h> -#include <linux/migrate.h> #include <linux/mm_types.h> -#include <linux/pagemap.h> #include <linux/slab.h> #include <drm/drm_device.h> @@ -109,21 +108,6 @@ */ /** - * DOC: Migration - * - * The migration support is quite simple, allowing migration between RAM and - * device memory at the range granularity. For example, GPU SVM currently does - * not support mixing RAM and device memory pages within a range. This means - * that upon GPU fault, the entire range can be migrated to device memory, and - * upon CPU fault, the entire range is migrated to RAM. Mixed RAM and device - * memory storage within a range could be added in the future if required. - * - * The reasoning for only supporting range granularity is as follows: it - * simplifies the implementation, and range sizes are driver-defined and should - * be relatively small. - */ - -/** * DOC: Partial Unmapping of Ranges * * Partial unmapping of ranges (e.g., 1M out of 2M is unmapped by CPU resulting @@ -192,12 +176,9 @@ * } * * if (driver_migration_policy(range)) { - * mmap_read_lock(mm); - * devmem = driver_alloc_devmem(); - * err = drm_gpusvm_migrate_to_devmem(gpusvm, range, - * devmem_allocation, - * &ctx); - * mmap_read_unlock(mm); + * err = drm_pagemap_populate_mm(driver_choose_drm_pagemap(), + * gpuva_start, gpuva_end, gpusvm->mm, + * ctx->timeslice_ms); * if (err) // CPU mappings may have changed * goto retry; * } @@ -290,97 +271,6 @@ npages_in_range(unsigned long start, unsigned long end) } /** - * struct drm_gpusvm_zdd - GPU SVM zone device data - * - * @refcount: Reference count for the zdd - * @devmem_allocation: device memory allocation - * @device_private_page_owner: Device private pages owner - * - * This structure serves as a generic wrapper installed in - * page->zone_device_data. 
It provides infrastructure for looking up a device - * memory allocation upon CPU page fault and asynchronously releasing device - * memory once the CPU has no page references. Asynchronous release is useful - * because CPU page references can be dropped in IRQ contexts, while releasing - * device memory likely requires sleeping locks. - */ -struct drm_gpusvm_zdd { - struct kref refcount; - struct drm_gpusvm_devmem *devmem_allocation; - void *device_private_page_owner; -}; - -/** - * drm_gpusvm_zdd_alloc() - Allocate a zdd structure. - * @device_private_page_owner: Device private pages owner - * - * This function allocates and initializes a new zdd structure. It sets up the - * reference count and initializes the destroy work. - * - * Return: Pointer to the allocated zdd on success, ERR_PTR() on failure. - */ -static struct drm_gpusvm_zdd * -drm_gpusvm_zdd_alloc(void *device_private_page_owner) -{ - struct drm_gpusvm_zdd *zdd; - - zdd = kmalloc(sizeof(*zdd), GFP_KERNEL); - if (!zdd) - return NULL; - - kref_init(&zdd->refcount); - zdd->devmem_allocation = NULL; - zdd->device_private_page_owner = device_private_page_owner; - - return zdd; -} - -/** - * drm_gpusvm_zdd_get() - Get a reference to a zdd structure. - * @zdd: Pointer to the zdd structure. - * - * This function increments the reference count of the provided zdd structure. - * - * Return: Pointer to the zdd structure. - */ -static struct drm_gpusvm_zdd *drm_gpusvm_zdd_get(struct drm_gpusvm_zdd *zdd) -{ - kref_get(&zdd->refcount); - return zdd; -} - -/** - * drm_gpusvm_zdd_destroy() - Destroy a zdd structure. - * @ref: Pointer to the reference count structure. - * - * This function queues the destroy_work of the zdd for asynchronous destruction. - */ -static void drm_gpusvm_zdd_destroy(struct kref *ref) -{ - struct drm_gpusvm_zdd *zdd = - container_of(ref, struct drm_gpusvm_zdd, refcount); - struct drm_gpusvm_devmem *devmem = zdd->devmem_allocation; - - if (devmem) { - complete_all(&devmem->detached); - if (devmem->ops->devmem_release) - devmem->ops->devmem_release(devmem); - } - kfree(zdd); -} - -/** - * drm_gpusvm_zdd_put() - Put a zdd reference. - * @zdd: Pointer to the zdd structure. - * - * This function decrements the reference count of the provided zdd structure - * and schedules its destruction if the count drops to zero. - */ -static void drm_gpusvm_zdd_put(struct drm_gpusvm_zdd *zdd) -{ - kref_put(&zdd->refcount, drm_gpusvm_zdd_destroy); -} - -/** * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier * @notifier: Pointer to the GPU SVM notifier structure. * @start: Start address of the range @@ -946,7 +836,7 @@ retry: * process-many-malloc' fails. In the failure case, each process * mallocs 16k but the CPU VMA is ~128k which results in 64k SVM * ranges. When migrating the SVM ranges, some processes fail in - * drm_gpusvm_migrate_to_devmem with 'migrate.cpages != npages' + * drm_pagemap_migrate_to_devmem with 'migrate.cpages != npages' * and then upon drm_gpusvm_range_get_pages device pages from * other processes are collected + faulted in which creates all * sorts of problems. 
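With the migration entry point moved to drm_pagemap, the driver-side GPU fault flow shown in the updated DOC above condenses to the following sketch (driver_migration_policy() and driver_choose_drm_pagemap() are the stand-ins from the DOC example; ctx is a pointer to the driver's struct drm_gpusvm_ctx, and error handling is trimmed):

	retry:
		range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
							gpuva_start, gpuva_end,
							ctx);
		if (IS_ERR(range))
			return PTR_ERR(range);

		if (driver_migration_policy(range)) {
			/* Best effort only; the migration may fail. */
			err = drm_pagemap_populate_mm(driver_choose_drm_pagemap(),
						      gpuva_start, gpuva_end,
						      gpusvm->mm,
						      ctx->timeslice_ms);
			if (err)	/* CPU mappings may have changed */
				goto retry;
		}

		err = drm_gpusvm_range_get_pages(gpusvm, range, ctx);
		if (err)	/* raced with an invalidation, retry */
			goto retry;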
Unsure exactly how this is happening, also
@@ -1364,7 +1254,7 @@ int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
 		.dev_private_owner = gpusvm->device_private_page_owner,
 	};
 	struct mm_struct *mm = gpusvm->mm;
-	struct drm_gpusvm_zdd *zdd;
+	void *zdd;
 	unsigned long timeout =
 		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
 	unsigned long i, j;
@@ -1447,6 +1337,7 @@ map_pages:
 	}
 
 	zdd = NULL;
+	pagemap = NULL;
 	num_dma_mapped = 0;
 	for (i = 0, j = 0; i < npages; ++j) {
 		struct page *page = hmm_pfn_to_page(pfns[i]);
@@ -1466,7 +1357,7 @@ map_pages:
 			}
 
 			pagemap = page_pgmap(page);
-			dpagemap = zdd->devmem_allocation->dpagemap;
+			dpagemap = drm_pagemap_page_to_dpagemap(page);
 			if (drm_WARN_ON(gpusvm->drm, !dpagemap)) {
 				/*
 				 * Raced. This is not supposed to happen
@@ -1490,7 +1381,7 @@ map_pages:
 		} else {
 			dma_addr_t addr;
 
-			if (is_zone_device_page(page) || zdd) {
+			if (is_zone_device_page(page) || pagemap) {
 				err = -EOPNOTSUPP;
 				goto err_unmap;
 			}
@@ -1518,7 +1409,7 @@ map_pages:
 		flags.has_dma_mapping = true;
 	}
 
-	if (zdd) {
+	if (pagemap) {
 		flags.has_devmem_pages = true;
 		range->dpagemap = dpagemap;
 	}
@@ -1576,562 +1468,11 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
 EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);
 
 /**
- * drm_gpusvm_migration_unlock_put_page() - Put a migration page
- * @page: Pointer to the page to put
- *
- * This function unlocks and puts a page.
- */
-static void drm_gpusvm_migration_unlock_put_page(struct page *page)
-{
-	unlock_page(page);
-	put_page(page);
-}
-
-/**
- * drm_gpusvm_migration_unlock_put_pages() - Put migration pages
- * @npages: Number of pages
- * @migrate_pfn: Array of migrate page frame numbers
- *
- * This function unlocks and puts an array of pages.
- */
-static void drm_gpusvm_migration_unlock_put_pages(unsigned long npages,
-						  unsigned long *migrate_pfn)
-{
-	unsigned long i;
-
-	for (i = 0; i < npages; ++i) {
-		struct page *page;
-
-		if (!migrate_pfn[i])
-			continue;
-
-		page = migrate_pfn_to_page(migrate_pfn[i]);
-		drm_gpusvm_migration_unlock_put_page(page);
-		migrate_pfn[i] = 0;
-	}
-}
-
-/**
- * drm_gpusvm_get_devmem_page() - Get a reference to a device memory page
- * @page: Pointer to the page
- * @zdd: Pointer to the GPU SVM zone device data
- *
- * This function associates the given page with the specified GPU SVM zone
- * device data and initializes it for zone device usage.
- */
-static void drm_gpusvm_get_devmem_page(struct page *page,
-				       struct drm_gpusvm_zdd *zdd)
-{
-	page->zone_device_data = drm_gpusvm_zdd_get(zdd);
-	zone_device_page_init(page);
-}
-
-/**
- * drm_gpusvm_migrate_map_pages() - Map migration pages for GPU SVM migration
- * @dev: The device for which the pages are being mapped
- * @dma_addr: Array to store DMA addresses corresponding to mapped pages
- * @migrate_pfn: Array of migrate page frame numbers to map
- * @npages: Number of pages to map
- * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
- *
- * This function maps pages of memory for migration usage in GPU SVM. It
- * iterates over each page frame number provided in @migrate_pfn, maps the
- * corresponding page, and stores the DMA address in the provided @dma_addr
- * array.
- * - * Return: 0 on success, -EFAULT if an error occurs during mapping. - */ -static int drm_gpusvm_migrate_map_pages(struct device *dev, - dma_addr_t *dma_addr, - unsigned long *migrate_pfn, - unsigned long npages, - enum dma_data_direction dir) -{ - unsigned long i; - - for (i = 0; i < npages; ++i) { - struct page *page = migrate_pfn_to_page(migrate_pfn[i]); - - if (!page) - continue; - - if (WARN_ON_ONCE(is_zone_device_page(page))) - return -EFAULT; - - dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir); - if (dma_mapping_error(dev, dma_addr[i])) - return -EFAULT; - } - - return 0; -} - -/** - * drm_gpusvm_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration - * @dev: The device for which the pages were mapped - * @dma_addr: Array of DMA addresses corresponding to mapped pages - * @npages: Number of pages to unmap - * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL) - * - * This function unmaps previously mapped pages of memory for GPU Shared Virtual - * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks - * if it's valid and not already unmapped, and unmaps the corresponding page. - */ -static void drm_gpusvm_migrate_unmap_pages(struct device *dev, - dma_addr_t *dma_addr, - unsigned long npages, - enum dma_data_direction dir) -{ - unsigned long i; - - for (i = 0; i < npages; ++i) { - if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i])) - continue; - - dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir); - } -} - -/** - * drm_gpusvm_migrate_to_devmem() - Migrate GPU SVM range to device memory + * drm_gpusvm_range_evict() - Evict GPU SVM range * @gpusvm: Pointer to the GPU SVM structure - * @range: Pointer to the GPU SVM range structure - * @devmem_allocation: Pointer to the device memory allocation. The caller - * should hold a reference to the device memory allocation, - * which should be dropped via ops->devmem_release or upon - * the failure of this function. - * @ctx: GPU SVM context - * - * This function migrates the specified GPU SVM range to device memory. It - * performs the necessary setup and invokes the driver-specific operations for - * migration to device memory. Upon successful return, @devmem_allocation can - * safely reference @range until ops->devmem_release is called which only upon - * successful return. Expected to be called while holding the mmap lock in read - * mode. - * - * Return: 0 on success, negative error code on failure. 
- */ -int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm, - struct drm_gpusvm_range *range, - struct drm_gpusvm_devmem *devmem_allocation, - const struct drm_gpusvm_ctx *ctx) -{ - const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops; - unsigned long start = drm_gpusvm_range_start(range), - end = drm_gpusvm_range_end(range); - struct migrate_vma migrate = { - .start = start, - .end = end, - .pgmap_owner = gpusvm->device_private_page_owner, - .flags = MIGRATE_VMA_SELECT_SYSTEM, - }; - struct mm_struct *mm = gpusvm->mm; - unsigned long i, npages = npages_in_range(start, end); - struct vm_area_struct *vas; - struct drm_gpusvm_zdd *zdd = NULL; - struct page **pages; - dma_addr_t *dma_addr; - void *buf; - int err; - - mmap_assert_locked(gpusvm->mm); - - if (!range->flags.migrate_devmem) - return -EINVAL; - - if (!ops->populate_devmem_pfn || !ops->copy_to_devmem || - !ops->copy_to_ram) - return -EOPNOTSUPP; - - vas = vma_lookup(mm, start); - if (!vas) { - err = -ENOENT; - goto err_out; - } - - if (end > vas->vm_end || start < vas->vm_start) { - err = -EINVAL; - goto err_out; - } - - if (!vma_is_anonymous(vas)) { - err = -EBUSY; - goto err_out; - } - - buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) + - sizeof(*pages), GFP_KERNEL); - if (!buf) { - err = -ENOMEM; - goto err_out; - } - dma_addr = buf + (2 * sizeof(*migrate.src) * npages); - pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages; - - zdd = drm_gpusvm_zdd_alloc(gpusvm->device_private_page_owner); - if (!zdd) { - err = -ENOMEM; - goto err_free; - } - - migrate.vma = vas; - migrate.src = buf; - migrate.dst = migrate.src + npages; - - err = migrate_vma_setup(&migrate); - if (err) - goto err_free; - - if (!migrate.cpages) { - err = -EFAULT; - goto err_free; - } - - if (migrate.cpages != npages) { - err = -EBUSY; - goto err_finalize; - } - - err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst); - if (err) - goto err_finalize; - - err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr, - migrate.src, npages, DMA_TO_DEVICE); - if (err) - goto err_finalize; - - for (i = 0; i < npages; ++i) { - struct page *page = pfn_to_page(migrate.dst[i]); - - pages[i] = page; - migrate.dst[i] = migrate_pfn(migrate.dst[i]); - drm_gpusvm_get_devmem_page(page, zdd); - } - - err = ops->copy_to_devmem(pages, dma_addr, npages); - if (err) - goto err_finalize; - - /* Upon success bind devmem allocation to range and zdd */ - devmem_allocation->timeslice_expiration = get_jiffies_64() + - msecs_to_jiffies(ctx->timeslice_ms); - zdd->devmem_allocation = devmem_allocation; /* Owns ref */ - -err_finalize: - if (err) - drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst); - migrate_vma_pages(&migrate); - migrate_vma_finalize(&migrate); - drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages, - DMA_TO_DEVICE); -err_free: - if (zdd) - drm_gpusvm_zdd_put(zdd); - kvfree(buf); -err_out: - return err; -} -EXPORT_SYMBOL_GPL(drm_gpusvm_migrate_to_devmem); - -/** - * drm_gpusvm_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area - * @vas: Pointer to the VM area structure, can be NULL - * @fault_page: Fault page - * @npages: Number of pages to populate - * @mpages: Number of pages to migrate - * @src_mpfn: Source array of migrate PFNs - * @mpfn: Array of migrate PFNs to populate - * @addr: Start address for PFN allocation - * - * This function populates the RAM migrate page frame numbers (PFNs) for the - * specified VM area structure. 
It allocates and locks pages in the VM area for - * RAM usage. If vas is non-NULL use alloc_page_vma for allocation, if NULL use - * alloc_page for allocation. - * - * Return: 0 on success, negative error code on failure. - */ -static int drm_gpusvm_migrate_populate_ram_pfn(struct vm_area_struct *vas, - struct page *fault_page, - unsigned long npages, - unsigned long *mpages, - unsigned long *src_mpfn, - unsigned long *mpfn, - unsigned long addr) -{ - unsigned long i; - - for (i = 0; i < npages; ++i, addr += PAGE_SIZE) { - struct page *page, *src_page; - - if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE)) - continue; - - src_page = migrate_pfn_to_page(src_mpfn[i]); - if (!src_page) - continue; - - if (fault_page) { - if (src_page->zone_device_data != - fault_page->zone_device_data) - continue; - } - - if (vas) - page = alloc_page_vma(GFP_HIGHUSER, vas, addr); - else - page = alloc_page(GFP_HIGHUSER); - - if (!page) - goto free_pages; - - mpfn[i] = migrate_pfn(page_to_pfn(page)); - } - - for (i = 0; i < npages; ++i) { - struct page *page = migrate_pfn_to_page(mpfn[i]); - - if (!page) - continue; - - WARN_ON_ONCE(!trylock_page(page)); - ++*mpages; - } - - return 0; - -free_pages: - for (i = 0; i < npages; ++i) { - struct page *page = migrate_pfn_to_page(mpfn[i]); - - if (!page) - continue; - - put_page(page); - mpfn[i] = 0; - } - return -ENOMEM; -} - -/** - * drm_gpusvm_evict_to_ram() - Evict GPU SVM range to RAM - * @devmem_allocation: Pointer to the device memory allocation - * - * Similar to __drm_gpusvm_migrate_to_ram but does not require mmap lock and - * migration done via migrate_device_* functions. - * - * Return: 0 on success, negative error code on failure. - */ -int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation) -{ - const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops; - unsigned long npages, mpages = 0; - struct page **pages; - unsigned long *src, *dst; - dma_addr_t *dma_addr; - void *buf; - int i, err = 0; - unsigned int retry_count = 2; - - npages = devmem_allocation->size >> PAGE_SHIFT; - -retry: - if (!mmget_not_zero(devmem_allocation->mm)) - return -EFAULT; - - buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) + - sizeof(*pages), GFP_KERNEL); - if (!buf) { - err = -ENOMEM; - goto err_out; - } - src = buf; - dst = buf + (sizeof(*src) * npages); - dma_addr = buf + (2 * sizeof(*src) * npages); - pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages; - - err = ops->populate_devmem_pfn(devmem_allocation, npages, src); - if (err) - goto err_free; - - err = migrate_device_pfns(src, npages); - if (err) - goto err_free; - - err = drm_gpusvm_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages, - src, dst, 0); - if (err || !mpages) - goto err_finalize; - - err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr, - dst, npages, DMA_FROM_DEVICE); - if (err) - goto err_finalize; - - for (i = 0; i < npages; ++i) - pages[i] = migrate_pfn_to_page(src[i]); - - err = ops->copy_to_ram(pages, dma_addr, npages); - if (err) - goto err_finalize; - -err_finalize: - if (err) - drm_gpusvm_migration_unlock_put_pages(npages, dst); - migrate_device_pages(src, dst, npages); - migrate_device_finalize(src, dst, npages); - drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages, - DMA_FROM_DEVICE); -err_free: - kvfree(buf); -err_out: - mmput_async(devmem_allocation->mm); - - if (completion_done(&devmem_allocation->detached)) - return 0; - - if (retry_count--) { - cond_resched(); - goto retry; - } - - return err ?: -EBUSY; -} 
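Worth noting in the eviction function removed above (and re-added in drm_pagemap.c) is its retry discipline: an individual pass may fail transiently, so success is judged by whether the allocation ended up detached, with a small bounded number of retries. The shape of that pattern, stripped of the drm_gpusvm specifics (struct allocation and do_one_eviction_pass() are hypothetical):

	static int evict_with_retries(struct allocation *alloc)
	{
		unsigned int retry_count = 2;
		int err;

	retry:
		/* The mm may already be exiting; treat that as a fault. */
		if (!mmget_not_zero(alloc->mm))
			return -EFAULT;

		err = do_one_eviction_pass(alloc);	/* hypothetical */
		mmput_async(alloc->mm);

		/* Detachment, not err alone, defines success. */
		if (completion_done(&alloc->detached))
			return 0;

		if (retry_count--) {
			cond_resched();
			goto retry;
		}

		return err ?: -EBUSY;
	}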
-EXPORT_SYMBOL_GPL(drm_gpusvm_evict_to_ram); - -/** - * __drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (internal) - * @vas: Pointer to the VM area structure - * @device_private_page_owner: Device private pages owner - * @page: Pointer to the page for fault handling (can be NULL) - * @fault_addr: Fault address - * @size: Size of migration - * - * This internal function performs the migration of the specified GPU SVM range - * to RAM. It sets up the migration, populates + dma maps RAM PFNs, and - * invokes the driver-specific operations for migration to RAM. - * - * Return: 0 on success, negative error code on failure. - */ -static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas, - void *device_private_page_owner, - struct page *page, - unsigned long fault_addr, - unsigned long size) -{ - struct migrate_vma migrate = { - .vma = vas, - .pgmap_owner = device_private_page_owner, - .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE | - MIGRATE_VMA_SELECT_DEVICE_COHERENT, - .fault_page = page, - }; - struct drm_gpusvm_zdd *zdd; - const struct drm_gpusvm_devmem_ops *ops; - struct device *dev = NULL; - unsigned long npages, mpages = 0; - struct page **pages; - dma_addr_t *dma_addr; - unsigned long start, end; - void *buf; - int i, err = 0; - - if (page) { - zdd = page->zone_device_data; - if (time_before64(get_jiffies_64(), - zdd->devmem_allocation->timeslice_expiration)) - return 0; - } - - start = ALIGN_DOWN(fault_addr, size); - end = ALIGN(fault_addr + 1, size); - - /* Corner where VMA area struct has been partially unmapped */ - if (start < vas->vm_start) - start = vas->vm_start; - if (end > vas->vm_end) - end = vas->vm_end; - - migrate.start = start; - migrate.end = end; - npages = npages_in_range(start, end); - - buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) + - sizeof(*pages), GFP_KERNEL); - if (!buf) { - err = -ENOMEM; - goto err_out; - } - dma_addr = buf + (2 * sizeof(*migrate.src) * npages); - pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages; - - migrate.vma = vas; - migrate.src = buf; - migrate.dst = migrate.src + npages; - - err = migrate_vma_setup(&migrate); - if (err) - goto err_free; - - /* Raced with another CPU fault, nothing to do */ - if (!migrate.cpages) - goto err_free; - - if (!page) { - for (i = 0; i < npages; ++i) { - if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE)) - continue; - - page = migrate_pfn_to_page(migrate.src[i]); - break; - } - - if (!page) - goto err_finalize; - } - zdd = page->zone_device_data; - ops = zdd->devmem_allocation->ops; - dev = zdd->devmem_allocation->dev; - - err = drm_gpusvm_migrate_populate_ram_pfn(vas, page, npages, &mpages, - migrate.src, migrate.dst, - start); - if (err) - goto err_finalize; - - err = drm_gpusvm_migrate_map_pages(dev, dma_addr, migrate.dst, npages, - DMA_FROM_DEVICE); - if (err) - goto err_finalize; - - for (i = 0; i < npages; ++i) - pages[i] = migrate_pfn_to_page(migrate.src[i]); - - err = ops->copy_to_ram(pages, dma_addr, npages); - if (err) - goto err_finalize; - -err_finalize: - if (err) - drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst); - migrate_vma_pages(&migrate); - migrate_vma_finalize(&migrate); - if (dev) - drm_gpusvm_migrate_unmap_pages(dev, dma_addr, npages, - DMA_FROM_DEVICE); -err_free: - kvfree(buf); -err_out: - - return err; -} - -/** - * drm_gpusvm_range_evict - Evict GPU SVM range * @range: Pointer to the GPU SVM range to be removed * - * This function evicts the specified GPU SVM range. This function will not - * evict coherent pages. 
+ * This function evicts the specified GPU SVM range. * * Return: 0 on success, a negative error code on failure. */ @@ -2184,60 +1525,6 @@ int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm, EXPORT_SYMBOL_GPL(drm_gpusvm_range_evict); /** - * drm_gpusvm_page_free() - Put GPU SVM zone device data associated with a page - * @page: Pointer to the page - * - * This function is a callback used to put the GPU SVM zone device data - * associated with a page when it is being released. - */ -static void drm_gpusvm_page_free(struct page *page) -{ - drm_gpusvm_zdd_put(page->zone_device_data); -} - -/** - * drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (page fault handler) - * @vmf: Pointer to the fault information structure - * - * This function is a page fault handler used to migrate a GPU SVM range to RAM. - * It retrieves the GPU SVM range information from the faulting page and invokes - * the internal migration function to migrate the range back to RAM. - * - * Return: VM_FAULT_SIGBUS on failure, 0 on success. - */ -static vm_fault_t drm_gpusvm_migrate_to_ram(struct vm_fault *vmf) -{ - struct drm_gpusvm_zdd *zdd = vmf->page->zone_device_data; - int err; - - err = __drm_gpusvm_migrate_to_ram(vmf->vma, - zdd->device_private_page_owner, - vmf->page, vmf->address, - zdd->devmem_allocation->size); - - return err ? VM_FAULT_SIGBUS : 0; -} - -/* - * drm_gpusvm_pagemap_ops - Device page map operations for GPU SVM - */ -static const struct dev_pagemap_ops drm_gpusvm_pagemap_ops = { - .page_free = drm_gpusvm_page_free, - .migrate_to_ram = drm_gpusvm_migrate_to_ram, -}; - -/** - * drm_gpusvm_pagemap_ops_get() - Retrieve GPU SVM device page map operations - * - * Return: Pointer to the GPU SVM device page map operations structure. - */ -const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void) -{ - return &drm_gpusvm_pagemap_ops; -} -EXPORT_SYMBOL_GPL(drm_gpusvm_pagemap_ops_get); - -/** * drm_gpusvm_has_mapping() - Check if GPU SVM has mapping for the given address range * @gpusvm: Pointer to the GPU SVM structure. * @start: Start address @@ -2281,28 +1568,5 @@ void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range, } EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped); -/** - * drm_gpusvm_devmem_init() - Initialize a GPU SVM device memory allocation - * - * @dev: Pointer to the device structure which device memory allocation belongs to - * @mm: Pointer to the mm_struct for the address space - * @ops: Pointer to the operations structure for GPU SVM device memory - * @dpagemap: The struct drm_pagemap we're allocating from. - * @size: Size of device memory allocation - */ -void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation, - struct device *dev, struct mm_struct *mm, - const struct drm_gpusvm_devmem_ops *ops, - struct drm_pagemap *dpagemap, size_t size) -{ - init_completion(&devmem_allocation->detached); - devmem_allocation->dev = dev; - devmem_allocation->mm = mm; - devmem_allocation->ops = ops; - devmem_allocation->dpagemap = dpagemap; - devmem_allocation->size = size; -} -EXPORT_SYMBOL_GPL(drm_gpusvm_devmem_init); - MODULE_DESCRIPTION("DRM GPUSVM"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index f62005ff9b2e..db9b089ef62c 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -40,7 +40,7 @@ * mapping's backing &drm_gem_object buffers. 
* * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing - * all existent GPU VA mappings using this &drm_gem_object as backing buffer. + * all existing GPU VA mappings using this &drm_gem_object as backing buffer. * * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'. @@ -72,7 +72,7 @@ * but it can also be a 'dummy' object, which can be allocated with * drm_gpuvm_resv_object_alloc(). * - * In order to connect a struct drm_gpuva its backing &drm_gem_object each + * In order to connect a struct drm_gpuva to its backing &drm_gem_object each * &drm_gem_object maintains a list of &drm_gpuvm_bo structures, and each * &drm_gpuvm_bo contains a list of &drm_gpuva structures. * @@ -81,7 +81,7 @@ * This is ensured by the API through drm_gpuvm_bo_obtain() and * drm_gpuvm_bo_obtain_prealloc() which first look into the corresponding * &drm_gem_object list of &drm_gpuvm_bos for an existing instance of this - * particular combination. If not existent a new instance is created and linked + * particular combination. If not present, a new instance is created and linked * to the &drm_gem_object. * * &drm_gpuvm_bo structures, since unique for a given &drm_gpuvm, are also used @@ -108,7 +108,7 @@ * sequence of operations to satisfy a given map or unmap request. * * Therefore the DRM GPU VA manager provides an algorithm implementing splitting - * and merging of existent GPU VA mappings with the ones that are requested to + * and merging of existing GPU VA mappings with the ones that are requested to * be mapped or unmapped. This feature is required by the Vulkan API to * implement Vulkan 'Sparse Memory Bindings' - drivers UAPIs often refer to this * as VM BIND. @@ -119,7 +119,7 @@ * execute in order to integrate the new mapping cleanly into the current state * of the GPU VA space. * - * Depending on how the new GPU VA mapping intersects with the existent mappings + * Depending on how the new GPU VA mapping intersects with the existing mappings * of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount * of unmap operations, a maximum of two remap operations and a single map * operation. The caller might receive no callback at all if no operation is @@ -139,16 +139,16 @@ * one unmap operation and one or two map operations, such that drivers can * derive the page table update delta accordingly. * - * Note that there can't be more than two existent mappings to split up, one at + * Note that there can't be more than two existing mappings to split up, one at * the beginning and one at the end of the new mapping, hence there is a * maximum of two remap operations. * * Analogous to drm_gpuvm_sm_map() drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to * call back into the driver in order to unmap a range of GPU VA space. The - * logic behind this function is way simpler though: For all existent mappings + * logic behind this function is way simpler though: For all existing mappings * enclosed by the given range unmap operations are created. For mappings which - * are only partically located within the given range, remap operations are - * created such that those mappings are split up and re-mapped partically. + * are only partially located within the given range, remap operations are + * created such that those mappings are split up and re-mapped partially. 
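The paragraph that follows introduces drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create(), which return the same split and merge steps as a list instead of invoking callbacks. A condensed sketch of how a driver might walk such a list, assuming hypothetical driver_apply_*() helpers:

	struct drm_gpuva_ops *ops;
	struct drm_gpuva_op *op;

	ops = drm_gpuvm_sm_map_ops_create(gpuvm, req_addr, req_range,
					  req_obj, req_offset);
	if (IS_ERR(ops))
		return PTR_ERR(ops);

	/* Steps arrive in the order they must be applied. */
	drm_gpuva_for_each_op(op, ops) {
		switch (op->op) {
		case DRM_GPUVA_OP_MAP:
			driver_apply_map(op);	/* hypothetical */
			break;
		case DRM_GPUVA_OP_REMAP:
			driver_apply_remap(op);	/* hypothetical */
			break;
		case DRM_GPUVA_OP_UNMAP:
			driver_apply_unmap(op);	/* hypothetical */
			break;
		default:
			break;
		}
	}

	drm_gpuva_ops_free(gpuvm, ops);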
* * As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(), * drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used @@ -168,7 +168,7 @@ * provided helper functions drm_gpuva_map(), drm_gpuva_remap() and * drm_gpuva_unmap() instead. * - * The following diagram depicts the basic relationships of existent GPU VA + * The following diagram depicts the basic relationships of existing GPU VA * mappings, a newly requested mapping and the resulting mappings as implemented * by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these. * @@ -218,7 +218,7 @@ * * * 4) Existent mapping is a left aligned subset of the requested one, hence - * replace the existent one. + * replace the existing one. * * :: * @@ -236,9 +236,9 @@ * and/or non-contiguous BO offset. * * - * 5) Requested mapping's range is a left aligned subset of the existent one, + * 5) Requested mapping's range is a left aligned subset of the existing one, * but backed by a different BO. Hence, map the requested mapping and split - * the existent one adjusting its BO offset. + * the existing one adjusting its BO offset. * * :: * @@ -271,9 +271,9 @@ * new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1) * * - * 7) Requested mapping's range is a right aligned subset of the existent one, + * 7) Requested mapping's range is a right aligned subset of the existing one, * but backed by a different BO. Hence, map the requested mapping and split - * the existent one, without adjusting the BO offset. + * the existing one, without adjusting the BO offset. * * :: * @@ -304,7 +304,7 @@ * * 9) Existent mapping is overlapped at the end by the requested mapping backed * by a different BO. Hence, map the requested mapping and split up the - * existent one, without adjusting the BO offset. + * existing one, without adjusting the BO offset. * * :: * @@ -334,9 +334,9 @@ * new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1) * * - * 11) Requested mapping's range is a centered subset of the existent one + * 11) Requested mapping's range is a centered subset of the existing one * having a different backing BO. Hence, map the requested mapping and split - * up the existent one in two mappings, adjusting the BO offset of the right + * up the existing one in two mappings, adjusting the BO offset of the right * one accordingly. * * :: @@ -351,7 +351,7 @@ * new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2) * * - * 12) Requested mapping is a contiguous subset of the existent one. Split it + * 12) Requested mapping is a contiguous subset of the existing one. Split it * up, but indicate that the backing PTEs could be kept. * * :: @@ -367,7 +367,7 @@ * * * 13) Existent mapping is a right aligned subset of the requested one, hence - * replace the existent one. + * replace the existing one. * * :: * @@ -386,7 +386,7 @@ * * * 14) Existent mapping is a centered subset of the requested one, hence - * replace the existent one. + * replace the existing one. * * :: * @@ -406,7 +406,7 @@ * * 15) Existent mappings is overlapped at the beginning by the requested mapping * backed by a different BO. Hence, map the requested mapping and split up - * the existent one, adjusting its BO offset accordingly. + * the existing one, adjusting its BO offset accordingly. * * :: * @@ -469,8 +469,8 @@ * make use of them. * * The below code is strictly limited to illustrate the generic usage pattern. 
- * To maintain simplicitly, it doesn't make use of any abstractions for common - * code, different (asyncronous) stages with fence signalling critical paths, + * To maintain simplicity, it doesn't make use of any abstractions for common + * code, different (asynchronous) stages with fence signalling critical paths, * any other helpers or error handling in terms of freeing memory and dropping * previously taken locks. * @@ -479,7 +479,7 @@ * // Allocates a new &drm_gpuva. * struct drm_gpuva * driver_gpuva_alloc(void); * - * // Typically drivers would embedd the &drm_gpuvm and &drm_gpuva + * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva * // structure in individual driver structures and lock the dma-resv with * // drm_exec or similar helpers. * int driver_mapping_create(struct drm_gpuvm *gpuvm, @@ -582,7 +582,7 @@ * .sm_step_unmap = driver_gpuva_unmap, * }; * - * // Typically drivers would embedd the &drm_gpuvm and &drm_gpuva + * // Typically drivers would embed the &drm_gpuvm and &drm_gpuva * // structure in individual driver structures and lock the dma-resv with * // drm_exec or similar helpers. * int driver_mapping_create(struct drm_gpuvm *gpuvm, @@ -680,7 +680,7 @@ * * This helper is here to provide lockless list iteration. Lockless as in, the * iterator releases the lock immediately after picking the first element from - * the list, so list insertion deletion can happen concurrently. + * the list, so list insertion and deletion can happen concurrently. * * Elements popped from the original list are kept in a local list, so removal * and is_empty checks can still happen while we're iterating the list. @@ -1160,7 +1160,7 @@ drm_gpuvm_prepare_objects_locked(struct drm_gpuvm *gpuvm, } /** - * drm_gpuvm_prepare_objects() - prepare all assoiciated BOs + * drm_gpuvm_prepare_objects() - prepare all associated BOs * @gpuvm: the &drm_gpuvm * @exec: the &drm_exec locking context * @num_fences: the amount of &dma_fences to reserve @@ -1230,13 +1230,13 @@ drm_gpuvm_prepare_range(struct drm_gpuvm *gpuvm, struct drm_exec *exec, EXPORT_SYMBOL_GPL(drm_gpuvm_prepare_range); /** - * drm_gpuvm_exec_lock() - lock all dma-resv of all assoiciated BOs + * drm_gpuvm_exec_lock() - lock all dma-resv of all associated BOs * @vm_exec: the &drm_gpuvm_exec wrapper * * Acquires all dma-resv locks of all &drm_gem_objects the given * &drm_gpuvm contains mappings of. * - * Addionally, when calling this function with struct drm_gpuvm_exec::extra + * Additionally, when calling this function with struct drm_gpuvm_exec::extra * being set the driver receives the given @fn callback to lock additional * dma-resv in the context of the &drm_gpuvm_exec instance. Typically, drivers * would call drm_exec_prepare_obj() from within this callback. @@ -1293,7 +1293,7 @@ fn_lock_array(struct drm_gpuvm_exec *vm_exec) } /** - * drm_gpuvm_exec_lock_array() - lock all dma-resv of all assoiciated BOs + * drm_gpuvm_exec_lock_array() - lock all dma-resv of all associated BOs * @vm_exec: the &drm_gpuvm_exec wrapper * @objs: additional &drm_gem_objects to lock * @num_objs: the number of additional &drm_gem_objects to lock @@ -1588,7 +1588,7 @@ drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, EXPORT_SYMBOL_GPL(drm_gpuvm_bo_find); /** - * drm_gpuvm_bo_obtain() - obtains and instance of the &drm_gpuvm_bo for the + * drm_gpuvm_bo_obtain() - obtains an instance of the &drm_gpuvm_bo for the * given &drm_gpuvm and &drm_gem_object * @gpuvm: The &drm_gpuvm the @obj is mapped in. * @obj: The &drm_gem_object being mapped in the @gpuvm. 
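Since a &drm_gpuvm_bo is unique per GPU-VM and GEM object pair, the usual driver pattern when creating a mapping is obtain, link, put. A short sketch (locking elided; va is assumed to be a freshly initialized &drm_gpuva):

	struct drm_gpuvm_bo *vm_bo;

	/* Returns the existing instance or creates and links a new one. */
	vm_bo = drm_gpuvm_bo_obtain(gpuvm, obj);
	if (IS_ERR(vm_bo))
		return PTR_ERR(vm_bo);

	/* The linked va takes its own reference on vm_bo. */
	drm_gpuva_link(va, vm_bo);

	/* Drop the obtain reference; the link keeps vm_bo alive. */
	drm_gpuvm_bo_put(vm_bo);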
@@ -1624,7 +1624,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm, EXPORT_SYMBOL_GPL(drm_gpuvm_bo_obtain); /** - * drm_gpuvm_bo_obtain_prealloc() - obtains and instance of the &drm_gpuvm_bo + * drm_gpuvm_bo_obtain_prealloc() - obtains an instance of the &drm_gpuvm_bo * for the given &drm_gpuvm and &drm_gem_object * @__vm_bo: A pre-allocated struct drm_gpuvm_bo. * @@ -1688,7 +1688,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_bo_extobj_add); * @vm_bo: the &drm_gpuvm_bo to add or remove * @evict: indicates whether the object is evicted * - * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvms evicted list. + * Adds a &drm_gpuvm_bo to or removes it from the &drm_gpuvm's evicted list. */ void drm_gpuvm_bo_evict(struct drm_gpuvm_bo *vm_bo, bool evict) @@ -1790,7 +1790,7 @@ __drm_gpuva_remove(struct drm_gpuva *va) * drm_gpuva_remove() - remove a &drm_gpuva * @va: the &drm_gpuva to remove * - * This removes the given &va from the underlaying tree. + * This removes the given &va from the underlying tree. * * It is safe to use this function using the safe versions of iterating the GPU * VA space, such as drm_gpuvm_for_each_va_safe() and @@ -2358,7 +2358,7 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map); * * This function iterates the given range of the GPU VA space. It utilizes the * &drm_gpuvm_ops to call back into the driver providing the operations to - * unmap and, if required, split existent mappings. + * unmap and, if required, split existing mappings. * * Drivers may use these callbacks to update the GPU VA space right away within * the callback. In case the driver decides to copy and store the operations for @@ -2432,6 +2432,8 @@ static const struct drm_gpuvm_ops lock_ops = { * * The expected usage is:: * + * .. code-block:: c + * * vm_bind { * struct drm_exec exec; * @@ -2473,7 +2475,7 @@ static const struct drm_gpuvm_ops lock_ops = { * required without the earlier DRIVER_OP_MAP. This is safe because we've * already locked the GEM object in the earlier DRIVER_OP_MAP step. * - * Returns: 0 on success or a negative error codec + * Returns: 0 on success or a negative error code */ int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm, @@ -2617,12 +2619,12 @@ static const struct drm_gpuvm_ops gpuvm_list_ops = { * @req_offset: the offset within the &drm_gem_object * * This function creates a list of operations to perform splitting and merging - * of existent mapping(s) with the newly requested one. + * of existing mapping(s) with the newly requested one. * * The list can be iterated with &drm_gpuva_for_each_op and must be processed * in the given order. It can contain map, unmap and remap operations, but it * also can be empty if no operation is required, e.g. if the requested mapping - * already exists is the exact same way. + * already exists in the exact same way. * * There can be an arbitrary amount of unmap operations, a maximum of two remap * operations and a single map operation. 
The latter one represents the original diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 442eb31351dd..e79c3c623c9a 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -161,6 +161,8 @@ void drm_sysfs_lease_event(struct drm_device *dev); /* drm_gem.c */ int drm_gem_init(struct drm_device *dev); +bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj); +void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj); int drm_gem_handle_create_tail(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep); @@ -182,8 +184,7 @@ void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map); #if defined(CONFIG_DEBUG_FS) void drm_debugfs_dev_fini(struct drm_device *dev); void drm_debugfs_dev_register(struct drm_device *dev); -int drm_debugfs_register(struct drm_minor *minor, int minor_id, - struct dentry *root); +int drm_debugfs_register(struct drm_minor *minor, int minor_id); void drm_debugfs_unregister(struct drm_minor *minor); void drm_debugfs_connector_add(struct drm_connector *connector); void drm_debugfs_connector_remove(struct drm_connector *connector); @@ -201,8 +202,7 @@ static inline void drm_debugfs_dev_register(struct drm_device *dev) { } -static inline int drm_debugfs_register(struct drm_minor *minor, int minor_id, - struct dentry *root) +static inline int drm_debugfs_register(struct drm_minor *minor, int minor_id) { return 0; } diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index ba4be6be5d28..e33c78fc8fbd 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -230,7 +230,13 @@ int mipi_dbi_buf_copy(void *dst, struct iosys_map *src, struct drm_framebuffer * case DRM_FORMAT_XRGB8888: switch (dbidev->pixel_format) { case DRM_FORMAT_RGB565: - drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, fmtcnv_state, swap); + if (swap) { + drm_fb_xrgb8888_to_rgb565be(&dst_map, NULL, src, fb, clip, + fmtcnv_state); + } else { + drm_fb_xrgb8888_to_rgb565(&dst_map, NULL, src, fb, clip, + fmtcnv_state); + } break; case DRM_FORMAT_RGB888: drm_fb_xrgb8888_to_rgb888(&dst_map, NULL, src, fb, clip, fmtcnv_state); diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index a00d76443128..3a9b3278a6e3 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -92,12 +92,13 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = { .restore = pm_generic_restore, }; -static const struct bus_type mipi_dsi_bus_type = { +const struct bus_type mipi_dsi_bus_type = { .name = "mipi-dsi", .match = mipi_dsi_device_match, .uevent = mipi_dsi_uevent, .pm = &mipi_dsi_device_pm_ops, }; +EXPORT_SYMBOL_GPL(mipi_dsi_bus_type); /** * of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a diff --git a/drivers/gpu/drm/drm_modeset_helper.c b/drivers/gpu/drm/drm_modeset_helper.c index ef32f6af10d4..988735560570 100644 --- a/drivers/gpu/drm/drm_modeset_helper.c +++ b/drivers/gpu/drm/drm_modeset_helper.c @@ -74,6 +74,7 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head); * drm_helper_mode_fill_fb_struct - fill out framebuffer metadata * @dev: DRM device * @fb: drm_framebuffer object to fill out + * @info: pixel format information * @mode_cmd: metadata from the userspace fb creation request * * This helper can be used in a drivers fb_create callback to pre-fill the fb's @@ -81,12 +82,13 @@ EXPORT_SYMBOL(drm_helper_move_panel_connectors_to_head); */ void 
drm_helper_mode_fill_fb_struct(struct drm_device *dev, struct drm_framebuffer *fb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { int i; fb->dev = dev; - fb->format = drm_get_format_info(dev, mode_cmd); + fb->format = info; fb->width = mode_cmd->width; fb->height = mode_cmd->height; for (i = 0; i < 4; i++) { diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c new file mode 100644 index 000000000000..1da55322af12 --- /dev/null +++ b/drivers/gpu/drm/drm_pagemap.c @@ -0,0 +1,838 @@ +// SPDX-License-Identifier: GPL-2.0-only OR MIT +/* + * Copyright © 2024-2025 Intel Corporation + */ + +#include <linux/dma-mapping.h> +#include <linux/migrate.h> +#include <linux/pagemap.h> +#include <drm/drm_drv.h> +#include <drm/drm_pagemap.h> + +/** + * DOC: Overview + * + * The DRM pagemap layer is intended to augment the dev_pagemap functionality by + * providing a way to populate a struct mm_struct virtual range with device + * private pages and to provide helpers to abstract device memory allocations, + * to migrate memory back and forth between device memory and system RAM and + * to handle access (and in the future migration) between devices implementing + * a fast interconnect that is not necessarily visible to the rest of the + * system. + * + * Typically the DRM pagemap receives requests from one or more DRM GPU SVM + * instances to populate struct mm_struct virtual ranges with memory. The + * migration is best effort only and may thus fail. The implementation should + * also handle device unbinding by blocking (returning an -ENODEV error) new + * population requests and, after that, migrating all device pages to system RAM. + */ + +/** + * DOC: Migration + * + * Migration granularity typically follows the GPU SVM range requests, but + * clashes may occur, due to races or because multiple GPU SVM instances have + * different views of the ranges used, so that parts of a requested range are + * already present in the requested device memory. In that case the + * implementation has a variety of options. It can fail, it can choose + * to populate only the part of the range that isn't already in device memory, + * or it can evict the range to system memory before trying to migrate. Ideally an + * implementation would just try to migrate the missing part of the range and + * allocate just enough memory to do so. + * + * When migrating to system memory as a response to a CPU fault or a device + * memory eviction request, currently a full device memory allocation is + * migrated back to system memory. Moving forward this might need improvement for + * situations where a single page needs bouncing between system memory and + * device memory due to, for example, atomic operations. + * + * Key DRM pagemap components: + * + * - Device Memory Allocations: + * Embedded structure containing enough information for the drm_pagemap to + * migrate to / from device memory. + * + * - Device Memory Operations: + * Define the interface for driver-specific device memory operations: + * release memory, populate pfns, and copy to / from device memory. + */ + +/** + * struct drm_pagemap_zdd - GPU SVM zone device data + * + * @refcount: Reference count for the zdd + * @devmem_allocation: device memory allocation + * @device_private_page_owner: Device private pages owner + * + * This structure serves as a generic wrapper installed in + * page->zone_device_data.
It provides infrastructure for looking up a device + * memory allocation upon CPU page fault and asynchronously releasing device + * memory once the CPU has no page references. Asynchronous release is useful + * because CPU page references can be dropped in IRQ contexts, while releasing + * device memory likely requires sleeping locks. + */ +struct drm_pagemap_zdd { + struct kref refcount; + struct drm_pagemap_devmem *devmem_allocation; + void *device_private_page_owner; +}; + +/** + * drm_pagemap_zdd_alloc() - Allocate a zdd structure. + * @device_private_page_owner: Device private pages owner + * + * This function allocates and initializes a new zdd structure. It sets up the + * reference count and stores the device private pages owner for later lookups. + * + * Return: Pointer to the allocated zdd on success, NULL on failure. + */ +static struct drm_pagemap_zdd * +drm_pagemap_zdd_alloc(void *device_private_page_owner) +{ + struct drm_pagemap_zdd *zdd; + + zdd = kmalloc(sizeof(*zdd), GFP_KERNEL); + if (!zdd) + return NULL; + + kref_init(&zdd->refcount); + zdd->devmem_allocation = NULL; + zdd->device_private_page_owner = device_private_page_owner; + + return zdd; +} + +/** + * drm_pagemap_zdd_get() - Get a reference to a zdd structure. + * @zdd: Pointer to the zdd structure. + * + * This function increments the reference count of the provided zdd structure. + * + * Return: Pointer to the zdd structure. + */ +static struct drm_pagemap_zdd *drm_pagemap_zdd_get(struct drm_pagemap_zdd *zdd) +{ + kref_get(&zdd->refcount); + return zdd; +} + +/** + * drm_pagemap_zdd_destroy() - Destroy a zdd structure. + * @ref: Pointer to the reference count structure. + * + * This function releases the device memory allocation backing the zdd, if any, + * and frees the zdd itself. It is invoked once the reference count drops to zero. + */ +static void drm_pagemap_zdd_destroy(struct kref *ref) +{ + struct drm_pagemap_zdd *zdd = + container_of(ref, struct drm_pagemap_zdd, refcount); + struct drm_pagemap_devmem *devmem = zdd->devmem_allocation; + + if (devmem) { + complete_all(&devmem->detached); + if (devmem->ops->devmem_release) + devmem->ops->devmem_release(devmem); + } + kfree(zdd); +} + +/** + * drm_pagemap_zdd_put() - Put a zdd reference. + * @zdd: Pointer to the zdd structure. + * + * This function decrements the reference count of the provided zdd structure + * and destroys it if the count drops to zero. + */ +static void drm_pagemap_zdd_put(struct drm_pagemap_zdd *zdd) +{ + kref_put(&zdd->refcount, drm_pagemap_zdd_destroy); +} + +/** + * drm_pagemap_migration_unlock_put_page() - Put a migration page + * @page: Pointer to the page to put + * + * This function unlocks and puts a page. + */ +static void drm_pagemap_migration_unlock_put_page(struct page *page) +{ + unlock_page(page); + put_page(page); +} + +/** + * drm_pagemap_migration_unlock_put_pages() - Put migration pages + * @npages: Number of pages + * @migrate_pfn: Array of migrate page frame numbers + * + * This function unlocks and puts an array of pages.
+ */ +static void drm_pagemap_migration_unlock_put_pages(unsigned long npages, + unsigned long *migrate_pfn) +{ + unsigned long i; + + for (i = 0; i < npages; ++i) { + struct page *page; + + if (!migrate_pfn[i]) + continue; + + page = migrate_pfn_to_page(migrate_pfn[i]); + drm_pagemap_migration_unlock_put_page(page); + migrate_pfn[i] = 0; + } +} + +/** + * drm_pagemap_get_devmem_page() - Get a reference to a device memory page + * @page: Pointer to the page + * @zdd: Pointer to the GPU SVM zone device data + * + * This function associates the given page with the specified GPU SVM zone + * device data and initializes it for zone device usage. + */ +static void drm_pagemap_get_devmem_page(struct page *page, + struct drm_pagemap_zdd *zdd) +{ + page->zone_device_data = drm_pagemap_zdd_get(zdd); + zone_device_page_init(page); +} + +/** + * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration + * @dev: The device for which the pages are being mapped + * @dma_addr: Array to store DMA addresses corresponding to mapped pages + * @migrate_pfn: Array of migrate page frame numbers to map + * @npages: Number of pages to map + * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL) + * + * This function maps pages of memory for migration usage in GPU SVM. It + * iterates over each page frame number provided in @migrate_pfn, maps the + * corresponding page, and stores the DMA address in the provided @dma_addr + * array. + * + * Returns: 0 on success, -EFAULT if an error occurs during mapping. + */ +static int drm_pagemap_migrate_map_pages(struct device *dev, + dma_addr_t *dma_addr, + unsigned long *migrate_pfn, + unsigned long npages, + enum dma_data_direction dir) +{ + unsigned long i; + + for (i = 0; i < npages; ++i) { + struct page *page = migrate_pfn_to_page(migrate_pfn[i]); + + if (!page) + continue; + + if (WARN_ON_ONCE(is_zone_device_page(page))) + return -EFAULT; + + dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir); + if (dma_mapping_error(dev, dma_addr[i])) + return -EFAULT; + } + + return 0; +} + +/** + * drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration + * @dev: The device for which the pages were mapped + * @dma_addr: Array of DMA addresses corresponding to mapped pages + * @npages: Number of pages to unmap + * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL) + * + * This function unmaps previously mapped pages of memory for GPU Shared Virtual + * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks + * if it's valid and not already unmapped, and unmaps the corresponding page. + */ +static void drm_pagemap_migrate_unmap_pages(struct device *dev, + dma_addr_t *dma_addr, + unsigned long npages, + enum dma_data_direction dir) +{ + unsigned long i; + + for (i = 0; i < npages; ++i) { + if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i])) + continue; + + dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir); + } +} + +static unsigned long +npages_in_range(unsigned long start, unsigned long end) +{ + return (end - start) >> PAGE_SHIFT; +} + +/** + * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory + * @devmem_allocation: The device memory allocation to migrate to. + * The caller should hold a reference to the device memory allocation, + * and the reference is consumed by this function unless it returns with + * an error. + * @mm: Pointer to the struct mm_struct. + * @start: Start of the virtual address range to migrate. 
+ * @end: End of the virtual address range to migrate. + * @timeslice_ms: The time requested for the migrated pagemap pages to + * be present in @mm before being allowed to be migrated back. + * @pgmap_owner: Not used currently, since only system memory is considered. + * + * This function migrates the specified virtual address range to device memory. + * It performs the necessary setup and invokes the driver-specific operations for + * migration to device memory. Expected to be called while holding the mmap lock in + * at least read mode. + * + * Note: The @timeslice_ms parameter can typically be used to force data to + * remain in pagemap pages long enough for a GPU to perform a task and to prevent + * a migration livelock. One alternative would be for the GPU driver to block + * in a mmu_notifier for the specified amount of time, but adding the + * functionality to the pagemap is likely nicer to the system as a whole. + * + * Return: %0 on success, negative error code on failure. + */ +int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation, + struct mm_struct *mm, + unsigned long start, unsigned long end, + unsigned long timeslice_ms, + void *pgmap_owner) +{ + const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops; + struct migrate_vma migrate = { + .start = start, + .end = end, + .pgmap_owner = pgmap_owner, + .flags = MIGRATE_VMA_SELECT_SYSTEM, + }; + unsigned long i, npages = npages_in_range(start, end); + struct vm_area_struct *vas; + struct drm_pagemap_zdd *zdd = NULL; + struct page **pages; + dma_addr_t *dma_addr; + void *buf; + int err; + + mmap_assert_locked(mm); + + if (!ops->populate_devmem_pfn || !ops->copy_to_devmem || + !ops->copy_to_ram) + return -EOPNOTSUPP; + + vas = vma_lookup(mm, start); + if (!vas) { + err = -ENOENT; + goto err_out; + } + + if (end > vas->vm_end || start < vas->vm_start) { + err = -EINVAL; + goto err_out; + } + + if (!vma_is_anonymous(vas)) { + err = -EBUSY; + goto err_out; + } + + buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) + + sizeof(*pages), GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto err_out; + } + dma_addr = buf + (2 * sizeof(*migrate.src) * npages); + pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages; + + zdd = drm_pagemap_zdd_alloc(pgmap_owner); + if (!zdd) { + err = -ENOMEM; + goto err_free; + } + + migrate.vma = vas; + migrate.src = buf; + migrate.dst = migrate.src + npages; + + err = migrate_vma_setup(&migrate); + if (err) + goto err_free; + + if (!migrate.cpages) { + err = -EFAULT; + goto err_free; + } + + if (migrate.cpages != npages) { + err = -EBUSY; + goto err_finalize; + } + + err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst); + if (err) + goto err_finalize; + + err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr, + migrate.src, npages, DMA_TO_DEVICE); + if (err) + goto err_finalize; + + for (i = 0; i < npages; ++i) { + struct page *page = pfn_to_page(migrate.dst[i]); + + pages[i] = page; + migrate.dst[i] = migrate_pfn(migrate.dst[i]); + drm_pagemap_get_devmem_page(page, zdd); + } + + err = ops->copy_to_devmem(pages, dma_addr, npages); + if (err) + goto err_finalize; + + /* Upon success bind devmem allocation to range and zdd */ + devmem_allocation->timeslice_expiration = get_jiffies_64() + + msecs_to_jiffies(timeslice_ms); + zdd->devmem_allocation = devmem_allocation; /* Owns ref */ + +err_finalize: + if (err) + drm_pagemap_migration_unlock_put_pages(npages, migrate.dst); + migrate_vma_pages(&migrate); + 
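+	/*
+	 * Even on failure, migrate_vma_finalize() must run to drop the extra
+	 * page references taken by migrate_vma_setup() and to restore pages
+	 * that did not migrate to their original state.
+	 */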
migrate_vma_finalize(&migrate); + drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages, + DMA_TO_DEVICE); +err_free: + if (zdd) + drm_pagemap_zdd_put(zdd); + kvfree(buf); +err_out: + return err; +} +EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem); + +/** + * drm_pagemap_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area + * @vas: Pointer to the VM area structure, can be NULL + * @fault_page: Fault page + * @npages: Number of pages to populate + * @mpages: Number of pages to migrate + * @src_mpfn: Source array of migrate PFNs + * @mpfn: Array of migrate PFNs to populate + * @addr: Start address for PFN allocation + * + * This function populates the RAM migrate page frame numbers (PFNs) for the + * specified VM area structure. It allocates and locks pages in the VM area for + * RAM usage. If @vas is non-NULL, alloc_page_vma() is used for allocation; if + * it is NULL, alloc_page() is used. + * + * Return: 0 on success, negative error code on failure. + */ +static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas, + struct page *fault_page, + unsigned long npages, + unsigned long *mpages, + unsigned long *src_mpfn, + unsigned long *mpfn, + unsigned long addr) +{ + unsigned long i; + + for (i = 0; i < npages; ++i, addr += PAGE_SIZE) { + struct page *page, *src_page; + + if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE)) + continue; + + src_page = migrate_pfn_to_page(src_mpfn[i]); + if (!src_page) + continue; + + if (fault_page) { + if (src_page->zone_device_data != + fault_page->zone_device_data) + continue; + } + + if (vas) + page = alloc_page_vma(GFP_HIGHUSER, vas, addr); + else + page = alloc_page(GFP_HIGHUSER); + + if (!page) + goto free_pages; + + mpfn[i] = migrate_pfn(page_to_pfn(page)); + } + + for (i = 0; i < npages; ++i) { + struct page *page = migrate_pfn_to_page(mpfn[i]); + + if (!page) + continue; + + WARN_ON_ONCE(!trylock_page(page)); + ++*mpages; + } + + return 0; + +free_pages: + for (i = 0; i < npages; ++i) { + struct page *page = migrate_pfn_to_page(mpfn[i]); + + if (!page) + continue; + + put_page(page); + mpfn[i] = 0; + } + return -ENOMEM; +} + +/** + * drm_pagemap_evict_to_ram() - Evict GPU SVM range to RAM + * @devmem_allocation: Pointer to the device memory allocation + * + * Similar to __drm_pagemap_migrate_to_ram() but does not require the mmap + * lock; migration is done via the migrate_device_* functions. + * + * Return: 0 on success, negative error code on failure.
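+ *
+ * A minimal, hypothetical usage sketch from a driver eviction path, assuming
+ * the driver embeds the struct drm_pagemap_devmem in its buffer object type::
+ *
+ *	static int my_bo_evict(struct my_bo *bo)
+ *	{
+ *		return drm_pagemap_evict_to_ram(&bo->devmem);
+ *	}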
+ */ +int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation) +{ + const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops; + unsigned long npages, mpages = 0; + struct page **pages; + unsigned long *src, *dst; + dma_addr_t *dma_addr; + void *buf; + int i, err = 0; + unsigned int retry_count = 2; + + npages = devmem_allocation->size >> PAGE_SHIFT; + +retry: + if (!mmget_not_zero(devmem_allocation->mm)) + return -EFAULT; + + buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) + + sizeof(*pages), GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto err_out; + } + src = buf; + dst = buf + (sizeof(*src) * npages); + dma_addr = buf + (2 * sizeof(*src) * npages); + pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages; + + err = ops->populate_devmem_pfn(devmem_allocation, npages, src); + if (err) + goto err_free; + + err = migrate_device_pfns(src, npages); + if (err) + goto err_free; + + err = drm_pagemap_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages, + src, dst, 0); + if (err || !mpages) + goto err_finalize; + + err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr, + dst, npages, DMA_FROM_DEVICE); + if (err) + goto err_finalize; + + for (i = 0; i < npages; ++i) + pages[i] = migrate_pfn_to_page(src[i]); + + err = ops->copy_to_ram(pages, dma_addr, npages); + if (err) + goto err_finalize; + +err_finalize: + if (err) + drm_pagemap_migration_unlock_put_pages(npages, dst); + migrate_device_pages(src, dst, npages); + migrate_device_finalize(src, dst, npages); + drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages, + DMA_FROM_DEVICE); +err_free: + kvfree(buf); +err_out: + mmput_async(devmem_allocation->mm); + + if (completion_done(&devmem_allocation->detached)) + return 0; + + if (retry_count--) { + cond_resched(); + goto retry; + } + + return err ?: -EBUSY; +} +EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram); + +/** + * __drm_pagemap_migrate_to_ram() - Migrate GPU SVM range to RAM (internal) + * @vas: Pointer to the VM area structure + * @device_private_page_owner: Device private pages owner + * @page: Pointer to the page for fault handling (can be NULL) + * @fault_addr: Fault address + * @size: Size of migration + * + * This internal function performs the migration of the specified GPU SVM range + * to RAM. It sets up the migration, populates + dma maps RAM PFNs, and + * invokes the driver-specific operations for migration to RAM. + * + * Return: 0 on success, negative error code on failure. 
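+ *
+ * The migration window is the @size-aligned block around @fault_addr, clamped
+ * to the VMA. For example, with @size == SZ_2M and @fault_addr == 0x1402123000,
+ * the window is [0x1402000000, 0x1402200000).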
+ */ +static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas, + void *device_private_page_owner, + struct page *page, + unsigned long fault_addr, + unsigned long size) +{ + struct migrate_vma migrate = { + .vma = vas, + .pgmap_owner = device_private_page_owner, + .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE | + MIGRATE_VMA_SELECT_DEVICE_COHERENT, + .fault_page = page, + }; + struct drm_pagemap_zdd *zdd; + const struct drm_pagemap_devmem_ops *ops; + struct device *dev = NULL; + unsigned long npages, mpages = 0; + struct page **pages; + dma_addr_t *dma_addr; + unsigned long start, end; + void *buf; + int i, err = 0; + + if (page) { + zdd = page->zone_device_data; + if (time_before64(get_jiffies_64(), + zdd->devmem_allocation->timeslice_expiration)) + return 0; + } + + start = ALIGN_DOWN(fault_addr, size); + end = ALIGN(fault_addr + 1, size); + + /* Corner where VMA area struct has been partially unmapped */ + if (start < vas->vm_start) + start = vas->vm_start; + if (end > vas->vm_end) + end = vas->vm_end; + + migrate.start = start; + migrate.end = end; + npages = npages_in_range(start, end); + + buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) + + sizeof(*pages), GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto err_out; + } + dma_addr = buf + (2 * sizeof(*migrate.src) * npages); + pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages; + + migrate.vma = vas; + migrate.src = buf; + migrate.dst = migrate.src + npages; + + err = migrate_vma_setup(&migrate); + if (err) + goto err_free; + + /* Raced with another CPU fault, nothing to do */ + if (!migrate.cpages) + goto err_free; + + if (!page) { + for (i = 0; i < npages; ++i) { + if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE)) + continue; + + page = migrate_pfn_to_page(migrate.src[i]); + break; + } + + if (!page) + goto err_finalize; + } + zdd = page->zone_device_data; + ops = zdd->devmem_allocation->ops; + dev = zdd->devmem_allocation->dev; + + err = drm_pagemap_migrate_populate_ram_pfn(vas, page, npages, &mpages, + migrate.src, migrate.dst, + start); + if (err) + goto err_finalize; + + err = drm_pagemap_migrate_map_pages(dev, dma_addr, migrate.dst, npages, + DMA_FROM_DEVICE); + if (err) + goto err_finalize; + + for (i = 0; i < npages; ++i) + pages[i] = migrate_pfn_to_page(migrate.src[i]); + + err = ops->copy_to_ram(pages, dma_addr, npages); + if (err) + goto err_finalize; + +err_finalize: + if (err) + drm_pagemap_migration_unlock_put_pages(npages, migrate.dst); + migrate_vma_pages(&migrate); + migrate_vma_finalize(&migrate); + if (dev) + drm_pagemap_migrate_unmap_pages(dev, dma_addr, npages, + DMA_FROM_DEVICE); +err_free: + kvfree(buf); +err_out: + + return err; +} + +/** + * drm_pagemap_page_free() - Put GPU SVM zone device data associated with a page + * @page: Pointer to the page + * + * This function is a callback used to put the GPU SVM zone device data + * associated with a page when it is being released. + */ +static void drm_pagemap_page_free(struct page *page) +{ + drm_pagemap_zdd_put(page->zone_device_data); +} + +/** + * drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (page fault handler) + * @vmf: Pointer to the fault information structure + * + * This function is a page fault handler used to migrate a virtual range + * to ram. The device memory allocation in which the device page is found is + * migrated in its entirety. + * + * Returns: + * VM_FAULT_SIGBUS on failure, 0 on success. 
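+ *
+ * This handler is not called directly; it is wired up through the
+ * &struct dev_pagemap_ops returned by drm_pagemap_pagemap_ops_get(). A
+ * hypothetical driver setup sketch, where my_pgmap and my_owner are assumed
+ * driver objects::
+ *
+ *	my_pgmap->pagemap.type = MEMORY_DEVICE_PRIVATE;
+ *	my_pgmap->pagemap.range.start = res->start;
+ *	my_pgmap->pagemap.range.end = res->end;
+ *	my_pgmap->pagemap.nr_range = 1;
+ *	my_pgmap->pagemap.ops = drm_pagemap_pagemap_ops_get();
+ *	my_pgmap->pagemap.owner = my_owner;
+ *	addr = devm_memremap_pages(dev, &my_pgmap->pagemap);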
+ */ +static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf) +{ + struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data; + int err; + + err = __drm_pagemap_migrate_to_ram(vmf->vma, + zdd->device_private_page_owner, + vmf->page, vmf->address, + zdd->devmem_allocation->size); + + return err ? VM_FAULT_SIGBUS : 0; +} + +static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = { + .page_free = drm_pagemap_page_free, + .migrate_to_ram = drm_pagemap_migrate_to_ram, +}; + +/** + * drm_pagemap_pagemap_ops_get() - Retrieve GPU SVM device page map operations + * + * Returns: + * Pointer to the GPU SVM device page map operations structure. + */ +const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void) +{ + return &drm_pagemap_pagemap_ops; +} +EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get); + +/** + * drm_pagemap_devmem_init() - Initialize a drm_pagemap device memory allocation + * + * @devmem_allocation: The struct drm_pagemap_devmem to initialize. + * @dev: Pointer to the device structure to which the device memory allocation belongs + * @mm: Pointer to the mm_struct for the address space + * @ops: Pointer to the operations structure for GPU SVM device memory + * @dpagemap: The struct drm_pagemap we're allocating from. + * @size: Size of device memory allocation + */ +void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation, + struct device *dev, struct mm_struct *mm, + const struct drm_pagemap_devmem_ops *ops, + struct drm_pagemap *dpagemap, size_t size) +{ + init_completion(&devmem_allocation->detached); + devmem_allocation->dev = dev; + devmem_allocation->mm = mm; + devmem_allocation->ops = ops; + devmem_allocation->dpagemap = dpagemap; + devmem_allocation->size = size; +} +EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init); + +/** + * drm_pagemap_page_to_dpagemap() - Return a pointer to the drm_pagemap of a page + * @page: The struct page. + * + * Return: A pointer to the struct drm_pagemap of a device private page that + * was populated from the struct drm_pagemap. If the page was *not* populated + * from a struct drm_pagemap, the result is undefined and the function call + * may result in dereferencing an invalid address. + */ +struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page) +{ + struct drm_pagemap_zdd *zdd = page->zone_device_data; + + return zdd->devmem_allocation->dpagemap; +} +EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap); + +/** + * drm_pagemap_populate_mm() - Populate a virtual range with device memory pages + * @dpagemap: Pointer to the drm_pagemap managing the device memory + * @start: Start of the virtual range to populate. + * @end: End of the virtual range to populate. + * @mm: Pointer to the virtual address space. + * @timeslice_ms: The time requested for the migrated pagemap pages to + * be present in @mm before being allowed to be migrated back. + * + * Attempt to populate a virtual range with device memory pages, + * clearing them or migrating data from the existing pages if necessary. + * The function is best effort only, and implementations may vary + * in how hard they try to satisfy the request. + * + * Return: %0 on success, negative error code on error. If the hardware + * device was removed / unbound the function will return %-ENODEV.
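+ *
+ * A minimal, hypothetical caller from a GPU page-fault service path, where
+ * fallback_to_system() is an assumed driver helper::
+ *
+ *	start = ALIGN_DOWN(fault_addr, SZ_2M);
+ *	err = drm_pagemap_populate_mm(dpagemap, start, start + SZ_2M,
+ *				      mm, timeslice_ms);
+ *	if (err == -ENODEV)
+ *		err = fallback_to_system(vm, start, SZ_2M);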
+ */ +int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap, + unsigned long start, unsigned long end, + struct mm_struct *mm, + unsigned long timeslice_ms) +{ + int err; + + if (!mmget_not_zero(mm)) + return -EFAULT; + mmap_read_lock(mm); + err = dpagemap->ops->populate_mm(dpagemap, start, end, mm, + timeslice_ms); + mmap_read_unlock(mm); + mmput(mm); + + return err; +} +EXPORT_SYMBOL(drm_pagemap_populate_mm); diff --git a/drivers/gpu/drm/drm_panic_qr.rs b/drivers/gpu/drm/drm_panic_qr.rs index dd55b1cb764d..50c286c5cee8 100644 --- a/drivers/gpu/drm/drm_panic_qr.rs +++ b/drivers/gpu/drm/drm_panic_qr.rs @@ -27,7 +27,7 @@ //! * <https://github.com/erwanvivien/fast_qr> //! * <https://github.com/bjguillot/qr> -use kernel::{prelude::*, str::CStr}; +use kernel::prelude::*; #[derive(Debug, Clone, Copy, PartialEq, Eq, Ord, PartialOrd)] struct Version(usize); @@ -381,6 +381,26 @@ struct DecFifo { len: usize, } +// On arm32 architecture, dividing an `u64` by a constant will generate a call +// to `__aeabi_uldivmod` which is not present in the kernel. +// So use the multiply by inverse method for this architecture. +fn div10(val: u64) -> u64 { + if cfg!(target_arch = "arm") { + let val_h = val >> 32; + let val_l = val & 0xFFFFFFFF; + let b_h: u64 = 0x66666666; + let b_l: u64 = 0x66666667; + + let tmp1 = val_h * b_l + ((val_l * b_l) >> 32); + let tmp2 = val_l * b_h + (tmp1 & 0xffffffff); + let tmp3 = val_h * b_h + (tmp1 >> 32) + (tmp2 >> 32); + + tmp3 >> 2 + } else { + val / 10 + } +} + impl DecFifo { fn push(&mut self, data: u64, len: usize) { let mut chunk = data; @@ -389,7 +409,7 @@ impl DecFifo { } for i in 0..len { self.decimals[i] = (chunk % 10) as u8; - chunk /= 10; + chunk = div10(chunk); } self.len += len; } @@ -404,7 +424,7 @@ impl DecFifo { let mut out = 0; let mut exp = 1; for i in 0..poplen { - out += self.decimals[self.len + i] as u16 * exp; + out += u16::from(self.decimals[self.len + i]) * exp; exp *= 10; } Some((out, NUM_CHARS_BITS[poplen])) @@ -425,7 +445,7 @@ impl Iterator for SegmentIterator<'_> { match self.segment { Segment::Binary(data) => { if self.offset < data.len() { - let byte = data[self.offset] as u16; + let byte = u16::from(data[self.offset]); self.offset += 1; Some((byte, 8)) } else { diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index b703f83874e1..a23fc712a8b7 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -453,7 +453,13 @@ struct dma_buf *drm_gem_prime_handle_to_dmabuf(struct drm_device *dev, } mutex_lock(&dev->object_name_lock); - /* re-export the original imported/exported object */ + /* re-export the original imported object */ + if (obj->import_attach) { + dmabuf = obj->import_attach->dmabuf; + get_dma_buf(dmabuf); + goto out_have_obj; + } + if (obj->dma_buf) { get_dma_buf(obj->dma_buf); dmabuf = obj->dma_buf; diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index 60c1f26edb6f..a455c56dbbeb 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -319,7 +319,7 @@ static const struct bin_attribute edid_attr = { .attr.name = "edid", .attr.mode = 0444, .size = 0, - .read_new = edid_show, + .read = edid_show, }; static const struct bin_attribute *const connector_bin_attrs[] = { @@ -329,7 +329,7 @@ static const struct bin_attribute *const connector_bin_attrs[] = { static const struct attribute_group connector_dev_group = { .attrs = connector_dev_attrs, - .bin_attrs_new = connector_bin_attrs, + .bin_attrs = connector_bin_attrs, }; static const struct attribute_group 
*connector_dev_groups[] = { diff --git a/drivers/gpu/drm/drm_writeback.c b/drivers/gpu/drm/drm_writeback.c index ace8c98c3e04..95b8a2e4bda6 100644 --- a/drivers/gpu/drm/drm_writeback.c +++ b/drivers/gpu/drm/drm_writeback.c @@ -344,17 +344,18 @@ EXPORT_SYMBOL(drm_writeback_connector_init_with_encoder); /** * drm_writeback_connector_cleanup - Cleanup the writeback connector * @dev: DRM device - * @wb_connector: Pointer to the writeback connector to clean up + * @data: Pointer to the writeback connector to clean up * * This will decrement the reference counter of blobs and destroy properties. It * will also clean the remaining jobs in this writeback connector. Caution: This helper will not * clean up the attached encoder and the drm_connector. */ static void drm_writeback_connector_cleanup(struct drm_device *dev, - struct drm_writeback_connector *wb_connector) + void *data) { unsigned long flags; struct drm_writeback_job *pos, *n; + struct drm_writeback_connector *wb_connector = data; delete_writeback_properties(dev); drm_property_blob_put(wb_connector->pixel_formats_blob_ptr); @@ -406,7 +407,7 @@ int drmm_writeback_connector_init(struct drm_device *dev, if (ret) return ret; - ret = drmm_add_action_or_reset(dev, (void *)drm_writeback_connector_cleanup, + ret = drmm_add_action_or_reset(dev, drm_writeback_connector_cleanup, wb_connector); if (ret) return ret; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index 917ad527c961..40a50c60dfff 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj) struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr); if (etnaviv_obj->vaddr) - dma_buf_vunmap_unlocked(etnaviv_obj->base.dma_buf, &map); + dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map); /* Don't drop the pages for imported dmabuf, as they are not * ours, just free the array we allocated: @@ -82,7 +82,7 @@ static void *etnaviv_gem_prime_vmap_impl(struct etnaviv_gem_object *etnaviv_obj) lockdep_assert_held(&etnaviv_obj->lock); - ret = dma_buf_vmap(etnaviv_obj->base.dma_buf, &map); + ret = dma_buf_vmap(etnaviv_obj->base.import_attach->dmabuf, &map); if (ret) return NULL; return map.vaddr; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c index 76a3a3e517d8..df4232d7e135 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c @@ -40,11 +40,11 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job int change; /* - * If the GPU managed to complete this jobs fence, the timout is - * spurious. Bail out. + * If the GPU managed to complete this job's fence, the timeout has + * fired before the free-job worker ran. The timeout is spurious, so bail out.
*/ if (dma_fence_is_signaled(submit->out_fence)) - goto out_no_timeout; + return DRM_GPU_SCHED_STAT_NO_HANG; /* * If the GPU is still making forward progress on the front-end (which @@ -70,7 +70,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job gpu->hangcheck_dma_addr = dma_addr; gpu->hangcheck_primid = primid; gpu->hangcheck_fence = gpu->completed_fence; - goto out_no_timeout; + return DRM_GPU_SCHED_STAT_NO_HANG; } /* block scheduler */ @@ -86,11 +86,7 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job drm_sched_resubmit_jobs(&gpu->sched); drm_sched_start(&gpu->sched, 0); - return DRM_GPU_SCHED_STAT_NOMINAL; - -out_no_timeout: - list_add(&sched_job->list, &sched_job->sched->pending_list); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } static void etnaviv_sched_free_job(struct drm_sched_job *sched_job) diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index f91daefa9d2b..805aa28c1723 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -636,6 +636,10 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) if (!ctx->drm_dev) goto out; + /* check if crtc and vblank have been initialized properly */ + if (!drm_dev_has_vblank(ctx->drm_dev)) + goto out; + if (!ctx->i80_if) { drm_crtc_handle_vblank(&ctx->crtc->base); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index fc1c5608db96..ddd73e7f26a3 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -56,6 +56,7 @@ static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = { struct drm_framebuffer * exynos_drm_framebuffer_init(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct exynos_drm_gem **exynos_gem, int count) @@ -76,7 +77,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev, fb->obj[i] = &exynos_gem[i]->base; } - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs); if (ret < 0) { @@ -94,9 +95,9 @@ err: static struct drm_framebuffer * exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { - const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd); struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; struct drm_framebuffer *fb; int i; @@ -124,7 +125,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, } } - fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i); + fb = exynos_drm_framebuffer_init(dev, info, mode_cmd, exynos_gem, i); if (IS_ERR(fb)) { ret = PTR_ERR(fb); goto err; diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h index 2f841bbdddc5..fdc6cb40cc9c 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.h +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h @@ -14,6 +14,7 @@ struct drm_framebuffer * exynos_drm_framebuffer_init(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct exynos_drm_gem **exynos_gem, int count); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c index 9526a25e90ac..93de25b77e68 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c +++ 
b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c @@ -116,7 +116,10 @@ int exynos_drm_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, return PTR_ERR(exynos_gem); helper->fb = - exynos_drm_framebuffer_init(dev, &mode_cmd, &exynos_gem, 1); + exynos_drm_framebuffer_init(dev, + drm_get_format_info(dev, mode_cmd.pixel_format, + mode_cmd.modifier[0]), + &mode_cmd, &exynos_gem, 1); if (IS_ERR(helper->fb)) { DRM_DEV_ERROR(dev->dev, "failed to create drm framebuffer.\n"); ret = PTR_ERR(helper->fb); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index c394cc702d7d..205c238cc73a 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -187,6 +187,7 @@ struct fimd_context { u32 i80ifcon; bool i80_if; bool suspended; + bool dp_clk_enabled; wait_queue_head_t wait_vsync_queue; atomic_t wait_vsync_event; atomic_t win_updated; @@ -1047,7 +1048,18 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable) struct fimd_context *ctx = container_of(clk, struct fimd_context, dp_clk); u32 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; + + if (enable == ctx->dp_clk_enabled) + return; + + if (enable) + pm_runtime_resume_and_get(ctx->dev); + + ctx->dp_clk_enabled = enable; writel(val, ctx->regs + DP_MIE_CLKCON); + + if (!enable) + pm_runtime_put(ctx->dev); } static const struct exynos_drm_crtc_ops fimd_crtc_ops = { diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c index 4787fee4696f..e3fbb45f37a2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c @@ -7,7 +7,6 @@ #include <linux/dma-buf.h> -#include <linux/pfn_t.h> #include <linux/shmem_fs.h> #include <linux/module.h> @@ -174,7 +173,7 @@ static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev, return ERR_PTR(ret); } - DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %pK\n", obj->filp); + DRM_DEV_DEBUG_KMS(dev->dev, "created file object = %p\n", obj->filp); return exynos_gem; } diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c index ea9f66037600..03c8490af4f4 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c +++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c @@ -271,7 +271,7 @@ static inline struct exynos_drm_ipp_task * task->src.rect.h = task->dst.rect.h = UINT_MAX; task->transform.rotation = DRM_MODE_ROTATE_0; - DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %pK\n", task); + DRM_DEV_DEBUG_DRIVER(task->dev, "Allocated task %p\n", task); return task; } @@ -339,7 +339,7 @@ static int exynos_drm_ipp_task_set(struct exynos_drm_ipp_task *task, } DRM_DEV_DEBUG_DRIVER(task->dev, - "Got task %pK configuration from userspace\n", + "Got task %p configuration from userspace\n", task); return 0; } @@ -394,7 +394,7 @@ static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf) static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp, struct exynos_drm_ipp_task *task) { - DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %pK\n", task); + DRM_DEV_DEBUG_DRIVER(task->dev, "Freeing task %p\n", task); exynos_drm_ipp_task_release_buf(&task->src); exynos_drm_ipp_task_release_buf(&task->dst); @@ -559,7 +559,7 @@ static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task, DRM_EXYNOS_IPP_FORMAT_DESTINATION); if (!fmt) { DRM_DEV_DEBUG_DRIVER(task->dev, - "Task %pK: %s format not supported\n", + "Task %p: %s format not supported\n", task, buf == src ? 
"src" : "dst"); return -EINVAL; } @@ -609,7 +609,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) bool rotate = (rotation != DRM_MODE_ROTATE_0); bool scale = false; - DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %pK\n", task); + DRM_DEV_DEBUG_DRIVER(task->dev, "Checking task %p\n", task); if (src->rect.w == UINT_MAX) src->rect.w = src->buf.width; @@ -625,7 +625,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) dst->rect.x + dst->rect.w > (dst->buf.width) || dst->rect.y + dst->rect.h > (dst->buf.height)) { DRM_DEV_DEBUG_DRIVER(task->dev, - "Task %pK: defined area is outside provided buffers\n", + "Task %p: defined area is outside provided buffers\n", task); return -EINVAL; } @@ -642,7 +642,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_SCALE) && scale) || (!(ipp->capabilities & DRM_EXYNOS_IPP_CAP_CONVERT) && src->buf.fourcc != dst->buf.fourcc)) { - DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: hw capabilities exceeded\n", + DRM_DEV_DEBUG_DRIVER(task->dev, "Task %p: hw capabilities exceeded\n", task); return -EINVAL; } @@ -655,7 +655,7 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task) if (ret) return ret; - DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %pK: all checks done.\n", + DRM_DEV_DEBUG_DRIVER(ipp->dev, "Task %p: all checks done.\n", task); return ret; @@ -667,25 +667,25 @@ static int exynos_drm_ipp_task_setup_buffers(struct exynos_drm_ipp_task *task, struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst; int ret = 0; - DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %pK\n", + DRM_DEV_DEBUG_DRIVER(task->dev, "Setting buffer for task %p\n", task); ret = exynos_drm_ipp_task_setup_buffer(src, filp); if (ret) { DRM_DEV_DEBUG_DRIVER(task->dev, - "Task %pK: src buffer setup failed\n", + "Task %p: src buffer setup failed\n", task); return ret; } ret = exynos_drm_ipp_task_setup_buffer(dst, filp); if (ret) { DRM_DEV_DEBUG_DRIVER(task->dev, - "Task %pK: dst buffer setup failed\n", + "Task %p: dst buffer setup failed\n", task); return ret; } - DRM_DEV_DEBUG_DRIVER(task->dev, "Task %pK: buffers prepared.\n", + DRM_DEV_DEBUG_DRIVER(task->dev, "Task %p: buffers prepared.\n", task); return ret; @@ -764,7 +764,7 @@ void exynos_drm_ipp_task_done(struct exynos_drm_ipp_task *task, int ret) struct exynos_drm_ipp *ipp = task->ipp; unsigned long flags; - DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %pK done: %d\n", + DRM_DEV_DEBUG_DRIVER(task->dev, "ipp: %d, task %p done: %d\n", ipp->id, task, ret); spin_lock_irqsave(&ipp->lock, flags); @@ -807,7 +807,7 @@ static void exynos_drm_ipp_next_task(struct exynos_drm_ipp *ipp) spin_unlock_irqrestore(&ipp->lock, flags); DRM_DEV_DEBUG_DRIVER(ipp->dev, - "ipp: %d, selected task %pK to run\n", ipp->id, + "ipp: %d, selected task %p to run\n", ipp->id, task); ret = ipp->funcs->commit(ipp, task); @@ -917,14 +917,14 @@ int exynos_drm_ipp_commit_ioctl(struct drm_device *dev, void *data, */ if (arg->flags & DRM_EXYNOS_IPP_FLAG_NONBLOCK) { DRM_DEV_DEBUG_DRIVER(ipp->dev, - "ipp: %d, nonblocking processing task %pK\n", + "ipp: %d, nonblocking processing task %p\n", ipp->id, task); task->flags |= DRM_EXYNOS_IPP_TASK_ASYNC; exynos_drm_ipp_schedule_task(task->ipp, task); ret = 0; } else { - DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %pK\n", + DRM_DEV_DEBUG_DRIVER(ipp->dev, "ipp: %d, processing task %p\n", ipp->id, task); exynos_drm_ipp_schedule_task(ipp, task); ret = wait_event_interruptible(ipp->done_wq, 
diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c index 8edefea2ef59..4a37136f90f4 100644 --- a/drivers/gpu/drm/gma500/fbdev.c +++ b/drivers/gpu/drm/gma500/fbdev.c @@ -6,7 +6,6 @@ **************************************************************************/ #include <linux/fb.h> -#include <linux/pfn_t.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> @@ -33,7 +32,7 @@ static vm_fault_t psb_fbdev_vm_fault(struct vm_fault *vmf) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); for (i = 0; i < page_num; ++i) { - err = vmf_insert_mixed(vma, address, __pfn_to_pfn_t(pfn, PFN_DEV)); + err = vmf_insert_mixed(vma, address, pfn); if (unlikely(err & VM_FAULT_ERROR)) break; address += PAGE_SIZE; @@ -203,7 +202,10 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, return PTR_ERR(backing); obj = &backing->base; - fb = psb_framebuffer_create(dev, &mode_cmd, obj); + fb = psb_framebuffer_create(dev, + drm_get_format_info(dev, mode_cmd.pixel_format, + mode_cmd.modifier[0]), + &mode_cmd, obj); if (IS_ERR(fb)) { ret = PTR_ERR(fb); goto err_drm_gem_object_put; diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index 1a374702b696..e69b537ded6b 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -29,24 +29,23 @@ static const struct drm_framebuffer_funcs psb_fb_funcs = { */ static int psb_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { - const struct drm_format_info *info; int ret; /* * Reject unknown formats, YUV formats, and formats with more than * 4 bytes per pixel. */ - info = drm_get_format_info(dev, mode_cmd); - if (!info || !info->depth || info->cpp[0] > 4) + if (!info->depth || info->cpp[0] > 4) return -EINVAL; if (mode_cmd->pitches[0] & 63) return -EINVAL; - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); fb->obj[0] = obj; ret = drm_framebuffer_init(dev, fb, &psb_fb_funcs); if (ret) { @@ -59,6 +58,7 @@ static int psb_framebuffer_init(struct drm_device *dev, /** * psb_framebuffer_create - create a framebuffer backed by gt * @dev: our DRM device + * @info: pixel format information * @mode_cmd: the description of the requested mode * @obj: the backing object * @@ -68,6 +68,7 @@ static int psb_framebuffer_init(struct drm_device *dev, * TODO: review object references */ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { @@ -78,7 +79,7 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev, if (!fb) return ERR_PTR(-ENOMEM); - ret = psb_framebuffer_init(dev, fb, mode_cmd, obj); + ret = psb_framebuffer_init(dev, fb, info, mode_cmd, obj); if (ret) { kfree(fb); return ERR_PTR(ret); @@ -96,6 +97,7 @@ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev, */ static struct drm_framebuffer *psb_user_framebuffer_create (struct drm_device *dev, struct drm_file *filp, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *cmd) { struct drm_gem_object *obj; @@ -110,7 +112,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create return ERR_PTR(-ENOENT); /* Let the core code do all the work */ - fb = psb_framebuffer_create(dev, cmd, obj); + fb = psb_framebuffer_create(dev, info, cmd, obj); if (IS_ERR(fb)) 
drm_gem_object_put(obj); diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 7f77cb2b2751..0b27112ec46f 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -594,6 +594,7 @@ extern void psb_modeset_cleanup(struct drm_device *dev); /* framebuffer */ struct drm_framebuffer *psb_framebuffer_create(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c index adadd526641d..8d548d08f127 100644 --- a/drivers/gpu/drm/gud/gud_pipe.c +++ b/drivers/gpu/drm/gud/gud_pipe.c @@ -188,8 +188,13 @@ retry: } else if (format->format == DRM_FORMAT_RGB332) { drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect, fmtcnv_state); } else if (format->format == DRM_FORMAT_RGB565) { - drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, fmtcnv_state, - gud_is_big_endian()); + if (gud_is_big_endian()) { + drm_fb_xrgb8888_to_rgb565be(&dst, NULL, src, fb, rect, + fmtcnv_state); + } else { + drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, + fmtcnv_state); + } } else if (format->format == DRM_FORMAT_RGB888) { drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect, fmtcnv_state); } else { diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c index 74f7832ea53e..0726cb5b736e 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c +++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c @@ -325,6 +325,17 @@ static int hibmc_dp_link_downgrade_training_eq(struct hibmc_dp_dev *dp) return hibmc_dp_link_reduce_rate(dp); } +static void hibmc_dp_update_caps(struct hibmc_dp_dev *dp) +{ + dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE]; + if (dp->link.cap.link_rate > DP_LINK_BW_8_1 || !dp->link.cap.link_rate) + dp->link.cap.link_rate = DP_LINK_BW_8_1; + + dp->link.cap.lanes = dp->dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK; + if (dp->link.cap.lanes > HIBMC_DP_LANE_NUM_MAX) + dp->link.cap.lanes = HIBMC_DP_LANE_NUM_MAX; +} + int hibmc_dp_link_training(struct hibmc_dp_dev *dp) { struct hibmc_dp_link *link = &dp->link; @@ -334,8 +345,7 @@ int hibmc_dp_link_training(struct hibmc_dp_dev *dp) if (ret) drm_err(dp->dev, "dp aux read dpcd failed, ret: %d\n", ret); - dp->link.cap.link_rate = dp->dpcd[DP_MAX_LINK_RATE]; - dp->link.cap.lanes = 0x2; + hibmc_dp_update_caps(dp); ret = hibmc_dp_get_serdes_rate_cfg(dp); if (ret < 0) diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index 768b97f9e74a..289304500ab0 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -32,7 +32,7 @@ DEFINE_DRM_GEM_FOPS(hibmc_fops); -static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "vblank", "hpd" }; +static const char *g_irqs_names_map[HIBMC_MAX_VECTORS] = { "hibmc-vblank", "hibmc-hpd" }; static irqreturn_t hibmc_interrupt(int irq, void *arg) { @@ -115,6 +115,8 @@ static const struct drm_mode_config_funcs hibmc_mode_funcs = { static int hibmc_kms_init(struct hibmc_drm_private *priv) { struct drm_device *dev = &priv->dev; + struct drm_encoder *encoder; + u32 clone_mask = 0; int ret; ret = drmm_mode_config_init(dev); @@ -154,6 +156,12 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv) return ret; } + drm_for_each_encoder(encoder, dev) + clone_mask |= drm_encoder_mask(encoder); + + drm_for_each_encoder(encoder, dev) + encoder->possible_clones = clone_mask; 
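+
+	/* Any encoder may be cloned with any other on the same CRTC, including itself. */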
+ return 0; } @@ -277,7 +285,6 @@ static void hibmc_unload(struct drm_device *dev) static int hibmc_msi_init(struct drm_device *dev) { struct pci_dev *pdev = to_pci_dev(dev->dev); - char name[32] = {0}; int valid_irq_num; int irq; int ret; @@ -292,9 +299,6 @@ static int hibmc_msi_init(struct drm_device *dev) valid_irq_num = ret; for (int i = 0; i < valid_irq_num; i++) { - snprintf(name, ARRAY_SIZE(name) - 1, "%s-%s-%s", - dev->driver->name, pci_name(pdev), g_irqs_names_map[i]); - irq = pci_irq_vector(pdev, i); if (i) @@ -302,10 +306,10 @@ static int hibmc_msi_init(struct drm_device *dev) ret = devm_request_threaded_irq(&pdev->dev, irq, hibmc_dp_interrupt, hibmc_dp_hpd_isr, - IRQF_SHARED, name, dev); + IRQF_SHARED, g_irqs_names_map[i], dev); else ret = devm_request_irq(&pdev->dev, irq, hibmc_interrupt, - IRQF_SHARED, name, dev); + IRQF_SHARED, g_irqs_names_map[i], dev); if (ret) { drm_err(dev, "install irq failed: %d\n", ret); return ret; @@ -323,13 +327,13 @@ static int hibmc_load(struct drm_device *dev) ret = hibmc_hw_init(priv); if (ret) - goto err; + return ret; ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (ret) { drm_err(dev, "Error initializing VRAM MM; %d\n", ret); - goto err; + return ret; } ret = hibmc_kms_init(priv); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h index 274feabe7df0..ca8502e2760c 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h @@ -69,6 +69,7 @@ int hibmc_de_init(struct hibmc_drm_private *priv); int hibmc_vdac_init(struct hibmc_drm_private *priv); int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *connector); +void hibmc_ddc_del(struct hibmc_vdac *vdac); int hibmc_dp_init(struct hibmc_drm_private *priv); diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c index 99b3b77b5445..44860011855e 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c @@ -95,3 +95,8 @@ int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_vdac *vdac) return i2c_bit_add_bus(&vdac->adapter); } + +void hibmc_ddc_del(struct hibmc_vdac *vdac) +{ + i2c_del_adapter(&vdac->adapter); +} diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c index e8a527ede854..841e81f47b68 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c @@ -53,7 +53,7 @@ static void hibmc_connector_destroy(struct drm_connector *connector) { struct hibmc_vdac *vdac = to_hibmc_vdac(connector); - i2c_del_adapter(&vdac->adapter); + hibmc_ddc_del(vdac); drm_connector_cleanup(connector); } @@ -110,7 +110,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv) ret = drmm_encoder_init(dev, encoder, NULL, DRM_MODE_ENCODER_DAC, NULL); if (ret) { drm_err(dev, "failed to init encoder: %d\n", ret); - return ret; + goto err; } drm_encoder_helper_add(encoder, &hibmc_encoder_helper_funcs); @@ -121,7 +121,7 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv) &vdac->adapter); if (ret) { drm_err(dev, "failed to init connector: %d\n", ret); - return ret; + goto err; } drm_connector_helper_add(connector, &hibmc_connector_helper_funcs); @@ -131,4 +131,9 @@ int hibmc_vdac_init(struct hibmc_drm_private *priv) connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; return 0; + +err: + 
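+	/*
+	 * Only the ddc adapter needs explicit teardown on failure; the
+	 * drmm-managed encoder is cleaned up automatically.
+	 */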
hibmc_ddc_del(vdac); + + return ret; } diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 7c6075bc483c..853543443072 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -218,12 +218,11 @@ i915-$(CONFIG_HWMON) += \ # modesetting core code i915-y += \ display/hsw_ips.o \ - display/i9xx_plane.o \ display/i9xx_display_sr.o \ + display/i9xx_plane.o \ display/i9xx_wm.o \ display/intel_alpm.o \ display/intel_atomic.o \ - display/intel_atomic_plane.o \ display/intel_audio.o \ display/intel_bios.o \ display/intel_bo.o \ @@ -265,6 +264,7 @@ i915-y += \ display/intel_fbc.o \ display/intel_fdi.o \ display/intel_fifo_underrun.o \ + display/intel_flipq.o \ display/intel_frontbuffer.o \ display/intel_global_state.o \ display/intel_hdcp.o \ @@ -283,6 +283,7 @@ i915-y += \ display/intel_pch.o \ display/intel_pch_display.o \ display/intel_pch_refclk.o \ + display/intel_plane.o \ display/intel_plane_initial.o \ display/intel_pmdemand.o \ display/intel_psr.o \ diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c index 0d33782f11be..927fe56aec77 100644 --- a/drivers/gpu/drm/i915/display/hsw_ips.c +++ b/drivers/gpu/drm/i915/display/hsw_ips.c @@ -5,8 +5,9 @@ #include <linux/debugfs.h> +#include <drm/drm_print.h> + #include "hsw_ips.h" -#include "i915_drv.h" #include "i915_reg.h" #include "intel_color_regs.h" #include "intel_de.h" @@ -18,8 +19,6 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); u32 val; if (!crtc_state->ips_enabled) @@ -40,8 +39,8 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) if (display->platform.broadwell) { drm_WARN_ON(display->drm, - snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, - val | IPS_PCODE_CONTROL)); + intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL, + val | IPS_PCODE_CONTROL)); /* * Quoting Art Runyan: "its not safe to expect any particular * value in IPS_CTL bit 31 after enabling IPS through the @@ -66,8 +65,6 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state) bool hsw_ips_disable(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *i915 = to_i915(crtc->base.dev); bool need_vblank_wait = false; if (!crtc_state->ips_enabled) @@ -75,7 +72,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state) if (display->platform.broadwell) { drm_WARN_ON(display->drm, - snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0)); + intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL, 0)); /* * Wait for PCODE to finish disabling IPS. 
The BSpec specified * 42ms timeout value leads to occasional timeouts so use 100ms @@ -268,7 +265,7 @@ int hsw_ips_compute_config(struct intel_atomic_state *state, return PTR_ERR(cdclk_state); /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ - if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100) + if (crtc_state->pixel_rate > intel_cdclk_logical(cdclk_state) * 95 / 100) return 0; } diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 8f15333a4b07..f291ced989dc 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -15,7 +15,7 @@ #include "i9xx_plane.h" #include "i9xx_plane_regs.h" #include "intel_atomic.h" -#include "intel_atomic_plane.h" +#include "intel_bo.h" #include "intel_de.h" #include "intel_display_irq.h" #include "intel_display_regs.h" @@ -23,6 +23,7 @@ #include "intel_fb.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" +#include "intel_plane.h" #include "intel_sprite.h" /* Primary plane formats for gen <= 3 */ @@ -336,10 +337,10 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - i9xx_plane_has_windowing(plane)); + ret = intel_plane_check_clipping(plane_state, crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + i9xx_plane_has_windowing(plane)); if (ret) return ret; @@ -905,6 +906,27 @@ static const struct drm_plane_funcs i8xx_plane_funcs = { .format_mod_supported_async = intel_plane_format_mod_supported_async, }; +static void i9xx_disable_tiling(struct intel_plane *plane) +{ + struct intel_display *display = to_intel_display(plane); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + u32 dspcntr; + u32 reg; + + dspcntr = intel_de_read_fw(display, DSPCNTR(display, i9xx_plane)); + dspcntr &= ~DISP_TILED; + intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr); + + if (DISPLAY_VER(display) >= 4) { + reg = intel_de_read_fw(display, DSPSURF(display, i9xx_plane)); + intel_de_write_fw(display, DSPSURF(display, i9xx_plane), reg); + + } else { + reg = intel_de_read_fw(display, DSPADDR(display, i9xx_plane)); + intel_de_write_fw(display, DSPADDR(display, i9xx_plane), reg); + } +} + struct intel_plane * intel_primary_plane_create(struct intel_display *display, enum pipe pipe) { @@ -1047,6 +1069,8 @@ intel_primary_plane_create(struct intel_display *display, enum pipe pipe) } } + plane->disable_tiling = i9xx_disable_tiling; + modifiers = intel_fb_plane_get_modifiers(display, INTEL_PLANE_CAP_TILING_X); if (DISPLAY_VER(display) >= 5 || display->platform.g4x) @@ -1151,7 +1175,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, drm_WARN_ON(display->drm, pipe != crtc->pipe); - intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); + intel_fb = intel_bo_alloc_framebuffer(); if (!intel_fb) { drm_dbg_kms(display->drm, "failed to alloc fb\n"); return; diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index f85edb374c97..348b1655435e 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -26,7 +26,7 @@ * * The functions here implement the state management and hardware programming * dispatch required by the atomic modeset infrastructure. - * See intel_atomic_plane.c for the plane-specific atomic functionality. + * See intel_plane.c for the plane-specific atomic functionality. 
*/ #include <drm/display/drm_dp_tunnel.h> diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index 55af3a553c58..5bdaef38f13d 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -951,7 +951,7 @@ static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state, if (IS_ERR(cdclk_state)) return PTR_ERR(cdclk_state); - cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0; + intel_cdclk_force_min_cdclk(cdclk_state, enable ? 2 * 96000 : 0); return drm_atomic_commit(&state->base); } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 9ce41e689d50..9c268bed091d 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1939,7 +1939,7 @@ static int get_init_otp_deassert_fragment_len(struct intel_display *display, int index, len; if (drm_WARN_ON(display->drm, - !data || panel->vbt.dsi.seq_version != 1)) + !data || panel->vbt.dsi.seq_version >= 3)) return 0; /* index = 1 to skip sequence byte */ @@ -1962,7 +1962,7 @@ static int get_init_otp_deassert_fragment_len(struct intel_display *display, } /* - * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence. + * Some v1/v2 VBT MIPI sequences do the deassert in the init OTP sequence. * The deassert must be done before calling intel_dsi_device_ready, so for * these devices we split the init OTP sequence into a deassert sequence and * the actual init OTP part. @@ -1973,9 +1973,9 @@ static void vlv_fixup_mipi_sequences(struct intel_display *display, u8 *init_otp; int len; - /* Limit this to v1 vid-mode sequences */ + /* Limit this to v1/v2 vid-mode sequences */ if (panel->vbt.dsi.config->is_cmd_mode || - panel->vbt.dsi.seq_version != 1) + panel->vbt.dsi.seq_version >= 3) return; /* Only do this if there are otp and assert seqs and no deassert seq */ diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c index fbd16d7b58d9..65d64f79a4bd 100644 --- a/drivers/gpu/drm/i915/display/intel_bo.c +++ b/drivers/gpu/drm/i915/display/intel_bo.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: MIT /* Copyright © 2024 Intel Corporation */ +#include <drm/drm_panic.h> +#include "display/intel_display_types.h" #include "gem/i915_gem_mman.h" #include "gem/i915_gem_object.h" #include "gem/i915_gem_object_frontbuffer.h" @@ -57,3 +59,18 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj) { i915_debugfs_describe_obj(m, to_intel_bo(obj)); } + +struct intel_framebuffer *intel_bo_alloc_framebuffer(void) +{ + return i915_gem_object_alloc_framebuffer(); +} + +int intel_bo_panic_setup(struct drm_scanout_buffer *sb) +{ + return i915_gem_object_panic_setup(sb); +} + +void intel_bo_panic_finish(struct intel_framebuffer *fb) +{ + return i915_gem_object_panic_finish(fb); +} diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h index ea7a2253aaa5..97087a64d23b 100644 --- a/drivers/gpu/drm/i915/display/intel_bo.h +++ b/drivers/gpu/drm/i915/display/intel_bo.h @@ -7,6 +7,8 @@ #include <linux/types.h> struct drm_gem_object; +struct drm_scanout_buffer; +struct intel_framebuffer; struct seq_file; struct vm_area_struct; @@ -23,5 +25,8 @@ struct intel_frontbuffer *intel_bo_set_frontbuffer(struct drm_gem_object *obj, struct intel_frontbuffer *front); void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj); +struct intel_framebuffer 
*intel_bo_alloc_framebuffer(void); +int intel_bo_panic_setup(struct drm_scanout_buffer *sb); +void intel_bo_panic_finish(struct intel_framebuffer *fb); #endif /* __INTEL_BO__ */ diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 6c2ab2e0dc91..d29a755612de 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -18,8 +18,44 @@ #include "intel_display_types.h" #include "intel_mchbar_regs.h" #include "intel_pcode.h" +#include "intel_uncore.h" #include "skl_watermark.h" +struct intel_dbuf_bw { + unsigned int max_bw[I915_MAX_DBUF_SLICES]; + u8 active_planes[I915_MAX_DBUF_SLICES]; +}; + +struct intel_bw_state { + struct intel_global_state base; + struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES]; + + /* + * Contains a bit mask, used to determine, whether correspondent + * pipe allows SAGV or not. + */ + u8 pipe_sagv_reject; + + /* bitmask of active pipes */ + u8 active_pipes; + + /* + * From MTL onwards, to lock a QGV point, punit expects the peak BW of + * the selected QGV point as the parameter in multiples of 100MB/s + */ + u16 qgv_point_peakbw; + + /* + * Current QGV points mask, which restricts + * some particular SAGV states, not to confuse + * with pipe_sagv_mask. + */ + u16 qgv_points_mask; + + unsigned int data_rate[I915_MAX_PIPES]; + u8 num_active_planes[I915_MAX_PIPES]; +}; + /* Parameters for Qclk Geyserville (QGV) */ struct intel_qgv_point { u16 dclk, t_rp, t_rdpre, t_rc, t_ras, t_rcd; @@ -82,14 +118,13 @@ static int icl_pcode_read_qgv_point_info(struct intel_display *display, struct intel_qgv_point *sp, int point) { - struct drm_i915_private *i915 = to_i915(display->drm); u32 val = 0, val2 = 0; u16 dclk; int ret; - ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | - ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point), - &val, &val2); + ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point), + &val, &val2); if (ret) return ret; @@ -110,13 +145,12 @@ static int icl_pcode_read_qgv_point_info(struct intel_display *display, static int adls_pcode_read_psf_gv_point_info(struct intel_display *display, struct intel_psf_gv_point *points) { - struct drm_i915_private *i915 = to_i915(display->drm); u32 val = 0; int ret; int i; - ret = snb_pcode_read(&i915->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | - ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL); + ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL); if (ret) return ret; @@ -154,21 +188,20 @@ static bool is_sagv_enabled(struct intel_display *display, u16 points_mask) ICL_PCODE_REQ_QGV_PT_MASK); } -int icl_pcode_restrict_qgv_points(struct intel_display *display, - u32 points_mask) +static int icl_pcode_restrict_qgv_points(struct intel_display *display, + u32 points_mask) { - struct drm_i915_private *i915 = to_i915(display->drm); int ret; if (DISPLAY_VER(display) >= 14) return 0; /* bspec says to keep retrying for at least 1 ms */ - ret = skl_pcode_request(&i915->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG, - points_mask, - ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK, - ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE, - 1); + ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG, + points_mask, + ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK, + ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE, + 1); if (ret < 0) { drm_err(display->drm, @@ -420,6 +453,13 @@ static const struct intel_sa_info 
xe3lpd_sa_info = { .derating = 10, }; +static const struct intel_sa_info xe3lpd_3002_sa_info = { + .deburst = 32, + .deprogbwlimit = 22, /* GB/s */ + .displayrtids = 256, + .derating = 10, +}; + static int icl_get_bw_info(struct intel_display *display, const struct dram_info *dram_info, const struct intel_sa_info *sa) @@ -771,7 +811,9 @@ void intel_bw_init_hw(struct intel_display *display) if (!HAS_DISPLAY(display)) return; - if (DISPLAY_VER(display) >= 30) + if (DISPLAY_VERx100(display) >= 3002) + tgl_get_bw_info(display, dram_info, &xe3lpd_3002_sa_info); + else if (DISPLAY_VER(display) >= 30) tgl_get_bw_info(display, dram_info, &xe3lpd_sa_info); else if (DISPLAY_VERx100(display) >= 1401 && display->platform.dgfx && dram_info->type == INTEL_DRAM_GDDR_ECC) @@ -865,6 +907,11 @@ static unsigned int intel_bw_data_rate(struct intel_display *display, return data_rate; } +struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state) +{ + return container_of(obj_state, struct intel_bw_state, base); +} + struct intel_bw_state * intel_atomic_get_old_bw_state(struct intel_atomic_state *state) { @@ -974,6 +1021,70 @@ static void icl_force_disable_sagv(struct intel_display *display, icl_pcode_restrict_qgv_points(display, bw_state->qgv_points_mask); } +void icl_sagv_pre_plane_update(struct intel_atomic_state *state) +{ + struct intel_display *display = to_intel_display(state); + const struct intel_bw_state *old_bw_state = + intel_atomic_get_old_bw_state(state); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + u16 old_mask, new_mask; + + if (!new_bw_state) + return; + + old_mask = old_bw_state->qgv_points_mask; + new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; + + if (old_mask == new_mask) + return; + + WARN_ON(!new_bw_state->base.changed); + + drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n", + old_mask, new_mask); + + /* + * Restrict required qgv points before updating the configuration. + * According to BSpec we can't mask and unmask qgv points at the same + * time. Also masking should be done before updating the configuration + * and unmasking afterwards. + */ + icl_pcode_restrict_qgv_points(display, new_mask); +} + +void icl_sagv_post_plane_update(struct intel_atomic_state *state) +{ + struct intel_display *display = to_intel_display(state); + const struct intel_bw_state *old_bw_state = + intel_atomic_get_old_bw_state(state); + const struct intel_bw_state *new_bw_state = + intel_atomic_get_new_bw_state(state); + u16 old_mask, new_mask; + + if (!new_bw_state) + return; + + old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; + new_mask = new_bw_state->qgv_points_mask; + + if (old_mask == new_mask) + return; + + WARN_ON(!new_bw_state->base.changed); + + drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n", + old_mask, new_mask); + + /* + * Allow required qgv points after updating the configuration. + * According to BSpec we can't mask and unmask qgv points at the same + * time. Also masking should be done before updating the configuration + * and unmasking afterwards. + */ + icl_pcode_restrict_qgv_points(display, new_mask); +} + static int mtl_find_qgv_points(struct intel_display *display, unsigned int data_rate, unsigned int num_active_planes, @@ -994,7 +1105,7 @@ static int mtl_find_qgv_points(struct intel_display *display, * for qgv peak bw in PM Demand request. So assign UINT_MAX if SAGV is * not enabled. 
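The icl_sagv_pre_plane_update()/icl_sagv_post_plane_update() pair above encodes the BSpec rule that QGV points may only be masked before the configuration change and unmasked after it, never both in one step. A self-contained worked example of the mask arithmetic, with purely illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int old_mask = 0x3, new_mask = 0x5;

	/* pre-plane-update: restrict the union, never unmask early */
	unsigned int pre = old_mask | new_mask;	/* 0x3 | 0x5 = 0x7 */
	/* post-plane-update: relax down to exactly the new set */
	unsigned int post = new_mask;		/* 0x5 */

	printf("pre 0x%x -> post 0x%x\n", pre, post);
	return 0;
}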
PM Demand code will clamp the value for the register */ - if (!intel_can_enable_sagv(display, new_bw_state)) { + if (!intel_bw_can_enable_sagv(display, new_bw_state)) { new_bw_state->qgv_point_peakbw = U16_MAX; drm_dbg_kms(display->drm, "No SAGV, use UINT_MAX as peak bw."); return 0; @@ -1107,7 +1218,7 @@ static int icl_find_qgv_points(struct intel_display *display, * we can't enable SAGV due to the increased memory latency it may * cause. */ - if (!intel_can_enable_sagv(display, new_bw_state)) { + if (!intel_bw_can_enable_sagv(display, new_bw_state)) { qgv_points = icl_max_bw_qgv_point_mask(display, num_active_planes); drm_dbg_kms(display->drm, "No SAGV, using single QGV point mask 0x%x\n", qgv_points); @@ -1357,12 +1468,12 @@ int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, * requirements. This can reduce back and forth * display blinking due to constant cdclk changes. */ - if (new_min_cdclk <= cdclk_state->bw_min_cdclk) + if (new_min_cdclk <= intel_cdclk_bw_min_cdclk(cdclk_state)) return 0; drm_dbg_kms(display->drm, "new bandwidth min cdclk (%d kHz) > old min cdclk (%d kHz)\n", - new_min_cdclk, cdclk_state->bw_min_cdclk); + new_min_cdclk, intel_cdclk_bw_min_cdclk(cdclk_state)); *need_cdclk_calc = true; return 0; @@ -1474,8 +1585,8 @@ static int intel_bw_check_sagv_mask(struct intel_atomic_state *state) if (!new_bw_state) return 0; - if (intel_can_enable_sagv(display, new_bw_state) != - intel_can_enable_sagv(display, old_bw_state)) { + if (intel_bw_can_enable_sagv(display, new_bw_state) != + intel_bw_can_enable_sagv(display, old_bw_state)) { ret = intel_atomic_serialize_global_state(&new_bw_state->base); if (ret) return ret; @@ -1521,8 +1632,8 @@ int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms) new_bw_state = intel_atomic_get_new_bw_state(state); if (new_bw_state && - intel_can_enable_sagv(display, old_bw_state) != - intel_can_enable_sagv(display, new_bw_state)) + intel_bw_can_enable_sagv(display, old_bw_state) != + intel_bw_can_enable_sagv(display, new_bw_state)) changed = true; /* @@ -1644,3 +1755,32 @@ int intel_bw_init(struct intel_display *display) return 0; } + +bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state) +{ + const struct intel_bw_state *new_bw_state, *old_bw_state; + + new_bw_state = intel_atomic_get_new_bw_state(state); + old_bw_state = intel_atomic_get_old_bw_state(state); + + if (new_bw_state && + new_bw_state->qgv_point_peakbw != old_bw_state->qgv_point_peakbw) + return true; + + return false; +} + +bool intel_bw_can_enable_sagv(struct intel_display *display, + const struct intel_bw_state *bw_state) +{ + if (DISPLAY_VER(display) < 11 && + bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes)) + return false; + + return bw_state->pipe_sagv_reject == 0; +} + +int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state) +{ + return bw_state->qgv_point_peakbw; +} diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h index eb2cc883e9c1..d51f50c9d302 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.h +++ b/drivers/gpu/drm/i915/display/intel_bw.h @@ -8,52 +8,14 @@ #include <drm/drm_atomic.h> -#include "intel_display_limits.h" -#include "intel_display_power.h" -#include "intel_global_state.h" - struct intel_atomic_state; +struct intel_bw_state; struct intel_crtc; struct intel_crtc_state; struct intel_display; +struct intel_global_state; -struct intel_dbuf_bw { - unsigned int max_bw[I915_MAX_DBUF_SLICES]; - u8 active_planes[I915_MAX_DBUF_SLICES]; -}; - -struct 
intel_bw_state { - struct intel_global_state base; - struct intel_dbuf_bw dbuf_bw[I915_MAX_PIPES]; - - /* - * Contains a bit mask, used to determine, whether correspondent - * pipe allows SAGV or not. - */ - u8 pipe_sagv_reject; - - /* bitmask of active pipes */ - u8 active_pipes; - - /* - * From MTL onwards, to lock a QGV point, punit expects the peak BW of - * the selected QGV point as the parameter in multiples of 100MB/s - */ - u16 qgv_point_peakbw; - - /* - * Current QGV points mask, which restricts - * some particular SAGV states, not to confuse - * with pipe_sagv_mask. - */ - u16 qgv_points_mask; - - unsigned int data_rate[I915_MAX_PIPES]; - u8 num_active_planes[I915_MAX_PIPES]; -}; - -#define to_intel_bw_state(global_state) \ - container_of_const((global_state), struct intel_bw_state, base) +struct intel_bw_state *to_intel_bw_state(struct intel_global_state *obj_state); struct intel_bw_state * intel_atomic_get_old_bw_state(struct intel_atomic_state *state); @@ -67,8 +29,6 @@ intel_atomic_get_bw_state(struct intel_atomic_state *state); void intel_bw_init_hw(struct intel_display *display); int intel_bw_init(struct intel_display *display); int intel_bw_atomic_check(struct intel_atomic_state *state, bool any_ms); -int icl_pcode_restrict_qgv_points(struct intel_display *display, - u32 points_mask); int intel_bw_calc_min_cdclk(struct intel_atomic_state *state, bool *need_cdclk_calc); int intel_bw_min_cdclk(struct intel_display *display, @@ -76,4 +36,11 @@ int intel_bw_min_cdclk(struct intel_display *display, void intel_bw_update_hw_state(struct intel_display *display); void intel_bw_crtc_disable_noatomic(struct intel_crtc *crtc); +bool intel_bw_pmdemand_needs_update(struct intel_atomic_state *state); +bool intel_bw_can_enable_sagv(struct intel_display *display, + const struct intel_bw_state *bw_state); +void icl_sagv_pre_plane_update(struct intel_atomic_state *state); +void icl_sagv_post_plane_update(struct intel_atomic_state *state); +int intel_bw_qgv_point_peakbw(const struct intel_bw_state *bw_state); + #endif /* __INTEL_BW_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 38b3094b37d7..228aa64c1349 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -32,7 +32,6 @@ #include "i915_drv.h" #include "i915_reg.h" #include "intel_atomic.h" -#include "intel_atomic_plane.h" #include "intel_audio.h" #include "intel_bw.h" #include "intel_cdclk.h" @@ -43,6 +42,7 @@ #include "intel_mchbar_regs.h" #include "intel_pci_config.h" #include "intel_pcode.h" +#include "intel_plane.h" #include "intel_psr.h" #include "intel_vdsc.h" #include "skl_watermark.h" @@ -114,6 +114,42 @@ * dividers can be programmed correctly. */ +struct intel_cdclk_state { + struct intel_global_state base; + + /* + * Logical configuration of cdclk (used for all scaling, + * watermark, etc. calculations and checks). This is + * computed as if all enabled crtcs were active. + */ + struct intel_cdclk_config logical; + + /* + * Actual configuration of cdclk, can be different from the + * logical configuration only when all crtc's are DPMS off. 
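Both intel_bw_state (above) and intel_cdclk_state (below) move out of their headers, and to_intel_bw_state() becomes a real function instead of a container_of_const() macro, so other files see only an opaque type plus small helpers. A self-contained userspace sketch of that pattern, with stand-in names:

#include <stddef.h>

struct example_global_state { int serialized; };

struct example_bw_state {
	struct example_global_state base;
	unsigned short qgv_points_mask;
};

/* userspace stand-in for the kernel's container_of() */
#define example_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct example_bw_state *to_example_bw_state(struct example_global_state *s)
{
	return example_container_of(s, struct example_bw_state, base);
}

int main(void)
{
	struct example_bw_state bw = { .qgv_points_mask = 0x7 };

	return to_example_bw_state(&bw.base)->qgv_points_mask == 0x7 ? 0 : 1;
}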
+ */ + struct intel_cdclk_config actual; + + /* minimum acceptable cdclk to satisfy bandwidth requirements */ + int bw_min_cdclk; + /* minimum acceptable cdclk for each pipe */ + int min_cdclk[I915_MAX_PIPES]; + /* minimum acceptable voltage level for each pipe */ + u8 min_voltage_level[I915_MAX_PIPES]; + + /* pipe to which cd2x update is synchronized */ + enum pipe pipe; + + /* forced minimum cdclk for glk+ audio w/a */ + int force_min_cdclk; + + /* bitmask of active pipes */ + u8 active_pipes; + + /* update cdclk with pipes disabled */ + bool disable_pipes; +}; + struct intel_cdclk_funcs { void (*get_cdclk)(struct intel_display *display, struct intel_cdclk_config *cdclk_config); @@ -841,7 +877,6 @@ static void bdw_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - struct drm_i915_private *dev_priv = to_i915(display->drm); int cdclk = cdclk_config->cdclk; int ret; @@ -854,7 +889,7 @@ static void bdw_set_cdclk(struct intel_display *display, "trying to change cdclk frequency with cdclk not enabled\n")) return; - ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); + ret = intel_pcode_write(display->drm, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); if (ret) { drm_err(display->drm, "failed to inform pcode about cdclk change\n"); @@ -882,8 +917,8 @@ static void bdw_set_cdclk(struct intel_display *display, LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) drm_err(display->drm, "Switching back to LCPLL failed\n"); - snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ, - cdclk_config->voltage_level); + intel_pcode_write(display->drm, HSW_PCODE_DE_WRITE_FREQ_REQ, + cdclk_config->voltage_level); intel_de_write(display, CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); @@ -1123,7 +1158,6 @@ static void skl_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - struct drm_i915_private *dev_priv = to_i915(display->drm); int cdclk = cdclk_config->cdclk; int vco = cdclk_config->vco; u32 freq_select, cdclk_ctl; @@ -1140,10 +1174,10 @@ static void skl_set_cdclk(struct intel_display *display, drm_WARN_ON_ONCE(display->drm, display->platform.skylake && vco == 8640000); - ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, - SKL_CDCLK_PREPARE_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, 3); + ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL, + SKL_CDCLK_PREPARE_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, 3); if (ret) { drm_err(display->drm, "Failed to inform PCU about cdclk change (%d)\n", ret); @@ -1186,8 +1220,8 @@ static void skl_set_cdclk(struct intel_display *display, intel_de_posting_read(display, CDCLK_CTL); /* inform PCU of the change */ - snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, - cdclk_config->voltage_level); + intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL, + cdclk_config->voltage_level); intel_update_cdclk(display); } @@ -2123,7 +2157,6 @@ static void bxt_set_cdclk(struct intel_display *display, const struct intel_cdclk_config *cdclk_config, enum pipe pipe) { - struct drm_i915_private *dev_priv = to_i915(display->drm); struct intel_cdclk_config mid_cdclk_config; int cdclk = cdclk_config->cdclk; int ret = 0; @@ -2137,18 +2170,18 @@ static void bxt_set_cdclk(struct intel_display *display, if (DISPLAY_VER(display) >= 14 || display->platform.dg2) ; /* NOOP */ else if (DISPLAY_VER(display) >= 11) - ret = skl_pcode_request(&dev_priv->uncore, 
SKL_PCODE_CDCLK_CONTROL, - SKL_CDCLK_PREPARE_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, 3); + ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL, + SKL_CDCLK_PREPARE_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, 3); else /* * BSpec requires us to wait up to 150usec, but that leads to * timeouts; the 2ms used here is based on experiment. */ - ret = snb_pcode_write_timeout(&dev_priv->uncore, - HSW_PCODE_DE_WRITE_FREQ_REQ, - 0x80000000, 150, 2); + ret = intel_pcode_write_timeout(display->drm, + HSW_PCODE_DE_WRITE_FREQ_REQ, + 0x80000000, 2); if (ret) { drm_err(display->drm, @@ -2177,8 +2210,8 @@ static void bxt_set_cdclk(struct intel_display *display, * Display versions 14 and beyond */; else if (DISPLAY_VER(display) >= 11 && !display->platform.dg2) - ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL, - cdclk_config->voltage_level); + ret = intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL, + cdclk_config->voltage_level); if (DISPLAY_VER(display) < 11) { /* * The timeout isn't specified, the 2ms used here is based on @@ -2186,10 +2219,9 @@ static void bxt_set_cdclk(struct intel_display *display, * FIXME: Waiting for the request completion could be delayed * until the next PCODE request based on BSpec. */ - ret = snb_pcode_write_timeout(&dev_priv->uncore, - HSW_PCODE_DE_WRITE_FREQ_REQ, - cdclk_config->voltage_level, - 150, 2); + ret = intel_pcode_write_timeout(display->drm, + HSW_PCODE_DE_WRITE_FREQ_REQ, + cdclk_config->voltage_level, 2); } if (ret) { drm_err(display->drm, @@ -2475,7 +2507,6 @@ static void intel_pcode_notify(struct intel_display *display, bool cdclk_update_valid, bool pipe_count_update_valid) { - struct drm_i915_private *i915 = to_i915(display->drm); int ret; u32 update_mask = 0; @@ -2490,11 +2521,11 @@ static void intel_pcode_notify(struct intel_display *display, if (pipe_count_update_valid) update_mask |= DISPLAY_TO_PCODE_PIPE_COUNT_VALID; - ret = skl_pcode_request(&i915->uncore, SKL_PCODE_CDCLK_CONTROL, - SKL_CDCLK_PREPARE_FOR_CHANGE | - update_mask, - SKL_CDCLK_READY_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, 3); + ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL, + SKL_CDCLK_PREPARE_FOR_CHANGE | + update_mask, + SKL_CDCLK_READY_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, 3); if (ret) drm_err(display->drm, "Failed to inform PCU about display config (err %d)\n", @@ -3386,7 +3417,9 @@ static int intel_compute_max_dotclk(struct intel_display *display) */ void intel_update_max_cdclk(struct intel_display *display) { - if (DISPLAY_VER(display) >= 30) { + if (DISPLAY_VERx100(display) >= 3002) { + display->cdclk.max_cdclk_freq = 480000; + } else if (DISPLAY_VER(display) >= 30) { display->cdclk.max_cdclk_freq = 691200; } else if (display->platform.jasperlake || display->platform.elkhartlake) { if (display->cdclk.hw.ref == 24000) @@ -3837,3 +3870,60 @@ void intel_init_cdclk_hooks(struct intel_display *display) "Unknown platform. 
Assuming i830\n")) display->funcs.cdclk = &i830_cdclk_funcs; } + +int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state) +{ + return cdclk_state->logical.cdclk; +} + +int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state) +{ + return cdclk_state->actual.cdclk; +} + +int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state) +{ + return cdclk_state->actual.voltage_level; +} + +int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe) +{ + return cdclk_state->min_cdclk[pipe]; +} + +int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state) +{ + return cdclk_state->bw_min_cdclk; +} + +bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state) +{ + const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state; + + new_cdclk_state = intel_atomic_get_new_cdclk_state(state); + old_cdclk_state = intel_atomic_get_old_cdclk_state(state); + + if (new_cdclk_state && + (new_cdclk_state->actual.cdclk != old_cdclk_state->actual.cdclk || + new_cdclk_state->actual.voltage_level != old_cdclk_state->actual.voltage_level)) + return true; + + return false; +} + +void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk) +{ + cdclk_state->force_min_cdclk = force_min_cdclk; +} + +void intel_cdclk_read_hw(struct intel_display *display) +{ + struct intel_cdclk_state *cdclk_state; + + cdclk_state = to_intel_cdclk_state(display->cdclk.obj.state); + + intel_update_cdclk(display); + intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK"); + cdclk_state->actual = display->cdclk.hw; + cdclk_state->logical = display->cdclk.hw; +} diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index a1cefd455d92..cacee598af0e 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -8,10 +8,9 @@ #include <linux/types.h> -#include "intel_display_limits.h" -#include "intel_global_state.h" - +enum pipe; struct intel_atomic_state; +struct intel_cdclk_state; struct intel_crtc; struct intel_crtc_state; struct intel_display; @@ -23,42 +22,6 @@ struct intel_cdclk_config { bool joined_mbus; }; -struct intel_cdclk_state { - struct intel_global_state base; - - /* - * Logical configuration of cdclk (used for all scaling, - * watermark, etc. calculations and checks). This is - * computed as if all enabled crtcs were active. - */ - struct intel_cdclk_config logical; - - /* - * Actual configuration of cdclk, can be different from the - * logical configuration only when all crtc's are DPMS off. 
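The accessors added above replace direct field access from other files; for example, hsw_ips_compute_config()'s 95% rule earlier in this patch now reads intel_cdclk_logical(cdclk_state) instead of cdclk_state->logical.cdclk. A self-contained illustration with stand-in types and arbitrary example values:

#include <assert.h>

struct cdclk_config { int cdclk; };
struct cdclk_state  { struct cdclk_config logical, actual; };

static int cdclk_logical(const struct cdclk_state *s)
{
	return s->logical.cdclk;
}

int main(void)
{
	struct cdclk_state s = { .logical = { 652800 }, .actual = { 652800 } };
	int pixel_rate = 600000;	/* kHz, illustrative */

	/* pixel rate must not exceed 95% of logical cdclk (BDW IPS rule) */
	assert(pixel_rate <= cdclk_logical(&s) * 95 / 100);
	return 0;
}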
- */ - struct intel_cdclk_config actual; - - /* minimum acceptable cdclk to satisfy bandwidth requirements */ - int bw_min_cdclk; - /* minimum acceptable cdclk for each pipe */ - int min_cdclk[I915_MAX_PIPES]; - /* minimum acceptable voltage level for each pipe */ - u8 min_voltage_level[I915_MAX_PIPES]; - - /* pipe to which cd2x update is synchronized */ - enum pipe pipe; - - /* forced minimum cdclk for glk+ audio w/a */ - int force_min_cdclk; - - /* bitmask of active pipes */ - u8 active_pipes; - - /* update cdclk with pipes disabled */ - bool disable_pipes; -}; - void intel_cdclk_init_hw(struct intel_display *display); void intel_cdclk_uninit_hw(struct intel_display *display); void intel_init_cdclk_hooks(struct intel_display *display); @@ -97,4 +60,13 @@ void intel_cdclk_crtc_disable_noatomic(struct intel_crtc *crtc); int intel_cdclk_init(struct intel_display *display); void intel_cdclk_debugfs_register(struct intel_display *display); +int intel_cdclk_logical(const struct intel_cdclk_state *cdclk_state); +int intel_cdclk_actual(const struct intel_cdclk_state *cdclk_state); +int intel_cdclk_actual_voltage_level(const struct intel_cdclk_state *cdclk_state); +int intel_cdclk_min_cdclk(const struct intel_cdclk_state *cdclk_state, enum pipe pipe); +int intel_cdclk_bw_min_cdclk(const struct intel_cdclk_state *cdclk_state); +bool intel_cdclk_pmdemand_needs_update(struct intel_atomic_state *state); +void intel_cdclk_force_min_cdclk(struct intel_cdclk_state *cdclk_state, int force_min_cdclk); +void intel_cdclk_read_hw(struct intel_display *display); + #endif /* __INTEL_CDCLK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 2867d76d1a5e..42c923f416b3 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -64,10 +64,10 @@ static void intel_connector_modeset_retry_work_fn(struct work_struct *work) void intel_connector_queue_modeset_retry_work(struct intel_connector *connector) { - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); drm_connector_get(&connector->base); - if (!queue_work(i915->unordered_wq, &connector->modeset_retry_work)) + if (!queue_work(display->wq.unordered, &connector->modeset_retry_work)) drm_connector_put(&connector->base); } diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index a88317ea4e9c..a187db6df2d3 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -17,7 +17,6 @@ #include "i9xx_plane.h" #include "icl_dsi.h" #include "intel_atomic.h" -#include "intel_atomic_plane.h" #include "intel_color.h" #include "intel_crtc.h" #include "intel_cursor.h" @@ -29,6 +28,7 @@ #include "intel_dsi.h" #include "intel_fifo_underrun.h" #include "intel_pipe_crc.h" +#include "intel_plane.h" #include "intel_psr.h" #include "intel_sprite.h" #include "intel_vblank.h" diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index 6bd4f6a28cae..198e69efe9ac 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -14,7 +14,6 @@ #include "i915_utils.h" #include "intel_atomic.h" -#include "intel_atomic_plane.h" #include "intel_cursor.h" #include "intel_cursor_regs.h" #include "intel_de.h" @@ -23,6 +22,7 @@ #include "intel_fb.h" #include "intel_fb_pin.h" #include "intel_frontbuffer.h" +#include 
"intel_plane.h" #include "intel_psr.h" #include "intel_psr_regs.h" #include "intel_vblank.h" @@ -158,10 +158,10 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state, return -EINVAL; } - ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - true); + ret = intel_plane_check_clipping(plane_state, crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + true); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c index 83c8df9dbc0c..801235a5bc0a 100644 --- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c +++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c @@ -39,7 +39,13 @@ bool intel_encoder_is_c10phy(struct intel_encoder *encoder) struct intel_display *display = to_intel_display(encoder); enum phy phy = intel_encoder_to_phy(encoder); - if (display->platform.pantherlake && phy == PHY_A) + /* PTL doesn't have a PHY connected to PORT B; as such, + * there will never be a case where PTL uses PHY B. + * WCL uses PORT A and B with the C10 PHY. + * Reusing the condition for WCL and extending it for PORT B + * should not cause any issues for PTL. + */ + if (display->platform.pantherlake && phy < PHY_C) return true; if ((display->platform.lunarlake || display->platform.meteorlake) && phy < PHY_C) @@ -3233,14 +3239,22 @@ void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(encoder); - u8 owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); - bool enable = intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder), - crtc_state); + intel_wakeref_t wakeref; int i; + u8 owned_lane_mask; - if (DISPLAY_VER(display) < 20) + if (DISPLAY_VER(display) < 20 || + !intel_alpm_is_alpm_aux_less(enc_to_intel_dp(encoder), crtc_state)) return; + owned_lane_mask = intel_cx0_get_owned_lane_mask(encoder); + + wakeref = intel_cx0_phy_transaction_begin(encoder); + + if (intel_encoder_is_c10phy(encoder)) + intel_cx0_rmw(encoder, owned_lane_mask, PHY_C10_VDR_CONTROL(1), 0, + C10_VDR_CTRL_MSGBUS_ACCESS, MB_WRITE_COMMITTED); + for (i = 0; i < 4; i++) { int tx = i % 2 + 1; u8 lane_mask = i < 2 ? INTEL_CX0_LANE0 : INTEL_CX0_LANE1; @@ -3250,9 +3264,10 @@ void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder, intel_cx0_rmw(encoder, lane_mask, PHY_CMN1_CONTROL(tx, 0), CONTROL0_MAC_TRANSMIT_LFPS, - enable ? 
CONTROL0_MAC_TRANSMIT_LFPS : 0, - MB_WRITE_COMMITTED); + CONTROL0_MAC_TRANSMIT_LFPS, MB_WRITE_COMMITTED); } + + intel_cx0_phy_transaction_end(encoder, wakeref); } static u8 cx0_power_control_disable_val(struct intel_encoder *encoder) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index cbd1060e9664..0405396c7750 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -73,11 +73,13 @@ #include "intel_lspcon.h" #include "intel_mg_phy_regs.h" #include "intel_modeset_lock.h" +#include "intel_panel.h" #include "intel_pfit.h" #include "intel_pps.h" #include "intel_psr.h" #include "intel_quirks.h" #include "intel_snps_phy.h" +#include "intel_step.h" #include "intel_tc.h" #include "intel_vdsc.h" #include "intel_vdsc_regs.h" @@ -1394,6 +1396,21 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, for (ln = 0; ln < 2; ln++) { int level; + /* Wa_16011342517:adl-p */ + if (display->platform.alderlake_p && + IS_DISPLAY_STEP(display, STEP_A0, STEP_D0)) { + if ((intel_encoder_is_hdmi(encoder) && + crtc_state->port_clock == 594000) || + (intel_encoder_is_dp(encoder) && + crtc_state->port_clock == 162000)) { + intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln), + LOADGEN_SHARING_PMD_DISABLE, 1); + } else { + intel_dkl_phy_rmw(display, DKL_TX_DPCNTL2(tc_port, ln), + LOADGEN_SHARING_PMD_DISABLE, 0); + } + } + intel_dkl_phy_write(display, DKL_TX_PMD_LANE_SUS(tc_port, ln), 0); level = intel_ddi_level(encoder, crtc_state, 2*ln+0); @@ -3355,6 +3372,8 @@ static void intel_ddi_enable_dp(struct intel_atomic_state *state, drm_connector_update_privacy_screen(conn_state); intel_edp_backlight_on(crtc_state, conn_state); + intel_panel_prepare(crtc_state, conn_state); + if (!intel_lspcon_active(dig_port) || intel_dp_has_hdmi_sink(&dig_port->dp)) intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); @@ -3552,6 +3571,7 @@ static void intel_ddi_disable_dp(struct intel_atomic_state *state, intel_dp->link.active = false; + intel_panel_unprepare(old_conn_state); intel_psr_disable(intel_dp, old_crtc_state); intel_alpm_disable(intel_dp); intel_edp_backlight_off(old_conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 6ec786198f43..7035c1fc9033 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -57,7 +57,6 @@ #include "i9xx_wm.h" #include "intel_alpm.h" #include "intel_atomic.h" -#include "intel_atomic_plane.h" #include "intel_audio.h" #include "intel_bo.h" #include "intel_bw.h" @@ -94,6 +93,7 @@ #include "intel_fbc.h" #include "intel_fdi.h" #include "intel_fifo_underrun.h" +#include "intel_flipq.h" #include "intel_frontbuffer.h" #include "intel_hdmi.h" #include "intel_hotplug.h" @@ -108,6 +108,7 @@ #include "intel_pch_refclk.h" #include "intel_pfit.h" #include "intel_pipe_crc.h" +#include "intel_plane.h" #include "intel_plane_initial.h" #include "intel_pmdemand.h" #include "intel_pps.h" @@ -1659,8 +1660,12 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, if (drm_WARN_ON(display->drm, crtc->active)) return; - for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) - intel_dmc_enable_pipe(display, pipe_crtc->pipe); + for_each_pipe_crtc_modeset_enable(display, pipe_crtc, new_crtc_state, i) { + const struct intel_crtc_state *new_pipe_crtc_state = + intel_atomic_get_new_crtc_state(state, pipe_crtc); + + 
intel_dmc_enable_pipe(new_pipe_crtc_state); + } intel_encoders_pre_pll_enable(state, crtc); @@ -1798,8 +1803,12 @@ static void hsw_crtc_disable(struct intel_atomic_state *state, intel_encoders_post_pll_disable(state, crtc); - for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) - intel_dmc_disable_pipe(display, pipe_crtc->pipe); + for_each_pipe_crtc_modeset_disable(display, pipe_crtc, old_crtc_state, i) { + const struct intel_crtc_state *old_pipe_crtc_state = + intel_atomic_get_old_crtc_state(state, pipe_crtc); + + intel_dmc_disable_pipe(old_pipe_crtc_state); + } } /* Prefer intel_encoder_is_combo() */ @@ -4160,7 +4169,7 @@ static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state, return 0; linetime_wm = DIV_ROUND_CLOSEST(pipe_mode->crtc_htotal * 1000 * 8, - cdclk_state->logical.cdclk); + intel_cdclk_logical(cdclk_state)); return min(linetime_wm, 0x1ff); } @@ -5479,7 +5488,7 @@ static int intel_modeset_pipe(struct intel_atomic_state *state, if (ret) return ret; - ret = intel_atomic_add_affected_planes(state, crtc); + ret = intel_plane_add_affected(state, crtc); if (ret) return ret; @@ -6195,7 +6204,7 @@ static int intel_joiner_add_affected_crtcs(struct intel_atomic_state *state) if (ret) return ret; - ret = intel_atomic_add_affected_planes(state, crtc); + ret = intel_plane_add_affected(state, crtc); if (ret) return ret; } @@ -6447,7 +6456,7 @@ int intel_atomic_check(struct drm_device *dev, goto fail; } - ret = intel_atomic_check_planes(state); + ret = intel_plane_atomic_check(state); if (ret) goto fail; @@ -6611,7 +6620,7 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); bool modeset = intel_crtc_needs_modeset(new_crtc_state); - drm_WARN_ON(display->drm, new_crtc_state->use_dsb); + drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq); /* * During modesets pipe configuration was programmed as the @@ -6641,7 +6650,7 @@ static void commit_pipe_post_planes(struct intel_atomic_state *state, intel_atomic_get_new_crtc_state(state, crtc); bool modeset = intel_crtc_needs_modeset(new_crtc_state); - drm_WARN_ON(display->drm, new_crtc_state->use_dsb); + drm_WARN_ON(display->drm, new_crtc_state->use_dsb || new_crtc_state->use_flipq); /* * Disable the scaler(s) after the plane(s) so that we don't @@ -6730,10 +6739,10 @@ static void intel_pre_update_crtc(struct intel_atomic_state *state, if (!modeset && intel_crtc_needs_color_update(new_crtc_state) && - !new_crtc_state->use_dsb) + !new_crtc_state->use_dsb && !new_crtc_state->use_flipq) intel_color_commit_noarm(NULL, new_crtc_state); - if (!new_crtc_state->use_dsb) + if (!new_crtc_state->use_dsb && !new_crtc_state->use_flipq) intel_crtc_planes_update_noarm(NULL, state, crtc); } @@ -6745,7 +6754,14 @@ static void intel_update_crtc(struct intel_atomic_state *state, struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - if (new_crtc_state->use_dsb) { + if (new_crtc_state->use_flipq) { + intel_flipq_enable(new_crtc_state); + + intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->flipq_event); + + intel_flipq_add(crtc, INTEL_FLIPQ_PLANE_1, 0, INTEL_DSB_0, + new_crtc_state->dsb_commit); + } else if (new_crtc_state->use_dsb) { intel_crtc_prepare_vblank_event(new_crtc_state, &crtc->dsb_event); intel_dsb_commit(new_crtc_state->dsb_commit); @@ -7076,7 +7092,8 @@ static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_stat struct drm_i915_private *i915 = 
to_i915(intel_state->base.dev); struct drm_plane *plane; struct drm_plane_state *new_plane_state; - int ret, i; + long ret; + int i; for_each_new_plane_in_state(&intel_state->base, plane, new_plane_state, i) { if (new_plane_state->fence) { @@ -7183,7 +7200,17 @@ static void intel_atomic_dsb_prepare(struct intel_atomic_state *state, return; /* FIXME deal with everything */ + new_crtc_state->use_flipq = + intel_flipq_supported(display) && + !new_crtc_state->do_async_flip && + !new_crtc_state->vrr.enable && + !new_crtc_state->has_psr && + !intel_crtc_needs_modeset(new_crtc_state) && + !intel_crtc_needs_fastset(new_crtc_state) && + !intel_crtc_needs_color_update(new_crtc_state); + new_crtc_state->use_dsb = + !new_crtc_state->use_flipq && !new_crtc_state->do_async_flip && (DISPLAY_VER(display) >= 20 || !new_crtc_state->has_psr) && !intel_crtc_needs_modeset(new_crtc_state) && @@ -7199,7 +7226,9 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state, struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - if (!new_crtc_state->use_dsb && !new_crtc_state->dsb_color) + if (!new_crtc_state->use_flipq && + !new_crtc_state->use_dsb && + !new_crtc_state->dsb_color) return; /* @@ -7208,14 +7237,20 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state, * Double that for pipe stuff and other overhead. */ new_crtc_state->dsb_commit = intel_dsb_prepare(state, crtc, INTEL_DSB_0, - new_crtc_state->use_dsb ? 1024 : 16); + new_crtc_state->use_dsb || + new_crtc_state->use_flipq ? 1024 : 16); if (!new_crtc_state->dsb_commit) { + new_crtc_state->use_flipq = false; new_crtc_state->use_dsb = false; intel_color_cleanup_commit(new_crtc_state); return; } - if (new_crtc_state->use_dsb) { + if (new_crtc_state->use_flipq || new_crtc_state->use_dsb) { + /* Wa_18034343758 */ + if (new_crtc_state->use_flipq) + intel_flipq_wait_dmc_halt(new_crtc_state->dsb_commit, crtc); + if (intel_crtc_needs_color_update(new_crtc_state)) intel_color_commit_noarm(new_crtc_state->dsb_commit, new_crtc_state); @@ -7230,7 +7265,8 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state, intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit, state, crtc); - intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit); + if (new_crtc_state->use_dsb) + intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit); if (intel_crtc_needs_color_update(new_crtc_state)) intel_color_commit_arm(new_crtc_state->dsb_commit, @@ -7245,6 +7281,10 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state, if (DISPLAY_VER(display) >= 9) skl_detach_scalers(new_crtc_state->dsb_commit, new_crtc_state); + + /* Wa_18034343758 */ + if (new_crtc_state->use_flipq) + intel_flipq_unhalt_dmc(new_crtc_state->dsb_commit, crtc); } if (intel_color_uses_chained_dsb(new_crtc_state)) @@ -7385,6 +7425,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) /* Now enable the clocks, plane, pipe, and connectors that we set up. 
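The use_flipq computation above gates the new DMC flip-queue path on a chain of negatives: no async flip, no VRR, no PSR, and no modeset, fastset, or color update, on top of intel_flipq_supported(). Restated as a self-contained predicate over stand-in flags (the real checks operate on intel_crtc_state):

#include <stdbool.h>

struct commit_flags {
	bool flipq_supported, do_async_flip, vrr_enable, has_psr;
	bool needs_modeset, needs_fastset, needs_color_update;
};

bool can_use_flipq(const struct commit_flags *f)
{
	return f->flipq_supported &&
	       !f->do_async_flip && !f->vrr_enable && !f->has_psr &&
	       !f->needs_modeset && !f->needs_fastset &&
	       !f->needs_color_update;
}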
*/ display->funcs.display->commit_modeset_enables(state); + /* FIXME probably need to sequence this properly */ intel_program_dpkgc_latency(state); intel_wait_for_vblank_workers(state); @@ -7408,6 +7449,9 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) if (!state->base.legacy_cursor_update && !new_crtc_state->use_dsb) intel_vrr_check_push_sent(NULL, new_crtc_state); + + if (new_crtc_state->use_flipq) + intel_flipq_disable(new_crtc_state); } /* diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index 32cb0e59c81e..8c226406c5cd 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -480,6 +480,12 @@ struct intel_display { } irq; struct { + /* protected by wm.wm_mutex */ + u16 linetime[I915_MAX_PIPES]; + bool disable[I915_MAX_PIPES]; + } pkgc; + + struct { wait_queue_head_t waitqueue; /* mutex to protect pmdemand programming sequence */ @@ -570,6 +576,9 @@ struct intel_display { /* hipri wq for commit cleanups */ struct workqueue_struct *cleanup; + + /* unordered workqueue for all display unordered work */ + struct workqueue_struct *unordered; } wq; /* Grouping using named structs. Keep sorted. */ diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c index a4070f40e26f..089cffabbad5 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.c +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -1480,6 +1480,7 @@ static const struct { { 14, 1, &xe2_hpd_display }, { 20, 0, &xe2_lpd_display }, { 30, 0, &xe2_lpd_display }, + { 30, 2, &xe2_lpd_display }, }; static const struct intel_display_device_info * diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index 0ac5484c0043..4308822f0415 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -192,9 +192,8 @@ struct intel_display_platforms { #define HAS_TRANSCODER(__display, trans) ((DISPLAY_RUNTIME_INFO(__display)->cpu_transcoder_mask & \ BIT(trans)) != 0) #define HAS_UNCOMPRESSED_JOINER(__display) (DISPLAY_VER(__display) >= 13) -#define HAS_ULTRAJOINER(__display) ((DISPLAY_VER(__display) >= 20 || \ - ((__display)->platform.dgfx && DISPLAY_VER(__display) == 14)) && \ - HAS_DSC(__display)) +#define HAS_ULTRAJOINER(__display) (((__display)->platform.dgfx && \ + DISPLAY_VER(__display) == 14) && HAS_DSC(__display)) #define HAS_VRR(__display) (DISPLAY_VER(__display) >= 11) #define INTEL_NUM_PIPES(__display) (hweight8(DISPLAY_RUNTIME_INFO(__display)->pipe_mask)) #define OVERLAY_NEEDS_PHYSICAL(__display) (DISPLAY_INFO(__display)->overlay_needs_physical) diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index ec799a1773e4..8586ba102605 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -44,6 +44,7 @@ #include "intel_fbc.h" #include "intel_fbdev.h" #include "intel_fdi.h" +#include "intel_flipq.h" #include "intel_gmbus.h" #include "intel_hdcp.h" #include "intel_hotplug.h" @@ -84,16 +85,10 @@ bool intel_display_driver_probe_defer(struct pci_dev *pdev) void intel_display_driver_init_hw(struct intel_display *display) { - struct intel_cdclk_state *cdclk_state; - if (!HAS_DISPLAY(display)) return; - cdclk_state = 
to_intel_cdclk_state(display->cdclk.obj.state); - - intel_update_cdclk(display); - intel_cdclk_dump_config(display, &display->cdclk.hw, "Current CDCLK"); - cdclk_state->logical = cdclk_state->actual = display->cdclk.hw; + intel_cdclk_read_hw(display); intel_display_wa_apply(display); } @@ -242,8 +237,6 @@ int intel_display_driver_probe_noirq(struct intel_display *display) if (!HAS_DISPLAY(display)) return 0; - intel_dmc_init(display); - display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0); if (!display->hotplug.dp_wq) { ret = -ENOMEM; @@ -269,27 +262,35 @@ int intel_display_driver_probe_noirq(struct intel_display *display) goto cleanup_wq_flip; } + display->wq.unordered = alloc_workqueue("display_unordered", 0, 0); + if (!display->wq.unordered) { + ret = -ENOMEM; + goto cleanup_wq_cleanup; + } + + intel_dmc_init(display); + intel_mode_config_init(display); ret = intel_cdclk_init(display); if (ret) - goto cleanup_wq_cleanup; + goto cleanup_wq_unordered; ret = intel_color_init(display); if (ret) - goto cleanup_wq_cleanup; + goto cleanup_wq_unordered; ret = intel_dbuf_init(display); if (ret) - goto cleanup_wq_cleanup; + goto cleanup_wq_unordered; ret = intel_bw_init(display); if (ret) - goto cleanup_wq_cleanup; + goto cleanup_wq_unordered; ret = intel_pmdemand_init(display); if (ret) - goto cleanup_wq_cleanup; + goto cleanup_wq_unordered; intel_init_quirks(display); @@ -297,6 +298,8 @@ int intel_display_driver_probe_noirq(struct intel_display *display) return 0; +cleanup_wq_unordered: + destroy_workqueue(display->wq.unordered); cleanup_wq_cleanup: destroy_workqueue(display->wq.cleanup); cleanup_wq_flip: @@ -535,6 +538,8 @@ int intel_display_driver_probe(struct intel_display *display) */ intel_hdcp_component_init(display); + intel_flipq_init(display); + /* * Force all active planes to recompute their states. 
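The reworked probe path above threads a new label through the existing unwind ladder: every later failure now jumps to cleanup_wq_unordered so the new workqueue is destroyed in reverse order of allocation. A trimmed kernel-style sketch of the same shape; the "display_cleanup" name is assumed, while the flags follow the diff's "hipri wq for commit cleanups" comment, and this is not the complete probe function:

static int example_probe_noirq(struct intel_display *display)
{
	int ret;

	display->wq.cleanup = alloc_workqueue("display_cleanup", WQ_HIGHPRI, 0);
	if (!display->wq.cleanup)
		return -ENOMEM;

	display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
	if (!display->wq.unordered) {
		ret = -ENOMEM;
		goto cleanup_wq_cleanup;
	}

	ret = intel_cdclk_init(display);	/* any later failure unwinds both */
	if (ret)
		goto cleanup_wq_unordered;

	return 0;

cleanup_wq_unordered:
	destroy_workqueue(display->wq.unordered);
cleanup_wq_cleanup:
	destroy_workqueue(display->wq.cleanup);
	return ret;
}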
So that on * mode_setcrtc after probe, all the intel_plane_state variables @@ -600,6 +605,7 @@ void intel_display_driver_remove(struct intel_display *display) flush_workqueue(display->wq.flip); flush_workqueue(display->wq.modeset); flush_workqueue(display->wq.cleanup); + flush_workqueue(display->wq.unordered); /* * MST topology needs to be suspended so we don't have any calls to @@ -612,8 +618,6 @@ void intel_display_driver_remove(struct intel_display *display) /* part #2: call after irq uninstall */ void intel_display_driver_remove_noirq(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - if (!HAS_DISPLAY(display)) return; @@ -628,7 +632,7 @@ void intel_display_driver_remove_noirq(struct intel_display *display) intel_unregister_dsm_handler(); /* flush any delayed tasks or pending work */ - flush_workqueue(i915->unordered_wq); + flush_workqueue(display->wq.unordered); intel_hdcp_component_fini(display); @@ -644,6 +648,7 @@ void intel_display_driver_remove_noirq(struct intel_display *display) destroy_workqueue(display->wq.flip); destroy_workqueue(display->wq.modeset); destroy_workqueue(display->wq.cleanup); + destroy_workqueue(display->wq.unordered); intel_fbc_cleanup(display); } diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index 8d0dcf252bed..68157f177b6a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -9,7 +9,6 @@ #include "i915_irq.h" #include "i915_reg.h" #include "icl_dsi_regs.h" -#include "intel_atomic_plane.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_irq.h" @@ -27,6 +26,7 @@ #include "intel_gmbus.h" #include "intel_hotplug_irq.h" #include "intel_pipe_crc_regs.h" +#include "intel_plane.h" #include "intel_pmdemand.h" #include "intel_psr.h" #include "intel_psr_regs.h" @@ -1506,10 +1506,14 @@ u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl) if (!(master_ctl & GEN11_GU_MISC_IRQ)) return 0; + intel_display_rpm_assert_block(display); + iir = intel_de_read(display, GEN11_GU_MISC_IIR); if (likely(iir)) intel_de_write(display, GEN11_GU_MISC_IIR, iir); + intel_display_rpm_assert_unblock(display); + return iir; } diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c index c4f1ab43fc0c..75316247ee8a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_params.c +++ b/drivers/gpu/drm/i915/display/intel_display_params.c @@ -62,6 +62,9 @@ intel_display_param_named_unsafe(enable_dpt, bool, 0400, intel_display_param_named_unsafe(enable_dsb, bool, 0400, "Enable display state buffer (DSB) (default: true)"); +intel_display_param_named_unsafe(enable_flipq, bool, 0400, + "Enable DMC flip queue (default: false)"); + intel_display_param_named_unsafe(enable_sagv, bool, 0400, "Enable system agent voltage/frequency scaling (SAGV) (default: true)"); diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h index 5317138e6044..784e6bae8615 100644 --- a/drivers/gpu/drm/i915/display/intel_display_params.h +++ b/drivers/gpu/drm/i915/display/intel_display_params.h @@ -31,6 +31,7 @@ struct drm_printer; param(int, enable_dc, -1, 0400) \ param(bool, enable_dpt, true, 0400) \ param(bool, enable_dsb, true, 0600) \ + param(bool, enable_flipq, false, 0600) \ param(bool, enable_sagv, true, 0600) \ param(int, disable_power_well, -1, 0400) \ param(bool, enable_ips, 
true, 0600) \ diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index fe3a8e90b97a..273054c22325 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -1257,10 +1257,8 @@ static u32 hsw_read_dcomp(struct intel_display *display) static void hsw_write_dcomp(struct intel_display *display, u32 val) { - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (display->platform.haswell) { - if (snb_pcode_write(&dev_priv->uncore, GEN6_PCODE_WRITE_D_COMP, val)) + if (intel_pcode_write(display->drm, GEN6_PCODE_WRITE_D_COMP, val)) drm_dbg_kms(display->drm, "Failed to write to D_COMP\n"); } else { intel_de_write(display, D_COMP_BDW, val); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index cba96f920fd2..48cac225a809 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -34,6 +34,18 @@ #include "vlv_iosf_sb_reg.h" #include "vlv_sideband.h" +/* + * PG0 is HW controlled, so doesn't have a corresponding power well control knob + * + * {ICL,SKL}_DISP_PW1_IDX..{ICL,SKL}_DISP_PW4_IDX -> PG1..PG4 + */ +static enum skl_power_gate pw_idx_to_pg(struct intel_display *display, int pw_idx) +{ + int pw1_idx = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_PW_1 : SKL_PW_CTL_IDX_PW_1; + + return pw_idx - pw1_idx + SKL_PG1; +} + struct i915_power_well_regs { i915_reg_t bios; i915_reg_t driver; @@ -308,8 +320,8 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display, { const struct i915_power_well_regs *regs = power_well->desc->ops->regs; int pw_idx = i915_power_well_instance(power_well)->hsw.idx; - bool disabled; u32 reqs; + int ret; /* * Bspec doesn't require waiting for PWs to get disabled, but still do @@ -320,12 +332,18 @@ static void hsw_wait_for_power_well_disable(struct intel_display *display, * Skip the wait in case any of the request bits are set and print a * diagnostic message. */ - wait_for((disabled = !(intel_de_read(display, regs->driver) & - HSW_PWR_WELL_CTL_STATE(pw_idx))) || - (reqs = hsw_power_well_requesters(display, regs, pw_idx)), 1); - if (disabled) + reqs = hsw_power_well_requesters(display, regs, pw_idx); + + ret = intel_de_wait_for_clear(display, regs->driver, + HSW_PWR_WELL_CTL_STATE(pw_idx), + reqs ? 0 : 1); + if (!ret) return; + /* Refresh requesters in case they popped up during the wait. */ + if (!reqs) + reqs = hsw_power_well_requesters(display, regs, pw_idx); + drm_dbg_kms(display->drm, "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n", intel_power_well_name(power_well), @@ -350,8 +368,7 @@ static void hsw_power_well_enable(struct intel_display *display, if (power_well->desc->has_fuses) { enum skl_power_gate pg; - pg = DISPLAY_VER(display) >= 11 ? ICL_PW_CTL_IDX_TO_PG(pw_idx) : - SKL_PW_CTL_IDX_TO_PG(pw_idx); + pg = pw_idx_to_pg(display, pw_idx); /* Wa_16013190616:adlp */ if (display->platform.alderlake_p && pg == SKL_PG1) @@ -375,8 +392,8 @@ static void hsw_power_well_enable(struct intel_display *display, if (power_well->desc->has_fuses) { enum skl_power_gate pg; - pg = DISPLAY_VER(display) >= 11 ? 
ICL_PW_CTL_IDX_TO_PG(pw_idx) : - SKL_PW_CTL_IDX_TO_PG(pw_idx); + pg = pw_idx_to_pg(display, pw_idx); + gen9_wait_for_power_well_fuses(display, pg); } @@ -486,8 +503,7 @@ static void icl_tc_cold_exit(struct intel_display *display) int ret, tries = 0; while (1) { - ret = snb_pcode_write_timeout(&i915->uncore, ICL_PCODE_EXIT_TCCOLD, 0, - 250, 1); + ret = intel_pcode_write(display->drm, ICL_PCODE_EXIT_TCCOLD, 0); if (ret != -EAGAIN || ++tries == 3) break; msleep(1); @@ -829,7 +845,7 @@ static void assert_can_enable_dc5(struct intel_display *display) assert_display_rpm_held(display); - assert_dmc_loaded(display); + assert_main_dmc_loaded(display); } void gen9_enable_dc5(struct intel_display *display) @@ -860,7 +876,7 @@ static void assert_can_enable_dc6(struct intel_display *display) DC_STATE_EN_UPTO_DC6), "DC6 already programmed to be enabled.\n"); - assert_dmc_loaded(display); + assert_main_dmc_loaded(display); } void skl_enable_dc6(struct intel_display *display) @@ -1766,7 +1782,7 @@ tgl_tc_cold_request(struct intel_display *display, bool block) * Spec states that we should timeout the request after 200us * but the function below will timeout after 500us */ - ret = snb_pcode_read(&i915->uncore, TGL_PCODE_TCCOLD, &low_val, &high_val); + ret = intel_pcode_read(display->drm, TGL_PCODE_TCCOLD, &low_val, &high_val); if (ret == 0) { if (block && (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) diff --git a/drivers/gpu/drm/i915/display/intel_display_regs.h b/drivers/gpu/drm/i915/display/intel_display_regs.h index e101105da4af..7bd09d981cd2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_regs.h +++ b/drivers/gpu/drm/i915/display/intel_display_regs.h @@ -2195,20 +2195,17 @@ #define HSW_PWR_WELL_FORCE_ON (1 << 19) #define HSW_PWR_WELL_CTL6 _MMIO(0x45414) +/* SKL Fuse Status */ +enum skl_power_gate { + SKL_PG0, + SKL_PG1, + SKL_PG2, + ICL_PG3, + ICL_PG4, +}; + #define SKL_FUSE_STATUS _MMIO(0x42000) #define SKL_FUSE_DOWNLOAD_STATUS (1 << 31) -/* - * PG0 is HW controlled, so doesn't have a corresponding power well control knob - * SKL_DISP_PW1_IDX..SKL_DISP_PW2_IDX -> PG1..PG2 - */ -#define SKL_PW_CTL_IDX_TO_PG(pw_idx) \ - ((pw_idx) - SKL_PW_CTL_IDX_PW_1 + SKL_PG1) -/* - * PG0 is HW controlled, so doesn't have a corresponding power well control knob - * ICL_DISP_PW1_IDX..ICL_DISP_PW4_IDX -> PG1..PG4 - */ -#define ICL_PW_CTL_IDX_TO_PG(pw_idx) \ - ((pw_idx) - ICL_PW_CTL_IDX_PW_1 + SKL_PG1) #define SKL_FUSE_PG_DIST_STATUS(pg) (1 << (27 - (pg))) /* Per-pipe DDI Function Control */ diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 30c7315fc25e..ce45261c4a8f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -146,6 +146,8 @@ struct intel_framebuffer { unsigned int min_alignment; unsigned int vtd_guard; + + unsigned int (*panic_tiling)(unsigned int x, unsigned int y, unsigned int width); }; enum intel_hotplug_state { @@ -1303,6 +1305,7 @@ struct intel_crtc_state { /* For DSB based pipe updates */ struct intel_dsb *dsb_color, *dsb_commit; bool use_dsb; + bool use_flipq; u32 psr2_man_track_ctl; @@ -1369,6 +1372,21 @@ struct intel_pipe_crc { enum intel_pipe_crc_source source; }; +enum intel_flipq_id { + INTEL_FLIPQ_PLANE_1, + INTEL_FLIPQ_PLANE_2, + INTEL_FLIPQ_PLANE_3, + INTEL_FLIPQ_GENERAL, + INTEL_FLIPQ_FAST, + MAX_INTEL_FLIPQ, +}; + +struct intel_flipq { + u32 start_mmioaddr; + enum intel_flipq_id flipq_id; + u8 tail; +}; + struct intel_crtc { struct 
drm_crtc base; enum pipe pipe; @@ -1395,11 +1413,15 @@ struct intel_crtc { struct drm_pending_vblank_event *flip_done_event; /* armed event for DSB based updates */ struct drm_pending_vblank_event *dsb_event; + /* armed event for flip queue based updates */ + struct drm_pending_vblank_event *flipq_event; /* Access to these should be protected by display->irq.lock. */ bool cpu_fifo_underrun_disabled; bool pch_fifo_underrun_disabled; + struct intel_flipq flipq[MAX_INTEL_FLIPQ]; + /* per-pipe watermark state */ struct { /* watermarks currently being used */ @@ -1521,6 +1543,8 @@ struct intel_plane { bool async_flip); void (*enable_flip_done)(struct intel_plane *plane); void (*disable_flip_done)(struct intel_plane *plane); + /* For drm_panic */ + void (*disable_tiling)(struct intel_plane *plane); }; #define to_intel_atomic_state(x) container_of(x, struct intel_atomic_state, base) diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h b/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h index 3d8fa667cc73..f8ffeec29e93 100644 --- a/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h +++ b/drivers/gpu/drm/i915/display/intel_dkl_phy_regs.h @@ -153,6 +153,7 @@ struct intel_dkl_phy_reg { #define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK, (val)) #define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK REG_GENMASK(6, 5) #define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, (val)) +#define LOADGEN_SHARING_PMD_DISABLE REG_BIT(12) #define _DKL_TX_FW_CALIB_LN0 0x02F8 #define _DKL_TX_FW_CALIB_LN1 0x12F8 diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index a10e56e7cf31..744f51c0eab8 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -24,9 +24,13 @@ #include <linux/debugfs.h> #include <linux/firmware.h> +#include <drm/drm_vblank.h> + +#include <drm/drm_file.h> +#include <drm/drm_print.h> -#include "i915_drv.h" #include "i915_reg.h" +#include "i915_utils.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_power_well.h" @@ -35,6 +39,7 @@ #include "intel_display_types.h" #include "intel_dmc.h" #include "intel_dmc_regs.h" +#include "intel_flipq.h" #include "intel_step.h" /** @@ -179,7 +184,8 @@ static const char *dmc_firmware_default(struct intel_display *display, u32 *size const char *fw_path = NULL; u32 max_fw_size = 0; - if (DISPLAY_VERx100(display) == 3000) { + if (DISPLAY_VERx100(display) == 3002 || + DISPLAY_VERx100(display) == 3000) { fw_path = XE3LPD_DMC_PATH; max_fw_size = XE2LPD_DMC_MAX_FW_SIZE; } else if (DISPLAY_VERx100(display) == 2000) { @@ -432,25 +438,22 @@ static void disable_event_handler(struct intel_display *display, intel_de_write(display, htp_reg, 0); } -static void disable_all_event_handlers(struct intel_display *display) +static void disable_all_event_handlers(struct intel_display *display, + enum intel_dmc_id dmc_id) { - enum intel_dmc_id dmc_id; + int handler; /* TODO: disable the event handlers on pre-GEN12 platforms as well */ if (DISPLAY_VER(display) < 12) return; - for_each_dmc_id(dmc_id) { - int handler; - - if (!has_dmc_id_fw(display, dmc_id)) - continue; + if (!has_dmc_id_fw(display, dmc_id)) + return; - for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; handler++) - disable_event_handler(display, - DMC_EVT_CTL(display, dmc_id, handler), - DMC_EVT_HTP(display, dmc_id, handler)); - } + for (handler = 0; handler < DMC_EVENT_HANDLER_COUNT_GEN12; 
handler++) + disable_event_handler(display, + DMC_EVT_CTL(display, dmc_id, handler), + DMC_EVT_HTP(display, dmc_id, handler)); } static void adlp_pipedmc_clock_gating_wa(struct intel_display *display, bool enable) @@ -482,12 +485,13 @@ static void mtl_pipedmc_clock_gating_wa(struct intel_display *display) * for pipe A and B. */ intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0, - MTL_PIPEDMC_GATING_DIS_A | MTL_PIPEDMC_GATING_DIS_B); + MTL_PIPEDMC_GATING_DIS(PIPE_A) | + MTL_PIPEDMC_GATING_DIS(PIPE_B)); } static void pipedmc_clock_gating_wa(struct intel_display *display, bool enable) { - if (DISPLAY_VER(display) >= 14 && enable) + if (display->platform.meteorlake && enable) mtl_pipedmc_clock_gating_wa(display); else if (DISPLAY_VER(display) == 13) adlp_pipedmc_clock_gating_wa(display, enable); @@ -500,46 +504,11 @@ static u32 pipedmc_interrupt_mask(struct intel_display *display) * triggering it during the first DC state transition. Figure * out what is going on... */ - return PIPEDMC_GTT_FAULT | + return PIPEDMC_FLIPQ_PROG_DONE | + PIPEDMC_GTT_FAULT | PIPEDMC_ATS_FAULT; } -void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe) -{ - enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); - - if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) - return; - - if (DISPLAY_VER(display) >= 20) { - intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display)); - intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~pipedmc_interrupt_mask(display)); - } - - if (DISPLAY_VER(display) >= 14) - intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe)); - else - intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE); -} - -void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe) -{ - enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); - - if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) - return; - - if (DISPLAY_VER(display) >= 14) - intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0); - else - intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0); - - if (DISPLAY_VER(display) >= 20) { - intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~0); - intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display)); - } -} - static u32 dmc_evt_ctl_disable(void) { return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK, @@ -577,6 +546,238 @@ static bool is_event_handler(struct intel_display *display, REG_FIELD_GET(DMC_EVT_CTL_EVENT_ID_MASK, data) == event_id; } +static bool disable_dmc_evt(struct intel_display *display, + enum intel_dmc_id dmc_id, + i915_reg_t reg, u32 data) +{ + if (!is_dmc_evt_ctl_reg(display, dmc_id, reg)) + return false; + + /* keep all pipe DMC events disabled by default */ + if (dmc_id != DMC_FW_MAIN) + return true; + + /* also disable the flip queue event on the main DMC on TGL */ + if (display->platform.tigerlake && + is_event_handler(display, dmc_id, MAINDMC_EVENT_CLK_MSEC, reg, data)) + return true; + + /* also disable the HRR event on the main DMC on TGL/ADLS */ + if ((display->platform.tigerlake || display->platform.alderlake_s) && + is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg, data)) + return true; + + return false; +} + +static u32 dmc_mmiodata(struct intel_display *display, + struct intel_dmc *dmc, + enum intel_dmc_id dmc_id, int i) +{ + if (disable_dmc_evt(display, dmc_id, + dmc->dmc_info[dmc_id].mmioaddr[i], + dmc->dmc_info[dmc_id].mmiodata[i])) + return dmc_evt_ctl_disable(); + else + return dmc->dmc_info[dmc_id].mmiodata[i]; +} + 
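Illustrative sketch (not from this patch): dmc_mmiodata() above makes the firmware-provided MMIO write list replayable while substituting the "disabled" event-control encoding for entries the driver wants masked, and dmc_load_mmio()/dmc_load_program() below replay that list per DMC. A minimal, self-contained C sketch of the same replay-with-override pattern follows; fw_write, should_disable, mmio_write and disabled_value are hypothetical names introduced here for illustration only:

#include <linux/types.h>

struct fw_write {
	u32 addr;
	u32 data;
};

/*
 * Replay a firmware-provided register write list, overriding the data
 * of any entry the predicate flags (cf. disable_dmc_evt() above).
 */
static void replay_fw_writes(const struct fw_write *writes, int count,
			     bool (*should_disable)(u32 addr, u32 data),
			     void (*mmio_write)(u32 addr, u32 data),
			     u32 disabled_value)
{
	int i;

	for (i = 0; i < count; i++) {
		u32 data = should_disable(writes[i].addr, writes[i].data) ?
			disabled_value : writes[i].data;

		mmio_write(writes[i].addr, data);
	}
}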
+static void dmc_load_mmio(struct intel_display *display, enum intel_dmc_id dmc_id) +{ + struct intel_dmc *dmc = display_to_dmc(display); + int i; + + for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { + intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i], + dmc_mmiodata(display, dmc, dmc_id, i)); + } +} + +static void dmc_load_program(struct intel_display *display, enum intel_dmc_id dmc_id) +{ + struct intel_dmc *dmc = display_to_dmc(display); + int i; + + disable_all_event_handlers(display, dmc_id); + + preempt_disable(); + + for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) { + intel_de_write_fw(display, + DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i), + dmc->dmc_info[dmc_id].payload[i]); + } + + preempt_enable(); + + dmc_load_mmio(display, dmc_id); +} + +static void assert_dmc_loaded(struct intel_display *display, + enum intel_dmc_id dmc_id) +{ + struct intel_dmc *dmc = display_to_dmc(display); + u32 expected, found; + int i; + + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) + return; + + found = intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, 0)); + expected = dmc->dmc_info[dmc_id].payload[0]; + + drm_WARN(display->drm, found != expected, + "DMC %d program storage start incorrect (expected 0x%x, current 0x%x)\n", + dmc_id, expected, found); + + for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { + i915_reg_t reg = dmc->dmc_info[dmc_id].mmioaddr[i]; + + found = intel_de_read(display, reg); + expected = dmc_mmiodata(display, dmc, dmc_id, i); + + /* once set DMC_EVT_CTL_ENABLE can't be cleared :/ */ + if (is_dmc_evt_ctl_reg(display, dmc_id, reg)) { + found &= ~DMC_EVT_CTL_ENABLE; + expected &= ~DMC_EVT_CTL_ENABLE; + } + + drm_WARN(display->drm, found != expected, + "DMC %d mmio[%d]/0x%x incorrect (expected 0x%x, current 0x%x)\n", + dmc_id, i, i915_mmio_reg_offset(reg), expected, found); + } +} + +void assert_main_dmc_loaded(struct intel_display *display) +{ + assert_dmc_loaded(display, DMC_FW_MAIN); +} + +static bool need_pipedmc_load_program(struct intel_display *display) +{ + /* On TGL/derivatives pipe DMC state is lost when PG1 is disabled */ + return DISPLAY_VER(display) == 12; +} + +static bool need_pipedmc_load_mmio(struct intel_display *display, enum pipe pipe) +{ + /* + * PTL: + * - pipe A/B DMC doesn't need save/restore + * - pipe C/D DMC is in PG0, needs manual save/restore + */ + if (DISPLAY_VER(display) == 30) + return pipe >= PIPE_C; + + /* + * FIXME LNL unclear, main DMC firmware has the pipe DMC A/B PG0 + * save/restore, but so far unable to see the loss of pipe DMC state + * in action. Are we just failing to turn off PG0 due to some other + * SoC level stuff? + */ + if (DISPLAY_VER(display) == 20) + return false; + + /* + * FIXME BMG untested, main DMC firmware has the + * pipe DMC A/B PG0 save/restore... + */ + if (display->platform.battlemage) + return false; + + /* + * DG2: + * - Pipe DMCs presumably in PG0? + * - No DC6, and even DC9 doesn't seem to result + * in loss of DMC state for whatever reason + */ + if (display->platform.dg2) + return false; + + /* + * ADL/MTL: + * - pipe A/B DMC is in PG0, saved/restored by the main DMC + * - pipe C/D DMC is in PG0, needs manual save/restore + */ + if (IS_DISPLAY_VER(display, 13, 14)) + return pipe >= PIPE_C; + + return false; +} + +static bool can_enable_pipedmc(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + /* + * On TGL/derivatives pipe DMC state is lost when PG1 is disabled. 
+ * Do not even enable the pipe DMC when that can happen outside + * of driver control (PSR+DC5/6). + */ + if (DISPLAY_VER(display) == 12 && crtc_state->has_psr) + return false; + + return true; +} + +void intel_dmc_enable_pipe(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum pipe pipe = crtc->pipe; + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); + + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) + return; + + if (!can_enable_pipedmc(crtc_state)) { + intel_dmc_disable_pipe(crtc_state); + return; + } + + if (need_pipedmc_load_program(display)) + dmc_load_program(display, dmc_id); + else if (need_pipedmc_load_mmio(display, pipe)) + dmc_load_mmio(display, dmc_id); + + assert_dmc_loaded(display, dmc_id); + + if (DISPLAY_VER(display) >= 20) { + intel_flipq_reset(display, pipe); + + intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display)); + intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~pipedmc_interrupt_mask(display)); + } + + if (DISPLAY_VER(display) >= 14) + intel_de_rmw(display, MTL_PIPEDMC_CONTROL, 0, PIPEDMC_ENABLE_MTL(pipe)); + else + intel_de_rmw(display, PIPEDMC_CONTROL(pipe), 0, PIPEDMC_ENABLE); +} + +void intel_dmc_disable_pipe(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum pipe pipe = crtc->pipe; + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(pipe); + + if (!is_valid_dmc_id(dmc_id) || !has_dmc_id_fw(display, dmc_id)) + return; + + if (DISPLAY_VER(display) >= 14) + intel_de_rmw(display, MTL_PIPEDMC_CONTROL, PIPEDMC_ENABLE_MTL(pipe), 0); + else + intel_de_rmw(display, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0); + + if (DISPLAY_VER(display) >= 20) { + intel_de_write(display, PIPEDMC_INTERRUPT_MASK(pipe), ~0); + intel_de_write(display, PIPEDMC_INTERRUPT(pipe), pipedmc_interrupt_mask(display)); + + intel_flipq_reset(display, pipe); + } +} + static void dmc_configure_event(struct intel_display *display, enum intel_dmc_id dmc_id, unsigned int event_id, @@ -637,42 +838,6 @@ void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display dmc_configure_event(display, dmc_id, PIPEDMC_EVENT_VBLANK, enable); } -static bool disable_dmc_evt(struct intel_display *display, - enum intel_dmc_id dmc_id, - i915_reg_t reg, u32 data) -{ - if (!is_dmc_evt_ctl_reg(display, dmc_id, reg)) - return false; - - /* keep all pipe DMC events disabled by default */ - if (dmc_id != DMC_FW_MAIN) - return true; - - /* also disable the flip queue event on the main DMC on TGL */ - if (display->platform.tigerlake && - is_event_handler(display, dmc_id, MAINDMC_EVENT_CLK_MSEC, reg, data)) - return true; - - /* also disable the HRR event on the main DMC on TGL/ADLS */ - if ((display->platform.tigerlake || display->platform.alderlake_s) && - is_event_handler(display, dmc_id, MAINDMC_EVENT_VBLANK_A, reg, data)) - return true; - - return false; -} - -static u32 dmc_mmiodata(struct intel_display *display, - struct intel_dmc *dmc, - enum intel_dmc_id dmc_id, int i) -{ - if (disable_dmc_evt(display, dmc_id, - dmc->dmc_info[dmc_id].mmioaddr[i], - dmc->dmc_info[dmc_id].mmiodata[i])) - return dmc_evt_ctl_disable(); - else - return dmc->dmc_info[dmc_id].mmiodata[i]; -} - /** * intel_dmc_load_program() - write the firmware from memory to register. 
* @display: display instance @@ -684,37 +849,26 @@ static u32 dmc_mmiodata(struct intel_display *display, void intel_dmc_load_program(struct intel_display *display) { struct i915_power_domains *power_domains = &display->power.domains; - struct intel_dmc *dmc = display_to_dmc(display); enum intel_dmc_id dmc_id; - u32 i; if (!intel_dmc_has_payload(display)) return; - pipedmc_clock_gating_wa(display, true); - - disable_all_event_handlers(display); - assert_display_rpm_held(display); - preempt_disable(); + pipedmc_clock_gating_wa(display, true); for_each_dmc_id(dmc_id) { - for (i = 0; i < dmc->dmc_info[dmc_id].dmc_fw_size; i++) { - intel_de_write_fw(display, - DMC_PROGRAM(dmc->dmc_info[dmc_id].start_mmioaddr, i), - dmc->dmc_info[dmc_id].payload[i]); - } + dmc_load_program(display, dmc_id); + assert_dmc_loaded(display, dmc_id); } - preempt_enable(); - - for_each_dmc_id(dmc_id) { - for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) { - intel_de_write(display, dmc->dmc_info[dmc_id].mmioaddr[i], - dmc_mmiodata(display, dmc, dmc_id, i)); - } - } + if (DISPLAY_VER(display) >= 20) + intel_de_write(display, DMC_FQ_W2_PTS_CFG_SEL, + PIPE_D_DMC_W2_PTS_CONFIG_SELECT(PIPE_D) | + PIPE_C_DMC_W2_PTS_CONFIG_SELECT(PIPE_C) | + PIPE_B_DMC_W2_PTS_CONFIG_SELECT(PIPE_B) | + PIPE_A_DMC_W2_PTS_CONFIG_SELECT(PIPE_A)); power_domains->dc_state = 0; @@ -732,26 +886,17 @@ void intel_dmc_load_program(struct intel_display *display) */ void intel_dmc_disable_program(struct intel_display *display) { + enum intel_dmc_id dmc_id; + if (!intel_dmc_has_payload(display)) return; pipedmc_clock_gating_wa(display, true); - disable_all_event_handlers(display); - pipedmc_clock_gating_wa(display, false); -} -void assert_dmc_loaded(struct intel_display *display) -{ - struct intel_dmc *dmc = display_to_dmc(display); + for_each_dmc_id(dmc_id) + disable_all_event_handlers(display, dmc_id); - drm_WARN_ONCE(display->drm, !dmc, "DMC not initialized\n"); - drm_WARN_ONCE(display->drm, dmc && - !intel_de_read(display, DMC_PROGRAM(dmc->dmc_info[DMC_FW_MAIN].start_mmioaddr, 0)), - "DMC program storage start is NULL\n"); - drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_SSP_BASE), - "DMC SSP Base Not fine\n"); - drm_WARN_ONCE(display->drm, !intel_de_read(display, DMC_HTP_SKL), - "DMC HTP Not fine\n"); + pipedmc_clock_gating_wa(display, false); } static bool fw_info_matches_stepping(const struct intel_fw_info *fw_info, @@ -1170,7 +1315,6 @@ out: */ void intel_dmc_init(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_dmc *dmc; if (!HAS_DMC(display)) @@ -1213,7 +1357,7 @@ void intel_dmc_init(struct intel_display *display) display->dmc.dmc = dmc; drm_dbg_kms(display->drm, "Loading %s\n", dmc->fw_path); - queue_work(i915->unordered_wq, &dmc->work); + queue_work(display->wq.unordered, &dmc->work); return; @@ -1244,6 +1388,17 @@ void intel_dmc_suspend(struct intel_display *display) intel_dmc_runtime_pm_put(display); } +void intel_dmc_wait_fw_load(struct intel_display *display) +{ + struct intel_dmc *dmc = display_to_dmc(display); + + if (!HAS_DMC(display)) + return; + + if (dmc) + flush_work(&dmc->work); +} + /** * intel_dmc_resume() - init DMC firmware during system resume * @display: display instance @@ -1457,12 +1612,29 @@ void intel_dmc_debugfs_register(struct intel_display *display) void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe) { struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); - u32 tmp; + u32 tmp = 0, int_vector; if (DISPLAY_VER(display) >= 
20) { tmp = intel_de_read(display, PIPEDMC_INTERRUPT(pipe)); intel_de_write(display, PIPEDMC_INTERRUPT(pipe), tmp); + if (tmp & PIPEDMC_FLIPQ_PROG_DONE) { + spin_lock(&display->drm->event_lock); + + if (crtc->flipq_event) { + /* + * Update vblank counter/timestamp in case it + * hasn't been done yet for this frame. + */ + drm_crtc_accurate_vblank_count(&crtc->base); + + drm_crtc_send_vblank_event(&crtc->base, crtc->flipq_event); + crtc->flipq_event = NULL; + } + + spin_unlock(&display->drm->event_lock); + } + if (tmp & PIPEDMC_ATS_FAULT) drm_err_ratelimited(display->drm, "[CRTC:%d:%s] PIPEDMC ATS fault\n", crtc->base.base.id, crtc->base.name); @@ -1474,8 +1646,35 @@ void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe) crtc->base.base.id, crtc->base.name); } - tmp = intel_de_read(display, PIPEDMC_STATUS(pipe)) & PIPEDMC_INT_VECTOR_MASK; - if (tmp) + int_vector = intel_de_read(display, PIPEDMC_STATUS(pipe)) & PIPEDMC_INT_VECTOR_MASK; + if (tmp == 0 && int_vector != 0) drm_err(display->drm, "[CRTC:%d:%s] PIPEDMC interrupt vector 0x%x\n", crtc->base.base.id, crtc->base.name, int_vector); } + +void intel_pipedmc_enable_event(struct intel_crtc *crtc, + enum pipedmc_event_id event) +{ + struct intel_display *display = to_intel_display(crtc); + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe); + + dmc_configure_event(display, dmc_id, event, true); +} + +void intel_pipedmc_disable_event(struct intel_crtc *crtc, + enum pipedmc_event_id event) +{ + struct intel_display *display = to_intel_display(crtc); + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe); + + dmc_configure_event(display, dmc_id, event, false); +} + +u32 intel_pipedmc_start_mmioaddr(struct intel_crtc *crtc) +{ + struct intel_display *display = to_intel_display(crtc); + struct intel_dmc *dmc = display_to_dmc(display); + enum intel_dmc_id dmc_id = PIPE_TO_DMC_ID(crtc->pipe); + + return dmc ? 
dmc->dmc_info[dmc_id].start_mmioaddr : 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_dmc.h b/drivers/gpu/drm/i915/display/intel_dmc.h index a98e8deff13a..40e9dcb033cc 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.h +++ b/drivers/gpu/drm/i915/display/intel_dmc.h @@ -9,15 +9,19 @@ #include <linux/types.h> enum pipe; +enum pipedmc_event_id; struct drm_printer; +struct intel_crtc; +struct intel_crtc_state; struct intel_display; struct intel_dmc_snapshot; void intel_dmc_init(struct intel_display *display); void intel_dmc_load_program(struct intel_display *display); +void intel_dmc_wait_fw_load(struct intel_display *display); void intel_dmc_disable_program(struct intel_display *display); -void intel_dmc_enable_pipe(struct intel_display *display, enum pipe pipe); -void intel_dmc_disable_pipe(struct intel_display *display, enum pipe pipe); +void intel_dmc_enable_pipe(const struct intel_crtc_state *crtc_state); +void intel_dmc_disable_pipe(const struct intel_crtc_state *crtc_state); void intel_dmc_block_pkgc(struct intel_display *display, enum pipe pipe, bool block); void intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(struct intel_display *display, @@ -32,7 +36,15 @@ struct intel_dmc_snapshot *intel_dmc_snapshot_capture(struct intel_display *disp void intel_dmc_snapshot_print(const struct intel_dmc_snapshot *snapshot, struct drm_printer *p); void intel_dmc_update_dc6_allowed_count(struct intel_display *display, bool start_tracking); -void assert_dmc_loaded(struct intel_display *display); +void assert_main_dmc_loaded(struct intel_display *display); + +void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe); + +u32 intel_pipedmc_start_mmioaddr(struct intel_crtc *crtc); +void intel_pipedmc_enable_event(struct intel_crtc *crtc, + enum pipedmc_event_id event); +void intel_pipedmc_disable_event(struct intel_crtc *crtc, + enum pipedmc_event_id event); void intel_pipedmc_irq_handler(struct intel_display *display, enum pipe pipe); diff --git a/drivers/gpu/drm/i915/display/intel_dmc_regs.h b/drivers/gpu/drm/i915/display/intel_dmc_regs.h index 6f406315dd65..c5aa49921cb9 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc_regs.h +++ b/drivers/gpu/drm/i915/display/intel_dmc_regs.h @@ -287,6 +287,17 @@ enum pipedmc_event_id { #define MTL_PIPEDMC_CONTROL _MMIO(0x45250) #define PIPEDMC_ENABLE_MTL(pipe) REG_BIT(((pipe) - PIPE_A) * 4) +#define _PIPEDMC_LOAD_HTP_A 0x5f000 +#define _PIPEDMC_LOAD_HTP_B 0x5f400 +#define PIPEDMC_LOAD_HTP(pipe) _MMIO_PIPE((pipe), _PIPEDMC_LOAD_HTP_A, _PIPEDMC_LOAD_HTP_B) + +#define _PIPEDMC_CTL_A 0x5f064 +#define _PIPEDMC_CTL_B 0x5f464 +#define PIPEDMC_CTL(pipe) _MMIO_PIPE((pipe), _PIPEDMC_CTL_A, _PIPEDMC_CTL_B) +#define PIPEDMC_HALT REG_BIT(31) +#define PIPEDMC_STEP REG_BIT(27) +#define PIPEDMC_CLOCKGATE REG_BIT(23) + #define _PIPEDMC_STATUS_A 0x5f06c #define _PIPEDMC_STATUS_B 0x5f46c #define PIPEDMC_STATUS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_STATUS_A, _PIPEDMC_STATUS_B) @@ -298,6 +309,138 @@ enum pipedmc_event_id { #define PIPEDMC_INT_VECTOR_FLIPQ_PROG_DONE REG_FIELD_PREP(PIPEDMC_INT_VECTOR_MASK, 0xff) /* Wa_16018781658:lnl[a0] */ #define PIPEDMC_EVT_PENDING REG_GENMASK(7, 0) +#define _PIPEDMC_FQ_CTRL_A 0x5f078 +#define _PIPEDMC_FQ_CTRL_B 0x5f478 +#define PIPEDMC_FQ_CTRL(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FQ_CTRL_A, _PIPEDMC_FQ_CTRL_B) +#define PIPEDMC_FQ_CTRL_ENABLE REG_BIT(31) +#define PIPEDMC_FQ_CTRL_ASYNC REG_BIT(29) +#define PIPEDMC_FQ_CTRL_PREEMPT REG_BIT(0) + +#define _PIPEDMC_FQ_STATUS_A 0x5f098 +#define _PIPEDMC_FQ_STATUS_B 0x5f498 
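The queue control/status registers above and the head/tail pointer registers that follow implement a ring buffer per flip queue: the pipe DMC consumes entries at the hardware head (PIPEDMC_FPQ_CHP) while the driver advances a software tail. An illustrative sketch (not from this patch) of the occupancy math, mirroring the wrap-around check that assert_flipq_has_room() in intel_flipq.c does further down — one slot is kept empty so head == tail unambiguously means "empty":

#include <linux/types.h>

/* entries queued between the hardware head and the software tail */
static inline int flipq_used_entries(int head, int tail, int size)
{
	return (tail + size - head) % size;
}

/* one slot stays unused so a full queue is distinguishable from an empty one */
static inline bool flipq_has_room(int head, int tail, int size)
{
	return flipq_used_entries(head, tail, size) < size - 1;
}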
+#define PIPEDMC_FQ_STATUS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FQ_STATUS_A, _PIPEDMC_FQ_STATUS_B) +#define PIPEDMC_FQ_STATUS_BUSY REG_BIT(31) +#define PIPEDMC_FQ_STATUS_W2_LIVE_STATUS REG_BIT(1) +#define PIPEDMC_FQ_STATUS_W1_LIVE_STATUS REG_BIT(0) + +#define _PIPEDMC_FPQ_ATOMIC_TP_A 0x5f0a0 +#define _PIPEDMC_FPQ_ATOMIC_TP_B 0x5f4a0 +#define PIPEDMC_FPQ_ATOMIC_TP(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_ATOMIC_TP_A, _PIPEDMC_FPQ_ATOMIC_TP_B) +#define PIPEDMC_FPQ_PLANEQ_3_TP_MASK REG_GENMASK(31, 26) +#define PIPEDMC_FPQ_PLANEQ_3_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_PLANEQ_3_TP_MASK, (tail)) +#define PIPEDMC_FPQ_PLANEQ_2_TP_MASK REG_GENMASK(24, 19) +#define PIPEDMC_FPQ_PLANEQ_2_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_PLANEQ_2_TP_MASK, (tail)) +#define PIPEDMC_FPQ_PLANEQ_1_TP_MASK REG_GENMASK(17, 12) +#define PIPEDMC_FPQ_PLANEQ_1_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_PLANEQ_1_TP_MASK, (tail)) +#define PIPEDMC_FPQ_FASTQ_TP_MASK REG_GENMASK(10, 6) +#define PIPEDMC_FPQ_FASTQ_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_FASTQ_TP_MASK, (tail)) +#define PIPEDMC_FPQ_GENERALQ_TP_MASK REG_GENMASK(4, 0) +#define PIPEDMC_FPQ_GENERALQ_TP(tail) REG_FIELD_PREP(PIPEDMC_FPQ_GENERALQ_TP_MASK, (tail)) + +#define _PIPEDMC_FPQ_LINES_TO_W1_A 0x5f0a4 +#define _PIPEDMC_FPQ_LINES_TO_W1_B 0x5f4a4 +#define PIPEDMC_FPQ_LINES_TO_W1 _MMIO_PIPE((pipe), _PIPEDMC_FPQ_LINES_TO_W1_A, _PIPEDMC_FPQ_LINES_TO_W1_B) + +#define _PIPEDMC_FPQ_LINES_TO_W2_A 0x5f0a8 +#define _PIPEDMC_FPQ_LINES_TO_W2_B 0x5f4a8 +#define PIPEDMC_FPQ_LINES_TO_W2 _MMIO_PIPE((pipe), _PIPEDMC_FPQ_LINES_TO_W2_A, _PIPEDMC_FPQ_LINES_TO_W2_B) + +#define _PIPEDMC_SCANLINECMP_A 0x5f11c +#define _PIPEDMC_SCANLINECMP_B 0x5f51c +#define PIPEDMC_SCANLINECMP(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINECMP_A, _PIPEDMC_SCANLINECMP_B) +#define PIPEDMC_SCANLINECMP_EN REG_BIT(31) +#define PIPEDMC_SCANLINE_NUMBER REG_GENMASK(20, 0) + +#define _PIPEDMC_SCANLINECMPLOWER_A 0x5f120 +#define _PIPEDMC_SCANLINECMPLOWER_B 0x5f520 +#define PIPEDMC_SCANLINECMPLOWER(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINECMPLOWER_A, _PIPEDMC_SCANLINECMPLOWER_B) +#define PIPEDMC_SCANLINEINRANGECMP_EN REG_BIT(31) +#define PIPEDMC_SCANLINEOUTRANGECMP_EN REG_BIT(30) +#define PIPEDMC_SCANLINE_LOWER_MASK REG_GENMASK(20, 0) +#define PIPEDMC_SCANLINE_LOWER(scanline) REG_FIELD_PREP(PIPEDMC_SCANLINE_LOWER_MASK, (scanline)) + +#define _PIPEDMC_SCANLINECMPUPPER_A 0x5f124 +#define _PIPEDMC_SCANLINECMPUPPER_B 0x5f524 +#define PIPEDMC_SCANLINECMPUPPER(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINECMPUPPER_A, _PIPEDMC_SCANLINECMPUPPER_B) +#define PIPEDMC_SCANLINE_UPPER_MASK REG_GENMASK(20, 0) +#define PIPEDMC_SCANLINE_UPPER(scanline) REG_FIELD_PREP(PIPEDMC_SCANLINE_UPPER_MASK, (scanline)) + +#define _MMIO_PIPEDMC_FPQ(pipe, fq_id, \ + reg_fpq1_a, reg_fpq2_a, reg_fpq3_a, reg_fpq4_a, \ + reg_fpq1_b, reg_fpq2_b, reg_fpq3_b, reg_fpq4_b) \ + _MMIO(_PICK_EVEN_2RANGES((fq_id), INTEL_FLIPQ_PLANE_3, \ + _PIPE((pipe), (reg_fpq1_a), (reg_fpq1_b)), \ + _PIPE((pipe), (reg_fpq2_a), (reg_fpq2_b)), \ + _PIPE((pipe), (reg_fpq3_a), (reg_fpq3_b)), \ + _PIPE((pipe), (reg_fpq4_a), (reg_fpq4_b)))) + +#define _PIPEDMC_FPQ1_HP_A 0x5f128 +#define _PIPEDMC_FPQ2_HP_A 0x5f138 +#define _PIPEDMC_FPQ3_HP_A 0x5f168 +#define _PIPEDMC_FPQ4_HP_A 0x5f174 +#define _PIPEDMC_FPQ5_HP_A 0x5f180 +#define _PIPEDMC_FPQ1_HP_B 0x5f528 +#define _PIPEDMC_FPQ2_HP_B 0x5f538 +#define _PIPEDMC_FPQ3_HP_B 0x5f568 +#define _PIPEDMC_FPQ4_HP_B 0x5f574 +#define _PIPEDMC_FPQ5_HP_B 0x5f580 +#define PIPEDMC_FPQ_HP(pipe, fq_id) _MMIO_PIPEDMC_FPQ((pipe), (fq_id), \ + _PIPEDMC_FPQ1_HP_A, _PIPEDMC_FPQ2_HP_A, 
\ + _PIPEDMC_FPQ3_HP_A, _PIPEDMC_FPQ4_HP_A, \ + _PIPEDMC_FPQ1_HP_B, _PIPEDMC_FPQ2_HP_B, \ + _PIPEDMC_FPQ3_HP_B, _PIPEDMC_FPQ4_HP_B) + +#define _PIPEDMC_FPQ1_TP_A 0x5f12c +#define _PIPEDMC_FPQ2_TP_A 0x5f13c +#define _PIPEDMC_FPQ3_TP_A 0x5f16c +#define _PIPEDMC_FPQ4_TP_A 0x5f178 +#define _PIPEDMC_FPQ5_TP_A 0x5f184 +#define _PIPEDMC_FPQ1_TP_B 0x5f52c +#define _PIPEDMC_FPQ2_TP_B 0x5f53c +#define _PIPEDMC_FPQ3_TP_B 0x5f56c +#define _PIPEDMC_FPQ4_TP_B 0x5f578 +#define _PIPEDMC_FPQ5_TP_B 0x5f584 +#define PIPEDMC_FPQ_TP(pipe, fq_id) _MMIO_PIPEDMC_FPQ((pipe), (fq_id), \ + _PIPEDMC_FPQ1_TP_A, _PIPEDMC_FPQ2_TP_A, \ + _PIPEDMC_FPQ3_TP_A, _PIPEDMC_FPQ4_TP_A, \ + _PIPEDMC_FPQ1_TP_B, _PIPEDMC_FPQ2_TP_B, \ + _PIPEDMC_FPQ3_TP_B, _PIPEDMC_FPQ4_TP_B) + +#define _PIPEDMC_FPQ1_CHP_A 0x5f130 +#define _PIPEDMC_FPQ2_CHP_A 0x5f140 +#define _PIPEDMC_FPQ3_CHP_A 0x5f170 +#define _PIPEDMC_FPQ4_CHP_A 0x5f17c +#define _PIPEDMC_FPQ5_CHP_A 0x5f188 +#define _PIPEDMC_FPQ1_CHP_B 0x5f530 +#define _PIPEDMC_FPQ2_CHP_B 0x5f540 +#define _PIPEDMC_FPQ3_CHP_B 0x5f570 +#define _PIPEDMC_FPQ4_CHP_B 0x5f57c +#define _PIPEDMC_FPQ5_CHP_B 0x5f588 +#define PIPEDMC_FPQ_CHP(pipe, fq_id) _MMIO_PIPEDMC_FPQ((pipe), (fq_id), \ + _PIPEDMC_FPQ1_CHP_A, _PIPEDMC_FPQ2_CHP_A, \ + _PIPEDMC_FPQ3_CHP_A, _PIPEDMC_FPQ4_CHP_A, \ + _PIPEDMC_FPQ1_CHP_B, _PIPEDMC_FPQ2_CHP_B, \ + _PIPEDMC_FPQ3_CHP_B, _PIPEDMC_FPQ4_CHP_B) + +#define _PIPEDMC_FPQ_TS_A 0x5f134 +#define _PIPEDMC_FPQ_TS_B 0x5f534 +#define PIPEDMC_FPQ_TS(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_TS_A, _PIPEDMC_FPQ_TS_B) + +#define _PIPEDMC_SCANLINE_RO_A 0x5f144 +#define _PIPEDMC_SCANLINE_RO_B 0x5f544 +#define PIPEDMC_SCANLINE_RO(pipe) _MMIO_PIPE((pipe), _PIPEDMC_SCANLINE_RO_A, _PIPEDMC_SCANLINE_RO_B) + +#define _PIPEDMC_FPQ_CTL1_A 0x5f160 +#define _PIPEDMC_FPQ_CTL1_B 0x5f560 +#define PIPEDMC_FPQ_CTL1(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_CTL1_A, _PIPEDMC_FPQ_CTL1_B) +#define PIPEDMC_SW_DMC_WAKE REG_BIT(0) + +#define _PIPEDMC_FPQ_CTL2_A 0x5f164 +#define _PIPEDMC_FPQ_CTL2_B 0x5f564 +#define PIPEDMC_FPQ_CTL2(pipe) _MMIO_PIPE((pipe), _PIPEDMC_FPQ_CTL2_A, _PIPEDMC_FPQ_CTL2_B) +#define PIPEDMC_DMC_INT_AT_DELAYED_VBLANK REG_BIT(1) +#define PIPEDMC_W1_DMC_WAKE REG_BIT(0) + #define _PIPEDMC_INTERRUPT_A 0x5f190 /* lnl+ */ #define _PIPEDMC_INTERRUPT_B 0x5f590 /* lnl+ */ #define PIPEDMC_INTERRUPT(pipe) _MMIO_PIPE((pipe), _PIPEDMC_INTERRUPT_A, _PIPEDMC_INTERRUPT_B) @@ -394,4 +537,51 @@ enum pipedmc_event_id { #define DMC_WAKELOCK_CTL_REQ REG_BIT(31) #define DMC_WAKELOCK_CTL_ACK REG_BIT(15) +#define DMC_FQ_W2_PTS_CFG_SEL _MMIO(0x8f240) +#define PIPE_D_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(26, 24) +#define PIPE_D_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_D_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe)) +#define PIPE_C_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(18, 16) +#define PIPE_C_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_C_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe)) +#define PIPE_B_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(10, 8) +#define PIPE_B_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_B_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe)) +#define PIPE_A_DMC_W2_PTS_CONFIG_SELECT_MASK REG_GENMASK(2, 0) +#define PIPE_A_DMC_W2_PTS_CONFIG_SELECT(pipe) REG_FIELD_PREP(PIPE_A_DMC_W2_PTS_CONFIG_SELECT_MASK, (pipe)) + +/* plane/general flip queue entries */ +#define PIPEDMC_FQ_RAM(start_mmioaddr, i) _MMIO((start_mmioaddr) + (i) * 4) +/* LNL */ +/* DW0 pts */ +/* DW1 head */ +/* DW2 size/etc. 
*/ +#define LNL_FQ_INTERRUPT REG_BIT(31) +#define LNL_FQ_DSB_ID_MASK REG_GENMASK(30, 29) +#define LNL_FQ_DSB_ID(dsb_id) REG_FIELD_PREP(LNL_FQ_DSB_ID_MASK, (dsb_id)) +#define LNL_FQ_EXECUTED REG_BIT(28) +#define LNL_FQ_DSB_SIZE_MASK REG_GENMASK(15, 0) +#define LNL_FQ_DSB_SIZE(size) REG_FIELD_PREP(LNL_FQ_DSB_SIZE_MASK, (size)) +/* DW3 reserved (plane queues) */ +/* DW3 second DSB head (general queue) */ +/* DW4 second DSB size/etc. (general queue) */ +/* DW5 reserved (general queue) */ + +/* PTL+ */ +/* DW0 pts */ +/* DW1 reserved */ +/* DW2 size/etc. */ +#define PTL_FQ_INTERRUPT REG_BIT(31) +#define PTL_FQ_NEED_PUSH REG_BIT(30) +#define PTL_FQ_BLOCK_PUSH REG_BIT(29) +#define PTL_FQ_EXECUTED REG_BIT(28) +#define PTL_FQ_DSB_ID_MASK REG_GENMASK(25, 24) +#define PTL_FQ_DSB_ID(dsb_id) REG_FIELD_PREP(PTL_FQ_DSB_ID_MASK, (dsb_id)) +#define PTL_FQ_DSB_SIZE_MASK REG_GENMASK(15, 0) +#define PTL_FQ_DSB_SIZE(size) REG_FIELD_PREP(PTL_FQ_DSB_SIZE_MASK, (size)) +/* DW3 head */ +/* DW4 second DSB size/etc. (general queue) */ +/* DW5 second DSB head (general queue) */ + +/* undocumented magic DMC variables */ +#define PTL_PIPEDMC_EXEC_TIME_LINES(start_mmioaddr) _MMIO((start_mmioaddr) + 0x6b8) +#define PTL_PIPEDMC_END_OF_EXEC_GB(start_mmioaddr) _MMIO((start_mmioaddr) + 0x6c0) + #endif /* __INTEL_DMC_REGS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dmc_wl.c b/drivers/gpu/drm/i915/display/intel_dmc_wl.c index 44b3ee5c9be4..b3bb89ba34f9 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc_wl.c +++ b/drivers/gpu/drm/i915/display/intel_dmc_wl.c @@ -7,7 +7,6 @@ #include <drm/drm_print.h> -#include "i915_drv.h" #include "intel_de.h" #include "intel_display_regs.h" #include "intel_dmc_regs.h" @@ -155,12 +154,11 @@ static const struct intel_dmc_wl_range xe3lpd_dc3co_dmc_ranges[] = { static void __intel_dmc_wl_release(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_dmc_wl *wl = &display->wl; WARN_ON(refcount_read(&wl->refcount)); - queue_delayed_work(i915->unordered_wq, &wl->work, + queue_delayed_work(display->wq.unordered, &wl->work, msecs_to_jiffies(DMC_WAKELOCK_HOLD_TIME)); } diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 277b40b13948..7976fec88606 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1606,6 +1606,12 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select) { + struct intel_display *display = to_intel_display(intel_dp); + + /* FIXME g4x can't generate an exact 2.7GHz with the 96MHz non-SSC refclk */ + if (display->platform.g4x && port_clock == 268800) + port_clock = 270000; + /* eDP 1.4 rate select method. 
*/ if (intel_dp->use_rate_select) { *link_bw = 0; @@ -3726,6 +3732,9 @@ static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp) memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd)); + if (!drm_dp_is_branch(intel_dp->dpcd)) + return; + if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER, intel_dp->pcon_dsc_dpcd, sizeof(intel_dp->pcon_dsc_dpcd)) < 0) diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 5537136c367a..41228478b21c 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -475,31 +475,6 @@ static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, en return connector->panel.backlight.level; } -static int -intel_dp_aux_vesa_set_luminance(struct intel_connector *connector, u32 level) -{ - struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); - u8 buf[3]; - int ret; - - level = level * 1000; - level &= 0xffffff; - buf[0] = (level & 0x0000ff); - buf[1] = (level & 0x00ff00) >> 8; - buf[2] = (level & 0xff0000) >> 16; - - ret = drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_PANEL_TARGET_LUMINANCE_VALUE, - buf, sizeof(buf)); - if (ret != sizeof(buf)) { - drm_err(intel_dp->aux.drm_dev, - "%s: Failed to set VESA Aux Luminance: %d\n", - intel_dp->aux.name, ret); - return -EINVAL; - } else { - return 0; - } -} - static void intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u32 level) { @@ -507,11 +482,6 @@ intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, u3 struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); - if (panel->backlight.edp.vesa.luminance_control_support) { - if (!intel_dp_aux_vesa_set_luminance(connector, level)) - return; - } - if (!panel->backlight.edp.vesa.info.aux_set) { const u32 pwm_level = intel_backlight_level_to_pwm(connector, level); @@ -528,18 +498,6 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state, struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel *panel = &connector->panel; struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder); - int ret; - - if (panel->backlight.edp.vesa.luminance_control_support) { - ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, - DP_EDP_PANEL_LUMINANCE_CONTROL_ENABLE); - - if (ret == 1) - return; - - if (!intel_dp_aux_vesa_set_luminance(connector, level)) - return; - } if (!panel->backlight.edp.vesa.info.aux_enable) { u32 pwm_level; @@ -580,13 +538,41 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, &connector->base.display_info.luminance_range; struct intel_dp *intel_dp = intel_attached_dp(connector); struct intel_panel *panel = &connector->panel; - u16 current_level; + u32 current_level; u8 current_mode; int ret; - if (panel->backlight.edp.vesa.luminance_control_support) { + ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info, + luminance_range->max_luminance, + panel->vbt.backlight.pwm_freq_hz, + intel_dp->edp_dpcd, ¤t_level, ¤t_mode, + false); + if (ret < 0) + return ret; + + drm_dbg_kms(display->drm, + "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n", + connector->base.base.id, connector->base.name, + dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable)); + drm_dbg_kms(display->drm, + "[CONNECTOR:%d:%s] AUX VESA backlight 
level is controlled through %s\n", + connector->base.base.id, connector->base.name, + dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set)); + + if (!panel->backlight.edp.vesa.info.aux_set || + !panel->backlight.edp.vesa.info.aux_enable) { + ret = panel->backlight.pwm_funcs->setup(connector, pipe); + if (ret < 0) { + drm_err(display->drm, + "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n", + connector->base.base.id, connector->base.name, ret); + return ret; + } + } + + if (panel->backlight.edp.vesa.info.luminance_set) { if (luminance_range->max_luminance) { - panel->backlight.max = luminance_range->max_luminance; + panel->backlight.max = panel->backlight.edp.vesa.info.max; panel->backlight.min = luminance_range->min_luminance; } else { panel->backlight.max = 512; @@ -597,54 +583,26 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] AUX VESA Nits backlight level is controlled through DPCD\n", connector->base.base.id, connector->base.name); - } else { - ret = drm_edp_backlight_init(&intel_dp->aux, &panel->backlight.edp.vesa.info, - panel->vbt.backlight.pwm_freq_hz, intel_dp->edp_dpcd, - ¤t_level, ¤t_mode); - if (ret < 0) - return ret; - - drm_dbg_kms(display->drm, - "[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n", - connector->base.base.id, connector->base.name, - dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable)); - drm_dbg_kms(display->drm, - "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n", - connector->base.base.id, connector->base.name, - dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set)); - - if (!panel->backlight.edp.vesa.info.aux_set || - !panel->backlight.edp.vesa.info.aux_enable) { - ret = panel->backlight.pwm_funcs->setup(connector, pipe); - if (ret < 0) { - drm_err(display->drm, - "[CONNECTOR:%d:%s] Failed to setup PWM backlight controls for eDP backlight: %d\n", - connector->base.base.id, connector->base.name, ret); - return ret; - } + } else if (panel->backlight.edp.vesa.info.aux_set) { + panel->backlight.max = panel->backlight.edp.vesa.info.max; + panel->backlight.min = 0; + if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { + panel->backlight.level = current_level; + panel->backlight.enabled = panel->backlight.level != 0; + } else { + panel->backlight.level = panel->backlight.max; + panel->backlight.enabled = false; } - - if (panel->backlight.edp.vesa.info.aux_set) { - panel->backlight.max = panel->backlight.edp.vesa.info.max; - panel->backlight.min = 0; - if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { - panel->backlight.level = current_level; - panel->backlight.enabled = panel->backlight.level != 0; - } else { - panel->backlight.level = panel->backlight.max; - panel->backlight.enabled = false; - } + } else { + panel->backlight.max = panel->backlight.pwm_level_max; + panel->backlight.min = panel->backlight.pwm_level_min; + if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) { + panel->backlight.level = + panel->backlight.pwm_funcs->get(connector, pipe); + panel->backlight.enabled = panel->backlight.pwm_enabled; } else { - panel->backlight.max = panel->backlight.pwm_level_max; - panel->backlight.min = panel->backlight.pwm_level_min; - if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_PWM) { - panel->backlight.level = - panel->backlight.pwm_funcs->get(connector, pipe); - panel->backlight.enabled = panel->backlight.pwm_enabled; - } else { - panel->backlight.level = 
panel->backlight.max; - panel->backlight.enabled = false; - } + panel->backlight.level = panel->backlight.max; + panel->backlight.enabled = false; } } diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c index 7bd775fb65a0..bd757db85927 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c @@ -805,10 +805,16 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, enum pipe pipe = (enum pipe)cpu_transcoder; enum port port = dig_port->base.port; int ret; + u32 val; + u8 stream_type; - drm_WARN_ON(display->drm, enable && - !!(intel_de_read(display, HDCP2_AUTH_STREAM(display, cpu_transcoder, port)) - & AUTH_STREAM_TYPE) != data->streams[0].stream_type); + if (DISPLAY_VER(display) < 30) { + val = intel_de_read(display, + HDCP2_AUTH_STREAM(display, cpu_transcoder, port)); + stream_type = REG_FIELD_GET(AUTH_STREAM_TYPE_MASK, val); + drm_WARN_ON(display->drm, enable && + stream_type != data->streams[0].stream_type); + } ret = intel_dp_mst_toggle_hdcp_stream_select(connector, enable); if (ret) @@ -824,6 +830,14 @@ intel_dp_mst_hdcp2_stream_encryption(struct intel_connector *connector, return -ETIMEDOUT; } + if (DISPLAY_VER(display) >= 30) { + val = intel_de_read(display, + HDCP2_STREAM_STATUS(display, cpu_transcoder, port)); + stream_type = REG_FIELD_GET(STREAM_TYPE_STATUS_MASK, val); + drm_WARN_ON(display->drm, enable && + stream_type != data->streams[0].stream_type); + } + return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index 3fa94510458d..0fdb32ef241c 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -5,7 +5,8 @@ #include <linux/debugfs.h> -#include "i915_drv.h" +#include <drm/drm_print.h> + #include "intel_atomic.h" #include "intel_de.h" #include "intel_display_regs.h" @@ -123,9 +124,9 @@ static void intel_drrs_set_state(struct intel_crtc *crtc, static void intel_drrs_schedule_work(struct intel_crtc *crtc) { - struct drm_i915_private *i915 = to_i915(crtc->base.dev); + struct intel_display *display = to_intel_display(crtc); - mod_delayed_work(i915->unordered_wq, &crtc->drrs.work, msecs_to_jiffies(1000)); + mod_delayed_work(display->wq.unordered, &crtc->drrs.work, msecs_to_jiffies(1000)); } static unsigned int intel_drrs_frontbuffer_bits(const struct intel_crtc_state *crtc_state) diff --git a/drivers/gpu/drm/i915/display/intel_encoder.c b/drivers/gpu/drm/i915/display/intel_encoder.c index bad452ad979a..0b7bd26f4339 100644 --- a/drivers/gpu/drm/i915/display/intel_encoder.c +++ b/drivers/gpu/drm/i915/display/intel_encoder.c @@ -5,7 +5,6 @@ #include <linux/workqueue.h> -#include "i915_drv.h" #include "intel_display_core.h" #include "intel_display_types.h" #include "intel_encoder.h" @@ -32,9 +31,9 @@ void intel_encoder_link_check_flush_work(struct intel_encoder *encoder) void intel_encoder_link_check_queue_work(struct intel_encoder *encoder, int delay_ms) { - struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_display *display = to_intel_display(encoder); - mod_delayed_work(i915->unordered_wq, + mod_delayed_work(display->wq.unordered, &encoder->link_check_work, msecs_to_jiffies(delay_ms)); } diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index a5906cb4900c..0da842bd2f2f 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -11,7 +11,6 @@ 
#include <drm/drm_modeset_helper.h> #include "i915_drv.h" -#include "intel_atomic_plane.h" #include "intel_bo.h" #include "intel_display.h" #include "intel_display_core.h" @@ -20,6 +19,7 @@ #include "intel_fb.h" #include "intel_fb_bo.h" #include "intel_frontbuffer.h" +#include "intel_plane.h" #define check_array_bounds(display, a, i) drm_WARN_ON((display)->drm, (i) >= ARRAY_SIZE(a)) @@ -422,21 +422,22 @@ unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier) /** * intel_fb_get_format_info: Get a modifier specific format information - * @cmd: FB add command structure + * @pixel_format: pixel format + * @modifier: modifier * * Returns: - * Returns the format information for @cmd->pixel_format specific to @cmd->modifier[0], + * Returns the format information for @pixel_format specific to @modifier, * or %NULL if the modifier doesn't override the format. */ const struct drm_format_info * -intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd) +intel_fb_get_format_info(u32 pixel_format, u64 modifier) { - const struct intel_modifier_desc *md = lookup_modifier_or_null(cmd->modifier[0]); + const struct intel_modifier_desc *md = lookup_modifier_or_null(modifier); if (!md || !md->formats) return NULL; - return lookup_format_info(md->formats, md->format_count, cmd->pixel_format); + return lookup_format_info(md->formats, md->format_count, pixel_format); } static bool plane_caps_contain_any(u8 caps, u8 mask) @@ -1286,10 +1287,10 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb) bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) { + struct intel_display *display = to_intel_display(plane_state); struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - return DISPLAY_VER(dev_priv) < 4 || + return DISPLAY_VER(display) < 4 || (plane->fbc && !plane_state->no_fbc_reason && plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL); } @@ -2206,6 +2207,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = { int intel_framebuffer_init(struct intel_framebuffer *intel_fb, struct drm_gem_object *obj, + const struct drm_format_info *info, struct drm_mode_fb_cmd2 *mode_cmd) { struct intel_display *display = to_intel_display(obj->dev); @@ -2253,7 +2255,7 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb, goto err_frontbuffer_put; } - drm_helper_mode_fill_fb_struct(display->drm, fb, mode_cmd); + drm_helper_mode_fill_fb_struct(display->drm, fb, info, mode_cmd); for (i = 0; i < fb->format->num_planes; i++) { unsigned int stride_alignment; @@ -2323,6 +2325,7 @@ err: struct drm_framebuffer * intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *user_mode_cmd) { struct drm_framebuffer *fb; @@ -2333,7 +2336,7 @@ intel_user_framebuffer_create(struct drm_device *dev, if (IS_ERR(obj)) return ERR_CAST(obj); - fb = intel_framebuffer_create(obj, &mode_cmd); + fb = intel_framebuffer_create(obj, info, &mode_cmd); drm_gem_object_put(obj); return fb; @@ -2341,16 +2344,17 @@ intel_user_framebuffer_create(struct drm_device *dev, struct drm_framebuffer * intel_framebuffer_create(struct drm_gem_object *obj, + const struct drm_format_info *info, struct drm_mode_fb_cmd2 *mode_cmd) { struct intel_framebuffer *intel_fb; int ret; - intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); + intel_fb = intel_bo_alloc_framebuffer(); if (!intel_fb) return ERR_PTR(-ENOMEM); - ret = intel_framebuffer_init(intel_fb, 
obj, mode_cmd); + ret = intel_framebuffer_init(intel_fb, obj, info, mode_cmd); if (ret) goto err; diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h index bdd76b372957..403b8b63721a 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.h +++ b/drivers/gpu/drm/i915/display/intel_fb.h @@ -47,7 +47,7 @@ u64 *intel_fb_plane_get_modifiers(struct intel_display *display, bool intel_fb_plane_supports_modifier(struct intel_plane *plane, u64 modifier); const struct drm_format_info * -intel_fb_get_format_info(const struct drm_mode_fb_cmd2 *cmd); +intel_fb_get_format_info(u32 pixel_format, u64 modifier); bool intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, @@ -102,13 +102,16 @@ void intel_add_fb_offsets(int *x, int *y, int intel_framebuffer_init(struct intel_framebuffer *ifb, struct drm_gem_object *obj, + const struct drm_format_info *info, struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer * intel_framebuffer_create(struct drm_gem_object *obj, + const struct drm_format_info *info, struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer * intel_user_framebuffer_create(struct drm_device *dev, struct drm_file *filp, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *user_mode_cmd); bool intel_fb_modifier_uses_dpt(struct intel_display *display, u64 modifier); diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c index 98a61a7b0b93..5a0151775a3a 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -11,13 +11,13 @@ #include "gem/i915_gem_object.h" #include "i915_drv.h" -#include "intel_atomic_plane.h" #include "intel_display_core.h" #include "intel_display_rpm.h" #include "intel_display_types.h" #include "intel_dpt.h" #include "intel_fb.h" #include "intel_fb_pin.h" +#include "intel_plane.h" static struct i915_vma * intel_fb_pin_to_dpt(const struct drm_framebuffer *fb, @@ -334,3 +334,8 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) intel_dpt_unpin_from_ggtt(fb->dpt_vm); } } + +void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map) +{ + iosys_map_set_vaddr_iomem(map, i915_vma_get_iomap(vma)); +} diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h index 01770dbba2e0..81ab79da1af7 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.h +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h @@ -12,6 +12,7 @@ struct drm_framebuffer; struct i915_vma; struct intel_plane_state; struct i915_gtt_view; +struct iosys_map; struct i915_vma * intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb, @@ -27,5 +28,6 @@ void intel_fb_unpin_vma(struct i915_vma *vma, unsigned long flags); int intel_plane_pin_fb(struct intel_plane_state *new_plane_state, const struct intel_plane_state *old_plane_state); void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state); +void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map); #endif diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index ec1ef8694c35..685ac98bd001 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -552,10 +552,6 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc) if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; intel_de_write(display, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); - - /* wa_18038517565 Enable DPFC clock gating after FBC disable */ - if 
(display->platform.dg2 || DISPLAY_VER(display) >= 14) - fbc_compressor_clkgate_disable_wa(fbc, false); } } @@ -1576,7 +1572,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, if (IS_ERR(cdclk_state)) return PTR_ERR(cdclk_state); - if (crtc_state->pixel_rate >= cdclk_state->logical.cdclk * 95 / 100) { + if (crtc_state->pixel_rate >= intel_cdclk_logical(cdclk_state) * 95 / 100) { plane_state->no_fbc_reason = "pixel rate too high"; return 0; } @@ -1710,6 +1706,10 @@ static void __intel_fbc_disable(struct intel_fbc *fbc) __intel_fbc_cleanup_cfb(fbc); + /* wa_18038517565 Enable DPFC clock gating after FBC disable */ + if (display->platform.dg2 || DISPLAY_VER(display) >= 14) + fbc_compressor_clkgate_disable_wa(fbc, false); + fbc->state.plane = NULL; fbc->flip_pending = false; fbc->busy_bits = 0; @@ -2011,7 +2011,7 @@ void intel_fbc_reset_underrun(struct intel_display *display) static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc) { - struct drm_i915_private *i915 = to_i915(fbc->display->drm); + struct intel_display *display = fbc->display; /* * There's no guarantee that underrun_detected won't be set to true @@ -2024,7 +2024,7 @@ static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc) if (READ_ONCE(fbc->underrun_detected)) return; - queue_work(i915->unordered_wq, &fbc->underrun_work); + queue_work(display->wq.unordered, &fbc->underrun_work); } /** diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index 2dc4029d71ed..7c4709d58aa3 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -512,3 +512,8 @@ struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev) { return fbdev ? fbdev->vma : NULL; } + +void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map) +{ + intel_fb_get_map(fbdev->vma, map); +} diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.h b/drivers/gpu/drm/i915/display/intel_fbdev.h index a15e3e222a0c..150cc5f45bb3 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.h +++ b/drivers/gpu/drm/i915/display/intel_fbdev.h @@ -13,6 +13,7 @@ struct drm_fb_helper_surface_size; struct intel_display; struct intel_fbdev; struct intel_framebuffer; +struct iosys_map; #ifdef CONFIG_DRM_FBDEV_EMULATION int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, @@ -22,7 +23,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, void intel_fbdev_setup(struct intel_display *display); struct intel_framebuffer *intel_fbdev_framebuffer(struct intel_fbdev *fbdev); struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev); - +void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map); #else #define INTEL_FBDEV_DRIVER_OPS \ .fbdev_probe = NULL @@ -39,6 +40,9 @@ static inline struct i915_vma *intel_fbdev_vma_pointer(struct intel_fbdev *fbdev return NULL; } +static inline void intel_fbdev_get_map(struct intel_fbdev *fbdev, struct iosys_map *map) +{ +} #endif #endif /* __INTEL_FBDEV_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c index 8db3af36b2f2..210aee9ae88b 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c @@ -62,7 +62,11 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, return ERR_PTR(-ENOMEM); } - fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj), &mode_cmd); + fb = intel_framebuffer_create(intel_bo_to_drm_bo(obj), 
+ drm_get_format_info(display->drm, + mode_cmd.pixel_format, + mode_cmd.modifier[0]), + &mode_cmd); i915_gem_object_put(obj); return to_intel_framebuffer(fb); diff --git a/drivers/gpu/drm/i915/display/intel_flipq.c b/drivers/gpu/drm/i915/display/intel_flipq.c new file mode 100644 index 000000000000..6ab2272ab2df --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_flipq.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2025 Intel Corporation + */ + +#include <linux/pci.h> + +#include <drm/drm_print.h> + +#include "i915_utils.h" +#include "intel_step.h" +#include "intel_crtc.h" +#include "intel_de.h" +#include "intel_display_core.h" +#include "intel_display_types.h" +#include "intel_flipq.h" +#include "intel_dmc.h" +#include "intel_dmc_regs.h" +#include "intel_dsb.h" +#include "intel_vblank.h" +#include "intel_vrr.h" + +/** + * DOC: DMC Flip Queue + * + * A flip queue is a ring buffer implemented by the pipe DMC firmware. + * The driver inserts entries into the queues to be executed by the + * pipe DMC at a specified presentation timestamp (PTS). + * + * Each pipe DMC provides several queues: + * + * - 1 general queue (two DSB buffers executed per entry) + * - 3 plane queues (one DSB buffer executed per entry) + * - 1 fast queue (deprecated) + */ + +#define for_each_flipq(flipq_id) \ + for ((flipq_id) = INTEL_FLIPQ_PLANE_1; (flipq_id) < MAX_INTEL_FLIPQ; (flipq_id)++) + +static int intel_flipq_offset(enum intel_flipq_id flipq_id) +{ + switch (flipq_id) { + case INTEL_FLIPQ_PLANE_1: + return 0x008; + case INTEL_FLIPQ_PLANE_2: + return 0x108; + case INTEL_FLIPQ_PLANE_3: + return 0x208; + case INTEL_FLIPQ_GENERAL: + return 0x308; + case INTEL_FLIPQ_FAST: + return 0x3c8; + default: + MISSING_CASE(flipq_id); + return 0; + } +} + +static int intel_flipq_size_dw(enum intel_flipq_id flipq_id) +{ + switch (flipq_id) { + case INTEL_FLIPQ_PLANE_1: + case INTEL_FLIPQ_PLANE_2: + case INTEL_FLIPQ_PLANE_3: + return 64; + case INTEL_FLIPQ_GENERAL: + case INTEL_FLIPQ_FAST: + return 48; + default: + MISSING_CASE(flipq_id); + return 1; + } +} + +static int intel_flipq_elem_size_dw(enum intel_flipq_id flipq_id) +{ + switch (flipq_id) { + case INTEL_FLIPQ_PLANE_1: + case INTEL_FLIPQ_PLANE_2: + case INTEL_FLIPQ_PLANE_3: + return 4; + case INTEL_FLIPQ_GENERAL: + case INTEL_FLIPQ_FAST: + return 6; + default: + MISSING_CASE(flipq_id); + return 1; + } +} + +static int intel_flipq_size_entries(enum intel_flipq_id flipq_id) +{ + return intel_flipq_size_dw(flipq_id) / intel_flipq_elem_size_dw(flipq_id); +} + +static void intel_flipq_crtc_init(struct intel_crtc *crtc) +{ + struct intel_display *display = to_intel_display(crtc); + enum intel_flipq_id flipq_id; + + for_each_flipq(flipq_id) { + struct intel_flipq *flipq = &crtc->flipq[flipq_id]; + + flipq->start_mmioaddr = intel_pipedmc_start_mmioaddr(crtc) + intel_flipq_offset(flipq_id); + flipq->flipq_id = flipq_id; + + drm_dbg_kms(display->drm, "[CRTC:%d:%s] FQ %d: start 0x%x\n", + crtc->base.base.id, crtc->base.name, + flipq_id, flipq->start_mmioaddr); + } +} + +bool intel_flipq_supported(struct intel_display *display) +{ + if (!display->params.enable_flipq) + return false; + + if (!display->dmc.dmc) + return false; + + if (DISPLAY_VER(display) == 20) + return true; + + /* DMC firmware expects VRR timing generator to be used */ + return DISPLAY_VER(display) >= 30 && intel_vrr_always_use_vrr_tg(display); +} + +void intel_flipq_init(struct intel_display *display) +{ + struct intel_crtc *crtc; + + intel_dmc_wait_fw_load(display); + + 
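+	/*
+	 * The per-queue MMIO offsets are relative to the pipe DMC start
+	 * address parsed from the firmware image, so wait for the async
+	 * firmware load above before intel_flipq_crtc_init() reads
+	 * intel_pipedmc_start_mmioaddr() below.
+	 */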
for_each_intel_crtc(display->drm, crtc) + intel_flipq_crtc_init(crtc); +} + +static int cdclk_factor(struct intel_display *display) +{ + if (DISPLAY_VER(display) >= 30) + return 120; + else + return 280; +} + +int intel_flipq_exec_time_us(struct intel_display *display) +{ + return intel_dsb_exec_time_us() + + DIV_ROUND_UP(display->cdclk.hw.cdclk * cdclk_factor(display), 540000) + + display->sagv.block_time_us; +} + +static int intel_flipq_preempt_timeout_ms(struct intel_display *display) +{ + return DIV_ROUND_UP(intel_flipq_exec_time_us(display), 1000); +} + +static void intel_flipq_preempt(struct intel_crtc *crtc, bool preempt) +{ + struct intel_display *display = to_intel_display(crtc); + + intel_de_rmw(display, PIPEDMC_FQ_CTRL(crtc->pipe), + PIPEDMC_FQ_CTRL_PREEMPT, preempt ? PIPEDMC_FQ_CTRL_PREEMPT : 0); + + if (preempt && + intel_de_wait_for_clear(display, + PIPEDMC_FQ_STATUS(crtc->pipe), + PIPEDMC_FQ_STATUS_BUSY, + intel_flipq_preempt_timeout_ms(display))) + drm_err(display->drm, "[CRTC:%d:%s] flip queue preempt timeout\n", + crtc->base.base.id, crtc->base.name); +} + +static int intel_flipq_current_head(struct intel_crtc *crtc, enum intel_flipq_id flipq_id) +{ + struct intel_display *display = to_intel_display(crtc); + + return intel_de_read(display, PIPEDMC_FPQ_CHP(crtc->pipe, flipq_id)); +} + +static void intel_flipq_write_tail(struct intel_crtc *crtc) +{ + struct intel_display *display = to_intel_display(crtc); + + intel_de_write(display, PIPEDMC_FPQ_ATOMIC_TP(crtc->pipe), + PIPEDMC_FPQ_PLANEQ_3_TP(crtc->flipq[INTEL_FLIPQ_PLANE_3].tail) | + PIPEDMC_FPQ_PLANEQ_2_TP(crtc->flipq[INTEL_FLIPQ_PLANE_2].tail) | + PIPEDMC_FPQ_PLANEQ_1_TP(crtc->flipq[INTEL_FLIPQ_PLANE_1].tail) | + PIPEDMC_FPQ_FASTQ_TP(crtc->flipq[INTEL_FLIPQ_FAST].tail) | + PIPEDMC_FPQ_GENERALQ_TP(crtc->flipq[INTEL_FLIPQ_GENERAL].tail)); +} + +static void intel_flipq_sw_dmc_wake(struct intel_crtc *crtc) +{ + struct intel_display *display = to_intel_display(crtc); + + intel_de_write(display, PIPEDMC_FPQ_CTL1(crtc->pipe), PIPEDMC_SW_DMC_WAKE); +} + +static int intel_flipq_exec_time_lines(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + return intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, + intel_flipq_exec_time_us(display)); +} + +void intel_flipq_dump(struct intel_crtc *crtc, + enum intel_flipq_id flipq_id) +{ + struct intel_display *display = to_intel_display(crtc); + struct intel_flipq *flipq = &crtc->flipq[flipq_id]; + u32 tmp; + + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] FQ %d @ 0x%x: ", + crtc->base.base.id, crtc->base.name, flipq_id, + flipq->start_mmioaddr); + for (int i = 0 ; i < intel_flipq_size_dw(flipq_id); i++) { + printk(KERN_CONT " 0x%08x", + intel_de_read(display, PIPEDMC_FQ_RAM(flipq->start_mmioaddr, i))); + if (i % intel_flipq_elem_size_dw(flipq_id) == intel_flipq_elem_size_dw(flipq_id) - 1) + printk(KERN_CONT "\n"); + } + + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] FQ %d: chp=0x%x, hp=0x%x\n", + crtc->base.base.id, crtc->base.name, flipq_id, + intel_de_read(display, PIPEDMC_FPQ_CHP(crtc->pipe, flipq_id)), + intel_de_read(display, PIPEDMC_FPQ_HP(crtc->pipe, flipq_id))); + + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] FQ %d: current head %d\n", + crtc->base.base.id, crtc->base.name, flipq_id, + intel_flipq_current_head(crtc, flipq_id)); + + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] flip queue timestamp: 0x%x\n", + crtc->base.base.id, crtc->base.name, + intel_de_read(display, PIPEDMC_FPQ_TS(crtc->pipe))); + + tmp = 
intel_de_read(display, PIPEDMC_FPQ_ATOMIC_TP(crtc->pipe)); + + drm_dbg_kms(display->drm, + "[CRTC:%d:%s] flip queue atomic tails: P3 %d, P2 %d, P1 %d, G %d, F %d\n", + crtc->base.base.id, crtc->base.name, + REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_3_TP_MASK, tmp), + REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_2_TP_MASK, tmp), + REG_FIELD_GET(PIPEDMC_FPQ_PLANEQ_1_TP_MASK, tmp), + REG_FIELD_GET(PIPEDMC_FPQ_GENERALQ_TP_MASK, tmp), + REG_FIELD_GET(PIPEDMC_FPQ_FASTQ_TP_MASK, tmp)); +} + +void intel_flipq_reset(struct intel_display *display, enum pipe pipe) +{ + struct intel_crtc *crtc = intel_crtc_for_pipe(display, pipe); + enum intel_flipq_id flipq_id; + + intel_de_write(display, PIPEDMC_FQ_CTRL(pipe), 0); + + intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(pipe), 0); + intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(pipe), 0); + + for_each_flipq(flipq_id) { + struct intel_flipq *flipq = &crtc->flipq[flipq_id]; + + intel_de_write(display, PIPEDMC_FPQ_HP(pipe, flipq_id), 0); + intel_de_write(display, PIPEDMC_FPQ_CHP(pipe, flipq_id), 0); + + flipq->tail = 0; + } + + intel_de_write(display, PIPEDMC_FPQ_ATOMIC_TP(pipe), 0); +} + +static enum pipedmc_event_id flipq_event_id(struct intel_display *display) +{ + if (DISPLAY_VER(display) >= 30) + return PIPEDMC_EVENT_FULL_FQ_WAKE_TRIGGER; + else + return PIPEDMC_EVENT_SCANLINE_INRANGE_FQ_TRIGGER; +} + +void intel_flipq_enable(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + /* FIXME what to do with VRR? */ + int scanline = intel_mode_vblank_start(&crtc_state->hw.adjusted_mode) - + intel_flipq_exec_time_lines(crtc_state); + + if (DISPLAY_VER(display) >= 30) { + u32 start_mmioaddr = intel_pipedmc_start_mmioaddr(crtc); + + /* undocumented magic DMC variables */ + intel_de_write(display, PTL_PIPEDMC_EXEC_TIME_LINES(start_mmioaddr), + intel_flipq_exec_time_lines(crtc_state)); + intel_de_write(display, PTL_PIPEDMC_END_OF_EXEC_GB(start_mmioaddr), + 100); + } + + intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(crtc->pipe), + PIPEDMC_SCANLINE_UPPER(scanline)); + intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(crtc->pipe), + PIPEDMC_SCANLINEINRANGECMP_EN | + PIPEDMC_SCANLINE_LOWER(scanline - 2)); + + intel_pipedmc_enable_event(crtc, flipq_event_id(display)); + + intel_de_write(display, PIPEDMC_FQ_CTRL(crtc->pipe), PIPEDMC_FQ_CTRL_ENABLE); +} + +void intel_flipq_disable(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + intel_flipq_preempt(crtc, true); + + intel_de_write(display, PIPEDMC_FQ_CTRL(crtc->pipe), 0); + + intel_pipedmc_disable_event(crtc, flipq_event_id(display)); + + intel_de_write(display, PIPEDMC_SCANLINECMPLOWER(crtc->pipe), 0); + intel_de_write(display, PIPEDMC_SCANLINECMPUPPER(crtc->pipe), 0); +} + +static bool assert_flipq_has_room(struct intel_crtc *crtc, + enum intel_flipq_id flipq_id) +{ + struct intel_display *display = to_intel_display(crtc); + struct intel_flipq *flipq = &crtc->flipq[flipq_id]; + int head, size = intel_flipq_size_entries(flipq_id); + + head = intel_flipq_current_head(crtc, flipq_id); + + return !drm_WARN(display->drm, + (flipq->tail + size - head) % size >= size - 1, + "[CRTC:%d:%s] FQ %d overflow (head %d, tail %d, size %d)\n", + crtc->base.base.id, crtc->base.name, flipq_id, + head, flipq->tail, size); +} + +static void intel_flipq_write(struct intel_display *display, + struct 
intel_flipq *flipq, u32 data, int i) +{ + intel_de_write(display, PIPEDMC_FQ_RAM(flipq->start_mmioaddr, flipq->tail * + intel_flipq_elem_size_dw(flipq->flipq_id) + i), data); +} + +static void lnl_flipq_add(struct intel_display *display, + struct intel_flipq *flipq, + unsigned int pts, + enum intel_dsb_id dsb_id, + struct intel_dsb *dsb) +{ + int i = 0; + + switch (flipq->flipq_id) { + case INTEL_FLIPQ_GENERAL: + intel_flipq_write(display, flipq, pts, i++); + intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++); + intel_flipq_write(display, flipq, LNL_FQ_INTERRUPT | + LNL_FQ_DSB_ID(dsb_id) | + LNL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++); + intel_flipq_write(display, flipq, 0, i++); + intel_flipq_write(display, flipq, 0, i++); /* head for second DSB */ + intel_flipq_write(display, flipq, 0, i++); /* DSB engine + size for second DSB */ + break; + case INTEL_FLIPQ_PLANE_1: + case INTEL_FLIPQ_PLANE_2: + case INTEL_FLIPQ_PLANE_3: + intel_flipq_write(display, flipq, pts, i++); + intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++); + intel_flipq_write(display, flipq, LNL_FQ_INTERRUPT | + LNL_FQ_DSB_ID(dsb_id) | + LNL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++); + intel_flipq_write(display, flipq, 0, i++); + break; + default: + MISSING_CASE(flipq->flipq_id); + return; + } +} + +static void ptl_flipq_add(struct intel_display *display, + struct intel_flipq *flipq, + unsigned int pts, + enum intel_dsb_id dsb_id, + struct intel_dsb *dsb) +{ + int i = 0; + + switch (flipq->flipq_id) { + case INTEL_FLIPQ_GENERAL: + intel_flipq_write(display, flipq, pts, i++); + intel_flipq_write(display, flipq, 0, i++); + intel_flipq_write(display, flipq, PTL_FQ_INTERRUPT | + PTL_FQ_DSB_ID(dsb_id) | + PTL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++); + intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++); + intel_flipq_write(display, flipq, 0, i++); /* DSB engine + size for second DSB */ + intel_flipq_write(display, flipq, 0, i++); /* head for second DSB */ + break; + case INTEL_FLIPQ_PLANE_1: + case INTEL_FLIPQ_PLANE_2: + case INTEL_FLIPQ_PLANE_3: + intel_flipq_write(display, flipq, pts, i++); + intel_flipq_write(display, flipq, 0, i++); + intel_flipq_write(display, flipq, PTL_FQ_INTERRUPT | + PTL_FQ_DSB_ID(dsb_id) | + PTL_FQ_DSB_SIZE(intel_dsb_size(dsb) / 64), i++); + intel_flipq_write(display, flipq, intel_dsb_head(dsb), i++); + break; + default: + MISSING_CASE(flipq->flipq_id); + return; + } +} + +void intel_flipq_add(struct intel_crtc *crtc, + enum intel_flipq_id flipq_id, + unsigned int pts, + enum intel_dsb_id dsb_id, + struct intel_dsb *dsb) +{ + struct intel_display *display = to_intel_display(crtc); + struct intel_flipq *flipq = &crtc->flipq[flipq_id]; + + if (!assert_flipq_has_room(crtc, flipq_id)) + return; + + pts += intel_de_read(display, PIPEDMC_FPQ_TS(crtc->pipe)); + + intel_flipq_preempt(crtc, true); + + if (DISPLAY_VER(display) >= 30) + ptl_flipq_add(display, flipq, pts, dsb_id, dsb); + else + lnl_flipq_add(display, flipq, pts, dsb_id, dsb); + + flipq->tail = (flipq->tail + 1) % intel_flipq_size_entries(flipq->flipq_id); + intel_flipq_write_tail(crtc); + + intel_flipq_preempt(crtc, false); + + intel_flipq_sw_dmc_wake(crtc); +} + +/* Wa_18034343758 */ +static bool need_dmc_halt_wa(struct intel_display *display) +{ + return DISPLAY_VER(display) == 20 || + (display->platform.pantherlake && + IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)); +} + +void intel_flipq_wait_dmc_halt(struct intel_dsb *dsb, struct intel_crtc *crtc) +{ + struct intel_display *display = to_intel_display(crtc); + 
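+ /* + * Wa_18034343758: give the pipe DMC time to halt before subsequent DSB + * register writes; the paired intel_flipq_unhalt_dmc() below writes 0 to + * PIPEDMC_CTL via the DSB to let the pipe DMC run again afterwards. + */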
+ if (need_dmc_halt_wa(display)) + intel_dsb_wait_usec(dsb, 2); +} + +void intel_flipq_unhalt_dmc(struct intel_dsb *dsb, struct intel_crtc *crtc) +{ + struct intel_display *display = to_intel_display(crtc); + + if (need_dmc_halt_wa(display)) + intel_dsb_reg_write(dsb, PIPEDMC_CTL(crtc->pipe), 0); +} diff --git a/drivers/gpu/drm/i915/display/intel_flipq.h b/drivers/gpu/drm/i915/display/intel_flipq.h new file mode 100644 index 000000000000..012e3e9a6bcb --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_flipq.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef __INTEL_FLIPQ_H__ +#define __INTEL_FLIPQ_H__ + +#include <linux/types.h> + +enum intel_dsb_id; +enum intel_flipq_id; +enum pipe; +struct intel_crtc; +struct intel_crtc_state; +struct intel_display; +struct intel_dsb; + +bool intel_flipq_supported(struct intel_display *display); +void intel_flipq_init(struct intel_display *display); +void intel_flipq_reset(struct intel_display *display, enum pipe pipe); + +void intel_flipq_enable(const struct intel_crtc_state *crtc_state); +void intel_flipq_disable(const struct intel_crtc_state *old_crtc_state); + +void intel_flipq_add(struct intel_crtc *crtc, + enum intel_flipq_id flip_queue_id, + unsigned int pts, + enum intel_dsb_id dsb_id, + struct intel_dsb *dsb); +int intel_flipq_exec_time_us(struct intel_display *display); +void intel_flipq_wait_dmc_halt(struct intel_dsb *dsb, struct intel_crtc *crtc); +void intel_flipq_unhalt_dmc(struct intel_dsb *dsb, struct intel_crtc *crtc); +void intel_flipq_dump(struct intel_crtc *crtc, + enum intel_flipq_id flip_queue_id); + +#endif /* __INTEL_FLIPQ_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 5235e4162555..42202c8bb066 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -14,10 +14,11 @@ #include <linux/random.h> #include <drm/display/drm_hdcp_helper.h> +#include <drm/drm_print.h> #include <drm/intel/i915_component.h> -#include "i915_drv.h" #include "i915_reg.h" +#include "i915_utils.h" #include "intel_connector.h" #include "intel_de.h" #include "intel_display_power.h" @@ -32,6 +33,7 @@ #include "intel_hdcp_regs.h" #include "intel_hdcp_shim.h" #include "intel_pcode.h" +#include "intel_step.h" #define USE_HDCP_GSC(__display) (DISPLAY_VER(__display) >= 14) @@ -374,7 +376,6 @@ static void intel_hdcp_clear_keys(struct intel_display *display) static int intel_hdcp_load_keys(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); int ret; u32 val; @@ -399,7 +400,7 @@ static int intel_hdcp_load_keys(struct intel_display *display) * Mailbox interface. 
*/ if (DISPLAY_VER(display) == 9 && !display->platform.broxton) { - ret = snb_pcode_write(&i915->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1); + ret = intel_pcode_write(display->drm, SKL_PCODE_LOAD_HDCP_KEYS, 1); if (ret) { drm_err(display->drm, "Failed to initiate HDCP key load (%d)\n", @@ -1089,7 +1090,6 @@ static void intel_hdcp_update_value(struct intel_connector *connector, u64 value, bool update_property) { struct intel_display *display = to_intel_display(connector); - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); struct intel_hdcp *hdcp = &connector->hdcp; @@ -1110,7 +1110,7 @@ static void intel_hdcp_update_value(struct intel_connector *connector, hdcp->value = value; if (update_property) { drm_connector_get(&connector->base); - if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) + if (!queue_work(display->wq.unordered, &hdcp->prop_work)) drm_connector_put(&connector->base); } } @@ -2237,16 +2237,15 @@ static void intel_hdcp_check_work(struct work_struct *work) check_work); struct intel_connector *connector = intel_hdcp_to_connector(hdcp); struct intel_display *display = to_intel_display(connector); - struct drm_i915_private *i915 = to_i915(display->drm); if (drm_connector_is_unregistered(&connector->base)) return; if (!intel_hdcp2_check_link(connector)) - queue_delayed_work(i915->unordered_wq, &hdcp->check_work, + queue_delayed_work(display->wq.unordered, &hdcp->check_work, DRM_HDCP2_CHECK_PERIOD_MS); else if (!intel_hdcp_check_link(connector)) - queue_delayed_work(i915->unordered_wq, &hdcp->check_work, + queue_delayed_work(display->wq.unordered, &hdcp->check_work, DRM_HDCP_CHECK_PERIOD_MS); } @@ -2437,7 +2436,6 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { struct intel_display *display = to_intel_display(encoder); - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_digital_port *dig_port = intel_attached_dig_port(connector); @@ -2496,7 +2494,7 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state, } if (!ret) { - queue_delayed_work(i915->unordered_wq, &hdcp->check_work, + queue_delayed_work(display->wq.unordered, &hdcp->check_work, check_link_interval); intel_hdcp_update_value(connector, DRM_MODE_CONTENT_PROTECTION_ENABLED, @@ -2567,7 +2565,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, to_intel_connector(conn_state->connector); struct intel_hdcp *hdcp = &connector->hdcp; bool content_protection_type_changed, desired_and_not_enabled = false; - struct drm_i915_private *i915 = to_i915(connector->base.dev); + struct intel_display *display = to_intel_display(connector); if (!connector->hdcp.shim) return; @@ -2594,7 +2592,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, mutex_lock(&hdcp->mutex); hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; drm_connector_get(&connector->base); - if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) + if (!queue_work(display->wq.unordered, &hdcp->prop_work)) drm_connector_put(&connector->base); mutex_unlock(&hdcp->mutex); } @@ -2612,7 +2610,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, */ if (!desired_and_not_enabled && !content_protection_type_changed) { drm_connector_get(&connector->base); - if (!queue_work(i915->unordered_wq, &hdcp->prop_work)) + if (!queue_work(display->wq.unordered, &hdcp->prop_work)) 
drm_connector_put(&connector->base); } @@ -2736,7 +2734,6 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector) { struct intel_hdcp *hdcp = &connector->hdcp; struct intel_display *display = to_intel_display(connector); - struct drm_i915_private *i915 = to_i915(display->drm); if (!hdcp->shim) return; @@ -2744,7 +2741,7 @@ void intel_hdcp_handle_cp_irq(struct intel_connector *connector) atomic_inc(&connector->hdcp.cp_irq_count); wake_up_all(&connector->hdcp.cp_irq_queue); - queue_delayed_work(i915->unordered_wq, &hdcp->check_work, 0); + queue_delayed_work(display->wq.unordered, &hdcp->check_work, 0); } static void __intel_hdcp_info(struct seq_file *m, struct intel_connector *connector, diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h index f590d7f48ba7..112ce8c896d6 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp_regs.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp_regs.h @@ -247,7 +247,7 @@ _TRANSA_HDCP2_STREAM_STATUS, \ _TRANSB_HDCP2_STREAM_STATUS) #define STREAM_ENCRYPTION_STATUS REG_BIT(31) -#define STREAM_TYPE_STATUS REG_BIT(30) +#define STREAM_TYPE_STATUS_MASK REG_GENMASK(30, 30) #define HDCP2_STREAM_STATUS(dev_priv, trans, port) \ (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP2_STREAM_STATUS(trans) : \ @@ -263,7 +263,7 @@ #define TRANS_HDCP2_AUTH_STREAM(trans) _MMIO_TRANS(trans, \ _TRANSA_HDCP2_AUTH_STREAM, \ _TRANSB_HDCP2_AUTH_STREAM) -#define AUTH_STREAM_TYPE REG_BIT(31) +#define AUTH_STREAM_TYPE_MASK REG_GENMASK(31, 31) #define HDCP2_AUTH_STREAM(dev_priv, trans, port) \ (TRANS_HDCP(dev_priv) ? \ TRANS_HDCP2_AUTH_STREAM(trans) : \ diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 901fda434af1..265aa97fcc75 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -193,40 +193,34 @@ static bool detection_work_enabled(struct intel_display *display) static bool mod_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay) { - struct drm_i915_private *i915 = to_i915(display->drm); - lockdep_assert_held(&display->irq.lock); if (!detection_work_enabled(display)) return false; - return mod_delayed_work(i915->unordered_wq, work, delay); + return mod_delayed_work(display->wq.unordered, work, delay); } static bool queue_delayed_detection_work(struct intel_display *display, struct delayed_work *work, int delay) { - struct drm_i915_private *i915 = to_i915(display->drm); - lockdep_assert_held(&display->irq.lock); if (!detection_work_enabled(display)) return false; - return queue_delayed_work(i915->unordered_wq, work, delay); + return queue_delayed_work(display->wq.unordered, work, delay); } static bool queue_detection_work(struct intel_display *display, struct work_struct *work) { - struct drm_i915_private *i915 = to_i915(display->drm); - lockdep_assert_held(&display->irq.lock); if (!detection_work_enabled(display)) return false; - return queue_work(i915->unordered_wq, work); + return queue_work(display->wq.unordered, work); } static void diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c index 05e1b309ba2c..8415f3d703ed 100644 --- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c +++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c @@ -961,7 +961,7 @@ void intel_modeset_setup_hw_state(struct intel_display *display, drm_crtc_vblank_reset(&crtc->base); if (crtc_state->hw.active) { - 
intel_dmc_enable_pipe(display, crtc->pipe); + intel_dmc_enable_pipe(crtc_state); intel_crtc_vblank_on(crtc_state); } } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 5535cb799431..81efdb17fc0c 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -31,8 +31,10 @@ #include <acpi/video.h> #include <drm/drm_edid.h> +#include <drm/drm_file.h> +#include <drm/drm_print.h> -#include "i915_drv.h" +#include "i915_utils.h" #include "intel_acpi.h" #include "intel_backlight.h" #include "intel_display_core.h" @@ -665,11 +667,10 @@ bool intel_opregion_asle_present(struct intel_display *display) void intel_opregion_asle_intr(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); struct intel_opregion *opregion = display->opregion; if (opregion && opregion->asle) - queue_work(i915->unordered_wq, &opregion->asle_work); + queue_work(display->wq.unordered, &opregion->asle_work); } #define ACPI_EV_DISPLAY_SWITCH (1<<0) diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index f956919dc648..2a20aaaaac39 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -502,7 +502,7 @@ static void intel_panel_sync_state(struct intel_connector *connector) drm_modeset_unlock(&display->drm->mode_config.connection_mutex); } -const struct drm_panel_funcs dummy_panel_funcs = { +static const struct drm_panel_funcs dummy_panel_funcs = { }; int intel_panel_register(struct intel_connector *connector) @@ -515,7 +515,8 @@ int intel_panel_register(struct intel_connector *connector) if (ret) return ret; - if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) { + if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI || + connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) { struct device *dev = connector->base.kdev; struct drm_panel *base; diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c index 15ede7678636..36fb07471deb 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_plane.c @@ -33,19 +33,22 @@ #include <linux/dma-fence-chain.h> #include <linux/dma-resv.h> +#include <linux/iosys-map.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> +#include <drm/drm_cache.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem.h> #include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_panic.h> #include "gem/i915_gem_object.h" #include "i915_scheduler_types.h" #include "i915_vma.h" #include "i9xx_plane_regs.h" -#include "intel_atomic_plane.h" +#include "intel_bo.h" #include "intel_cdclk.h" #include "intel_cursor.h" #include "intel_display_rps.h" @@ -53,6 +56,9 @@ #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fb_pin.h" +#include "intel_fbdev.h" +#include "intel_plane.h" +#include "intel_psr.h" #include "skl_scaler.h" #include "skl_universal_plane.h" #include "skl_watermark.h" @@ -333,7 +339,7 @@ int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, * display blinking due to constant cdclk changes. 
*/ if (new_crtc_state->min_cdclk[plane->id] <= - cdclk_state->min_cdclk[crtc->pipe]) + intel_cdclk_min_cdclk(cdclk_state, crtc->pipe)) return 0; drm_dbg_kms(display->drm, @@ -341,7 +347,7 @@ int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, plane->base.base.id, plane->base.name, new_crtc_state->min_cdclk[plane->id], crtc->base.base.id, crtc->base.name, - cdclk_state->min_cdclk[crtc->pipe]); + intel_cdclk_min_cdclk(cdclk_state, crtc->pipe)); *need_cdclk_calc = true; return 0; @@ -734,8 +740,8 @@ intel_crtc_get_plane(struct intel_crtc *crtc, enum plane_id plane_id) return NULL; } -int intel_plane_atomic_check(struct intel_atomic_state *state, - struct intel_plane *plane) +static int plane_atomic_check(struct intel_atomic_state *state, + struct intel_plane *plane) { struct intel_display *display = to_intel_display(state); struct intel_plane_state *new_plane_state = @@ -983,10 +989,10 @@ void intel_crtc_planes_update_arm(struct intel_dsb *dsb, i9xx_crtc_planes_update_arm(dsb, state, crtc); } -int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, - struct intel_crtc_state *crtc_state, - int min_scale, int max_scale, - bool can_position) +int intel_plane_check_clipping(struct intel_plane_state *plane_state, + struct intel_crtc_state *crtc_state, + int min_scale, int max_scale, + bool can_position) { struct intel_display *display = to_intel_display(plane_state); struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); @@ -1085,7 +1091,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) /* Wa_16023981245 */ if ((DISPLAY_VERx100(display) == 2000 || - DISPLAY_VERx100(display) == 3000) && + DISPLAY_VERx100(display) == 3000 || + DISPLAY_VERx100(display) == 3002) && src_x % 2 != 0) hsub = 2; } else { @@ -1266,14 +1273,176 @@ intel_cleanup_plane_fb(struct drm_plane *plane, intel_plane_unpin_fb(old_plane_state); } +/* Handle Y-tiling, only if DPT is enabled (otherwise disabling tiling is easier) + * All DPT hardware have 128-bytes width tiling, so Y-tile dimension is 32x32 + * pixels for 32bits pixels. 
+ */ +#define YTILE_WIDTH 32 +#define YTILE_HEIGHT 32 +#define YTILE_SIZE (YTILE_WIDTH * YTILE_HEIGHT * 4) + +static unsigned int intel_ytile_get_offset(unsigned int width, unsigned int x, unsigned int y) +{ + u32 offset; + unsigned int swizzle; + unsigned int width_in_blocks = DIV_ROUND_UP(width, 32); + + /* Block offset */ + offset = ((y / YTILE_HEIGHT) * width_in_blocks + (x / YTILE_WIDTH)) * YTILE_SIZE; + + x = x % YTILE_WIDTH; + y = y % YTILE_HEIGHT; + + /* bit order inside a block is x4 x3 x2 y4 y3 y2 y1 y0 x1 x0 */ + swizzle = (x & 3) | ((y & 0x1f) << 2) | ((x & 0x1c) << 5); + offset += swizzle * 4; + return offset; +} + +static unsigned int intel_4tile_get_offset(unsigned int width, unsigned int x, unsigned int y) +{ + u32 offset; + unsigned int swizzle; + unsigned int width_in_blocks = DIV_ROUND_UP(width, 32); + + /* Block offset */ + offset = ((y / YTILE_HEIGHT) * width_in_blocks + (x / YTILE_WIDTH)) * YTILE_SIZE; + + x = x % YTILE_WIDTH; + y = y % YTILE_HEIGHT; + + /* bit order inside a block is y4 y3 x4 y2 x3 x2 y1 y0 x1 x0 */ + swizzle = (x & 3) | ((y & 3) << 2) | ((x & 0xc) << 2) | (y & 4) << 4 | + ((x & 0x10) << 3) | ((y & 0x18) << 5); + offset += swizzle * 4; + return offset; +} + +static void intel_panic_flush(struct drm_plane *plane) +{ + struct intel_plane_state *plane_state = to_intel_plane_state(plane->state); + struct intel_crtc_state *crtc_state = to_intel_crtc_state(plane->state->crtc->state); + struct intel_plane *iplane = to_intel_plane(plane); + struct intel_display *display = to_intel_display(iplane); + struct drm_framebuffer *fb = plane_state->hw.fb; + struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); + + intel_bo_panic_finish(intel_fb); + + if (crtc_state->enable_psr2_sel_fetch) { + /* Force a full update for psr2 */ + intel_psr2_panic_force_full_update(display, crtc_state); + } + + /* Flush the cache and don't disable tiling if it's the fbdev framebuffer.*/ + if (intel_fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) { + struct iosys_map map; + + intel_fbdev_get_map(display->fbdev.fbdev, &map); + drm_clflush_virt_range(map.vaddr, fb->pitches[0] * fb->height); + return; + } + + if (fb->modifier && iplane->disable_tiling) + iplane->disable_tiling(iplane); +} + +static unsigned int (*intel_get_tiling_func(u64 fb_modifier))(unsigned int width, + unsigned int x, + unsigned int y) +{ + switch (fb_modifier) { + case I915_FORMAT_MOD_Y_TILED: + case I915_FORMAT_MOD_Y_TILED_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC: + case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS: + case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS: + return intel_ytile_get_offset; + case I915_FORMAT_MOD_4_TILED: + case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS: + case I915_FORMAT_MOD_4_TILED_DG2_MC_CCS: + case I915_FORMAT_MOD_4_TILED_DG2_RC_CCS_CC: + case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS: + case I915_FORMAT_MOD_4_TILED_MTL_RC_CCS_CC: + case I915_FORMAT_MOD_4_TILED_MTL_MC_CCS: + case I915_FORMAT_MOD_4_TILED_BMG_CCS: + case I915_FORMAT_MOD_4_TILED_LNL_CCS: + return intel_4tile_get_offset; + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Yf_TILED: + case I915_FORMAT_MOD_Yf_TILED_CCS: + default: + /* Not supported yet */ + return NULL; + } +} + +static int intel_get_scanout_buffer(struct drm_plane *plane, + struct drm_scanout_buffer *sb) +{ + struct intel_plane_state *plane_state; + struct drm_gem_object *obj; + struct drm_framebuffer *fb; + struct intel_framebuffer *intel_fb; + struct intel_display *display = to_intel_display(plane->dev); + + if (!plane->state || !plane->state->fb || 
!plane->state->visible) + return -ENODEV; + + plane_state = to_intel_plane_state(plane->state); + fb = plane_state->hw.fb; + intel_fb = to_intel_framebuffer(fb); + + obj = intel_fb_bo(fb); + if (!obj) + return -ENODEV; + + if (intel_fb == intel_fbdev_framebuffer(display->fbdev.fbdev)) { + intel_fbdev_get_map(display->fbdev.fbdev, &sb->map[0]); + } else { + int ret; + /* Can't disable tiling if DPT is in use */ + if (intel_fb_uses_dpt(fb)) { + if (fb->format->cpp[0] != 4) + return -EOPNOTSUPP; + intel_fb->panic_tiling = intel_get_tiling_func(fb->modifier); + if (!intel_fb->panic_tiling) + return -EOPNOTSUPP; + } + sb->private = intel_fb; + ret = intel_bo_panic_setup(sb); + if (ret) + return ret; + } + sb->width = fb->width; + sb->height = fb->height; + /* Use the generic linear format, because tiling, RC, CCS, CC + * will be disabled in disable_tiling() + */ + sb->format = drm_format_info(fb->format->format); + sb->pitch[0] = fb->pitches[0]; + + return 0; +} + static const struct drm_plane_helper_funcs intel_plane_helper_funcs = { .prepare_fb = intel_prepare_plane_fb, .cleanup_fb = intel_cleanup_plane_fb, }; +static const struct drm_plane_helper_funcs intel_primary_plane_helper_funcs = { + .prepare_fb = intel_prepare_plane_fb, + .cleanup_fb = intel_cleanup_plane_fb, + .get_scanout_buffer = intel_get_scanout_buffer, + .panic_flush = intel_panic_flush, +}; + void intel_plane_helper_add(struct intel_plane *plane) { - drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); + if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) + drm_plane_helper_add(&plane->base, &intel_primary_plane_helper_funcs); + else + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); } void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_state, @@ -1433,8 +1602,8 @@ static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, return 0; } -int intel_atomic_add_affected_planes(struct intel_atomic_state *state, - struct intel_crtc *crtc) +int intel_plane_add_affected(struct intel_atomic_state *state, + struct intel_crtc *crtc) { const struct intel_crtc_state *old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc); @@ -1528,7 +1697,7 @@ static int intel_add_affected_planes(struct intel_atomic_state *state) return 0; } -int intel_atomic_check_planes(struct intel_atomic_state *state) +int intel_plane_atomic_check(struct intel_atomic_state *state) { struct intel_display *display = to_intel_display(state); struct intel_crtc_state *old_crtc_state, *new_crtc_state; @@ -1542,7 +1711,7 @@ int intel_atomic_check_planes(struct intel_atomic_state *state) return ret; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - ret = intel_plane_atomic_check(state, plane); + ret = plane_atomic_check(state, plane); if (ret) { drm_dbg_atomic(display->drm, "[PLANE:%d:%s] atomic driver check failed\n", diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_plane.h index 317320c32285..4ef012c08fa4 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_plane.h @@ -3,8 +3,8 @@ * Copyright © 2019 Intel Corporation */ -#ifndef __INTEL_ATOMIC_PLANE_H__ -#define __INTEL_ATOMIC_PLANE_H__ +#ifndef __INTEL_PLANE_H__ +#define __INTEL_PLANE_H__ #include <linux/types.h> @@ -69,15 +69,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, struct intel_plane_state *intel_state); 
-int intel_plane_atomic_check(struct intel_atomic_state *state, - struct intel_plane *plane); int intel_plane_calc_min_cdclk(struct intel_atomic_state *state, struct intel_plane *plane, bool *need_cdclk_calc); -int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, - struct intel_crtc_state *crtc_state, - int min_scale, int max_scale, - bool can_position); +int intel_plane_check_clipping(struct intel_plane_state *plane_state, + struct intel_crtc_state *crtc_state, + int min_scale, int max_scale, + bool can_position); int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state); void intel_plane_set_invisible(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); @@ -85,13 +83,13 @@ void intel_plane_helper_add(struct intel_plane *plane); bool intel_plane_needs_physical(struct intel_plane *plane); void intel_plane_init_cursor_vblank_work(struct intel_plane_state *old_plane_state, struct intel_plane_state *new_plane_state); -int intel_atomic_add_affected_planes(struct intel_atomic_state *state, - struct intel_crtc *crtc); -int intel_atomic_check_planes(struct intel_atomic_state *state); +int intel_plane_add_affected(struct intel_atomic_state *state, + struct intel_crtc *crtc); +int intel_plane_atomic_check(struct intel_atomic_state *state); u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state); bool intel_plane_format_mod_supported_async(struct drm_plane *plane, u32 format, u64 modifier); -#endif /* __INTEL_ATOMIC_PLANE_H__ */ +#endif /* __INTEL_PLANE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c index 8800a657cd21..4246173ed311 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.c +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -6,13 +6,13 @@ #include "gem/i915_gem_lmem.h" #include "gem/i915_gem_region.h" #include "i915_drv.h" -#include "intel_atomic_plane.h" #include "intel_crtc.h" #include "intel_display.h" #include "intel_display_core.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_frontbuffer.h" +#include "intel_plane.h" #include "intel_plane_initial.h" void intel_plane_initial_vblank_wait(struct intel_crtc *crtc) @@ -289,7 +289,8 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, mode_cmd.flags = DRM_MODE_FB_MODIFIERS; if (intel_framebuffer_init(to_intel_framebuffer(fb), - intel_bo_to_drm_bo(vma->obj), &mode_cmd)) { + intel_bo_to_drm_bo(vma->obj), + fb->format, &mode_cmd)) { drm_dbg_kms(display->drm, "intel fb init failed\n"); goto err_vma; } diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c index 93d5ee36fff1..d806c15db7ce 100644 --- a/drivers/gpu/drm/i915/display/intel_pmdemand.c +++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c @@ -294,40 +294,17 @@ intel_pmdemand_connector_needs_update(struct intel_atomic_state *state) static bool intel_pmdemand_needs_update(struct intel_atomic_state *state) { - struct intel_display *display = to_intel_display(state); - const struct intel_bw_state *new_bw_state, *old_bw_state; - const struct intel_cdclk_state *new_cdclk_state, *old_cdclk_state; const struct intel_crtc_state *new_crtc_state, *old_crtc_state; - const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state; struct intel_crtc *crtc; int i; - new_bw_state = intel_atomic_get_new_bw_state(state); - old_bw_state = intel_atomic_get_old_bw_state(state); - if (new_bw_state && new_bw_state->qgv_point_peakbw != - 
old_bw_state->qgv_point_peakbw) + if (intel_bw_pmdemand_needs_update(state)) return true; - new_dbuf_state = intel_atomic_get_new_dbuf_state(state); - old_dbuf_state = intel_atomic_get_old_dbuf_state(state); - if (new_dbuf_state && - new_dbuf_state->active_pipes != old_dbuf_state->active_pipes) + if (intel_dbuf_pmdemand_needs_update(state)) return true; - if (DISPLAY_VER(display) < 30) { - if (new_dbuf_state && - new_dbuf_state->enabled_slices != - old_dbuf_state->enabled_slices) - return true; - } - - new_cdclk_state = intel_atomic_get_new_cdclk_state(state); - old_cdclk_state = intel_atomic_get_old_cdclk_state(state); - if (new_cdclk_state && - (new_cdclk_state->actual.cdclk != - old_cdclk_state->actual.cdclk || - new_cdclk_state->actual.voltage_level != - old_cdclk_state->actual.voltage_level)) + if (intel_cdclk_pmdemand_needs_update(state)) return true; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, @@ -362,7 +339,7 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state) /* firmware will calculate the qclk_gv_index, requirement is set to 0 */ new_pmdemand_state->params.qclk_gv_index = 0; - new_pmdemand_state->params.qclk_gv_bw = new_bw_state->qgv_point_peakbw; + new_pmdemand_state->params.qclk_gv_bw = intel_bw_qgv_point_peakbw(new_bw_state); new_dbuf_state = intel_atomic_get_dbuf_state(state); if (IS_ERR(new_dbuf_state)) @@ -370,12 +347,12 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state) if (DISPLAY_VER(display) < 30) { new_pmdemand_state->params.active_dbufs = - min_t(u8, hweight8(new_dbuf_state->enabled_slices), 3); + min_t(u8, intel_dbuf_num_enabled_slices(new_dbuf_state), 3); new_pmdemand_state->params.active_pipes = - min_t(u8, hweight8(new_dbuf_state->active_pipes), 3); + min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), 3); } else { new_pmdemand_state->params.active_pipes = - min_t(u8, hweight8(new_dbuf_state->active_pipes), INTEL_NUM_PIPES(display)); + min_t(u8, intel_dbuf_num_active_pipes(new_dbuf_state), INTEL_NUM_PIPES(display)); } new_cdclk_state = intel_atomic_get_cdclk_state(state); @@ -383,9 +360,9 @@ int intel_pmdemand_atomic_check(struct intel_atomic_state *state) return PTR_ERR(new_cdclk_state); new_pmdemand_state->params.voltage_index = - new_cdclk_state->actual.voltage_level; + intel_cdclk_actual_voltage_level(new_cdclk_state); new_pmdemand_state->params.cdclk_freq_mhz = - DIV_ROUND_UP(new_cdclk_state->actual.cdclk, 1000); + DIV_ROUND_UP(intel_cdclk_actual(new_cdclk_state), 1000); intel_pmdemand_update_max_ddiclk(display, state, new_pmdemand_state); diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index bff81fb5c316..b64d0b30f5b1 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -5,9 +5,11 @@ #include <linux/debugfs.h> +#include <drm/drm_print.h> + #include "g4x_dp.h" -#include "i915_drv.h" #include "i915_reg.h" +#include "i915_utils.h" #include "intel_de.h" #include "intel_display_power_well.h" #include "intel_display_regs.h" @@ -892,7 +894,6 @@ static void edp_panel_vdd_work(struct work_struct *__work) static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *i915 = to_i915(display->drm); unsigned long delay; /* @@ -908,7 +909,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp) * operations. 
*/ delay = msecs_to_jiffies(intel_dp->pps.panel_power_cycle_delay * 5); - queue_delayed_work(i915->unordered_wq, + queue_delayed_work(display->wq.unordered, &intel_dp->pps.panel_vdd_work, delay); } diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 8bee2f592ae7..41988e193a41 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -28,7 +28,6 @@ #include <drm/drm_debugfs.h> #include <drm/drm_vblank.h> -#include "i915_drv.h" #include "i915_reg.h" #include "intel_alpm.h" #include "intel_atomic.h" @@ -48,6 +47,7 @@ #include "intel_psr.h" #include "intel_psr_regs.h" #include "intel_snps_phy.h" +#include "intel_step.h" #include "intel_vblank.h" #include "intel_vrr.h" #include "skl_universal_plane.h" @@ -448,7 +448,6 @@ static void psr_event_print(struct intel_display *display, void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); enum transcoder cpu_transcoder = intel_dp->psr.transcoder; ktime_t time_ns = ktime_get(); @@ -493,7 +492,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir) intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder), 0, psr_irq_psr_error_bit_get(intel_dp)); - queue_work(dev_priv->unordered_wq, &intel_dp->psr.work); + queue_work(display->wq.unordered, &intel_dp->psr.work); } } @@ -2889,6 +2888,26 @@ skip_sel_fetch_set_loop: return 0; } +void intel_psr2_panic_force_full_update(struct intel_display *display, + struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 val = man_trk_ctl_enable_bit_get(display); + + /* SF partial frame enable has to be set even on full update */ + val |= man_trk_ctl_partial_frame_bit_get(display); + val |= man_trk_ctl_continuos_full_frame(display); + + /* Directly write the register */ + intel_de_write_fw(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder), val); + + if (!crtc_state->enable_psr2_su_region_et) + return; + + intel_de_write_fw(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe), 0); +} + void intel_psr_pre_plane_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -3256,7 +3275,9 @@ static void intel_psr_configure_full_frame_update(struct intel_dp *intel_dp) static void _psr_invalidate_handle(struct intel_dp *intel_dp) { - if (intel_dp->psr.psr2_sel_fetch_enabled) { + struct intel_display *display = to_intel_display(intel_dp); + + if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) { if (!intel_dp->psr.psr2_sel_fetch_cff_enabled) { intel_dp->psr.psr2_sel_fetch_cff_enabled = true; intel_psr_configure_full_frame_update(intel_dp); @@ -3320,7 +3341,6 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, enum fb_op_origin origin) { struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *i915 = to_i915(display->drm); if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled || !intel_dp->psr.active) @@ -3335,16 +3355,15 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits, return; tgl_psr2_enable_dc3co(intel_dp); - mod_delayed_work(i915->unordered_wq, &intel_dp->psr.dc3co_work, + mod_delayed_work(display->wq.unordered, &intel_dp->psr.dc3co_work, intel_dp->psr.dc3co_exit_delay); } static void _psr_flush_handle(struct intel_dp *intel_dp) { 
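/* * Display ver < 20 still needs the continuous full frame (CFF) handling * below before forcing a selective-fetch update; on newer hardware that * path is skipped and PSR is simply exited, with psr.work queued once no * frontbuffer bits remain busy. */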
struct intel_display *display = to_intel_display(intel_dp); - struct drm_i915_private *dev_priv = to_i915(display->drm); - if (intel_dp->psr.psr2_sel_fetch_enabled) { + if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) { if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { /* can we turn CFF off? */ if (intel_dp->psr.busy_frontbuffer_bits == 0) @@ -3361,13 +3380,15 @@ static void _psr_flush_handle(struct intel_dp *intel_dp) * existing SU configuration */ intel_psr_configure_full_frame_update(intel_dp); - } - intel_psr_force_update(intel_dp); + intel_psr_force_update(intel_dp); + } else { + intel_psr_exit(intel_dp); + } - if (!intel_dp->psr.psr2_sel_fetch_enabled && !intel_dp->psr.active && + if ((!intel_dp->psr.psr2_sel_fetch_enabled || DISPLAY_VER(display) >= 20) && !intel_dp->psr.busy_frontbuffer_bits) - queue_work(dev_priv->unordered_wq, &intel_dp->psr.work); + queue_work(display->wq.unordered, &intel_dp->psr.work); } /** diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index 0cf53184f13f..9b061a22361f 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -57,6 +57,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc); void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb, const struct intel_crtc_state *crtc_state); +void intel_psr2_panic_force_full_update(struct intel_display *display, + struct intel_crtc_state *crtc_state); void intel_psr_pause(struct intel_dp *intel_dp); void intel_psr_resume(struct intel_dp *intel_dp); bool intel_psr_needs_vblank_notification(const struct intel_crtc_state *crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c index 74bb3bedf30f..7fe6b4a18213 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c +++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c @@ -103,10 +103,10 @@ static void get_ana_cp_int_prop(u64 vco_clk, DIV_ROUND_DOWN_ULL(curve_1_interpolated, CURVE0_MULTIPLIER))); ana_cp_int_temp = - DIV_ROUND_CLOSEST_ULL(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1), - CURVE2_MULTIPLIER); + DIV64_U64_ROUND_CLOSEST(DIV_ROUND_DOWN_ULL(adjusted_vco_clk1, curve_2_scaled1), + CURVE2_MULTIPLIER); - *ana_cp_int = max(1, min(ana_cp_int_temp, 127)); + *ana_cp_int = clamp(ana_cp_int_temp, 1, 127); curve_2_scaled_int = curve_2_scaled1 * (*ana_cp_int); @@ -125,7 +125,7 @@ static void get_ana_cp_int_prop(u64 vco_clk, curve_1_interpolated); *ana_cp_prop = DIV64_U64_ROUND_UP(adjusted_vco_clk2, curve_2_scaled2); - *ana_cp_prop = max(1, min(*ana_cp_prop, 127)); + *ana_cp_prop = clamp(*ana_cp_prop, 1, 127); } static void compute_hdmi_tmds_pll(u64 pixel_clock, u32 refclk, diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index fd92e6b89b43..e6844df837af 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -41,11 +41,11 @@ #include "i915_utils.h" #include "i9xx_plane.h" -#include "intel_atomic_plane.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_fb.h" #include "intel_frontbuffer.h" +#include "intel_plane.h" #include "intel_sprite.h" #include "intel_sprite_regs.h" @@ -1366,8 +1366,8 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state, } } - ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, - min_scale, max_scale, true); + ret = 
intel_plane_check_clipping(plane_state, crtc_state, + min_scale, max_scale, true); if (ret) return ret; @@ -1421,10 +1421,10 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, - DRM_PLANE_NO_SCALING, - DRM_PLANE_NO_SCALING, - true); + ret = intel_plane_check_clipping(plane_state, crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + true); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 3bc57579fe53..668ef139391b 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -23,6 +23,7 @@ #include "intel_modeset_lock.h" #include "intel_tc.h" +#define DP_PIN_ASSIGNMENT_NONE 0x0 #define DP_PIN_ASSIGNMENT_C 0x3 #define DP_PIN_ASSIGNMENT_D 0x4 #define DP_PIN_ASSIGNMENT_E 0x5 @@ -66,6 +67,7 @@ struct intel_tc_port { enum tc_port_mode init_mode; enum phy_fia phy_fia; u8 phy_fia_idx; + u8 max_lane_count; }; static enum intel_display_power_domain @@ -307,6 +309,8 @@ static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val); switch (pin_assignment) { + case DP_PIN_ASSIGNMENT_NONE: + return 0; default: MISSING_CASE(pin_assignment); fallthrough; @@ -365,12 +369,12 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) } } -int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) +static int get_max_lane_count(struct intel_tc_port *tc) { - struct intel_display *display = to_intel_display(dig_port); - struct intel_tc_port *tc = to_tc_port(dig_port); + struct intel_display *display = to_intel_display(tc->dig_port); + struct intel_digital_port *dig_port = tc->dig_port; - if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT) + if (tc->mode != TC_PORT_DP_ALT) return 4; assert_tc_cold_blocked(tc); @@ -384,6 +388,25 @@ int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) return intel_tc_port_get_max_lane_count(dig_port); } +static void read_pin_configuration(struct intel_tc_port *tc) +{ + tc->max_lane_count = get_max_lane_count(tc); +} + +int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) +{ + struct intel_display *display = to_intel_display(dig_port); + struct intel_tc_port *tc = to_tc_port(dig_port); + + if (!intel_encoder_is_tc(&dig_port->base)) + return 4; + + if (DISPLAY_VER(display) < 20) + return get_max_lane_count(tc); + + return tc->max_lane_count; +} + void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, int required_lanes) { @@ -596,9 +619,12 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc) tc_cold_wref = __tc_cold_block(tc, &domain); tc->mode = tc_phy_get_current_mode(tc); - if (tc->mode != TC_PORT_DISCONNECTED) + if (tc->mode != TC_PORT_DISCONNECTED) { tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + } + __tc_cold_unblock(tc, domain, tc_cold_wref); } @@ -656,8 +682,11 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc, tc->lock_wakeref = tc_cold_block(tc); - if (tc->mode == TC_PORT_TBT_ALT) + if (tc->mode == TC_PORT_TBT_ALT) { + read_pin_configuration(tc); + return true; + } if ((!tc_phy_is_ready(tc) || !icl_tc_phy_take_ownership(tc, true)) && @@ -668,6 +697,7 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc, goto out_unblock_tc_cold; } + read_pin_configuration(tc); if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto 
out_release_phy; @@ -858,9 +888,12 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc) port_wakeref = intel_display_power_get(display, port_power_domain); tc->mode = tc_phy_get_current_mode(tc); - if (tc->mode != TC_PORT_DISCONNECTED) + if (tc->mode != TC_PORT_DISCONNECTED) { tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + } + intel_display_power_put(display, port_power_domain, port_wakeref); } @@ -873,6 +906,9 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) if (tc->mode == TC_PORT_TBT_ALT) { tc->lock_wakeref = tc_cold_block(tc); + + read_pin_configuration(tc); + return true; } @@ -894,6 +930,8 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_unblock_tc_cold; @@ -1124,9 +1162,18 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc) tc_cold_wref = __tc_cold_block(tc, &domain); tc->mode = tc_phy_get_current_mode(tc); - if (tc->mode != TC_PORT_DISCONNECTED) + if (tc->mode != TC_PORT_DISCONNECTED) { tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + /* + * Set a valid lane count value for a DP-alt sink which got + * disconnected. The driver can only disable the output on this PHY. + */ + if (tc->max_lane_count == 0) + tc->max_lane_count = 4; + } + drm_WARN_ON(display->drm, (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) && !xelpdp_tc_phy_tcss_power_is_enabled(tc)); @@ -1138,14 +1185,19 @@ static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) { tc->lock_wakeref = tc_cold_block(tc); - if (tc->mode == TC_PORT_TBT_ALT) + if (tc->mode == TC_PORT_TBT_ALT) { + read_pin_configuration(tc); + return true; + } if (!xelpdp_tc_phy_enable_tcss_power(tc, true)) goto out_unblock_tccold; xelpdp_tc_phy_take_ownership(tc, true); + read_pin_configuration(tc); + if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_release_phy; @@ -1226,14 +1278,19 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc) tc->phy_ops->get_hw_state(tc); } -static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc, - bool phy_is_ready, bool phy_is_owned) +/* Is the PHY owned by display i.e. is it in legacy or DP-alt mode? 
*/ +static bool tc_phy_owned_by_display(struct intel_tc_port *tc, + bool phy_is_ready, bool phy_is_owned) { struct intel_display *display = to_intel_display(tc->dig_port); - drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready); + if (DISPLAY_VER(display) < 20) { + drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready); - return phy_is_ready && phy_is_owned; + return phy_is_ready && phy_is_owned; + } else { + return phy_is_owned; + } } static bool tc_phy_is_connected(struct intel_tc_port *tc, @@ -1244,7 +1301,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc, bool phy_is_owned = tc_phy_is_owned(tc); bool is_connected; - if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) + if (tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY; else is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT; @@ -1352,7 +1409,7 @@ tc_phy_get_current_mode(struct intel_tc_port *tc) phy_is_ready = tc_phy_is_ready(tc); phy_is_owned = tc_phy_is_owned(tc); - if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) { + if (!tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) { mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode); } else { drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT); @@ -1441,11 +1498,11 @@ static void intel_tc_port_reset_mode(struct intel_tc_port *tc, intel_display_power_flush_work(display); if (!intel_tc_cold_requires_aux_pw(dig_port)) { enum intel_display_power_domain aux_domain; - bool aux_powered; aux_domain = intel_aux_power_domain(dig_port); - aux_powered = intel_display_power_is_enabled(display, aux_domain); - drm_WARN_ON(display->drm, aux_powered); + if (intel_display_power_is_enabled(display, aux_domain)) + drm_dbg_kms(display->drm, "Port %s: AUX unexpectedly powered\n", + tc->port_name); } tc_phy_disconnect(tc); diff --git a/drivers/gpu/drm/i915/display/intel_vrr_regs.h b/drivers/gpu/drm/i915/display/intel_vrr_regs.h index 09cdd50d6187..ba9b9215dc11 100644 --- a/drivers/gpu/drm/i915/display/intel_vrr_regs.h +++ b/drivers/gpu/drm/i915/display/intel_vrr_regs.h @@ -8,126 +8,119 @@ #include "intel_display_reg_defs.h" -/* VRR registers */ #define _TRANS_VRR_CTL_A 0x60420 #define _TRANS_VRR_CTL_B 0x61420 #define _TRANS_VRR_CTL_C 0x62420 #define _TRANS_VRR_CTL_D 0x63420 -#define TRANS_VRR_CTL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_CTL_A) -#define VRR_CTL_VRR_ENABLE REG_BIT(31) -#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30) -#define VRR_CTL_FLIP_LINE_EN REG_BIT(29) -#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3) -#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x)) -#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0) -#define XELPD_VRR_CTL_VRR_GUARDBAND_MASK REG_GENMASK(15, 0) -#define XELPD_VRR_CTL_VRR_GUARDBAND(x) REG_FIELD_PREP(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, (x)) +#define TRANS_VRR_CTL(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_CTL_A) +#define VRR_CTL_VRR_ENABLE REG_BIT(31) +#define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30) +#define VRR_CTL_FLIP_LINE_EN REG_BIT(29) +#define VRR_CTL_CMRR_ENABLE REG_BIT(27) +#define VRR_CTL_PIPELINE_FULL_MASK REG_GENMASK(10, 3) +#define VRR_CTL_PIPELINE_FULL(x) REG_FIELD_PREP(VRR_CTL_PIPELINE_FULL_MASK, (x)) +#define VRR_CTL_PIPELINE_FULL_OVERRIDE REG_BIT(0) +#define XELPD_VRR_CTL_VRR_GUARDBAND_MASK REG_GENMASK(15, 0) +#define XELPD_VRR_CTL_VRR_GUARDBAND(x) REG_FIELD_PREP(XELPD_VRR_CTL_VRR_GUARDBAND_MASK, (x)) #define _TRANS_VRR_VMAX_A 0x60424 #define _TRANS_VRR_VMAX_B 0x61424 #define 
_TRANS_VRR_VMAX_C 0x62424 #define _TRANS_VRR_VMAX_D 0x63424 -#define TRANS_VRR_VMAX(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMAX_A) -#define VRR_VMAX_MASK REG_GENMASK(19, 0) +#define TRANS_VRR_VMAX(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VMAX_A) +#define VRR_VMAX_MASK REG_GENMASK(19, 0) #define _TRANS_VRR_VMIN_A 0x60434 #define _TRANS_VRR_VMIN_B 0x61434 #define _TRANS_VRR_VMIN_C 0x62434 #define _TRANS_VRR_VMIN_D 0x63434 -#define TRANS_VRR_VMIN(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VMIN_A) -#define VRR_VMIN_MASK REG_GENMASK(15, 0) +#define TRANS_VRR_VMIN(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VMIN_A) +#define VRR_VMIN_MASK REG_GENMASK(15, 0) #define _TRANS_VRR_VMAXSHIFT_A 0x60428 #define _TRANS_VRR_VMAXSHIFT_B 0x61428 #define _TRANS_VRR_VMAXSHIFT_C 0x62428 #define _TRANS_VRR_VMAXSHIFT_D 0x63428 -#define TRANS_VRR_VMAXSHIFT(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, \ - _TRANS_VRR_VMAXSHIFT_A) -#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16) -#define VRR_VMAXSHIFT_DEC REG_BIT(16) -#define VRR_VMAXSHIFT_INC_MASK REG_GENMASK(12, 0) +#define TRANS_VRR_VMAXSHIFT(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VMAXSHIFT_A) +#define VRR_VMAXSHIFT_DEC_MASK REG_GENMASK(29, 16) +#define VRR_VMAXSHIFT_DEC REG_BIT(16) +#define VRR_VMAXSHIFT_INC_MASK REG_GENMASK(12, 0) #define _TRANS_VRR_STATUS_A 0x6042c #define _TRANS_VRR_STATUS_B 0x6142c #define _TRANS_VRR_STATUS_C 0x6242c #define _TRANS_VRR_STATUS_D 0x6342c -#define TRANS_VRR_STATUS(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS_A) -#define VRR_STATUS_VMAX_REACHED REG_BIT(31) -#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30) -#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29) -#define VRR_STATUS_NO_FLIP_FRAME REG_BIT(28) -#define VRR_STATUS_VRR_EN_LIVE REG_BIT(27) -#define VRR_STATUS_FLIPS_SERVICED REG_BIT(26) -#define VRR_STATUS_VBLANK_MASK REG_GENMASK(22, 20) -#define STATUS_FSM_IDLE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0) -#define STATUS_FSM_WAIT_TILL_FDB REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1) -#define STATUS_FSM_WAIT_TILL_FS REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2) -#define STATUS_FSM_WAIT_TILL_FLIP REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3) -#define STATUS_FSM_PIPELINE_FILL REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4) -#define STATUS_FSM_ACTIVE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5) -#define STATUS_FSM_LEGACY_VBLANK REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6) +#define TRANS_VRR_STATUS(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_STATUS_A) +#define VRR_STATUS_VMAX_REACHED REG_BIT(31) +#define VRR_STATUS_NOFLIP_TILL_BNDR REG_BIT(30) +#define VRR_STATUS_FLIP_BEF_BNDR REG_BIT(29) +#define VRR_STATUS_NO_FLIP_FRAME REG_BIT(28) +#define VRR_STATUS_VRR_EN_LIVE REG_BIT(27) +#define VRR_STATUS_FLIPS_SERVICED REG_BIT(26) +#define VRR_STATUS_VBLANK_MASK REG_GENMASK(22, 20) +#define STATUS_FSM_IDLE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 0) +#define STATUS_FSM_WAIT_TILL_FDB REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 1) +#define STATUS_FSM_WAIT_TILL_FS REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 2) +#define STATUS_FSM_WAIT_TILL_FLIP REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 3) +#define STATUS_FSM_PIPELINE_FILL REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 4) +#define STATUS_FSM_ACTIVE REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 5) +#define STATUS_FSM_LEGACY_VBLANK REG_FIELD_PREP(VRR_STATUS_VBLANK_MASK, 6) #define _TRANS_VRR_VTOTAL_PREV_A 0x60480 #define _TRANS_VRR_VTOTAL_PREV_B 0x61480 #define _TRANS_VRR_VTOTAL_PREV_C 0x62480 #define _TRANS_VRR_VTOTAL_PREV_D 
0x63480 -#define TRANS_VRR_VTOTAL_PREV(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, \ - _TRANS_VRR_VTOTAL_PREV_A) -#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31) -#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30) -#define VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29) -#define VRR_VTOTAL_PREV_FRAME_MASK REG_GENMASK(19, 0) +#define TRANS_VRR_VTOTAL_PREV(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VTOTAL_PREV_A) +#define VRR_VTOTAL_FLIP_BEFR_BNDR REG_BIT(31) +#define VRR_VTOTAL_FLIP_AFTER_BNDR REG_BIT(30) +#define VRR_VTOTAL_FLIP_AFTER_DBLBUF REG_BIT(29) +#define VRR_VTOTAL_PREV_FRAME_MASK REG_GENMASK(19, 0) #define _TRANS_VRR_FLIPLINE_A 0x60438 #define _TRANS_VRR_FLIPLINE_B 0x61438 #define _TRANS_VRR_FLIPLINE_C 0x62438 #define _TRANS_VRR_FLIPLINE_D 0x63438 -#define TRANS_VRR_FLIPLINE(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, \ - _TRANS_VRR_FLIPLINE_A) -#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0) +#define TRANS_VRR_FLIPLINE(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_FLIPLINE_A) +#define VRR_FLIPLINE_MASK REG_GENMASK(19, 0) #define _TRANS_VRR_STATUS2_A 0x6043c #define _TRANS_VRR_STATUS2_B 0x6143c #define _TRANS_VRR_STATUS2_C 0x6243c #define _TRANS_VRR_STATUS2_D 0x6343c -#define TRANS_VRR_STATUS2(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_STATUS2_A) -#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0) +#define TRANS_VRR_STATUS2(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_STATUS2_A) +#define VRR_STATUS2_VERT_LN_CNT_MASK REG_GENMASK(19, 0) #define _TRANS_PUSH_A 0x60a70 #define _TRANS_PUSH_B 0x61a70 #define _TRANS_PUSH_C 0x62a70 #define _TRANS_PUSH_D 0x63a70 -#define TRANS_PUSH(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_PUSH_A) -#define TRANS_PUSH_EN REG_BIT(31) -#define TRANS_PUSH_SEND REG_BIT(30) +#define TRANS_PUSH(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_PUSH_A) +#define TRANS_PUSH_EN REG_BIT(31) +#define TRANS_PUSH_SEND REG_BIT(30) #define _TRANS_VRR_VSYNC_A 0x60078 -#define TRANS_VRR_VSYNC(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_VRR_VSYNC_A) -#define VRR_VSYNC_END_MASK REG_GENMASK(28, 16) -#define VRR_VSYNC_END(vsync_end) REG_FIELD_PREP(VRR_VSYNC_END_MASK, (vsync_end)) -#define VRR_VSYNC_START_MASK REG_GENMASK(12, 0) -#define VRR_VSYNC_START(vsync_start) REG_FIELD_PREP(VRR_VSYNC_START_MASK, (vsync_start)) +#define TRANS_VRR_VSYNC(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VSYNC_A) +#define VRR_VSYNC_END_MASK REG_GENMASK(28, 16) +#define VRR_VSYNC_END(vsync_end) REG_FIELD_PREP(VRR_VSYNC_END_MASK, (vsync_end)) +#define VRR_VSYNC_START_MASK REG_GENMASK(12, 0) +#define VRR_VSYNC_START(vsync_start) REG_FIELD_PREP(VRR_VSYNC_START_MASK, (vsync_start)) /* Common register for HDMI EMP and DP AS SDP */ #define _EMP_AS_SDP_TL_A 0x60204 -#define EMP_AS_SDP_DB_TL_MASK REG_GENMASK(12, 0) -#define EMP_AS_SDP_TL(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _EMP_AS_SDP_TL_A) -#define EMP_AS_SDP_DB_TL(db_transmit_line) REG_FIELD_PREP(EMP_AS_SDP_DB_TL_MASK, (db_transmit_line)) - -/*CMRR Registers*/ +#define EMP_AS_SDP_TL(display, trans) _MMIO_TRANS2((display), (trans), _EMP_AS_SDP_TL_A) +#define EMP_AS_SDP_DB_TL_MASK REG_GENMASK(12, 0) +#define EMP_AS_SDP_DB_TL(db_transmit_line) REG_FIELD_PREP(EMP_AS_SDP_DB_TL_MASK, (db_transmit_line)) #define _TRANS_CMRR_M_LO_A 0x604F0 -#define TRANS_CMRR_M_LO(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_M_LO_A) +#define TRANS_CMRR_M_LO(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_M_LO_A) #define _TRANS_CMRR_M_HI_A 0x604F4 
-#define TRANS_CMRR_M_HI(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_M_HI_A) +#define TRANS_CMRR_M_HI(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_M_HI_A) #define _TRANS_CMRR_N_LO_A 0x604F8 -#define TRANS_CMRR_N_LO(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_N_LO_A) +#define TRANS_CMRR_N_LO(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_N_LO_A) #define _TRANS_CMRR_N_HI_A 0x604FC -#define TRANS_CMRR_N_HI(dev_priv, trans) _MMIO_TRANS2(dev_priv, trans, _TRANS_CMRR_N_HI_A) - -#define VRR_CTL_CMRR_ENABLE REG_BIT(27) +#define TRANS_CMRR_N_HI(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_CMRR_N_HI_A) #endif /* __INTEL_VRR_REGS__ */ diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 2aa64482d44b..e20972ddfa09 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -9,9 +9,7 @@ #include <drm/drm_fourcc.h> #include "pxp/intel_pxp.h" - #include "i915_drv.h" -#include "intel_atomic_plane.h" #include "intel_bo.h" #include "intel_de.h" #include "intel_display_irq.h" @@ -21,6 +19,7 @@ #include "intel_fb.h" #include "intel_fbc.h" #include "intel_frontbuffer.h" +#include "intel_plane.h" #include "intel_psr.h" #include "intel_psr_regs.h" #include "skl_scaler.h" @@ -2328,8 +2327,8 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, max_scale = skl_plane_max_scale(display, fb); } - ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, - min_scale, max_scale, true); + ret = intel_plane_check_clipping(plane_state, crtc_state, + min_scale, max_scale, true); if (ret) return ret; @@ -2792,6 +2791,32 @@ static u8 tgl_plane_caps(struct intel_display *display, return caps; } +static void skl_disable_tiling(struct intel_plane *plane) +{ + struct intel_plane_state *state = to_intel_plane_state(plane->base.state); + struct intel_display *display = to_intel_display(plane); + const struct drm_framebuffer *fb = state->hw.fb; + u32 plane_ctl; + + plane_ctl = intel_de_read(display, PLANE_CTL(plane->pipe, plane->id)); + + if (intel_fb_uses_dpt(fb)) { + /* if DPT is enabled, keep tiling, but disable compression */ + plane_ctl &= ~PLANE_CTL_RENDER_DECOMPRESSION_ENABLE; + } else { + /* if DPT is not supported, disable tiling, and update stride */ + u32 stride = state->view.color_plane[0].scanout_stride / 64; + + plane_ctl &= ~PLANE_CTL_TILED_MASK; + intel_de_write_fw(display, PLANE_STRIDE(plane->pipe, plane->id), + PLANE_STRIDE_(stride)); + } + intel_de_write_fw(display, PLANE_CTL(plane->pipe, plane->id), plane_ctl); + + intel_de_write_fw(display, PLANE_SURF(plane->pipe, plane->id), + skl_plane_surf(state, 0)); +} + struct intel_plane * skl_universal_plane_create(struct intel_display *display, enum pipe pipe, enum plane_id plane_id) @@ -2838,6 +2863,7 @@ skl_universal_plane_create(struct intel_display *display, plane->max_height = skl_plane_max_height; plane->min_cdclk = skl_plane_min_cdclk; } + plane->disable_tiling = skl_disable_tiling; if (DISPLAY_VER(display) >= 13) plane->max_stride = adl_plane_max_stride; @@ -3010,7 +3036,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, return; } - intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); + intel_fb = intel_bo_alloc_framebuffer(); if (!intel_fb) { drm_dbg_kms(display->drm, "failed to alloc fb\n"); return; diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 
2c2371574d6f..222c069fdadb 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -6,14 +6,14 @@ #include <linux/debugfs.h> #include <drm/drm_blend.h> +#include <drm/drm_file.h> +#include <drm/drm_print.h> #include "soc/intel_dram.h" - -#include "i915_drv.h" #include "i915_reg.h" +#include "i915_utils.h" #include "i9xx_wm.h" #include "intel_atomic.h" -#include "intel_atomic_plane.h" #include "intel_bw.h" #include "intel_cdclk.h" #include "intel_crtc.h" @@ -26,17 +26,33 @@ #include "intel_display_types.h" #include "intel_fb.h" #include "intel_fixed.h" +#include "intel_flipq.h" #include "intel_pcode.h" +#include "intel_plane.h" #include "intel_wm.h" #include "skl_universal_plane_regs.h" #include "skl_watermark.h" #include "skl_watermark_regs.h" -/*It is expected that DSB can do posted writes to every register in - * the pipe and planes within 100us. For flip queue use case, the - * recommended DSB execution time is 100us + one SAGV block time. - */ -#define DSB_EXE_TIME 100 +struct intel_dbuf_state { + struct intel_global_state base; + + struct skl_ddb_entry ddb[I915_MAX_PIPES]; + unsigned int weight[I915_MAX_PIPES]; + u8 slices[I915_MAX_PIPES]; + u8 enabled_slices; + u8 active_pipes; + u8 mdclk_cdclk_ratio; + bool joined_mbus; +}; + +#define to_intel_dbuf_state(global_state) \ + container_of_const((global_state), struct intel_dbuf_state, base) + +#define intel_atomic_get_old_dbuf_state(state) \ + to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->dbuf.obj)) +#define intel_atomic_get_new_dbuf_state(state) \ + to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj)) static void skl_sagv_disable(struct intel_display *display); @@ -87,8 +103,6 @@ intel_has_sagv(struct intel_display *display) static u32 intel_sagv_block_time(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - if (DISPLAY_VER(display) >= 14) { u32 val; @@ -99,9 +113,9 @@ intel_sagv_block_time(struct intel_display *display) u32 val = 0; int ret; - ret = snb_pcode_read(&i915->uncore, - GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, - &val, NULL); + ret = intel_pcode_read(display->drm, + GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, + &val, NULL); if (ret) { drm_dbg_kms(display->drm, "Couldn't read SAGV block time!\n"); return 0; @@ -159,7 +173,6 @@ static void intel_sagv_init(struct intel_display *display) */ static void skl_sagv_enable(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); int ret; if (!intel_has_sagv(display)) @@ -169,8 +182,8 @@ static void skl_sagv_enable(struct intel_display *display) return; drm_dbg_kms(display->drm, "Enabling SAGV\n"); - ret = snb_pcode_write(&i915->uncore, GEN9_PCODE_SAGV_CONTROL, - GEN9_SAGV_ENABLE); + ret = intel_pcode_write(display->drm, GEN9_PCODE_SAGV_CONTROL, + GEN9_SAGV_ENABLE); /* We don't need to wait for SAGV when enabling */ @@ -192,7 +205,6 @@ static void skl_sagv_enable(struct intel_display *display) static void skl_sagv_disable(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); int ret; if (!intel_has_sagv(display)) @@ -203,10 +215,9 @@ static void skl_sagv_disable(struct intel_display *display) drm_dbg_kms(display->drm, "Disabling SAGV\n"); /* bspec says to keep retrying for at least 1 ms */ - ret = skl_pcode_request(&i915->uncore, GEN9_PCODE_SAGV_CONTROL, - GEN9_SAGV_DISABLE, - GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, - 1); + ret = 
intel_pcode_request(display->drm, GEN9_PCODE_SAGV_CONTROL, + GEN9_SAGV_DISABLE, + GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 1); /* * Some skl systems, pre-release machines in particular, * don't actually have SAGV. @@ -232,7 +243,7 @@ static void skl_sagv_pre_plane_update(struct intel_atomic_state *state) if (!new_bw_state) return; - if (!intel_can_enable_sagv(display, new_bw_state)) + if (!intel_bw_can_enable_sagv(display, new_bw_state)) skl_sagv_disable(display); } @@ -245,74 +256,10 @@ static void skl_sagv_post_plane_update(struct intel_atomic_state *state) if (!new_bw_state) return; - if (intel_can_enable_sagv(display, new_bw_state)) + if (intel_bw_can_enable_sagv(display, new_bw_state)) skl_sagv_enable(display); } -static void icl_sagv_pre_plane_update(struct intel_atomic_state *state) -{ - struct intel_display *display = to_intel_display(state); - const struct intel_bw_state *old_bw_state = - intel_atomic_get_old_bw_state(state); - const struct intel_bw_state *new_bw_state = - intel_atomic_get_new_bw_state(state); - u16 old_mask, new_mask; - - if (!new_bw_state) - return; - - old_mask = old_bw_state->qgv_points_mask; - new_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; - - if (old_mask == new_mask) - return; - - WARN_ON(!new_bw_state->base.changed); - - drm_dbg_kms(display->drm, "Restricting QGV points: 0x%x -> 0x%x\n", - old_mask, new_mask); - - /* - * Restrict required qgv points before updating the configuration. - * According to BSpec we can't mask and unmask qgv points at the same - * time. Also masking should be done before updating the configuration - * and unmasking afterwards. - */ - icl_pcode_restrict_qgv_points(display, new_mask); -} - -static void icl_sagv_post_plane_update(struct intel_atomic_state *state) -{ - struct intel_display *display = to_intel_display(state); - const struct intel_bw_state *old_bw_state = - intel_atomic_get_old_bw_state(state); - const struct intel_bw_state *new_bw_state = - intel_atomic_get_new_bw_state(state); - u16 old_mask, new_mask; - - if (!new_bw_state) - return; - - old_mask = old_bw_state->qgv_points_mask | new_bw_state->qgv_points_mask; - new_mask = new_bw_state->qgv_points_mask; - - if (old_mask == new_mask) - return; - - WARN_ON(!new_bw_state->base.changed); - - drm_dbg_kms(display->drm, "Relaxing QGV points: 0x%x -> 0x%x\n", - old_mask, new_mask); - - /* - * Allow required qgv points after updating the configuration. - * According to BSpec we can't mask and unmask qgv points at the same - * time. Also masking should be done before updating the configuration - * and unmasking afterwards. 
- */ - icl_pcode_restrict_qgv_points(display, new_mask); -} - void intel_sagv_pre_plane_update(struct intel_atomic_state *state) { struct intel_display *display = to_intel_display(state); @@ -446,16 +393,6 @@ bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state) return skl_crtc_can_enable_sagv(crtc_state); } -bool intel_can_enable_sagv(struct intel_display *display, - const struct intel_bw_state *bw_state) -{ - if (DISPLAY_VER(display) < 11 && - bw_state->active_pipes && !is_power_of_2(bw_state->active_pipes)) - return false; - - return bw_state->pipe_sagv_reject == 0; -} - static u16 skl_ddb_entry_init(struct skl_ddb_entry *entry, u16 start, u16 end) { @@ -2236,7 +2173,7 @@ cdclk_prefill_adjustment(const struct intel_crtc_state *crtc_state) } return min(1, DIV_ROUND_UP(crtc_state->pixel_rate, - 2 * cdclk_state->logical.cdclk)); + 2 * intel_cdclk_logical(cdclk_state))); } static int @@ -2680,6 +2617,97 @@ static char enast(bool enable) return enable ? '*' : ' '; } +static noinline_for_stack void +skl_print_plane_changes(struct intel_display *display, + struct intel_plane *plane, + const struct skl_plane_wm *old_wm, + const struct skl_plane_wm *new_wm) +{ + drm_dbg_kms(display->drm, + "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" + " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n", + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable), + enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable), + enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable), + enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable), + enast(old_wm->trans_wm.enable), + enast(old_wm->sagv.wm0.enable), + enast(old_wm->sagv.trans_wm.enable), + enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable), + enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable), + enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable), + enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable), + enast(new_wm->trans_wm.enable), + enast(new_wm->sagv.wm0.enable), + enast(new_wm->sagv.trans_wm.enable)); + + drm_dbg_kms(display->drm, + "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" + " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n", + plane->base.base.id, plane->base.name, + enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines, + enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines, + enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines, + enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines, + enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines, + enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines, + enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines, + enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines, + enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines, + enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, + enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines, + enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines, + enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines, + enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines, + enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines, + enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines, + enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines, + enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines, + enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines, + enast(new_wm->trans_wm.ignore_lines), 
new_wm->trans_wm.lines, + enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines, + enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines); + + drm_dbg_kms(display->drm, + "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].blocks, old_wm->wm[1].blocks, + old_wm->wm[2].blocks, old_wm->wm[3].blocks, + old_wm->wm[4].blocks, old_wm->wm[5].blocks, + old_wm->wm[6].blocks, old_wm->wm[7].blocks, + old_wm->trans_wm.blocks, + old_wm->sagv.wm0.blocks, + old_wm->sagv.trans_wm.blocks, + new_wm->wm[0].blocks, new_wm->wm[1].blocks, + new_wm->wm[2].blocks, new_wm->wm[3].blocks, + new_wm->wm[4].blocks, new_wm->wm[5].blocks, + new_wm->wm[6].blocks, new_wm->wm[7].blocks, + new_wm->trans_wm.blocks, + new_wm->sagv.wm0.blocks, + new_wm->sagv.trans_wm.blocks); + + drm_dbg_kms(display->drm, + "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" + " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", + plane->base.base.id, plane->base.name, + old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, + old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, + old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, + old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, + old_wm->trans_wm.min_ddb_alloc, + old_wm->sagv.wm0.min_ddb_alloc, + old_wm->sagv.trans_wm.min_ddb_alloc, + new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, + new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, + new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, + new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, + new_wm->trans_wm.min_ddb_alloc, + new_wm->sagv.wm0.min_ddb_alloc, + new_wm->sagv.trans_wm.min_ddb_alloc); +} + static void skl_print_wm_changes(struct intel_atomic_state *state) { @@ -2709,7 +2737,6 @@ skl_print_wm_changes(struct intel_atomic_state *state) if (skl_ddb_entry_equal(old, new)) continue; - drm_dbg_kms(display->drm, "[PLANE:%d:%s] ddb (%4d - %4d) -> (%4d - %4d), size %4d -> %4d\n", plane->base.base.id, plane->base.name, @@ -2727,89 +2754,7 @@ skl_print_wm_changes(struct intel_atomic_state *state) if (skl_plane_wm_equals(display, old_wm, new_wm)) continue; - drm_dbg_kms(display->drm, - "[PLANE:%d:%s] level %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm" - " -> %cwm0,%cwm1,%cwm2,%cwm3,%cwm4,%cwm5,%cwm6,%cwm7,%ctwm,%cswm,%cstwm\n", - plane->base.base.id, plane->base.name, - enast(old_wm->wm[0].enable), enast(old_wm->wm[1].enable), - enast(old_wm->wm[2].enable), enast(old_wm->wm[3].enable), - enast(old_wm->wm[4].enable), enast(old_wm->wm[5].enable), - enast(old_wm->wm[6].enable), enast(old_wm->wm[7].enable), - enast(old_wm->trans_wm.enable), - enast(old_wm->sagv.wm0.enable), - enast(old_wm->sagv.trans_wm.enable), - enast(new_wm->wm[0].enable), enast(new_wm->wm[1].enable), - enast(new_wm->wm[2].enable), enast(new_wm->wm[3].enable), - enast(new_wm->wm[4].enable), enast(new_wm->wm[5].enable), - enast(new_wm->wm[6].enable), enast(new_wm->wm[7].enable), - enast(new_wm->trans_wm.enable), - enast(new_wm->sagv.wm0.enable), - enast(new_wm->sagv.trans_wm.enable)); - - drm_dbg_kms(display->drm, - "[PLANE:%d:%s] lines %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d" - " -> %c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%3d,%c%4d\n", - plane->base.base.id, plane->base.name, - enast(old_wm->wm[0].ignore_lines), old_wm->wm[0].lines, - enast(old_wm->wm[1].ignore_lines), old_wm->wm[1].lines, - 
enast(old_wm->wm[2].ignore_lines), old_wm->wm[2].lines, - enast(old_wm->wm[3].ignore_lines), old_wm->wm[3].lines, - enast(old_wm->wm[4].ignore_lines), old_wm->wm[4].lines, - enast(old_wm->wm[5].ignore_lines), old_wm->wm[5].lines, - enast(old_wm->wm[6].ignore_lines), old_wm->wm[6].lines, - enast(old_wm->wm[7].ignore_lines), old_wm->wm[7].lines, - enast(old_wm->trans_wm.ignore_lines), old_wm->trans_wm.lines, - enast(old_wm->sagv.wm0.ignore_lines), old_wm->sagv.wm0.lines, - enast(old_wm->sagv.trans_wm.ignore_lines), old_wm->sagv.trans_wm.lines, - enast(new_wm->wm[0].ignore_lines), new_wm->wm[0].lines, - enast(new_wm->wm[1].ignore_lines), new_wm->wm[1].lines, - enast(new_wm->wm[2].ignore_lines), new_wm->wm[2].lines, - enast(new_wm->wm[3].ignore_lines), new_wm->wm[3].lines, - enast(new_wm->wm[4].ignore_lines), new_wm->wm[4].lines, - enast(new_wm->wm[5].ignore_lines), new_wm->wm[5].lines, - enast(new_wm->wm[6].ignore_lines), new_wm->wm[6].lines, - enast(new_wm->wm[7].ignore_lines), new_wm->wm[7].lines, - enast(new_wm->trans_wm.ignore_lines), new_wm->trans_wm.lines, - enast(new_wm->sagv.wm0.ignore_lines), new_wm->sagv.wm0.lines, - enast(new_wm->sagv.trans_wm.ignore_lines), new_wm->sagv.trans_wm.lines); - - drm_dbg_kms(display->drm, - "[PLANE:%d:%s] blocks %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", - plane->base.base.id, plane->base.name, - old_wm->wm[0].blocks, old_wm->wm[1].blocks, - old_wm->wm[2].blocks, old_wm->wm[3].blocks, - old_wm->wm[4].blocks, old_wm->wm[5].blocks, - old_wm->wm[6].blocks, old_wm->wm[7].blocks, - old_wm->trans_wm.blocks, - old_wm->sagv.wm0.blocks, - old_wm->sagv.trans_wm.blocks, - new_wm->wm[0].blocks, new_wm->wm[1].blocks, - new_wm->wm[2].blocks, new_wm->wm[3].blocks, - new_wm->wm[4].blocks, new_wm->wm[5].blocks, - new_wm->wm[6].blocks, new_wm->wm[7].blocks, - new_wm->trans_wm.blocks, - new_wm->sagv.wm0.blocks, - new_wm->sagv.trans_wm.blocks); - - drm_dbg_kms(display->drm, - "[PLANE:%d:%s] min_ddb %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d" - " -> %4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%4d,%5d\n", - plane->base.base.id, plane->base.name, - old_wm->wm[0].min_ddb_alloc, old_wm->wm[1].min_ddb_alloc, - old_wm->wm[2].min_ddb_alloc, old_wm->wm[3].min_ddb_alloc, - old_wm->wm[4].min_ddb_alloc, old_wm->wm[5].min_ddb_alloc, - old_wm->wm[6].min_ddb_alloc, old_wm->wm[7].min_ddb_alloc, - old_wm->trans_wm.min_ddb_alloc, - old_wm->sagv.wm0.min_ddb_alloc, - old_wm->sagv.trans_wm.min_ddb_alloc, - new_wm->wm[0].min_ddb_alloc, new_wm->wm[1].min_ddb_alloc, - new_wm->wm[2].min_ddb_alloc, new_wm->wm[3].min_ddb_alloc, - new_wm->wm[4].min_ddb_alloc, new_wm->wm[5].min_ddb_alloc, - new_wm->wm[6].min_ddb_alloc, new_wm->wm[7].min_ddb_alloc, - new_wm->trans_wm.min_ddb_alloc, - new_wm->sagv.wm0.min_ddb_alloc, - new_wm->sagv.trans_wm.min_ddb_alloc); + skl_print_plane_changes(display, plane, old_wm, new_wm); } } } @@ -2913,67 +2858,79 @@ static int skl_wm_add_affected_planes(struct intel_atomic_state *state, return 0; } -/* - * If Fixed Refresh Rate or For VRR case Vmin = Vmax = Flipline: - * Program DEEP PKG_C_LATENCY Pkg C with highest valid latency from - * watermark level1 and up and above. If watermark level 1 is - * invalid program it with all 1's. - * Program PKG_C_LATENCY Added Wake Time = DSB execution time - * If Variable Refresh Rate where Vmin != Vmax != Flipline: - * Program DEEP PKG_C_LATENCY Pkg C with all 1's. 
- * Program PKG_C_LATENCY Added Wake Time = 0 - */ +static int pkgc_max_linetime(struct intel_atomic_state *state) +{ + struct intel_display *display = to_intel_display(state); + const struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int i, max_linetime; + + /* + * Apparently the hardware uses WM_LINETIME internally for + * this stuff, compute everything based on that. + */ + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + display->pkgc.disable[crtc->pipe] = crtc_state->vrr.enable; + display->pkgc.linetime[crtc->pipe] = DIV_ROUND_UP(crtc_state->linetime, 8); + } + + max_linetime = 0; + for_each_intel_crtc(display->drm, crtc) { + if (display->pkgc.disable[crtc->pipe]) + return 0; + + max_linetime = max(display->pkgc.linetime[crtc->pipe], max_linetime); + } + + return max_linetime; +} + void intel_program_dpkgc_latency(struct intel_atomic_state *state) { struct intel_display *display = to_intel_display(state); - struct intel_crtc *crtc; - struct intel_crtc_state *new_crtc_state; - u32 latency = LNL_PKG_C_LATENCY_MASK; - u32 added_wake_time = 0; - u32 max_linetime = 0; - u32 clear, val; - bool fixed_refresh_rate = false; - int i; + int max_linetime, latency, added_wake_time = 0; if (DISPLAY_VER(display) < 20) return; - for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - if (!new_crtc_state->vrr.enable || - (new_crtc_state->vrr.vmin == new_crtc_state->vrr.vmax && - new_crtc_state->vrr.vmin == new_crtc_state->vrr.flipline)) - fixed_refresh_rate = true; + mutex_lock(&display->wm.wm_mutex); - max_linetime = max(new_crtc_state->linetime, max_linetime); - } + latency = skl_watermark_max_latency(display, 1); - if (fixed_refresh_rate) { - added_wake_time = DSB_EXE_TIME + - display->sagv.block_time_us; + /* FIXME runtime changes to enable_flipq are racy */ + if (display->params.enable_flipq) + added_wake_time = intel_flipq_exec_time_us(display); - latency = skl_watermark_max_latency(display, 1); + /* + * Wa_22020432604 + * "PKG_C_LATENCY Added Wake Time field is not working" + */ + if (latency && IS_DISPLAY_VER(display, 20, 30)) { + latency += added_wake_time; + added_wake_time = 0; + } - /* Wa_22020432604 */ - if ((DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30) && !latency) { - latency += added_wake_time; - added_wake_time = 0; - } + max_linetime = pkgc_max_linetime(state); - /* Wa_22020299601 */ - if ((latency && max_linetime) && - (DISPLAY_VER(display) == 20 || DISPLAY_VER(display) == 30)) { - latency = max_linetime * DIV_ROUND_UP(latency, max_linetime); - } else if (!latency) { - latency = LNL_PKG_C_LATENCY_MASK; - } + if (max_linetime == 0 || latency == 0) { + latency = REG_FIELD_GET(LNL_PKG_C_LATENCY_MASK, + LNL_PKG_C_LATENCY_MASK); + added_wake_time = 0; + } else { + /* + * Wa_22020299601 + * "Increase the latency programmed in PKG_C_LATENCY Pkg C Latency to be a + * multiple of the pipeline time from WM_LINETIME" + */ + latency = roundup(latency, max_linetime); } - clear = LNL_ADDED_WAKE_TIME_MASK | LNL_PKG_C_LATENCY_MASK; - val = REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency) | - REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time); + intel_de_write(display, LNL_PKG_C_LATENCY, + REG_FIELD_PREP(LNL_ADDED_WAKE_TIME_MASK, added_wake_time) | + REG_FIELD_PREP(LNL_PKG_C_LATENCY_MASK, latency)); - intel_de_rmw(display, LNL_PKG_C_LATENCY, clear, val); + mutex_unlock(&display->wm.wm_mutex); } static int @@ -3011,7 +2968,7 @@ skl_compute_wm(struct intel_atomic_state *state) * drm_atomic_check_only() gets upset if we pull more crtcs * 
into the state, so we have to calculate this based on the * individual intel_crtc_can_enable_sagv() rather than - * the overall intel_can_enable_sagv(). Otherwise the + * the overall intel_bw_can_enable_sagv(). Otherwise the * crtcs not included in the commit would not switch to the * SAGV watermarks when we are about to enable SAGV, and that * would lead to underruns. This does mean extra power draw @@ -3279,7 +3236,6 @@ static void mtl_read_wm_latency(struct intel_display *display, u16 wm[]) static void skl_read_wm_latency(struct intel_display *display, u16 wm[]) { - struct drm_i915_private *i915 = to_i915(display->drm); int num_levels = display->wm.num_levels; int read_latency = DISPLAY_VER(display) >= 12 ? 3 : 2; int mult = display->platform.dg2 ? 2 : 1; @@ -3288,7 +3244,7 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[]) /* read the first set of memory latencies[0:3] */ val = 0; /* data0 to be programmed to 0 for first set */ - ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); + ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); if (ret) { drm_err(display->drm, "SKL Mailbox read error = %d\n", ret); return; @@ -3301,7 +3257,7 @@ static void skl_read_wm_latency(struct intel_display *display, u16 wm[]) /* read the second set of memory latencies[4:7] */ val = 1; /* data0 to be programmed to 1 for second set */ - ret = snb_pcode_read(&i915->uncore, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); + ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL); if (ret) { drm_err(display->drm, "SKL Mailbox read error = %d\n", ret); return; @@ -3693,6 +3649,38 @@ void intel_dbuf_post_plane_update(struct intel_atomic_state *state) gen9_dbuf_slices_update(display, new_slices); } +int intel_dbuf_num_enabled_slices(const struct intel_dbuf_state *dbuf_state) +{ + return hweight8(dbuf_state->enabled_slices); +} + +int intel_dbuf_num_active_pipes(const struct intel_dbuf_state *dbuf_state) +{ + return hweight8(dbuf_state->active_pipes); +} + +bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state) +{ + struct intel_display *display = to_intel_display(state); + const struct intel_dbuf_state *new_dbuf_state, *old_dbuf_state; + + new_dbuf_state = intel_atomic_get_new_dbuf_state(state); + old_dbuf_state = intel_atomic_get_old_dbuf_state(state); + + if (new_dbuf_state && + new_dbuf_state->active_pipes != old_dbuf_state->active_pipes) + return true; + + if (DISPLAY_VER(display) < 30) { + if (new_dbuf_state && + new_dbuf_state->enabled_slices != + old_dbuf_state->enabled_slices) + return true; + } + + return false; +} + static void skl_mbus_sanitize(struct intel_display *display) { struct intel_dbuf_state *dbuf_state = diff --git a/drivers/gpu/drm/i915/display/skl_watermark.h b/drivers/gpu/drm/i915/display/skl_watermark.h index 95b0b599d5c3..62790816f030 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.h +++ b/drivers/gpu/drm/i915/display/skl_watermark.h @@ -8,17 +8,15 @@ #include <linux/types.h> -#include "intel_display_limits.h" -#include "intel_global_state.h" -#include "intel_wm_types.h" - +enum plane_id; struct intel_atomic_state; -struct intel_bw_state; struct intel_crtc; struct intel_crtc_state; +struct intel_dbuf_state; struct intel_display; struct intel_plane; struct intel_plane_state; +struct skl_ddb_entry; struct skl_pipe_wm; struct skl_wm_level; @@ -27,8 +25,6 @@ u8 intel_enabled_dbuf_slices_mask(struct intel_display *display); void intel_sagv_pre_plane_update(struct 
intel_atomic_state *state); void intel_sagv_post_plane_update(struct intel_atomic_state *state); bool intel_crtc_can_enable_sagv(const struct intel_crtc_state *crtc_state); -bool intel_can_enable_sagv(struct intel_display *display, - const struct intel_bw_state *bw_state); bool intel_has_sagv(struct intel_display *display); u32 skl_ddb_dbuf_slice_mask(struct intel_display *display, @@ -63,28 +59,11 @@ unsigned int skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_st struct intel_plane *plane, int width, int height, int cpp); -struct intel_dbuf_state { - struct intel_global_state base; - - struct skl_ddb_entry ddb[I915_MAX_PIPES]; - unsigned int weight[I915_MAX_PIPES]; - u8 slices[I915_MAX_PIPES]; - u8 enabled_slices; - u8 active_pipes; - u8 mdclk_cdclk_ratio; - bool joined_mbus; -}; - struct intel_dbuf_state * intel_atomic_get_dbuf_state(struct intel_atomic_state *state); -#define to_intel_dbuf_state(global_state) \ - container_of_const((global_state), struct intel_dbuf_state, base) - -#define intel_atomic_get_old_dbuf_state(state) \ - to_intel_dbuf_state(intel_atomic_get_old_global_obj_state(state, &to_intel_display(state)->dbuf.obj)) -#define intel_atomic_get_new_dbuf_state(state) \ - to_intel_dbuf_state(intel_atomic_get_new_global_obj_state(state, &to_intel_display(state)->dbuf.obj)) +int intel_dbuf_num_enabled_slices(const struct intel_dbuf_state *dbuf_state); +int intel_dbuf_num_active_pipes(const struct intel_dbuf_state *dbuf_state); int intel_dbuf_init(struct intel_display *display); int intel_dbuf_state_set_mdclk_cdclk_ratio(struct intel_atomic_state *state, @@ -98,5 +77,7 @@ void intel_dbuf_mbus_pre_ddb_update(struct intel_atomic_state *state); void intel_dbuf_mbus_post_ddb_update(struct intel_atomic_state *state); void intel_program_dpkgc_latency(struct intel_atomic_state *state); +bool intel_dbuf_pmdemand_needs_update(struct intel_atomic_state *state); + #endif /* __SKL_WATERMARK_H__ */ diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 3433deb635ef..6d9f3312de7e 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -1591,8 +1591,8 @@ static void vlv_dsi_add_properties(struct intel_connector *connector) static void vlv_dphy_param_init(struct intel_dsi *intel_dsi) { + struct intel_display *display = to_intel_display(&intel_dsi->base); struct intel_connector *connector = intel_dsi->attached_connector; - struct intel_display *display = to_intel_display(connector); struct mipi_config *mipi_config = connector->panel.vbt.dsi.config; u32 tlpx_ns, extra_byte_count, tlpx_ui; u32 ui_num, ui_den; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 05e440643aa2..f4f1c979d1b9 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -105,7 +105,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * if (!obj->base.filp) return -ENODEV; - ret = call_mmap(obj->base.filp, vma); + ret = vfs_mmap(obj->base.filp, vma); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index f6d37dff320d..75f5b0e871ef 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -5,7 +5,6 @@ #include <linux/anon_inodes.h> #include <linux/mman.h> -#include <linux/pfn_t.h> #include <linux/sizes.h> #include <drm/drm_cache.h> diff --git 
a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index c34f41605b46..565f8fa330db 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -16,7 +16,9 @@ #include "i915_gem_ww.h" #include "i915_vma_types.h" +struct drm_scanout_buffer; enum intel_region_id; +struct intel_framebuffer; #define obj_to_i915(obj__) to_i915((obj__)->base.dev) @@ -691,6 +693,10 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj) int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj); int i915_gem_object_truncate(struct drm_i915_gem_object *obj); +struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void); +int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb); +void i915_gem_object_panic_finish(struct intel_framebuffer *fb); + /** * i915_gem_object_pin_map - return a contiguous mapping of the entire object * @obj: the object to map into kernel address space diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 7f83f8bdc8fb..c16a57160b26 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -4,8 +4,11 @@ */ #include <drm/drm_cache.h> +#include <drm/drm_panic.h> #include <linux/vmalloc.h> +#include "display/intel_fb.h" +#include "display/intel_display_types.h" #include "gt/intel_gt.h" #include "gt/intel_tlb.h" @@ -354,6 +357,145 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj, return vaddr ?: ERR_PTR(-ENOMEM); } +struct i915_panic_data { + struct page **pages; + int page; + void *vaddr; +}; + +struct i915_framebuffer { + struct intel_framebuffer base; + struct i915_panic_data panic; +}; + +static inline struct i915_panic_data *to_i915_panic_data(struct intel_framebuffer *fb) +{ + return &container_of_const(fb, struct i915_framebuffer, base)->panic; +} + +static void i915_panic_kunmap(struct i915_panic_data *panic) +{ + if (panic->vaddr) { + drm_clflush_virt_range(panic->vaddr, PAGE_SIZE); + kunmap_local(panic->vaddr); + panic->vaddr = NULL; + } +} + +static struct page **i915_gem_object_panic_pages(struct drm_i915_gem_object *obj) +{ + unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i; + struct page *page; + struct page **pages; + struct sgt_iter iter; + + /* For a 3840x2160 32-bit framebuffer, this should require ~64K */ + pages = kmalloc_array(n_pages, sizeof(*pages), GFP_ATOMIC); + if (!pages) + return NULL; + + i = 0; + for_each_sgt_page(page, iter, obj->mm.pages) + pages[i++] = page; + return pages; +} + +static void i915_gem_object_panic_map_set_pixel(struct drm_scanout_buffer *sb, unsigned int x, + unsigned int y, u32 color) +{ + struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private; + unsigned int offset = fb->panic_tiling(sb->width, x, y); + + iosys_map_wr(&sb->map[0], offset, u32, color); +} + +/* + * The scanout buffer pages are not mapped, so for each pixel, + * use kmap_local_page_try_from_panic() to map the page, and write the pixel. + * Try to keep the map from the previous pixel, to avoid too much map/unmap. 
+ */ +static void i915_gem_object_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x, + unsigned int y, u32 color) +{ + unsigned int new_page; + unsigned int offset; + struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private; + struct i915_panic_data *panic = to_i915_panic_data(fb); + + if (fb->panic_tiling) + offset = fb->panic_tiling(sb->width, x, y); + else + offset = y * sb->pitch[0] + x * sb->format->cpp[0]; + + new_page = offset >> PAGE_SHIFT; + offset = offset % PAGE_SIZE; + if (new_page != panic->page) { + i915_panic_kunmap(panic); + panic->page = new_page; + panic->vaddr = + kmap_local_page_try_from_panic(panic->pages[panic->page]); + } + if (panic->vaddr) { + u32 *pix = panic->vaddr + offset; + *pix = color; + } +} + +struct intel_framebuffer *i915_gem_object_alloc_framebuffer(void) +{ + struct i915_framebuffer *i915_fb; + + i915_fb = kzalloc(sizeof(*i915_fb), GFP_KERNEL); + if (i915_fb) + return &i915_fb->base; + return NULL; +} + +/* + * Setup the gem framebuffer for drm_panic access. + * Use current vaddr if it exists, or setup a list of pages. + * pfn is not supported yet. + */ +int i915_gem_object_panic_setup(struct drm_scanout_buffer *sb) +{ + enum i915_map_type has_type; + struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private; + struct i915_panic_data *panic = to_i915_panic_data(fb); + struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base)); + void *ptr; + + ptr = page_unpack_bits(obj->mm.mapping, &has_type); + if (ptr) { + if (i915_gem_object_has_iomem(obj)) + iosys_map_set_vaddr_iomem(&sb->map[0], (void __iomem *)ptr); + else + iosys_map_set_vaddr(&sb->map[0], ptr); + + if (fb->panic_tiling) + sb->set_pixel = i915_gem_object_panic_map_set_pixel; + return 0; + } + if (i915_gem_object_has_struct_page(obj)) { + panic->pages = i915_gem_object_panic_pages(obj); + if (!panic->pages) + return -ENOMEM; + panic->page = -1; + sb->set_pixel = i915_gem_object_panic_page_set_pixel; + return 0; + } + return -EOPNOTSUPP; +} + +void i915_gem_object_panic_finish(struct intel_framebuffer *fb) +{ + struct i915_panic_data *panic = to_i915_panic_data(fb); + + i915_panic_kunmap(panic); + panic->page = -1; + kfree(panic->pages); + panic->pages = NULL; +} + /* get, pin, and map the pages of the object into kernel space */ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, enum i915_map_type type) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 19a3eb82dc6a..e3d188455f67 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -6,6 +6,7 @@ #include <linux/pagevec.h> #include <linux/shmem_fs.h> #include <linux/swap.h> +#include <linux/uio.h> #include <drm/drm_cache.h> @@ -302,7 +303,6 @@ void __shmem_writeback(size_t size, struct address_space *mapping) .nr_to_write = SWAP_CLUSTER_MAX, .range_start = 0, .range_end = LLONG_MAX, - .for_reclaim = 1, }; struct folio *folio = NULL; int error = 0; @@ -317,7 +317,7 @@ void __shmem_writeback(size_t size, struct address_space *mapping) if (folio_mapped(folio)) folio_redirty_for_writepage(&wbc, folio); else - error = shmem_writeout(folio, &wbc); + error = shmem_writeout(folio, NULL, NULL); } } @@ -400,12 +400,12 @@ static int shmem_pwrite(struct drm_i915_gem_object *obj, const struct drm_i915_gem_pwrite *arg) { - struct address_space *mapping = obj->base.filp->f_mapping; - const struct address_space_operations *aops = mapping->a_ops; char __user *user_data = 
u64_to_user_ptr(arg->data_ptr); - u64 remain; - loff_t pos; - unsigned int pg; + struct file *file = obj->base.filp; + struct kiocb kiocb; + struct iov_iter iter; + ssize_t written; + u64 size = arg->size; /* Caller already validated user args */ GEM_BUG_ON(!access_ok(user_data, arg->size)); @@ -428,63 +428,24 @@ shmem_pwrite(struct drm_i915_gem_object *obj, if (obj->mm.madv != I915_MADV_WILLNEED) return -EFAULT; - /* - * Before the pages are instantiated the object is treated as being - * in the CPU domain. The pages will be clflushed as required before - * use, and we can freely write into the pages directly. If userspace - * races pwrite with any other operation; corruption will ensue - - * that is userspace's prerogative! - */ - - remain = arg->size; - pos = arg->offset; - pg = offset_in_page(pos); - - do { - unsigned int len, unwritten; - struct folio *folio; - void *data, *vaddr; - int err; - char __maybe_unused c; - - len = PAGE_SIZE - pg; - if (len > remain) - len = remain; + if (size > MAX_RW_COUNT) + return -EFBIG; - /* Prefault the user page to reduce potential recursion */ - err = __get_user(c, user_data); - if (err) - return err; + if (!file->f_op->write_iter) + return -EINVAL; - err = __get_user(c, user_data + len - 1); - if (err) - return err; + init_sync_kiocb(&kiocb, file); + kiocb.ki_pos = arg->offset; + iov_iter_ubuf(&iter, ITER_SOURCE, (void __user *)user_data, size); - err = aops->write_begin(obj->base.filp, mapping, pos, len, - &folio, &data); - if (err < 0) - return err; + written = file->f_op->write_iter(&kiocb, &iter); + BUG_ON(written == -EIOCBQUEUED); - vaddr = kmap_local_folio(folio, offset_in_folio(folio, pos)); - pagefault_disable(); - unwritten = __copy_from_user_inatomic(vaddr, user_data, len); - pagefault_enable(); - kunmap_local(vaddr); + if (written < 0) + return written; - err = aops->write_end(obj->base.filp, mapping, pos, len, - len - unwritten, folio, data); - if (err < 0) - return err; - - /* We don't handle -EFAULT, leave it to the caller to check */ - if (unwritten) - return -ENODEV; - - remain -= len; - user_data += len; - pos += len; - pg = 0; - } while (remain); + if (written != size) + return -EIO; return 0; } @@ -637,9 +598,8 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, { struct drm_i915_gem_object *obj; struct file *file; - const struct address_space_operations *aops; - loff_t pos; - int err; + loff_t pos = 0; + ssize_t err; GEM_WARN_ON(IS_DGFX(i915)); obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE)); @@ -649,29 +609,15 @@ i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); file = obj->base.filp; - aops = file->f_mapping->a_ops; - pos = 0; - do { - unsigned int len = min_t(typeof(size), size, PAGE_SIZE); - struct folio *folio; - void *fsdata; + err = kernel_write(file, data, size, &pos); - err = aops->write_begin(file, file->f_mapping, pos, len, - &folio, &fsdata); - if (err < 0) - goto fail; + if (err < 0) + goto fail; - memcpy_to_folio(folio, offset_in_folio(folio, pos), data, len); - - err = aops->write_end(file, file->f_mapping, pos, len, len, - folio, fsdata); - if (err < 0) - goto fail; - - size -= len; - data += len; - pos += len; - } while (size); + if (err != size) { + err = -EIO; + goto fail; + } return obj; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index 7127e90c1a8f..991666fd9f85 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ 
b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -106,11 +106,6 @@ static void fence_set_priority(struct dma_fence *fence, rcu_read_unlock(); } -static inline bool __dma_fence_is_chain(const struct dma_fence *fence) -{ - return fence->ops == &dma_fence_chain_ops; -} - void i915_gem_fence_wait_priority(struct dma_fence *fence, const struct i915_sched_attr *attr) { @@ -126,7 +121,7 @@ void i915_gem_fence_wait_priority(struct dma_fence *fence, for (i = 0; i < array->num_fences; i++) fence_set_priority(array->fences[i], attr); - } else if (__dma_fence_is_chain(fence)) { + } else if (dma_fence_is_chain(fence)) { struct dma_fence *iter; /* The chain is ordered; if we boost the last, we boost all */ diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c index 65d84a93c525..a09e2eb47175 100644 --- a/drivers/gpu/drm/i915/gem/i915_gemfs.c +++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c @@ -5,16 +5,23 @@ #include <linux/fs.h> #include <linux/mount.h> +#include <linux/fs_context.h> #include "i915_drv.h" #include "i915_gemfs.h" #include "i915_utils.h" +static int add_param(struct fs_context *fc, const char *key, const char *val) +{ + return vfs_parse_fs_string(fc, key, val, strlen(val)); +} + void i915_gemfs_init(struct drm_i915_private *i915) { - char huge_opt[] = "huge=within_size"; /* r/w */ struct file_system_type *type; + struct fs_context *fc; struct vfsmount *gemfs; + int ret; /* * By creating our own shmemfs mountpoint, we can pass in @@ -38,8 +45,16 @@ void i915_gemfs_init(struct drm_i915_private *i915) if (!type) goto err; - gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt); - if (IS_ERR(gemfs)) + fc = fs_context_for_mount(type, SB_KERNMOUNT); + if (IS_ERR(fc)) + goto err; + ret = add_param(fc, "source", "tmpfs"); + if (!ret) + ret = add_param(fc, "huge", "within_size"); + if (!ret) + gemfs = fc_mount_longterm(fc); + put_fs_context(fc); + if (ret) goto err; i915->mm.gemfs = gemfs; diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 325da0414d94..f6a98cf1e5a5 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -79,6 +79,29 @@ struct lock_class_key; #define ENGINE_WRITE(...) __ENGINE_WRITE_OP(write, __VA_ARGS__) #define ENGINE_WRITE_FW(...) 
__ENGINE_WRITE_OP(write_fw, __VA_ARGS__) +#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id)) +#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id) + +#define __ENGINE_INSTANCES_MASK(mask, first, count) ({ \ + unsigned int first__ = (first); \ + unsigned int count__ = (count); \ + ((mask) & GENMASK(first__ + count__ - 1, first__)) >> first__; \ +}) + +#define ENGINE_INSTANCES_MASK(gt, first, count) \ + __ENGINE_INSTANCES_MASK((gt)->info.engine_mask, first, count) + +#define RCS_MASK(gt) \ + ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS) +#define BCS_MASK(gt) \ + ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS) +#define VDBOX_MASK(gt) \ + ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS) +#define VEBOX_MASK(gt) \ + ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS) +#define CCS_MASK(gt) \ + ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS) + #define GEN6_RING_FAULT_REG_READ(engine__) \ intel_uncore_read((engine__)->uncore, RING_FAULT_REG(engine__)) @@ -355,4 +378,12 @@ u64 intel_clamp_preempt_timeout_ms(struct intel_engine_cs *engine, u64 value); u64 intel_clamp_stop_timeout_ms(struct intel_engine_cs *engine, u64 value); u64 intel_clamp_timeslice_duration_ms(struct intel_engine_cs *engine, u64 value); +#define rb_to_uabi_engine(rb) \ + rb_entry_safe(rb, struct intel_engine_cs, uabi_node) + +#define for_each_uabi_engine(engine__, i915__) \ + for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\ + (engine__); \ + (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c index 1e925c75fb08..c43febc862dc 100644 --- a/drivers/gpu/drm/i915/gt/intel_gsc.c +++ b/drivers/gpu/drm/i915/gt/intel_gsc.c @@ -284,7 +284,7 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) if (gt->gsc.intf[intf_id].irq < 0) return; - ret = generic_handle_irq(gt->gsc.intf[intf_id].irq); + ret = generic_handle_irq_safe(gt->gsc.intf[intf_id].irq); if (ret) gt_err_ratelimited(gt, "error handling GSC irq: %d\n", ret); } diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index a876a34455f1..2a6d79abf25b 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -610,7 +610,6 @@ static int ring_context_alloc(struct intel_context *ce) /* One ringbuffer to rule them all */ GEM_BUG_ON(!engine->legacy.ring); ce->ring = engine->legacy.ring; - ce->timeline = intel_timeline_get(engine->legacy.timeline); GEM_BUG_ON(ce->state); if (engine->context_size) { @@ -623,6 +622,8 @@ static int ring_context_alloc(struct intel_context *ce) ce->state = vma; } + ce->timeline = intel_timeline_get(engine->legacy.timeline); + return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index b37e400f74e5..5a95f06900b5 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -634,6 +634,8 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine, static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { + struct drm_i915_private *i915 = engine->i915; + /* Wa_1406697149 (WaDisableBankHangMode:icl) */ wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL); @@ -669,6 +671,15 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, /* Wa_1406306137:icl,ehl */ wa_mcr_masked_en(wal, 
GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU); + + if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) { + /* + * Disable Repacking for Compression (masked R/W access) + * before rendering compressed surfaces for display. + */ + wa_masked_en(wal, CACHE_MODE_0_GEN7, + DISABLE_REPACKING_FOR_COMPRESSION); + } } /* @@ -2306,15 +2317,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN8_RC_SEMA_IDLE_MSG_DISABLE); } - if (IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) { - /* - * "Disable Repacking for Compression (masked R/W access) - * before rendering compressed surfaces for display." - */ - wa_masked_en(wal, CACHE_MODE_0_GEN7, - DISABLE_REPACKING_FOR_COMPRESSION); - } - if (GRAPHICS_VER(i915) == 11) { /* This is not a Wa. Enable for better image quality */ wa_masked_en(wal, diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 9df80c325fc1..f360f020d8f1 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -313,8 +313,13 @@ static u32 guc_ctl_wa_flags(struct intel_guc *guc) * * The same WA bit is used for both and 22011391025 is applicable to * all DG2. + * + * Platforms post DG2 prevent this issue in hardware by stalling + * submissions. With this flag GuC will schedule so as to avoid such + * stalls. */ - if (IS_DG2(gt->i915)) + if (IS_DG2(gt->i915) || + (CCS_MASK(gt) && GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 70))) flags |= GUC_WA_DUAL_QUEUE; /* Wa_22011802037: graphics version 11/12 */ diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index e8a04e476c57..09a64f224c49 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -220,8 +220,7 @@ static int guc_action_control_log(struct intel_guc *guc, bool enable, */ static int subbuf_start_callback(struct rchan_buf *buf, void *subbuf, - void *prev_subbuf, - size_t prev_padding) + void *prev_subbuf) { /* * Use no-overwrite mode by default, where relay will stop accepting diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5e4c49f0d5d4..4e4e89746aa6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -66,8 +66,6 @@ struct intel_display; struct intel_pxp; struct vlv_s0ix_state; -#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0) /* Data Stolen Memory (DSM) aka "i915 stolen memory" */ struct i915_dsm { /* @@ -354,14 +352,6 @@ static inline struct intel_gt *to_gt(const struct drm_i915_private *i915) return i915->gt[0]; } -#define rb_to_uabi_engine(rb) \ - rb_entry_safe(rb, struct intel_engine_cs, uabi_node) - -#define for_each_uabi_engine(engine__, i915__) \ - for ((engine__) = rb_to_uabi_engine(rb_first(&(i915__)->uabi_engines));\ - (engine__); \ - (engine__) = rb_to_uabi_engine(rb_next(&(engine__)->uabi_node))) - #define INTEL_INFO(i915) ((i915)->__info) #define RUNTIME_INFO(i915) (&(i915)->__runtime) #define DRIVER_CAPS(i915) (&(i915)->caps) @@ -570,29 +560,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_GEN9_LP(i915) (IS_BROXTON(i915) || IS_GEMINILAKE(i915)) #define IS_GEN9_BC(i915) (GRAPHICS_VER(i915) == 9 && !IS_GEN9_LP(i915)) -#define __HAS_ENGINE(engine_mask, id) ((engine_mask) & BIT(id)) -#define HAS_ENGINE(gt, id) __HAS_ENGINE((gt)->info.engine_mask, id) - -#define __ENGINE_INSTANCES_MASK(mask, first, count) ({ \ - unsigned int first__ = (first); \ - unsigned int count__ = (count); \ - ((mask) & GENMASK(first__ + count__ - 1, first__)) >> 
first__; \ -}) - -#define ENGINE_INSTANCES_MASK(gt, first, count) \ - __ENGINE_INSTANCES_MASK((gt)->info.engine_mask, first, count) - -#define RCS_MASK(gt) \ - ENGINE_INSTANCES_MASK(gt, RCS0, I915_MAX_RCS) -#define BCS_MASK(gt) \ - ENGINE_INSTANCES_MASK(gt, BCS0, I915_MAX_BCS) -#define VDBOX_MASK(gt) \ - ENGINE_INSTANCES_MASK(gt, VCS0, I915_MAX_VCS) -#define VEBOX_MASK(gt) \ - ENGINE_INSTANCES_MASK(gt, VECS0, I915_MAX_VECS) -#define CCS_MASK(gt) \ - ENGINE_INSTANCES_MASK(gt, CCS0, I915_MAX_CCS) - #define HAS_MEDIA_RATIO_MODE(i915) (INTEL_INFO(i915)->has_media_ratio_mode) /* diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 82e9d289398c..20b3cb29cfff 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -134,4 +134,6 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file); #define I915_GEM_IDLE_TIMEOUT (HZ / 5) +#define GEM_QUIRK_PIN_SWIZZLED_PAGES BIT(0) + #endif /* __I915_GEM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index f434b6825fc2..0e4b832dff84 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -2506,8 +2506,8 @@ static const struct bin_attribute error_state_attr = { .attr.name = "error", .attr.mode = S_IRUSR | S_IWUSR, .size = 0, - .read_new = error_state_read, - .write_new = error_state_write, + .read = error_state_read, + .write = error_state_write, }; void i915_gpu_error_sysfs_setup(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 21006c7f615c..b2e311f4791a 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -663,7 +663,6 @@ static const struct intel_device_info dg1_info = { DGFX_FEATURES, .__runtime.graphics.ip.rel = 10, PLATFORM(INTEL_DG1), - .require_force_probe = 1, .platform_engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index e5a188ce3185..5bc696bfbb0f 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -108,11 +108,11 @@ static unsigned int config_bit(const u64 config) return other_bit(config); } -static u32 config_mask(const u64 config) +static __always_inline u32 config_mask(const u64 config) { unsigned int bit = config_bit(config); - if (__builtin_constant_p(config)) + if (__builtin_constant_p(bit)) BUILD_BUG_ON(bit > BITS_PER_TYPE(typeof_member(struct i915_pmu, enable)) - 1); @@ -121,7 +121,7 @@ static u32 config_mask(const u64 config) BITS_PER_TYPE(typeof_member(struct i915_pmu, enable)) - 1); - return BIT(config_bit(config)); + return BIT(bit); } static bool is_engine_event(struct perf_event *event) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 52a902532e6f..03b895897f60 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -385,7 +385,6 @@ #define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120) #define VLV_PCBR_ADDR_SHIFT 12 -#define DISPLAY_PLANE_FLIP_PENDING(plane) (1 << (11 - (plane))) /* A and B only */ #define EIR _MMIO(0x20b0) #define EMR _MMIO(0x20b4) #define ESR _MMIO(0x20b8) @@ -763,8 +762,7 @@ */ #define GEN9_CLKGATE_DIS_0 _MMIO(0x46530) #define DARBF_GATING_DIS REG_BIT(27) -#define MTL_PIPEDMC_GATING_DIS_A REG_BIT(15) -#define MTL_PIPEDMC_GATING_DIS_B REG_BIT(14) +#define MTL_PIPEDMC_GATING_DIS(pipe) REG_BIT(15 - (pipe)) #define PWM2_GATING_DIS REG_BIT(14) #define 
PWM1_GATING_DIS REG_BIT(13) @@ -1205,16 +1203,6 @@ */ #define GEN7_SO_WRITE_OFFSET(n) _MMIO(0x5280 + (n) * 4) -/* SKL Fuse Status */ -enum skl_power_gate { - SKL_PG0, - SKL_PG1, - SKL_PG2, - ICL_PG3, - ICL_PG4, -}; - - #define GEN9_TIMESTAMP_OVERRIDE _MMIO(0x44074) #define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT 0 #define GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK 0x3ff diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index f936e8f1f129..622c66666935 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -140,8 +140,8 @@ i915_l3_write(struct file *filp, struct kobject *kobj, static const struct bin_attribute dpf_attrs = { .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)}, .size = GEN7_L3LOG_SIZE, - .read_new = i915_l3_read, - .write_new = i915_l3_write, + .read = i915_l3_read, + .write = i915_l3_write, .mmap = NULL, .private = (void *)0 }; @@ -149,8 +149,8 @@ static const struct bin_attribute dpf_attrs = { static const struct bin_attribute dpf_attrs_1 = { .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)}, .size = GEN7_L3LOG_SIZE, - .read_new = i915_l3_read, - .write_new = i915_l3_write, + .read = i915_l3_read, + .write = i915_l3_write, .mmap = NULL, .private = (void *)1 }; diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 632e316f8b05..25e97031d76e 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -1607,6 +1607,26 @@ err_rpm: return err; } +int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) +{ + struct i915_gem_ww_ctx ww; + int err; + + i915_gem_ww_ctx_init(&ww, true); +retry: + err = i915_gem_object_lock(vma->obj, &ww); + if (!err) + err = i915_vma_pin_ww(vma, &ww, size, alignment, flags); + if (err == -EDEADLK) { + err = i915_gem_ww_ctx_backoff(&ww); + if (!err) + goto retry; + } + i915_gem_ww_ctx_fini(&ww); + + return err; +} + static void flush_idle_contexts(struct intel_gt *gt) { struct intel_engine_cs *engine; diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 6a6be8048aa8..0f9eee6d18d2 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -289,26 +289,8 @@ int __must_check i915_vma_pin_ww(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, u64 size, u64 alignment, u64 flags); -static inline int __must_check -i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) -{ - struct i915_gem_ww_ctx ww; - int err; - - i915_gem_ww_ctx_init(&ww, true); -retry: - err = i915_gem_object_lock(vma->obj, &ww); - if (!err) - err = i915_vma_pin_ww(vma, &ww, size, alignment, flags); - if (err == -EDEADLK) { - err = i915_gem_ww_ctx_backoff(&ww); - if (!err) - goto retry; - } - i915_gem_ww_ctx_fini(&ww); - - return err; -} +int __must_check +i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags); int i915_ggtt_pin(struct i915_vma *vma, struct i915_gem_ww_ctx *ww, u32 align, unsigned int flags); @@ -353,6 +335,11 @@ static inline bool i915_node_color_differs(const struct drm_mm_node *node, return drm_mm_node_allocated(node) && node->color != color; } +static inline void __iomem *i915_vma_get_iomap(struct i915_vma *vma) +{ + return READ_ONCE(vma->iomap); +} + /** * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture * @vma: VMA to iomap diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c index 3db2ba439bb5..81da75108c60 100644 --- 
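Editor's note: moving i915_vma_pin() from a header inline into i915_vma.c changes nothing functionally; the body is the standard wait/wound (ww) locking idiom, worth spelling out: all object locks are taken under one ww acquire context, -EDEADLK from any lock means this transaction was wounded by an older one, and the only correct reaction is to back off (drop every lock the context holds) and retry the whole sequence, which ww ordering guarantees terminates. The same calls as the patch, with comments added:

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);	/* true = interruptible waits */
retry:
	err = i915_gem_object_lock(vma->obj, &ww);	/* may return -EDEADLK */
	if (!err)
		err = i915_vma_pin_ww(vma, &ww, size, alignment, flags);
	if (err == -EDEADLK) {
		/* wounded: release everything this context holds, then retry */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);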
a/drivers/gpu/drm/i915/intel_pcode.c +++ b/drivers/gpu/drm/i915/intel_pcode.c @@ -110,13 +110,12 @@ int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1) } int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val, - int fast_timeout_us, int slow_timeout_ms) + int timeout_ms) { int err; mutex_lock(&uncore->i915->sb_lock); - err = __snb_pcode_rw(uncore, mbox, &val, NULL, - fast_timeout_us, slow_timeout_ms, false); + err = __snb_pcode_rw(uncore, mbox, &val, NULL, 250, timeout_ms, false); mutex_unlock(&uncore->i915->sb_lock); if (err) { @@ -273,3 +272,27 @@ int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u3 return err; } + +/* Helpers with drm device */ +int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1) +{ + struct drm_i915_private *i915 = to_i915(drm); + + return snb_pcode_read(&i915->uncore, mbox, val, val1); +} + +int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms) +{ + struct drm_i915_private *i915 = to_i915(drm); + + return snb_pcode_write_timeout(&i915->uncore, mbox, val, timeout_ms); +} + +int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request, + u32 reply_mask, u32 reply, int timeout_base_ms) +{ + struct drm_i915_private *i915 = to_i915(drm); + + return skl_pcode_request(&i915->uncore, mbox, request, reply_mask, reply, + timeout_base_ms); +} diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h index 8d2198e29422..c91a821a88d4 100644 --- a/drivers/gpu/drm/i915/intel_pcode.h +++ b/drivers/gpu/drm/i915/intel_pcode.h @@ -8,13 +8,13 @@ #include <linux/types.h> +struct drm_device; struct intel_uncore; int snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1); -int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val, - int fast_timeout_us, int slow_timeout_ms); +int snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val, int timeout_ms); #define snb_pcode_write(uncore, mbox, val) \ - snb_pcode_write_timeout(uncore, mbox, val, 500, 0) + snb_pcode_write_timeout((uncore), (mbox), (val), 1) int skl_pcode_request(struct intel_uncore *uncore, u32 mbox, u32 request, u32 reply_mask, u32 reply, int timeout_base_ms); @@ -27,4 +27,13 @@ int intel_pcode_init(struct intel_uncore *uncore); int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val); int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val); +/* Helpers with drm device */ +int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1); +int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms); +#define intel_pcode_write(drm, mbox, val) \ + intel_pcode_write_timeout((drm), (mbox), (val), 1) + +int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request, + u32 reply_mask, u32 reply, int timeout_base_ms); + #endif /* _INTEL_PCODE_H */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 8d9f4c410546..7ce3e6de0c19 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -59,7 +59,9 @@ static struct drm_i915_private *rpm_to_i915(struct intel_runtime_pm *rpm) static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { - ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, dev_name(rpm->kdev)); + if (!rpm->debug.class) + ref_tracker_dir_init(&rpm->debug, INTEL_REFTRACK_DEAD_COUNT, + 
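Editor's note: two things happen in the pcode rework above: snb_pcode_write_timeout() loses the (fast_timeout_us, slow_timeout_ms) pair in favour of a single timeout_ms (the 250 us fast poll is now hard-coded in the helper), and new intel_pcode_*() wrappers keyed on struct drm_device let display code reach pcode without touching struct drm_i915_private. A hypothetical caller, purely illustrative (MY_MBOX is a placeholder, not a real mailbox command):

	u32 val;
	int ret;

	ret = intel_pcode_read(drm, MY_MBOX, &val, NULL);
	if (ret)
		return ret;

	/* single total timeout in ms; intel_pcode_write() defaults to 1 ms */
	ret = intel_pcode_write_timeout(drm, MY_MBOX, val | BIT(0), 1);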
"intel_runtime_pm"); } static intel_wakeref_t diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 51561b190b93..7fa194de5d35 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -114,7 +114,8 @@ void __intel_wakeref_init(struct intel_wakeref *wf, "wakeref.work", &key->work, 0); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_WAKEREF) - ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, name); + if (!wf->debug.class) + ref_tracker_dir_init(&wf->debug, INTEL_REFTRACK_DEAD_COUNT, "intel_wakeref"); #endif } diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 88870844b5bd..2fb7a9e7efec 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -73,8 +73,8 @@ static int igt_add_request(void *arg) /* Basic preliminary test to create a request and let it loose! */ request = mock_request(rcs0(i915)->kernel_context, HZ / 10); - if (!request) - return -ENOMEM; + if (IS_ERR(request)) + return PTR_ERR(request); i915_request_add(request); @@ -91,8 +91,8 @@ static int igt_wait_request(void *arg) /* Submit a request, then wait upon it */ request = mock_request(rcs0(i915)->kernel_context, T); - if (!request) - return -ENOMEM; + if (IS_ERR(request)) + return PTR_ERR(request); i915_request_get(request); @@ -160,8 +160,8 @@ static int igt_fence_wait(void *arg) /* Submit a request, treat it as a fence and wait upon it */ request = mock_request(rcs0(i915)->kernel_context, T); - if (!request) - return -ENOMEM; + if (IS_ERR(request)) + return PTR_ERR(request); if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) { pr_err("fence wait success before submit (expected timeout)!\n"); @@ -219,8 +219,8 @@ static int igt_request_rewind(void *arg) GEM_BUG_ON(IS_ERR(ce)); request = mock_request(ce, 2 * HZ); intel_context_put(ce); - if (!request) { - err = -ENOMEM; + if (IS_ERR(request)) { + err = PTR_ERR(request); goto err_context_0; } @@ -237,8 +237,8 @@ static int igt_request_rewind(void *arg) GEM_BUG_ON(IS_ERR(ce)); vip = mock_request(ce, 0); intel_context_put(ce); - if (!vip) { - err = -ENOMEM; + if (IS_ERR(vip)) { + err = PTR_ERR(vip); goto err_context_1; } diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index f08f6674911e..7b856b5090f9 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -413,15 +413,8 @@ static int igt_mock_splintered_region(void *arg) close_objects(mem, &objects); - /* - * While we should be able allocate everything without any flag - * restrictions, if we consider I915_BO_ALLOC_CONTIGUOUS then we are - * actually limited to the largest power-of-two for the region size i.e - * max_order, due to the inner workings of the buddy allocator. So make - * sure that does indeed hold true. 
- */ - - obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS); + obj = igt_object_create(mem, &objects, roundup_pow_of_two(size), + I915_BO_ALLOC_CONTIGUOUS); if (!IS_ERR(obj)) { pr_err("%s too large contiguous allocation was not rejected\n", __func__); @@ -429,8 +422,7 @@ static int igt_mock_splintered_region(void *arg) goto out_close; } - obj = igt_object_create(mem, &objects, rounddown_pow_of_two(size), - I915_BO_ALLOC_CONTIGUOUS); + obj = igt_object_create(mem, &objects, size, I915_BO_ALLOC_CONTIGUOUS); if (IS_ERR(obj)) { pr_err("%s largest possible contiguous allocation failed\n", __func__); diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c index 09f747228dff..1b0cf073e964 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.c +++ b/drivers/gpu/drm/i915/selftests/mock_request.c @@ -35,7 +35,7 @@ mock_request(struct intel_context *ce, unsigned long delay) /* NB the i915->requests slab cache is enlarged to fit mock_request */ request = intel_context_create_request(ce); if (IS_ERR(request)) - return NULL; + return request; request->mock.delay = delay; return request; diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index 59032c939d0f..deb159548a09 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -14,6 +14,7 @@ #include "intel_dram.h" #include "intel_mchbar_regs.h" #include "intel_pcode.h" +#include "intel_uncore.h" #include "vlv_iosf_sb.h" struct dram_dimm_info { @@ -590,8 +591,8 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv, u32 val = 0; int ret; - ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | - ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL); + ret = intel_pcode_read(&dev_priv->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c index 41f5d89e78b8..187a07e0bd9a 100644 --- a/drivers/gpu/drm/imagination/pvr_power.c +++ b/drivers/gpu/drm/imagination/pvr_power.c @@ -340,6 +340,63 @@ pvr_power_device_idle(struct device *dev) return pvr_power_is_idle(pvr_dev) ? 0 : -EBUSY; } +static int +pvr_power_clear_error(struct pvr_device *pvr_dev) +{ + struct device *dev = from_pvr_device(pvr_dev)->dev; + int err; + + /* Ensure the device state is known and nothing is happening past this point */ + pm_runtime_disable(dev); + + /* Attempt to clear the runtime PM error by setting the current state again */ + if (pm_runtime_status_suspended(dev)) + err = pm_runtime_set_suspended(dev); + else + err = pm_runtime_set_active(dev); + + if (err) { + drm_err(from_pvr_device(pvr_dev), + "%s: Failed to clear runtime PM error (new error %d)\n", + __func__, err); + } + + pm_runtime_enable(dev); + + return err; +} + +/** + * pvr_power_get_clear() - Acquire a power reference, correcting any errors + * @pvr_dev: Device pointer + * + * Attempt to acquire a power reference on the device. If the runtime PM + * is in error state, attempt to clear the error and retry. + * + * Returns: + * * 0 on success, or + * * Any error code returned by pvr_power_get() or the runtime PM API. 
+ */ +static int +pvr_power_get_clear(struct pvr_device *pvr_dev) +{ + int err; + + err = pvr_power_get(pvr_dev); + if (err == 0) + return err; + + drm_warn(from_pvr_device(pvr_dev), + "%s: pvr_power_get returned error %d, attempting recovery\n", + __func__, err); + + err = pvr_power_clear_error(pvr_dev); + if (err) + return err; + + return pvr_power_get(pvr_dev); +} + /** * pvr_power_reset() - Reset the GPU * @pvr_dev: Device pointer @@ -364,7 +421,7 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset) * Take a power reference during the reset. This should prevent any interference with the * power state during reset. */ - WARN_ON(pvr_power_get(pvr_dev)); + WARN_ON(pvr_power_get_clear(pvr_dev)); down_write(&pvr_dev->reset_sem); @@ -386,13 +443,13 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset) if (!err) { if (hard_reset) { pvr_dev->fw_dev.booted = false; - WARN_ON(pm_runtime_force_suspend(from_pvr_device(pvr_dev)->dev)); + WARN_ON(pvr_power_device_suspend(from_pvr_device(pvr_dev)->dev)); err = pvr_fw_hard_reset(pvr_dev); if (err) goto err_device_lost; - err = pm_runtime_force_resume(from_pvr_device(pvr_dev)->dev); + err = pvr_power_device_resume(from_pvr_device(pvr_dev)->dev); pvr_dev->fw_dev.booted = true; if (err) goto err_device_lost; diff --git a/drivers/gpu/drm/imagination/pvr_queue.c b/drivers/gpu/drm/imagination/pvr_queue.c index 5a41ee79fed6..fc415dd0d7a7 100644 --- a/drivers/gpu/drm/imagination/pvr_queue.c +++ b/drivers/gpu/drm/imagination/pvr_queue.c @@ -803,7 +803,7 @@ static void pvr_queue_start(struct pvr_queue *queue) * the scheduler, and re-assign parent fences in the middle. * * Return: - * * DRM_GPU_SCHED_STAT_NOMINAL. + * * DRM_GPU_SCHED_STAT_RESET. */ static enum drm_gpu_sched_stat pvr_queue_timedout_job(struct drm_sched_job *s_job) @@ -854,7 +854,7 @@ pvr_queue_timedout_job(struct drm_sched_job *s_job) drm_sched_start(sched, 0); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } /** diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index f851e9ffdb28..9db1ceaed518 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -901,14 +901,15 @@ static void ingenic_drm_disable_vblank(struct drm_crtc *crtc) static struct drm_framebuffer * ingenic_drm_gem_fb_create(struct drm_device *drm, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct ingenic_drm *priv = drm_device_get_priv(drm); if (priv->soc_info->map_noncoherent) - return drm_gem_fb_create_with_dirty(drm, file, mode_cmd); + return drm_gem_fb_create_with_dirty(drm, file, info, mode_cmd); - return drm_gem_fb_create(drm, file, mode_cmd); + return drm_gem_fb_create(drm, file, info, mode_cmd); } static struct drm_gem_object * diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 11ace5cebf4c..65210ab081bb 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -362,8 +362,8 @@ static const struct bin_attribute lima_error_state_attr = { .attr.name = "error", .attr.mode = 0600, .size = 0, - .read_new = lima_error_state_read, - .write_new = lima_error_state_write, + .read = lima_error_state_read, + .write = lima_error_state_write, }; static int lima_pdev_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 954f4325b859..739e8c6c6d90 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ 
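Editor's note: the new pvr_power_get_clear() above exists because once a runtime-PM callback fails, the PM core latches dev->power.runtime_error and every later pm_runtime_get() fails immediately. The sanctioned way to clear that latch is to re-assert the current state via pm_runtime_set_active()/pm_runtime_set_suspended() while callbacks are disabled, which is what pvr_power_clear_error() does; condensed, the recovery is:

	pm_runtime_disable(dev);	/* freeze PM state, quiesce callbacks */
	if (pm_runtime_status_suspended(dev))
		err = pm_runtime_set_suspended(dev);	/* re-assert state; clears runtime_error */
	else
		err = pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

The reset path then retries pvr_power_get() once, so a stale runtime-PM error no longer turns every GPU reset into a WARN.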
b/drivers/gpu/drm/lima/lima_sched.c @@ -412,7 +412,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job */ if (dma_fence_is_signaled(task->fence)) { DRM_WARN("%s spurious timeout\n", lima_ip_name(ip)); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } /* @@ -429,7 +429,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job if (dma_fence_is_signaled(task->fence)) { DRM_WARN("%s unexpectedly high interrupt latency\n", lima_ip_name(ip)); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } /* @@ -467,7 +467,7 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job drm_sched_resubmit_jobs(&pipe->base); drm_sched_start(&pipe->base, 0); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } static void lima_sched_free_job(struct drm_sched_job *job) diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c index 8f6fba4217ec..bc7527542fdc 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -719,6 +719,39 @@ int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, return 0; } +void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state); + int i; + + /* no need to wait for disabling the plane by CPU */ + if (!mtk_crtc->cmdq_client.chan) + return; + + if (!mtk_crtc->enabled) + return; + + /* set pending plane state to disabled */ + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *mtk_plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(mtk_plane->state); + + if (mtk_plane->index == plane->index) { + memcpy(mtk_plane_state, plane_state, sizeof(*plane_state)); + break; + } + } + mtk_crtc_update_config(mtk_crtc, false); + + /* wait for planes to be disabled by CMDQ */ + wait_event_timeout(mtk_crtc->cb_blocking_queue, + mtk_crtc->cmdq_vblank_cnt == 0, + msecs_to_jiffies(500)); +#endif +} + void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, struct drm_atomic_state *state) { @@ -930,7 +963,8 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev, mtk_ddp_comp_supported_rotations(comp), mtk_ddp_comp_get_blend_modes(comp), mtk_ddp_comp_get_formats(comp), - mtk_ddp_comp_get_num_formats(comp), i); + mtk_ddp_comp_get_num_formats(comp), + mtk_ddp_comp_is_afbc_supported(comp), i); if (ret) return ret; diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.h b/drivers/gpu/drm/mediatek/mtk_crtc.h index 388e900b6f4d..828f109b83e7 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_crtc.h @@ -21,6 +21,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path, unsigned int num_conn_routes); int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, struct mtk_plane_state *state); +void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane); void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, struct drm_atomic_state *plane_state); struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c index edc6417639e6..ac6620e10262 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c @@ -366,6 
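Editor's note: DRM_GPU_SCHED_STAT_NOMINAL is renamed to DRM_GPU_SCHED_STAT_RESET throughout this section (pvr and lima here, nouveau further down): the value reports that the driver handled the timeout by resetting, which the old name obscured. The expected shape of a timeout handler is unchanged; schematically:

static enum drm_gpu_sched_stat
my_timedout_job(struct drm_sched_job *sched_job)
{
	struct drm_gpu_scheduler *sched = sched_job->sched;

	drm_sched_stop(sched, sched_job);	/* park the scheduler, evict the bad job */
	/* ... reset the hardware, mark the guilty context ... */
	drm_sched_start(sched, 0);		/* restart submission */

	return DRM_GPU_SCHED_STAT_RESET;	/* "recovered via reset" */
}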
+366,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = { .get_blend_modes = mtk_ovl_get_blend_modes, .get_formats = mtk_ovl_get_formats, .get_num_formats = mtk_ovl_get_num_formats, + .is_afbc_supported = mtk_ovl_is_afbc_supported, }; static const struct mtk_ddp_comp_funcs ddp_postmask = { diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h index 39720b27f4e9..7289b3dcf22f 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h @@ -83,6 +83,7 @@ struct mtk_ddp_comp_funcs { u32 (*get_blend_modes)(struct device *dev); const u32 *(*get_formats)(struct device *dev); size_t (*get_num_formats)(struct device *dev); + bool (*is_afbc_supported)(struct device *dev); void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next); void (*disconnect)(struct device *dev, struct device *mmsys_dev, unsigned int next); void (*add)(struct device *dev, struct mtk_mutex *mutex); @@ -294,6 +295,14 @@ size_t mtk_ddp_comp_get_num_formats(struct mtk_ddp_comp *comp) return 0; } +static inline bool mtk_ddp_comp_is_afbc_supported(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->is_afbc_supported) + return comp->funcs->is_afbc_supported(comp->dev); + + return false; +} + static inline bool mtk_ddp_comp_add(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex) { if (comp->funcs && comp->funcs->add) { diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h index 04217a36939c..679d413bf10b 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -106,6 +106,7 @@ void mtk_ovl_disable_vblank(struct device *dev); u32 mtk_ovl_get_blend_modes(struct device *dev); const u32 *mtk_ovl_get_formats(struct device *dev); size_t mtk_ovl_get_num_formats(struct device *dev); +bool mtk_ovl_is_afbc_supported(struct device *dev); void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex); void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index d0581c4e3c99..e0236353d499 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -236,6 +236,13 @@ size_t mtk_ovl_get_num_formats(struct device *dev) return ovl->data->num_formats; } +bool mtk_ovl_is_afbc_supported(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return ovl->data->supports_afbc; +} + int mtk_ovl_clk_enable(struct device *dev) { struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/mediatek/mtk_dp.c b/drivers/gpu/drm/mediatek/mtk_dp.c index a5b10b2545dc..bef6eeb30d3e 100644 --- a/drivers/gpu/drm/mediatek/mtk_dp.c +++ b/drivers/gpu/drm/mediatek/mtk_dp.c @@ -2118,7 +2118,8 @@ static void mtk_dp_update_plugged_status(struct mtk_dp *mtk_dp) mutex_unlock(&mtk_dp->update_plugged_status_lock); } -static enum drm_connector_status mtk_dp_bdg_detect(struct drm_bridge *bridge) +static enum drm_connector_status +mtk_dp_bdg_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct mtk_dp *mtk_dp = mtk_dp_from_bridge(bridge); enum drm_connector_status ret = connector_status_disconnected; diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 8b58421bc39d..61cab32e213a 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -1095,7 +1095,6 @@ static const u32 
mt8183_output_fmts[] = { }; static const u32 mt8195_dpi_output_fmts[] = { - MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_RGB888_2X12_LE, MEDIA_BUS_FMT_RGB888_2X12_BE, @@ -1103,18 +1102,19 @@ static const u32 mt8195_dpi_output_fmts[] = { MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV10_1X20, MEDIA_BUS_FMT_YUYV12_1X24, + MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_YUV8_1X24, MEDIA_BUS_FMT_YUV10_1X30, }; static const u32 mt8195_dp_intf_output_fmts[] = { - MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_RGB888_1X24, MEDIA_BUS_FMT_RGB888_2X12_LE, MEDIA_BUS_FMT_RGB888_2X12_BE, MEDIA_BUS_FMT_RGB101010_1X30, MEDIA_BUS_FMT_YUYV8_1X16, MEDIA_BUS_FMT_YUYV10_1X20, + MEDIA_BUS_FMT_BGR888_1X24, MEDIA_BUS_FMT_YUV8_1X24, MEDIA_BUS_FMT_YUV10_1X30, }; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 7c0c12dde488..d5e6bab36414 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -43,14 +43,13 @@ static const struct drm_mode_config_helper_funcs mtk_drm_mode_config_helpers = { static struct drm_framebuffer * mtk_drm_mode_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *cmd) { - const struct drm_format_info *info = drm_get_format_info(dev, cmd); - if (info->num_planes != 1) return ERR_PTR(-EINVAL); - return drm_gem_fb_create(dev, file, cmd); + return drm_gem_fb_create(dev, file, info, cmd); } static const struct drm_mode_config_funcs mtk_drm_mode_config_funcs = { diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 6943cdc77dec..845fd8aa43c3 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1174,7 +1174,8 @@ static void mtk_hdmi_hpd_event(bool hpd, struct device *dev) * Bridge callbacks */ -static enum drm_connector_status mtk_hdmi_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +mtk_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c index 655106bbb76d..cbc4f37da8ba 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -285,9 +285,14 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane, struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + mtk_plane_state->pending.enable = false; wmb(); /* Make sure the above parameter is set before update */ mtk_plane_state->pending.dirty = true; + + mtk_crtc_plane_disable(old_state->crtc, plane); } static void mtk_plane_atomic_update(struct drm_plane *plane, @@ -321,7 +326,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, unsigned int supported_rotations, const u32 blend_modes, - const u32 *formats, size_t num_formats, unsigned int plane_idx) + const u32 *formats, size_t num_formats, + bool supports_afbc, unsigned int plane_idx) { int err; @@ -332,7 +338,9 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, err = drm_universal_plane_init(dev, plane, possible_crtcs, &mtk_plane_funcs, formats, - num_formats, 
modifiers, type, NULL); + num_formats, + supports_afbc ? modifiers : NULL, + type, NULL); if (err) { DRM_ERROR("failed to initialize plane\n"); return err; } diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h index 3b13b89989c7..95c5fa5295d8 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_plane.h @@ -49,5 +49,6 @@ to_mtk_plane_state(struct drm_plane_state *state) int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, unsigned int supported_rotations, const u32 blend_modes, - const u32 *formats, size_t num_formats, unsigned int plane_idx); + const u32 *formats, size_t num_formats, + bool supports_afbc, unsigned int plane_idx); #endif diff --git a/drivers/gpu/drm/mgag200/mgag200_ddc.c b/drivers/gpu/drm/mgag200/mgag200_ddc.c index 6d81ea8931e8..c31673eaa554 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ddc.c +++ b/drivers/gpu/drm/mgag200/mgag200_ddc.c @@ -26,7 +26,6 @@ * Authors: Dave Airlie <[email protected]> */ -#include <linux/export.h> #include <linux/i2c-algo-bit.h> #include <linux/i2c.h> #include <linux/pci.h> diff --git a/drivers/gpu/drm/msm/dp/dp_audio.c b/drivers/gpu/drm/msm/dp/dp_audio.c index 959cf53be4b8..41018e82efa1 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.c +++ b/drivers/gpu/drm/msm/dp/dp_audio.c @@ -265,8 +265,8 @@ static struct msm_dp_audio_private *msm_dp_audio_get_data(struct msm_dp *msm_dp_ return container_of(msm_dp_audio, struct msm_dp_audio_private, msm_dp_audio); } -int msm_dp_audio_prepare(struct drm_connector *connector, - struct drm_bridge *bridge, +int msm_dp_audio_prepare(struct drm_bridge *bridge, + struct drm_connector *connector, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params) { @@ -308,8 +308,8 @@ end: return rc; } -void msm_dp_audio_shutdown(struct drm_connector *connector, - struct drm_bridge *bridge) +void msm_dp_audio_shutdown(struct drm_bridge *bridge, + struct drm_connector *connector) { struct msm_dp_audio_private *audio; struct msm_dp *msm_dp_display; diff --git a/drivers/gpu/drm/msm/dp/dp_audio.h b/drivers/gpu/drm/msm/dp/dp_audio.h index 842278516c99..ce2342856adb 100644 --- a/drivers/gpu/drm/msm/dp/dp_audio.h +++ b/drivers/gpu/drm/msm/dp/dp_audio.h @@ -45,12 +45,12 @@ struct msm_dp_audio *msm_dp_audio_get(struct platform_device *pdev, */ void msm_dp_audio_put(struct msm_dp_audio *msm_dp_audio); -int msm_dp_audio_prepare(struct drm_connector *connector, - struct drm_bridge *bridge, +int msm_dp_audio_prepare(struct drm_bridge *bridge, + struct drm_connector *connector, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params); -void msm_dp_audio_shutdown(struct drm_connector *connector, - struct drm_bridge *bridge); +void msm_dp_audio_shutdown(struct drm_bridge *bridge, + struct drm_connector *connector); #endif /* _DP_AUDIO_H_ */ diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index f222d7ccaa88..9a461ab2f32f 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -20,7 +20,8 @@ * @bridge: Pointer to drm bridge structure * Returns: Bridge's 'is connected' status */ -static enum drm_connector_status msm_dp_bridge_detect(struct drm_bridge *bridge) +static enum drm_connector_status +msm_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct msm_dp *dp; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index d5e572d10d6a..02cfd46df594 100644 ---
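Editor's note: the mediatek series threads a per-OVL is_afbc_supported() capability from the display component up to mtk_plane_init(), so AFBC modifiers are only advertised on hardware that can actually scan them out; a NULL modifier list keeps the plane on implicit (linear) layouts. Illustrative shape — the exact modifier flags are quoted from memory of mtk_plane.c and may differ:

	static const u64 modifiers[] = {
		DRM_FORMAT_MOD_LINEAR,
		DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 |
					AFBC_FORMAT_MOD_SPLIT |
					AFBC_FORMAT_MOD_SPARSE),
		DRM_FORMAT_MOD_INVALID,		/* list terminator */
	};

	err = drm_universal_plane_init(dev, plane, possible_crtcs,
				       &mtk_plane_funcs, formats, num_formats,
				       supports_afbc ? modifiers : NULL,
				       type, NULL);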
a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -200,12 +200,12 @@ struct hdmi_codec_daifmt; struct hdmi_codec_params; int msm_hdmi_audio_update(struct hdmi *hdmi); -int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector, - struct drm_bridge *bridge, +int msm_hdmi_bridge_audio_prepare(struct drm_bridge *bridge, + struct drm_connector *connector, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params); -void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector, - struct drm_bridge *bridge); +void msm_hdmi_bridge_audio_shutdown(struct drm_bridge *bridge, + struct drm_connector *connector); /* * hdmi bridge: @@ -215,7 +215,7 @@ int msm_hdmi_bridge_init(struct hdmi *hdmi); void msm_hdmi_hpd_irq(struct drm_bridge *bridge); enum drm_connector_status msm_hdmi_bridge_detect( - struct drm_bridge *bridge); + struct drm_bridge *bridge, struct drm_connector *connector); void msm_hdmi_hpd_enable(struct drm_bridge *bridge); void msm_hdmi_hpd_disable(struct drm_bridge *bridge); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c index b9ec14ef2c20..d9a8dc9dae8f 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_audio.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_audio.c @@ -122,8 +122,8 @@ int msm_hdmi_audio_update(struct hdmi *hdmi) return 0; } -int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector, - struct drm_bridge *bridge, +int msm_hdmi_bridge_audio_prepare(struct drm_bridge *bridge, + struct drm_connector *connector, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params) { @@ -163,8 +163,8 @@ int msm_hdmi_bridge_audio_prepare(struct drm_connector *connector, return msm_hdmi_audio_update(hdmi); } -void msm_hdmi_bridge_audio_shutdown(struct drm_connector *connector, - struct drm_bridge *bridge) +void msm_hdmi_bridge_audio_shutdown(struct drm_bridge *bridge, + struct drm_connector *connector) { struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c index 53a7ce8cc7bc..46fd58646d32 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c @@ -475,7 +475,7 @@ msm_hdmi_hotplug_work(struct work_struct *work) container_of(work, struct hdmi_bridge, hpd_work); struct drm_bridge *bridge = &hdmi_bridge->base; - drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge)); + drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge, hdmi_bridge->hdmi->connector)); } /* initialize bridge */ diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c index 407e6c449ee0..114b0d507700 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi_hpd.c @@ -177,8 +177,8 @@ static enum drm_connector_status detect_gpio(struct hdmi *hdmi) connector_status_disconnected; } -enum drm_connector_status msm_hdmi_bridge_detect( - struct drm_bridge *bridge) +enum drm_connector_status +msm_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); struct hdmi *hdmi = hdmi_bridge->hdmi; diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 9875ca62e9ad..985db9febd98 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -260,7 +260,8 @@ uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane); struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int 
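Editor's note: drm_bridge's .detect hook, and drm_bridge_detect() itself, now take the drm_connector being probed, as the mtk and msm conversions above show. Most drivers can ignore the extra argument; it matters for bridges serving more than one connector. A new-style callback, schematically (my_hw_hpd_asserted() is a made-up stand-in for the driver's HPD read):

static enum drm_connector_status
my_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
	/* connector identifies on whose behalf we are probing */
	return my_hw_hpd_asserted(bridge) ? connector_status_connected
					  : connector_status_disconnected;
}

Hotplug work then forwards the result, as msm_hdmi_hotplug_work() does: drm_bridge_hpd_notify(bridge, drm_bridge_detect(bridge, connector));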
plane); const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, - struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); + struct drm_file *file, const struct drm_format_info *info, + const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer * msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format); diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index bc7c2bb8f01e..1eff615ff9bf 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -30,6 +30,7 @@ struct msm_framebuffer { #define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base) static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); static int msm_framebuffer_dirtyfb(struct drm_framebuffer *fb, @@ -139,10 +140,9 @@ const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb) } struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, - struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd) + struct drm_file *file, const struct drm_format_info *info, + const struct drm_mode_fb_cmd2 *mode_cmd) { - const struct drm_format_info *info = drm_get_format_info(dev, - mode_cmd); struct drm_gem_object *bos[4] = {0}; struct drm_framebuffer *fb; int ret, i, n = info->num_planes; @@ -155,7 +155,7 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev, } } - fb = msm_framebuffer_init(dev, mode_cmd, bos); + fb = msm_framebuffer_init(dev, info, mode_cmd, bos); if (IS_ERR(fb)) { ret = PTR_ERR(fb); goto out_unref; @@ -170,10 +170,9 @@ out_unref: } static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) { - const struct drm_format_info *info = drm_get_format_info(dev, - mode_cmd); struct msm_drm_private *priv = dev->dev_private; struct msm_kms *kms = priv->kms; struct msm_framebuffer *msm_fb = NULL; @@ -227,7 +226,7 @@ static struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, msm_fb->base.obj[i] = bos[i]; } - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); ret = drm_framebuffer_init(dev, fb, &msm_framebuffer_funcs); if (ret) { @@ -276,7 +275,10 @@ msm_alloc_stolen_fb(struct drm_device *dev, int w, int h, int p, uint32_t format msm_gem_object_set_name(bo, "stolenfb"); - fb = msm_framebuffer_init(dev, &mode_cmd, &bo); + fb = msm_framebuffer_init(dev, + drm_get_format_info(dev, mode_cmd.pixel_format, + mode_cmd.modifier[0]), + &mode_cmd, &bo); if (IS_ERR(fb)) { DRM_DEV_ERROR(dev->dev, "failed to allocate fb\n"); /* note: if fb creation failed, we can't rely on fb destroy diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 9f0f5b77f1bd..e7631f4ef530 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -9,7 +9,6 @@ #include <linux/spinlock.h> #include <linux/shmem_fs.h> #include <linux/dma-buf.h> -#include <linux/pfn_t.h> #include <drm/drm_prime.h> #include <drm/drm_file.h> diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index c183b1112bc4..0b756da2fec2 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -91,20 +91,15 @@ void 
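Editor's note: .fb_create implementations (ingenic, mtk, msm, mxsfb and nouveau in this section) now receive const struct drm_format_info * from the core instead of each re-deriving it with drm_get_format_info(); only internal users that fabricate their own mode_cmd (msm_alloc_stolen_fb above) still look it up, using the two-argument pixel_format/modifier form. Minimal new-style callback, as a sketch:

static struct drm_framebuffer *
my_fb_create(struct drm_device *dev, struct drm_file *file,
	     const struct drm_format_info *info,
	     const struct drm_mode_fb_cmd2 *mode_cmd)
{
	/* info is already resolved and validated by the caller */
	if (info->num_planes != 1)
		return ERR_PTR(-EINVAL);

	return drm_gem_fb_create(dev, file, info, mode_cmd);
}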
mxsfb_disable_axi_clk(struct mxsfb_drm_private *mxsfb) static struct drm_framebuffer * mxsfb_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { - const struct drm_format_info *info; - - info = drm_get_format_info(dev, mode_cmd); - if (!info) - return ERR_PTR(-EINVAL); - if (mode_cmd->width * info->cpp[0] != mode_cmd->pitches[0]) { dev_dbg(dev->dev, "Invalid pitch: fb width must match pitch\n"); return ERR_PTR(-EINVAL); } - return drm_gem_fb_create(dev, file_priv, mode_cmd); + return drm_gem_fb_create(dev, file_priv, info, mode_cmd); } static const struct drm_mode_config_funcs mxsfb_mode_config_funcs = { diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index e5d37eee4301..e97e39abf3a2 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1839,7 +1839,7 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta backlight = nv_connector->backlight; if (backlight && backlight->uses_dpcd) drm_edp_backlight_enable(&nv_connector->aux, &backlight->edp_info, - (u16)backlight->dev->props.brightness); + backlight->dev->props.brightness); #endif break; diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index 11d5b923d6e7..e2c55f4b9c5a 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -795,6 +795,10 @@ static bool nv50_plane_format_mod_supported(struct drm_plane *plane, struct nouveau_drm *drm = nouveau_drm(plane->dev); uint8_t i; + /* All chipsets can display all formats in linear layout */ + if (modifier == DRM_FORMAT_MOD_LINEAR) + return true; + if (drm->client.device.info.chipset < 0xc0) { const struct drm_format_info *info = drm_format_info(format); const uint8_t kind = (modifier >> 12) & 0xff; diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c index d47442125fa1..4a75d146a171 100644 --- a/drivers/gpu/drm/nouveau/nouveau_backlight.c +++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c @@ -42,7 +42,7 @@ #include "nouveau_acpi.h" static struct ida bl_ida; -#define BL_NAME_SIZE 15 // 12 for name + 2 for digits + 1 for '\0' +#define BL_NAME_SIZE 24 // 12 for name + 11 for digits + 1 for '\0' static bool nouveau_get_backlight_name(char backlight_name[BL_NAME_SIZE], @@ -245,7 +245,7 @@ nv50_backlight_init(struct nouveau_backlight *bl, if (nv_conn->type == DCB_CONNECTOR_eDP) { int ret; - u16 current_level; + u32 current_level; u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; u8 current_mode; @@ -261,8 +261,9 @@ nv50_backlight_init(struct nouveau_backlight *bl, NV_DEBUG(drm, "DPCD backlight controls supported on %s\n", nv_conn->base.name); - ret = drm_edp_backlight_init(&nv_conn->aux, &bl->edp_info, 0, edp_dpcd, - ¤t_level, ¤t_mode); + ret = drm_edp_backlight_init(&nv_conn->aux, &bl->edp_info, + 0, 0, edp_dpcd, + ¤t_level, ¤t_mode, false); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c index 200e65a7cefc..c7869a639bef 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c @@ -314,14 +314,10 @@ nouveau_debugfs_fini(struct nouveau_drm *drm) drm->debugfs = NULL; } -int +void nouveau_module_debugfs_init(void) { nouveau_debugfs_root = debugfs_create_dir("nouveau", NULL); - if (IS_ERR(nouveau_debugfs_root)) - return PTR_ERR(nouveau_debugfs_root); - 
- return 0; } void diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.h b/drivers/gpu/drm/nouveau/nouveau_debugfs.h index b7617b344ee2..d05ed0e641c4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_debugfs.h +++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.h @@ -24,7 +24,7 @@ extern void nouveau_debugfs_fini(struct nouveau_drm *); extern struct dentry *nouveau_debugfs_root; -int nouveau_module_debugfs_init(void); +void nouveau_module_debugfs_init(void); void nouveau_module_debugfs_fini(void); #else static inline void @@ -42,10 +42,9 @@ nouveau_debugfs_fini(struct nouveau_drm *drm) { } -static inline int +static inline void nouveau_module_debugfs_init(void) { - return 0; } static inline void diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index c50ec347b30a..805d0a87aa54 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -253,6 +253,7 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo, int nouveau_framebuffer_new(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *gem, struct drm_framebuffer **pfb) @@ -260,7 +261,6 @@ nouveau_framebuffer_new(struct drm_device *dev, struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_bo *nvbo = nouveau_gem_object(gem); struct drm_framebuffer *fb; - const struct drm_format_info *info; unsigned int height, i; uint32_t tile_mode; uint8_t kind; @@ -295,8 +295,6 @@ nouveau_framebuffer_new(struct drm_device *dev, kind = nvbo->kind; } - info = drm_get_format_info(dev, mode_cmd); - for (i = 0; i < info->num_planes; i++) { height = drm_format_info_plane_height(info, mode_cmd->height, @@ -320,7 +318,7 @@ nouveau_framebuffer_new(struct drm_device *dev, if (!(fb = *pfb = kzalloc(sizeof(*fb), GFP_KERNEL))) return -ENOMEM; - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); fb->obj[0] = gem; ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs); @@ -332,6 +330,7 @@ nouveau_framebuffer_new(struct drm_device *dev, struct drm_framebuffer * nouveau_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_framebuffer *fb; @@ -342,7 +341,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev, if (!gem) return ERR_PTR(-ENOENT); - ret = nouveau_framebuffer_new(dev, mode_cmd, gem, &fb); + ret = nouveau_framebuffer_new(dev, info, mode_cmd, gem, &fb); if (ret == 0) return fb; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h index 1f506f8b289c..470e0910d484 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.h +++ b/drivers/gpu/drm/nouveau/nouveau_display.h @@ -8,8 +8,11 @@ #include <drm/drm_framebuffer.h> +struct drm_format_info; + int nouveau_framebuffer_new(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *gem, struct drm_framebuffer **pfb); @@ -67,5 +70,6 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb, uint32_t *tile_mode, struct drm_framebuffer * nouveau_user_framebuffer_create(struct drm_device *, struct drm_file *, + const struct drm_format_info *, const struct drm_mode_fb_cmd2 *); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 0c82a63cd49d..1527b801f013 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1461,9 +1461,7 @@ nouveau_drm_init(void) if (!nouveau_modeset) return 0; - ret = nouveau_module_debugfs_init(); - if (ret) - return ret; + nouveau_module_debugfs_init(); #ifdef CONFIG_NOUVEAU_PLATFORM_DRIVER platform_driver_register(&nouveau_platform_driver); diff --git a/drivers/gpu/drm/nouveau/nouveau_exec.c b/drivers/gpu/drm/nouveau/nouveau_exec.c index 41b7c608c905..c4949e815eb3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_exec.c +++ b/drivers/gpu/drm/nouveau/nouveau_exec.c @@ -60,14 +60,14 @@ * virtual address in the GPU's VA space there is no guarantee that the actual * mappings are created in the GPU's MMU. If the given memory is swapped out * at the time the bind operation is executed the kernel will stash the mapping - * details into it's internal alloctor and create the actual MMU mappings once + * details into it's internal allocator and create the actual MMU mappings once * the memory is swapped back in. While this is transparent for userspace, it is * guaranteed that all the backing memory is swapped back in and all the memory * mappings, as requested by userspace previously, are actually mapped once the * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job. * * A VM_BIND job can be executed either synchronously or asynchronously. If - * exectued asynchronously, userspace may provide a list of syncobjs this job + * executed asynchronously, userspace may provide a list of syncobjs this job * will wait for and/or a list of syncobj the kernel will signal once the * VM_BIND job finished execution. If executed synchronously the ioctl will * block until the bind job is finished. For synchronous jobs the kernel will @@ -82,7 +82,7 @@ * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have * an up to date view of the VA space. However, the actual mappings might still * be pending. Hence, EXEC jobs require to have the particular fences - of - * the corresponding VM_BIND jobs they depent on - attached to them. + * the corresponding VM_BIND jobs they depend on - attached to them. 
*/ static int @@ -189,7 +189,7 @@ nouveau_exec_job_timeout(struct nouveau_job *job) NV_PRINTK(warn, job->cli, "job timeout, channel %d killed!\n", chan->chid); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } static const struct nouveau_job_ops nouveau_exec_job_ops = { diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index d5654e26d5bc..9f345a008717 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -38,12 +38,6 @@ static const struct dma_fence_ops nouveau_fence_ops_uevent; static const struct dma_fence_ops nouveau_fence_ops_legacy; -static inline struct nouveau_fence * -from_fence(struct dma_fence *fence) -{ - return container_of(fence, struct nouveau_fence, base); -} - static inline struct nouveau_fence_chan * nouveau_fctx(struct nouveau_fence *fence) { @@ -77,7 +71,7 @@ nouveau_local_fence(struct dma_fence *fence, struct nouveau_drm *drm) fence->ops != &nouveau_fence_ops_uevent) return NULL; - return from_fence(fence); + return to_nouveau_fence(fence); } void @@ -246,6 +240,21 @@ nouveau_fence_emit(struct nouveau_fence *fence) return ret; } +void +nouveau_fence_cancel(struct nouveau_fence *fence) +{ + struct nouveau_fence_chan *fctx = nouveau_fctx(fence); + unsigned long flags; + + spin_lock_irqsave(&fctx->lock, flags); + if (!dma_fence_is_signaled_locked(&fence->base)) { + dma_fence_set_error(&fence->base, -ECANCELED); + if (nouveau_fence_signal(fence)) + nvif_event_block(&fctx->event); + } + spin_unlock_irqrestore(&fctx->lock, flags); +} + bool nouveau_fence_done(struct nouveau_fence *fence) { @@ -268,7 +277,7 @@ nouveau_fence_done(struct nouveau_fence *fence) static long nouveau_fence_wait_legacy(struct dma_fence *f, bool intr, long wait) { - struct nouveau_fence *fence = from_fence(f); + struct nouveau_fence *fence = to_nouveau_fence(f); unsigned long sleep_time = NSEC_PER_MSEC / 1000; unsigned long t = jiffies, timeout = t + wait; @@ -448,7 +457,7 @@ static const char *nouveau_fence_get_get_driver_name(struct dma_fence *fence) static const char *nouveau_fence_get_timeline_name(struct dma_fence *f) { - struct nouveau_fence *fence = from_fence(f); + struct nouveau_fence *fence = to_nouveau_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); return !fctx->dead ? 
fctx->name : "dead channel"; @@ -462,7 +471,7 @@ static const char *nouveau_fence_get_timeline_name(struct dma_fence *f) */ static bool nouveau_fence_is_signaled(struct dma_fence *f) { - struct nouveau_fence *fence = from_fence(f); + struct nouveau_fence *fence = to_nouveau_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); struct nouveau_channel *chan; bool ret = false; @@ -478,7 +487,7 @@ static bool nouveau_fence_is_signaled(struct dma_fence *f) static bool nouveau_fence_no_signaling(struct dma_fence *f) { - struct nouveau_fence *fence = from_fence(f); + struct nouveau_fence *fence = to_nouveau_fence(f); /* * caller should have a reference on the fence, @@ -503,7 +512,7 @@ static bool nouveau_fence_no_signaling(struct dma_fence *f) static void nouveau_fence_release(struct dma_fence *f) { - struct nouveau_fence *fence = from_fence(f); + struct nouveau_fence *fence = to_nouveau_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); kref_put(&fctx->fence_ref, nouveau_fence_context_put); @@ -521,7 +530,7 @@ static const struct dma_fence_ops nouveau_fence_ops_legacy = { static bool nouveau_fence_enable_signaling(struct dma_fence *f) { - struct nouveau_fence *fence = from_fence(f); + struct nouveau_fence *fence = to_nouveau_fence(f); struct nouveau_fence_chan *fctx = nouveau_fctx(fence); bool ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h index 6a983dd9f7b9..9957a919bd38 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.h +++ b/drivers/gpu/drm/nouveau/nouveau_fence.h @@ -17,12 +17,19 @@ struct nouveau_fence { unsigned long timeout; }; +static inline struct nouveau_fence * +to_nouveau_fence(struct dma_fence *fence) +{ + return container_of(fence, struct nouveau_fence, base); +} + int nouveau_fence_create(struct nouveau_fence **, struct nouveau_channel *); int nouveau_fence_new(struct nouveau_fence **, struct nouveau_channel *); void nouveau_fence_unref(struct nouveau_fence **); int nouveau_fence_emit(struct nouveau_fence *); bool nouveau_fence_done(struct nouveau_fence *); +void nouveau_fence_cancel(struct nouveau_fence *fence); int nouveau_fence_wait(struct nouveau_fence *, bool lazy, bool intr); int nouveau_fence_sync(struct nouveau_bo *, struct nouveau_channel *, bool exclusive, bool intr); diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.c b/drivers/gpu/drm/nouveau/nouveau_sched.c index 460a5fb02412..0cc0bc9f9952 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.c +++ b/drivers/gpu/drm/nouveau/nouveau_sched.c @@ -11,6 +11,7 @@ #include "nouveau_exec.h" #include "nouveau_abi16.h" #include "nouveau_sched.h" +#include "nouveau_chan.h" #define NOUVEAU_SCHED_JOB_TIMEOUT_MS 10000 @@ -121,11 +122,9 @@ nouveau_job_done(struct nouveau_job *job) { struct nouveau_sched *sched = job->sched; - spin_lock(&sched->job.list.lock); + spin_lock(&sched->job_list.lock); list_del(&job->entry); - spin_unlock(&sched->job.list.lock); - - wake_up(&sched->job.wq); + spin_unlock(&sched->job_list.lock); } void @@ -306,9 +305,9 @@ nouveau_job_submit(struct nouveau_job *job) } /* Submit was successful; add the job to the schedulers job list. 
*/ - spin_lock(&sched->job.list.lock); - list_add(&job->entry, &sched->job.list.head); - spin_unlock(&sched->job.list.lock); + spin_lock(&sched->job_list.lock); + list_add(&job->entry, &sched->job_list.head); + spin_unlock(&sched->job_list.lock); drm_sched_job_arm(&job->base); job->done_fence = dma_fence_get(&job->base.s_fence->finished); @@ -371,7 +370,7 @@ nouveau_sched_timedout_job(struct drm_sched_job *sched_job) { struct drm_gpu_scheduler *sched = sched_job->sched; struct nouveau_job *job = to_nouveau_job(sched_job); - enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_NOMINAL; + enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_RESET; drm_sched_stop(sched, sched_job); @@ -393,10 +392,23 @@ nouveau_sched_free_job(struct drm_sched_job *sched_job) nouveau_job_fini(job); } +static void +nouveau_sched_cancel_job(struct drm_sched_job *sched_job) +{ + struct nouveau_fence *fence; + struct nouveau_job *job; + + job = to_nouveau_job(sched_job); + fence = to_nouveau_fence(job->done_fence); + + nouveau_fence_cancel(fence); +} + static const struct drm_sched_backend_ops nouveau_sched_ops = { .run_job = nouveau_sched_run_job, .timedout_job = nouveau_sched_timedout_job, .free_job = nouveau_sched_free_job, + .cancel_job = nouveau_sched_cancel_job, }; static int @@ -446,9 +458,8 @@ nouveau_sched_init(struct nouveau_sched *sched, struct nouveau_drm *drm, goto fail_sched; mutex_init(&sched->mutex); - spin_lock_init(&sched->job.list.lock); - INIT_LIST_HEAD(&sched->job.list.head); - init_waitqueue_head(&sched->job.wq); + spin_lock_init(&sched->job_list.lock); + INIT_LIST_HEAD(&sched->job_list.head); return 0; @@ -482,16 +493,12 @@ nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm, return 0; } - static void nouveau_sched_fini(struct nouveau_sched *sched) { struct drm_gpu_scheduler *drm_sched = &sched->base; struct drm_sched_entity *entity = &sched->entity; - rmb(); /* for list_empty to work without lock */ - wait_event(sched->job.wq, list_empty(&sched->job.list.head)); - drm_sched_entity_fini(entity); drm_sched_fini(drm_sched); diff --git a/drivers/gpu/drm/nouveau/nouveau_sched.h b/drivers/gpu/drm/nouveau/nouveau_sched.h index 20cd1da8db73..b98c3f0bef30 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sched.h +++ b/drivers/gpu/drm/nouveau/nouveau_sched.h @@ -103,12 +103,9 @@ struct nouveau_sched { struct mutex mutex; struct { - struct { - struct list_head head; - spinlock_t lock; - } list; - struct wait_queue_head wq; - } job; + struct list_head head; + spinlock_t lock; + } job_list; }; int nouveau_sched_create(struct nouveau_sched **psched, struct nouveau_drm *drm, diff --git a/drivers/gpu/drm/nouveau/nouveau_uvmm.c b/drivers/gpu/drm/nouveau/nouveau_uvmm.c index 48f105239f42..ddfc46bc1b3e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_uvmm.c +++ b/drivers/gpu/drm/nouveau/nouveau_uvmm.c @@ -1019,8 +1019,8 @@ bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range) u64 end = addr + range; again: - spin_lock(&sched->job.list.lock); - list_for_each_entry(__job, &sched->job.list.head, entry) { + spin_lock(&sched->job_list.lock); + list_for_each_entry(__job, &sched->job_list.head, entry) { struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(__job); list_for_each_op(op, &bind_job->ops) { @@ -1030,7 +1030,7 @@ again: if (!(end <= op_addr || addr >= op_end)) { nouveau_uvmm_bind_job_get(bind_job); - spin_unlock(&sched->job.list.lock); + spin_unlock(&sched->job_list.lock); wait_for_completion(&bind_job->complete); nouveau_uvmm_bind_job_put(bind_job); goto again; @@ -1038,7 
+1038,7 @@ again: } } } - spin_unlock(&sched->job.list.lock); + spin_unlock(&sched->job_list.lock); } static int diff --git a/drivers/gpu/drm/nouveau/nvif/chan.c b/drivers/gpu/drm/nouveau/nvif/chan.c index baa10227d51a..80c01017d642 100644 --- a/drivers/gpu/drm/nouveau/nvif/chan.c +++ b/drivers/gpu/drm/nouveau/nvif/chan.c @@ -39,6 +39,9 @@ nvif_chan_gpfifo_post(struct nvif_chan *chan) const u32 pbptr = (chan->push.cur - map) + chan->func->gpfifo.post_size; const u32 gpptr = (chan->gpfifo.cur + 1) & chan->gpfifo.max; + if (!chan->func->gpfifo.post) + return 0; + return chan->func->gpfifo.post(chan, gpptr, pbptr); } diff --git a/drivers/gpu/drm/nouveau/nvif/vmm.c b/drivers/gpu/drm/nouveau/nvif/vmm.c index 99296f03371a..07c1ebc2a941 100644 --- a/drivers/gpu/drm/nouveau/nvif/vmm.c +++ b/drivers/gpu/drm/nouveau/nvif/vmm.c @@ -219,7 +219,8 @@ nvif_vmm_ctor(struct nvif_mmu *mmu, const char *name, s32 oclass, case RAW: args->type = NVIF_VMM_V0_TYPE_RAW; break; default: WARN_ON(1); - return -EINVAL; + ret = -EINVAL; + goto done; } memcpy(args->data, argv, argc); diff --git a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c index b7da3ab44c27..7c43397c19e6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/falcon/gm200.c @@ -103,7 +103,7 @@ gm200_flcn_pio_imem_wr_init(struct nvkm_falcon *falcon, u8 port, bool sec, u32 i static void gm200_flcn_pio_imem_wr(struct nvkm_falcon *falcon, u8 port, const u8 *img, int len, u16 tag) { - nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag++); + nvkm_falcon_wr32(falcon, 0x188 + (port * 0x10), tag); while (len >= 4) { nvkm_falcon_wr32(falcon, 0x184 + (port * 0x10), *(u32 *)img); img += 4; @@ -249,9 +249,11 @@ int gm200_flcn_fw_load(struct nvkm_falcon_fw *fw) { struct nvkm_falcon *falcon = fw->falcon; - int target, ret; + int ret; if (fw->inst) { + int target; + nvkm_falcon_mask(falcon, 0x048, 0x00000001, 0x00000001); switch (nvkm_memory_target(fw->inst)) { @@ -285,15 +287,6 @@ gm200_flcn_fw_load(struct nvkm_falcon_fw *fw) } if (fw->boot) { - switch (nvkm_memory_target(&fw->fw.mem.memory)) { - case NVKM_MEM_TARGET_VRAM: target = 4; break; - case NVKM_MEM_TARGET_HOST: target = 5; break; - case NVKM_MEM_TARGET_NCOH: target = 6; break; - default: - WARN_ON(1); - return -EINVAL; - } - ret = nvkm_falcon_pio_wr(falcon, fw->boot, 0, 0, IMEM, falcon->code.limit - fw->boot_size, fw->boot_size, fw->boot_addr >> 8, false); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c index 52412965fac1..5b721bd9d799 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c @@ -209,11 +209,12 @@ nvkm_gsp_fwsec_v2(struct nvkm_gsp *gsp, const char *name, fw->boot_addr = bld->start_tag << 8; fw->boot_size = bld->code_size; fw->boot = kmemdup(bl->data + hdr->data_offset + bld->code_off, fw->boot_size, GFP_KERNEL); - if (!fw->boot) - ret = -ENOMEM; nvkm_firmware_put(bl); + if (!fw->boot) + return -ENOMEM; + /* Patch in interface data. 
*/ return nvkm_gsp_fwsec_patch(gsp, fw, desc->InterfaceOffset, init_cmd); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c index baf42339f93e..588cb4ab85cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c @@ -719,7 +719,6 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = 4, - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), }, *obj; caps->status = 0xffff; @@ -727,17 +726,22 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) return; + argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL); + if (!argv4.buffer.pointer) + return; + obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); if (!obj) - return; + goto done; if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) - return; + goto done; caps->status = 0; caps->optimusCaps = *(u32 *)obj->buffer.pointer; +done: ACPI_FREE(obj); kfree(argv4.buffer.pointer); @@ -754,24 +758,28 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = sizeof(caps), - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), }, *obj; jt->status = 0xffff; + argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL); + if (!argv4.buffer.pointer) + return; + obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); if (!obj) - return; + goto done; if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) - return; + goto done; jt->status = 0; jt->jtCaps = *(u32 *)obj->buffer.pointer; jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; jt->bSBIOSCaps = 0; +done: ACPI_FREE(obj); kfree(argv4.buffer.pointer); @@ -1744,6 +1752,13 @@ r535_gsp_fini(struct nvkm_gsp *gsp, bool suspend) nvkm_gsp_sg_free(gsp->subdev.device, &gsp->sr.sgt); return ret; } + + /* + * TODO: Debug the GSP firmware / RPC handling to find out why, + * without this delay, Turing (but none of the other architectures) + * ends up resetting all channels after resume. + */ + msleep(50); } ret = r535_gsp_rpc_unloading_guest_driver(gsp, suspend); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c index 5acb98d137bd..0dc4782df8c0 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/rpc.c @@ -325,7 +325,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries) rpc = r535_gsp_msgq_peek(gsp, sizeof(*rpc), info.retries); if (IS_ERR_OR_NULL(rpc)) { - kfree(buf); + kvfree(buf); return rpc; } @@ -334,7 +334,7 @@ r535_gsp_msgq_recv(struct nvkm_gsp *gsp, u32 gsp_rpc_len, int *retries) rpc = r535_gsp_msgq_recv_one_elem(gsp, &info); if (IS_ERR_OR_NULL(rpc)) { - kfree(buf); + kvfree(buf); return rpc; } @@ -637,12 +637,18 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, if (payload_size > max_payload_size) { const u32 fn = rpc->function; u32 remain_payload_size = payload_size; + void *next; - /* Adjust length, and send initial RPC. */ - rpc->length = sizeof(*rpc) + max_payload_size; - msg->checksum = rpc->length; + /* Send initial RPC. 
*/ + next = r535_gsp_rpc_get(gsp, fn, max_payload_size); + if (IS_ERR(next)) { + repv = next; + goto done; + } - repv = r535_gsp_rpc_send(gsp, payload, NVKM_GSP_RPC_REPLY_NOWAIT, 0); + memcpy(next, payload, max_payload_size); + + repv = r535_gsp_rpc_send(gsp, next, NVKM_GSP_RPC_REPLY_NOWAIT, 0); if (IS_ERR(repv)) goto done; @@ -653,7 +659,6 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, while (remain_payload_size) { u32 size = min(remain_payload_size, max_payload_size); - void *next; next = r535_gsp_rpc_get(gsp, NV_VGPU_MSG_FUNCTION_CONTINUATION_RECORD, size); if (IS_ERR(next)) { @@ -674,6 +679,8 @@ r535_gsp_rpc_push(struct nvkm_gsp *gsp, void *payload, /* Wait for reply. */ repv = r535_gsp_rpc_handle_reply(gsp, fn, policy, payload_size + sizeof(*rpc)); + if (!IS_ERR(repv)) + kvfree(msg); } else { repv = r535_gsp_rpc_send(gsp, payload, policy, gsp_rpc_len); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c index 52f2e5f14517..f25ea610cd99 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/vmm.c @@ -121,7 +121,7 @@ r535_mmu_vaspace_new(struct nvkm_vmm *vmm, u32 handle, bool external) page_shift -= desc->bits; ctrl->levels[i].physAddress = pd->pt[0]->addr; - ctrl->levels[i].size = (1 << desc->bits) * desc->size; + ctrl->levels[i].size = BIT_ULL(desc->bits) * desc->size; ctrl->levels[i].aperture = 1; ctrl->levels[i].pageShift = page_shift; diff --git a/drivers/gpu/drm/nova/file.rs b/drivers/gpu/drm/nova/file.rs index 7e59a34b830d..4fe62cf98a23 100644 --- a/drivers/gpu/drm/nova/file.rs +++ b/drivers/gpu/drm/nova/file.rs @@ -39,7 +39,8 @@ impl File { _ => return Err(EINVAL), }; - getparam.set_value(value); + #[allow(clippy::useless_conversion)] + getparam.set_value(value.into()); Ok(0) } diff --git a/drivers/gpu/drm/nova/nova.rs b/drivers/gpu/drm/nova/nova.rs index 902876aa14d1..64fd670e99e1 100644 --- a/drivers/gpu/drm/nova/nova.rs +++ b/drivers/gpu/drm/nova/nova.rs @@ -12,7 +12,7 @@ use crate::driver::NovaDriver; kernel::module_auxiliary_driver! 
{ type: NovaDriver, name: "Nova", - author: "Danilo Krummrich", + authors: ["Danilo Krummrich"], description: "Nova GPU driver", license: "GPL v2", } diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 449d521c78fe..bb3105556f19 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -335,10 +335,9 @@ void omap_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) #endif struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, - struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd) + struct drm_file *file, const struct drm_format_info *info, + const struct drm_mode_fb_cmd2 *mode_cmd) { - const struct drm_format_info *info = drm_get_format_info(dev, - mode_cmd); unsigned int num_planes = info->num_planes; struct drm_gem_object *bos[4]; struct drm_framebuffer *fb; @@ -352,7 +351,7 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, } } - fb = omap_framebuffer_init(dev, mode_cmd, bos); + fb = omap_framebuffer_init(dev, info, mode_cmd, bos); if (IS_ERR(fb)) goto error; @@ -366,9 +365,9 @@ error: } struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos) { - const struct drm_format_info *format = NULL; struct omap_framebuffer *omap_fb = NULL; struct drm_framebuffer *fb = NULL; unsigned int pitch = mode_cmd->pitches[0]; @@ -378,14 +377,12 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, dev, mode_cmd, mode_cmd->width, mode_cmd->height, (char *)&mode_cmd->pixel_format); - format = drm_get_format_info(dev, mode_cmd); - for (i = 0; i < ARRAY_SIZE(formats); i++) { if (formats[i] == mode_cmd->pixel_format) break; } - if (!format || i == ARRAY_SIZE(formats)) { + if (i == ARRAY_SIZE(formats)) { dev_dbg(dev->dev, "unsupported pixel format: %4.4s\n", (char *)&mode_cmd->pixel_format); ret = -EINVAL; @@ -399,7 +396,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, } fb = &omap_fb->base; - omap_fb->format = format; + omap_fb->format = info; mutex_init(&omap_fb->lock); /* @@ -407,23 +404,23 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, * that the two planes of multiplane formats need the same number of * bytes per pixel. */ - if (format->num_planes == 2 && pitch != mode_cmd->pitches[1]) { + if (info->num_planes == 2 && pitch != mode_cmd->pitches[1]) { dev_dbg(dev->dev, "pitches differ between planes 0 and 1\n"); ret = -EINVAL; goto fail; } - if (pitch % format->cpp[0]) { + if (pitch % info->cpp[0]) { dev_dbg(dev->dev, "buffer pitch (%u bytes) is not a multiple of pixel size (%u bytes)\n", - pitch, format->cpp[0]); + pitch, info->cpp[0]); ret = -EINVAL; goto fail; } - for (i = 0; i < format->num_planes; i++) { + for (i = 0; i < info->num_planes; i++) { struct plane *plane = &omap_fb->planes[i]; - unsigned int vsub = i == 0 ? 1 : format->vsub; + unsigned int vsub = i == 0 ? 
1 : info->vsub; unsigned int size; size = pitch * mode_cmd->height / vsub; @@ -440,7 +437,7 @@ struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, plane->dma_addr = 0; } - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs); if (ret) { diff --git a/drivers/gpu/drm/omapdrm/omap_fb.h b/drivers/gpu/drm/omapdrm/omap_fb.h index b75f0b5ef1d8..e6010302a22b 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.h +++ b/drivers/gpu/drm/omapdrm/omap_fb.h @@ -13,6 +13,7 @@ struct drm_connector; struct drm_device; struct drm_file; struct drm_framebuffer; +struct drm_format_info; struct drm_gem_object; struct drm_mode_fb_cmd2; struct drm_plane_state; @@ -20,8 +21,10 @@ struct omap_overlay_info; struct seq_file; struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev, - struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); + struct drm_file *file, const struct drm_format_info *info, + const struct drm_mode_fb_cmd2 *mode_cmd); struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); int omap_framebuffer_pin(struct drm_framebuffer *fb); void omap_framebuffer_unpin(struct drm_framebuffer *fb); diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c index 7b6396890681..948af7ec1130 100644 --- a/drivers/gpu/drm/omapdrm/omap_fbdev.c +++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c @@ -197,7 +197,10 @@ int omap_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, goto fail; } - fb = omap_framebuffer_init(dev, &mode_cmd, &bo); + fb = omap_framebuffer_init(dev, + drm_get_format_info(dev, mode_cmd.pixel_format, + mode_cmd.modifier[0]), + &mode_cmd, &bo); if (IS_ERR(fb)) { dev_err(dev->dev, "failed to allocate fb\n"); /* note: if fb creation failed, we can't rely on fb destroy diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index b9c67e4ca360..381552bfb409 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -8,7 +8,6 @@ #include <linux/seq_file.h> #include <linux/shmem_fs.h> #include <linux/spinlock.h> -#include <linux/pfn_t.h> #include <linux/vmalloc.h> #include <drm/drm_prime.h> @@ -371,8 +370,7 @@ static vm_fault_t omap_gem_fault_1d(struct drm_gem_object *obj, VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address, pfn, pfn << PAGE_SHIFT); - return vmf_insert_mixed(vma, vmf->address, - __pfn_to_pfn_t(pfn, PFN_DEV)); + return vmf_insert_mixed(vma, vmf->address, pfn); } /* Special handling for the case of faulting in 2d tiled buffers */ @@ -467,8 +465,7 @@ static vm_fault_t omap_gem_fault_2d(struct drm_gem_object *obj, pfn, pfn << PAGE_SHIFT); for (i = n; i > 0; i--) { - ret = vmf_insert_mixed(vma, - vaddr, __pfn_to_pfn_t(pfn, PFN_DEV)); + ret = vmf_insert_mixed(vma, vaddr, pfn); if (ret & VM_FAULT_ERROR) break; pfn += priv->usergart[fmt].stride_pfn; diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index d5aa1c95c6a4..09b9f7ff9340 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -193,6 +193,16 @@ config DRM_PANEL_HIMAX_HX83112A Say Y here if you want to enable support for Himax HX83112A-based display panels, such as the one found in the Fairphone 4 smartphone. 
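Note on the omap_gem.c hunk above: with <linux/pfn_t.h> gone, vmf_insert_mixed() takes the raw unsigned long pfn directly instead of a pfn_t built with __pfn_to_pfn_t(pfn, PFN_DEV). A minimal sketch of the fault-handler shape after this change; my_gem_fault() and my_obj_pfn() are hypothetical stand-ins, not names from the patch:

static vm_fault_t my_gem_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	/* my_obj_pfn() stands in for the driver's object-to-pfn lookup */
	unsigned long pfn = my_obj_pfn(vma->vm_private_data, vmf->pgoff);

	/* old form: vmf_insert_mixed(vma, vmf->address,
	 *                            __pfn_to_pfn_t(pfn, PFN_DEV)); */
	return vmf_insert_mixed(vma, vmf->address, pfn);
}
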
+config DRM_PANEL_HIMAX_HX83112B + tristate "Himax HX83112B-based DSI panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + select DRM_KMS_HELPER + help + Say Y here if you want to enable support for Himax HX83112B-based + display panels, such as the one found in the Fairphone 3 smartphone. + config DRM_PANEL_HIMAX_HX8394 tristate "HIMAX HX8394 MIPI-DSI LCD panels" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index 73a39bc72604..957555b49996 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d obj-$(CONFIG_DRM_PANEL_HIMAX_HX8279) += panel-himax-hx8279.o obj-$(CONFIG_DRM_PANEL_HIMAX_HX83102) += panel-himax-hx83102.o obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112A) += panel-himax-hx83112a.o +obj-$(CONFIG_DRM_PANEL_HIMAX_HX83112B) += panel-himax-hx83112b.o obj-$(CONFIG_DRM_PANEL_HIMAX_HX8394) += panel-himax-hx8394.o obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o diff --git a/drivers/gpu/drm/panel/panel-boe-himax8279d.c b/drivers/gpu/drm/panel/panel-boe-himax8279d.c index df746baae301..4a8560b4b899 100644 --- a/drivers/gpu/drm/panel/panel-boe-himax8279d.c +++ b/drivers/gpu/drm/panel/panel-boe-himax8279d.c @@ -847,9 +847,6 @@ static int panel_add(struct panel_info *pinfo) "failed to get enable gpio\n"); } - drm_panel_init(&pinfo->base, dev, &panel_funcs, - DRM_MODE_CONNECTOR_DSI); - ret = drm_panel_of_backlight(&pinfo->base); if (ret) return ret; @@ -865,9 +862,11 @@ static int panel_probe(struct mipi_dsi_device *dsi) const struct panel_desc *desc; int err; - pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL); - if (!pinfo) - return -ENOMEM; + pinfo = devm_drm_panel_alloc(&dsi->dev, __typeof(*pinfo), base, + &panel_funcs, DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(pinfo)) + return PTR_ERR(pinfo); desc = of_device_get_match_data(&dsi->dev); dsi->mode_flags = desc->mode_flags; diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c index 3e5b0d8636d0..d5fe105bdbdd 100644 --- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c +++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c @@ -1720,8 +1720,6 @@ static int boe_panel_add(struct boe_panel *boe) boe->base.prepare_prev_first = true; - drm_panel_init(&boe->base, dev, &boe_panel_funcs, - DRM_MODE_CONNECTOR_DSI); err = of_drm_get_panel_orientation(dev->of_node, &boe->orientation); if (err < 0) { dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err); @@ -1746,9 +1744,11 @@ static int boe_panel_probe(struct mipi_dsi_device *dsi) int ret; const struct panel_desc *desc; - boe = devm_kzalloc(&dsi->dev, sizeof(*boe), GFP_KERNEL); - if (!boe) - return -ENOMEM; + boe = devm_drm_panel_alloc(&dsi->dev, __typeof(*boe), base, + &boe_panel_funcs, DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(boe)) + return PTR_ERR(boe); desc = of_device_get_match_data(&dsi->dev); dsi->lanes = desc->lanes; diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 6c45c9e879ec..9a56e208cbdd 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1939,6 +1939,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"), 
EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b1e, &delay_200_500_e80, "NE140QDM-N6A"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b34, &delay_200_500_e80, "NV122WUM-N41"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b43, &delay_200_500_e200, "NV140FHM-T09"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b56, &delay_200_500_e80, "NT140FHM-N47"), @@ -1967,6 +1968,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'M', 'N', 0x115e, &delay_200_500_e80_d50, "N116BCA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1160, &delay_200_500_e80_d50, "N116BCJ-EAK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1161, &delay_200_500_e80, "N116BCP-EA2"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1163, &delay_200_500_e80_d50, "N116BCJ-EAK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1247, &delay_200_500_e80_d50, "N120ACA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"), diff --git a/drivers/gpu/drm/panel/panel-himax-hx83102.c b/drivers/gpu/drm/panel/panel-himax-hx83102.c index 66abfc44e424..4c432d207634 100644 --- a/drivers/gpu/drm/panel/panel-himax-hx83102.c +++ b/drivers/gpu/drm/panel/panel-himax-hx83102.c @@ -989,8 +989,6 @@ static int hx83102_panel_add(struct hx83102 *ctx) ctx->base.prepare_prev_first = true; - drm_panel_init(&ctx->base, dev, &hx83102_drm_funcs, - DRM_MODE_CONNECTOR_DSI); err = of_drm_get_panel_orientation(dev->of_node, &ctx->orientation); if (err < 0) return dev_err_probe(dev, err, "failed to get orientation\n"); @@ -1013,9 +1011,11 @@ static int hx83102_probe(struct mipi_dsi_device *dsi) int ret; const struct hx83102_panel_desc *desc; - ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return -ENOMEM; + ctx = devm_drm_panel_alloc(&dsi->dev, __typeof(*ctx), base, + &hx83102_drm_funcs, DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(ctx)) + return PTR_ERR(ctx); desc = of_device_get_match_data(&dsi->dev); dsi->lanes = 4; diff --git a/drivers/gpu/drm/panel/panel-himax-hx83112b.c b/drivers/gpu/drm/panel/panel-himax-hx83112b.c new file mode 100644 index 000000000000..263f79a967de --- /dev/null +++ b/drivers/gpu/drm/panel/panel-himax-hx83112b.c @@ -0,0 +1,430 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree. 
+ * Copyright (c) 2025 Luca Weiss <[email protected]> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> +#include <drm/drm_probe_helper.h> + +/* Manufacturer specific DSI commands */ +#define HX83112B_SETPOWER1 0xb1 +#define HX83112B_SETDISP 0xb2 +#define HX83112B_SETDRV 0xb4 +#define HX83112B_SETEXTC 0xb9 +#define HX83112B_SETBANK 0xbd +#define HX83112B_SETDGCLUT 0xc1 +#define HX83112B_SETDISMO 0xc2 +#define HX83112B_UNKNOWN1 0xc6 +#define HX83112B_SETPANEL 0xcc +#define HX83112B_UNKNOWN2 0xd1 +#define HX83112B_SETPOWER2 0xd2 +#define HX83112B_SETGIP0 0xd3 +#define HX83112B_SETGIP1 0xd5 +#define HX83112B_SETGIP2 0xd6 +#define HX83112B_SETGIP3 0xd8 +#define HX83112B_SETIDLE 0xdd +#define HX83112B_UNKNOWN3 0xe7 +#define HX83112B_UNKNOWN4 0xe9 + +struct hx83112b_panel { + struct drm_panel panel; + struct mipi_dsi_device *dsi; + struct regulator_bulk_data *supplies; + struct gpio_desc *reset_gpio; +}; + +static const struct regulator_bulk_data hx83112b_supplies[] = { + { .supply = "iovcc" }, + { .supply = "vsn" }, + { .supply = "vsp" }, +}; + +static inline struct hx83112b_panel *to_hx83112b_panel(struct drm_panel *panel) +{ + return container_of(panel, struct hx83112b_panel, panel); +} + +static void hx83112b_reset(struct hx83112b_panel *ctx) +{ + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + usleep_range(10000, 11000); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + usleep_range(10000, 11000); +} + +static int hx83112b_on(struct hx83112b_panel *ctx) +{ + struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi }; + + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETEXTC, 0x83, 0x11, 0x2b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISMO, 0x08, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, 0x04, 0x38, 0x08, 0x70); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETPOWER1, + 0xf8, 0x27, 0x27, 0x00, 0x00, 0x0b, 0x0e, + 0x0b, 0x0e, 0x33); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETPOWER2, 0x2d, 0x2d); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, + 0x80, 0x02, 0x18, 0x80, 0x70, 0x00, 0x08, + 0x1c, 0x08, 0x11, 0x05); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xd1); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, 0x00, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISP, 0xb5, 0x0a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETIDLE, + 0x00, 0x00, 0x08, 0x1c, 0x08, 0x34, 0x34, + 0x88); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDRV, + 0x65, 0x6b, 0x00, 0x00, 0xd0, 0xd4, 0x36, + 0xcf, 0x06, 0xce, 0x00, 0xce, 0x00, 0x00, + 0x00, 0x07, 0x00, 0x2a, 0x07, 0x01, 0x07, + 0x00, 0x00, 0x2a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc3); + 
mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDRV, 0x01, 0x67, 0x2a); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT, + 0xff, 0xfb, 0xf9, 0xf6, 0xf4, 0xf1, 0xef, + 0xea, 0xe7, 0xe5, 0xe2, 0xdf, 0xdd, 0xda, + 0xd8, 0xd5, 0xd2, 0xcf, 0xcc, 0xc5, 0xbe, + 0xb7, 0xb0, 0xa8, 0xa0, 0x98, 0x8e, 0x85, + 0x7b, 0x72, 0x69, 0x5e, 0x53, 0x48, 0x3e, + 0x35, 0x2b, 0x22, 0x17, 0x0d, 0x09, 0x07, + 0x05, 0x01, 0x00, 0x26, 0xf0, 0x86, 0x25, + 0x6e, 0xb6, 0xdd, 0xf3, 0xd8, 0xcc, 0x9b, + 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT, + 0xff, 0xfb, 0xf9, 0xf6, 0xf4, 0xf1, 0xef, + 0xea, 0xe7, 0xe5, 0xe2, 0xdf, 0xdd, 0xda, + 0xd8, 0xd5, 0xd2, 0xcf, 0xcc, 0xc5, 0xbe, + 0xb7, 0xb0, 0xa8, 0xa0, 0x98, 0x8e, 0x85, + 0x7b, 0x72, 0x69, 0x5e, 0x53, 0x48, 0x3e, + 0x35, 0x2b, 0x22, 0x17, 0x0d, 0x09, 0x07, + 0x05, 0x01, 0x00, 0x26, 0xf0, 0x86, 0x25, + 0x6e, 0xb6, 0xdd, 0xf3, 0xd8, 0xcc, 0x9b, + 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDGCLUT, + 0xff, 0xfb, 0xf9, 0xf6, 0xf4, 0xf1, 0xef, + 0xea, 0xe7, 0xe5, 0xe2, 0xdf, 0xdd, 0xda, + 0xd8, 0xd5, 0xd2, 0xcf, 0xcc, 0xc5, 0xbe, + 0xb7, 0xb0, 0xa8, 0xa0, 0x98, 0x8e, 0x85, + 0x7b, 0x72, 0x69, 0x5e, 0x53, 0x48, 0x3e, + 0x35, 0x2b, 0x22, 0x17, 0x0d, 0x09, 0x07, + 0x05, 0x01, 0x00, 0x26, 0xf0, 0x86, 0x25, + 0x6e, 0xb6, 0xdd, 0xf3, 0xd8, 0xcc, 0x9b, + 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETDISMO, 0xc8); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETPANEL, 0x08); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP0, + 0x81, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, + 0x04, 0x00, 0x01, 0x13, 0x40, 0x04, 0x09, + 0x09, 0x0b, 0x0b, 0x32, 0x10, 0x08, 0x00, + 0x08, 0x32, 0x10, 0x08, 0x00, 0x08, 0x32, + 0x10, 0x08, 0x00, 0x08, 0x00, 0x00, 0x0a, + 0x08, 0x7b); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc5); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN1, 0xf7); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xd4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN1, 0x6e); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xef); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP0, 0x0c); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc8); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP0, 0xa1); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP1, + 0x18, 0x18, 0x19, 0x18, 0x18, 0x20, 0x18, + 0x18, 0x18, 0x10, 0x10, 0x18, 0x18, 0x00, + 0x00, 0x18, 0x18, 0x01, 0x01, 0x18, 0x18, + 0x28, 0x28, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x35, + 0x35, 0x36, 0x36, 0x37, 0x37, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xfc, + 0xfc, 0x00, 0x00, 
0xfc, 0xfc, 0x00, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP2, + 0x18, 0x18, 0x19, 0x18, 0x18, 0x20, 0x19, + 0x18, 0x18, 0x10, 0x10, 0x18, 0x18, 0x00, + 0x00, 0x18, 0x18, 0x01, 0x01, 0x18, 0x18, + 0x28, 0x28, 0x18, 0x18, 0x18, 0x18, 0x18, + 0x2f, 0x2f, 0x30, 0x30, 0x31, 0x31, 0x35, + 0x35, 0x36, 0x36, 0x37, 0x37, 0x18, 0x18, + 0x18, 0x18, 0x18, 0x18, 0x18, 0x18); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3, + 0xaa, 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa, 0xaa, + 0xab, 0xaf, 0xef, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaf, 0xea, 0xaa); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3, + 0xaa, 0xaa, 0xab, 0xaf, 0xea, 0xaa, 0xaa, + 0xaa, 0xae, 0xaf, 0xea, 0xaa); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3, + 0xaa, 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaf, 0xea, 0xaa); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETGIP3, + 0xba, 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xaa, + 0xaa, 0xaa, 0xaf, 0xea, 0xaa, 0xba, 0xaa, + 0xaa, 0xaf, 0xea, 0xaa, 0xaa, 0xaa, 0xaa, + 0xaf, 0xea, 0xaa); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xe4); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3, 0x17, 0x69); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3, + 0x09, 0x09, 0x00, 0x07, 0xe8, 0x00, 0x26, + 0x00, 0x07, 0x00, 0x00, 0xe8, 0x32, 0x00, + 0xe9, 0x0a, 0x0a, 0x00, 0x00, 0x00, 0x01, + 0x01, 0x00, 0x12, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x01); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3, + 0x02, 0x00, 0x01, 0x20, 0x01, 0x18, 0x08, + 0xa8, 0x09); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x02); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3, 0x20, 0x20, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x03); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3, + 0x00, 0xdc, 0x11, 0x70, 0x00, 0x20); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0xc9); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN3, + 0x2a, 0xce, 0x02, 0x70, 0x01, 0x04); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN4, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_SETBANK, 0x00); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, HX83112B_UNKNOWN2, 0x27); + mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); + mipi_dsi_dcs_set_display_on_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + mipi_dsi_dcs_set_display_brightness_multi(&dsi_ctx, 0x0000); + mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, + 0x24); + mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + + return dsi_ctx.accum_err; +} + +static int hx83112b_off(struct hx83112b_panel *ctx) +{ + struct mipi_dsi_multi_context dsi_ctx = { .dsi = ctx->dsi }; + + mipi_dsi_dcs_set_display_off_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 20); + mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx); + mipi_dsi_msleep(&dsi_ctx, 120); + + return dsi_ctx.accum_err; +} + +static int hx83112b_prepare(struct drm_panel *panel) +{ + struct hx83112b_panel *ctx = to_hx83112b_panel(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = 
regulator_bulk_enable(ARRAY_SIZE(hx83112b_supplies), ctx->supplies); + if (ret < 0) { + dev_err(dev, "Failed to enable regulators: %d\n", ret); + return ret; + } + + hx83112b_reset(ctx); + + ret = hx83112b_on(ctx); + if (ret < 0) { + dev_err(dev, "Failed to initialize panel: %d\n", ret); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(hx83112b_supplies), ctx->supplies); + return ret; + } + + return 0; +} + +static int hx83112b_unprepare(struct drm_panel *panel) +{ + struct hx83112b_panel *ctx = to_hx83112b_panel(panel); + struct device *dev = &ctx->dsi->dev; + int ret; + + ret = hx83112b_off(ctx); + if (ret < 0) + dev_err(dev, "Failed to un-initialize panel: %d\n", ret); + + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + regulator_bulk_disable(ARRAY_SIZE(hx83112b_supplies), ctx->supplies); + + return 0; +} + +static const struct drm_display_mode hx83112b_mode = { + .clock = (1080 + 40 + 4 + 12) * (2160 + 32 + 2 + 2) * 60 / 1000, + .hdisplay = 1080, + .hsync_start = 1080 + 40, + .hsync_end = 1080 + 40 + 4, + .htotal = 1080 + 40 + 4 + 12, + .vdisplay = 2160, + .vsync_start = 2160 + 32, + .vsync_end = 2160 + 32 + 2, + .vtotal = 2160 + 32 + 2 + 2, + .width_mm = 65, + .height_mm = 128, + .type = DRM_MODE_TYPE_DRIVER, +}; + +static int hx83112b_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &hx83112b_mode); +} + +static const struct drm_panel_funcs hx83112b_panel_funcs = { + .prepare = hx83112b_prepare, + .unprepare = hx83112b_unprepare, + .get_modes = hx83112b_get_modes, +}; + +static int hx83112b_bl_update_status(struct backlight_device *bl) +{ + struct mipi_dsi_device *dsi = bl_get_data(bl); + u16 brightness = backlight_get_brightness(bl); + int ret; + + dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness); + if (ret < 0) + return ret; + + dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + return 0; +} + +static const struct backlight_ops hx83112b_bl_ops = { + .update_status = hx83112b_bl_update_status, +}; + +static struct backlight_device * +hx83112b_create_backlight(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + const struct backlight_properties props = { + .type = BACKLIGHT_RAW, + .brightness = 4095, + .max_brightness = 4095, + }; + + return devm_backlight_device_register(dev, dev_name(dev), dev, dsi, + &hx83112b_bl_ops, &props); +} + +static int hx83112b_probe(struct mipi_dsi_device *dsi) +{ + struct device *dev = &dsi->dev; + struct hx83112b_panel *ctx; + int ret; + + ctx = devm_drm_panel_alloc(dev, struct hx83112b_panel, panel, + &hx83112b_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + ret = devm_regulator_bulk_get_const(dev, + ARRAY_SIZE(hx83112b_supplies), + hx83112b_supplies, + &ctx->supplies); + if (ret < 0) + return ret; + + ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), + "Failed to get reset-gpios\n"); + + ctx->dsi = dsi; + mipi_dsi_set_drvdata(dsi, ctx); + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_CLOCK_NON_CONTINUOUS | + MIPI_DSI_MODE_VIDEO_NO_HSA | MIPI_DSI_MODE_LPM; + + ctx->panel.prepare_prev_first = true; + + ctx->panel.backlight = hx83112b_create_backlight(dsi); + if (IS_ERR(ctx->panel.backlight)) + return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight), + "Failed to create 
backlight\n"); + + drm_panel_add(&ctx->panel); + + ret = mipi_dsi_attach(dsi); + if (ret < 0) { + drm_panel_remove(&ctx->panel); + return dev_err_probe(dev, ret, "Failed to attach to DSI host\n"); + } + + return 0; +} + +static void hx83112b_remove(struct mipi_dsi_device *dsi) +{ + struct hx83112b_panel *ctx = mipi_dsi_get_drvdata(dsi); + int ret; + + ret = mipi_dsi_detach(dsi); + if (ret < 0) + dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret); + + drm_panel_remove(&ctx->panel); +} + +static const struct of_device_id hx83112b_of_match[] = { + { .compatible = "djn,98-03057-6598b-i" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, hx83112b_of_match); + +static struct mipi_dsi_driver hx83112b_driver = { + .probe = hx83112b_probe, + .remove = hx83112b_remove, + .driver = { + .name = "panel-himax-hx83112b", + .of_match_table = hx83112b_of_match, + }, +}; +module_mipi_dsi_driver(hx83112b_driver); + +MODULE_DESCRIPTION("DRM driver for hx83112b-equipped DSI panels"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c index 3c24a63b6be8..85c7059be214 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c @@ -614,8 +614,6 @@ static int ili9882t_add(struct ili9882t *ili) gpiod_set_value(ili->enable_gpio, 0); - drm_panel_init(&ili->base, dev, &ili9882t_funcs, - DRM_MODE_CONNECTOR_DSI); err = of_drm_get_panel_orientation(dev->of_node, &ili->orientation); if (err < 0) { dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err); @@ -640,9 +638,11 @@ static int ili9882t_probe(struct mipi_dsi_device *dsi) int ret; const struct panel_desc *desc; - ili = devm_kzalloc(&dsi->dev, sizeof(*ili), GFP_KERNEL); - if (!ili) - return -ENOMEM; + ili = devm_drm_panel_alloc(&dsi->dev, __typeof(*ili), base, + &ili9882t_funcs, DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(ili)) + return PTR_ERR(ili); desc = of_device_get_match_data(&dsi->dev); dsi->lanes = desc->lanes; diff --git a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c index 5b5082efb282..5f897e143758 100644 --- a/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c +++ b/drivers/gpu/drm/panel/panel-jdi-lpm102a188a.c @@ -435,9 +435,6 @@ static int jdi_panel_add(struct jdi_panel *jdi) return dev_err_probe(dev, PTR_ERR(jdi->backlight), "failed to create backlight\n"); - drm_panel_init(&jdi->base, &jdi->link1->dev, &jdi_panel_funcs, - DRM_MODE_CONNECTOR_DSI); - drm_panel_add(&jdi->base); return 0; @@ -475,10 +472,13 @@ static int jdi_panel_dsi_probe(struct mipi_dsi_device *dsi) /* register a panel for only the DSI-LINK1 interface */ if (secondary) { - jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL); - if (!jdi) { + jdi = devm_drm_panel_alloc(&dsi->dev, __typeof(*jdi), + base, &jdi_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(jdi)) { put_device(&secondary->dev); - return -ENOMEM; + return PTR_ERR(jdi); } mipi_dsi_set_drvdata(dsi, jdi); diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c index b1ce186de261..3513e5c4dd8c 100644 --- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c +++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c @@ -402,9 +402,6 @@ static int jdi_panel_add(struct jdi_panel *jdi) return dev_err_probe(dev, PTR_ERR(jdi->backlight), "failed to register backlight %d\n", ret); - drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs, - DRM_MODE_CONNECTOR_DSI); - 
drm_panel_add(&jdi->base); return 0; @@ -426,9 +423,11 @@ static int jdi_panel_probe(struct mipi_dsi_device *dsi) dsi->mode_flags = MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO | MIPI_DSI_CLOCK_NON_CONTINUOUS; - jdi = devm_kzalloc(&dsi->dev, sizeof(*jdi), GFP_KERNEL); - if (!jdi) - return -ENOMEM; + jdi = devm_drm_panel_alloc(&dsi->dev, __typeof(*jdi), base, + &jdi_panel_funcs, DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(jdi)) + return PTR_ERR(jdi); mipi_dsi_set_drvdata(dsi, jdi); diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c index 0e5e8e57bd1e..67ca055f06f3 100644 --- a/drivers/gpu/drm/panel/panel-khadas-ts050.c +++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c @@ -821,9 +821,6 @@ static int khadas_ts050_panel_add(struct khadas_ts050_panel *khadas_ts050) return dev_err_probe(dev, PTR_ERR(khadas_ts050->enable_gpio), "failed to get enable gpio"); - drm_panel_init(&khadas_ts050->base, &khadas_ts050->link->dev, - &khadas_ts050_panel_funcs, DRM_MODE_CONNECTOR_DSI); - err = drm_panel_of_backlight(&khadas_ts050->base); if (err) return err; @@ -850,10 +847,12 @@ static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET; - khadas_ts050 = devm_kzalloc(&dsi->dev, sizeof(*khadas_ts050), - GFP_KERNEL); - if (!khadas_ts050) - return -ENOMEM; + khadas_ts050 = devm_drm_panel_alloc(&dsi->dev, __typeof(*khadas_ts050), + base, &khadas_ts050_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(khadas_ts050)) + return PTR_ERR(khadas_ts050); khadas_ts050->panel_data = (struct khadas_ts050_panel_data *)data; mipi_dsi_set_drvdata(dsi, khadas_ts050); diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c index d6b912277196..2fc7b0779b37 100644 --- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c @@ -337,9 +337,6 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay) kingdisplay->enable_gpio = NULL; } - drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev, - &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI); - err = drm_panel_of_backlight(&kingdisplay->base); if (err) return err; @@ -364,9 +361,12 @@ static int kingdisplay_panel_probe(struct mipi_dsi_device *dsi) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM; - kingdisplay = devm_kzalloc(&dsi->dev, sizeof(*kingdisplay), GFP_KERNEL); - if (!kingdisplay) - return -ENOMEM; + kingdisplay = devm_drm_panel_alloc(&dsi->dev, __typeof(*kingdisplay), base, + &kingdisplay_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(kingdisplay)) + return PTR_ERR(kingdisplay); mipi_dsi_set_drvdata(dsi, kingdisplay); kingdisplay->link = dsi; diff --git a/drivers/gpu/drm/panel/panel-lg-sw43408.c b/drivers/gpu/drm/panel/panel-lg-sw43408.c index f3dcc39670ea..46a56ea92ad9 100644 --- a/drivers/gpu/drm/panel/panel-lg-sw43408.c +++ b/drivers/gpu/drm/panel/panel-lg-sw43408.c @@ -246,8 +246,6 @@ static int sw43408_add(struct sw43408_panel *ctx) ctx->base.prepare_prev_first = true; - drm_panel_init(&ctx->base, dev, &sw43408_funcs, DRM_MODE_CONNECTOR_DSI); - drm_panel_add(&ctx->base); return ret; } @@ -257,9 +255,11 @@ static int sw43408_probe(struct mipi_dsi_device *dsi) struct sw43408_panel *ctx; int ret; - ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL); - if (!ctx) - return -ENOMEM; + ctx = devm_drm_panel_alloc(&dsi->dev, 
__typeof(*ctx), base, + &sw43408_funcs, DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(ctx)) + return PTR_ERR(ctx); dsi->mode_flags = MIPI_DSI_MODE_LPM; dsi->format = MIPI_DSI_FMT_RGB888; diff --git a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c index c2abd20e0734..29e1f6aea480 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt36672a.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt36672a.c @@ -608,8 +608,6 @@ static int nt36672a_panel_add(struct nt36672a_panel *pinfo) return dev_err_probe(dev, PTR_ERR(pinfo->reset_gpio), "failed to get reset gpio from DT\n"); - drm_panel_init(&pinfo->base, dev, &panel_funcs, DRM_MODE_CONNECTOR_DSI); - ret = drm_panel_of_backlight(&pinfo->base); if (ret) return dev_err_probe(dev, ret, "Failed to get backlight\n"); @@ -625,9 +623,11 @@ static int nt36672a_panel_probe(struct mipi_dsi_device *dsi) const struct nt36672a_panel_desc *desc; int err; - pinfo = devm_kzalloc(&dsi->dev, sizeof(*pinfo), GFP_KERNEL); - if (!pinfo) - return -ENOMEM; + pinfo = devm_drm_panel_alloc(&dsi->dev, __typeof(*pinfo), base, + &panel_funcs, DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(pinfo)) + return PTR_ERR(pinfo); desc = of_device_get_match_data(&dsi->dev); dsi->mode_flags = desc->mode_flags; diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c index dbea84f51514..2334b77f348c 100644 --- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c +++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c @@ -132,9 +132,6 @@ static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587) if (IS_ERR(osd101t2587->supply)) return PTR_ERR(osd101t2587->supply); - drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev, - &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI); - ret = drm_panel_of_backlight(&osd101t2587->base); if (ret) return ret; @@ -161,9 +158,12 @@ static int osd101t2587_panel_probe(struct mipi_dsi_device *dsi) MIPI_DSI_MODE_VIDEO_SYNC_PULSE | MIPI_DSI_MODE_NO_EOT_PACKET; - osd101t2587 = devm_kzalloc(&dsi->dev, sizeof(*osd101t2587), GFP_KERNEL); - if (!osd101t2587) - return -ENOMEM; + osd101t2587 = devm_drm_panel_alloc(&dsi->dev, __typeof(*osd101t2587), base, + &osd101t2587_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(osd101t2587)) + return PTR_ERR(osd101t2587); mipi_dsi_set_drvdata(dsi, osd101t2587); diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c index d1c5c9bc3c56..3c3308fc55df 100644 --- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c +++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c @@ -166,9 +166,6 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt) if (IS_ERR(wuxga_nt->supply)) return PTR_ERR(wuxga_nt->supply); - drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev, - &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI); - ret = drm_panel_of_backlight(&wuxga_nt->base); if (ret) return ret; @@ -196,9 +193,12 @@ static int wuxga_nt_panel_probe(struct mipi_dsi_device *dsi) MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM; - wuxga_nt = devm_kzalloc(&dsi->dev, sizeof(*wuxga_nt), GFP_KERNEL); - if (!wuxga_nt) - return -ENOMEM; + wuxga_nt = devm_drm_panel_alloc(&dsi->dev, __typeof(*wuxga_nt), base, + &wuxga_nt_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(wuxga_nt)) + return PTR_ERR(wuxga_nt); mipi_dsi_set_drvdata(dsi, wuxga_nt); diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c 
index e10e469aa7a6..dc4bb8ad9131 100644 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c @@ -373,9 +373,12 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c) .node = NULL, }; - ts = devm_kzalloc(dev, sizeof(*ts), GFP_KERNEL); - if (!ts) - return -ENOMEM; + ts = devm_drm_panel_alloc(dev, __typeof(*ts), base, + &rpi_touchscreen_funcs, + DRM_MODE_CONNECTOR_DSI); + + if (IS_ERR(ts)) + return PTR_ERR(ts); i2c_set_clientdata(i2c, ts); @@ -428,9 +431,6 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c) return PTR_ERR(ts->dsi); } - drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs, - DRM_MODE_CONNECTOR_DSI); - /* This appears last, as it's what will unblock the DSI host * driver's component bind function. */ diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67200.c b/drivers/gpu/drm/panel/panel-raydium-rm67200.c index 1da8225e6f7a..333faed62da7 100644 --- a/drivers/gpu/drm/panel/panel-raydium-rm67200.c +++ b/drivers/gpu/drm/panel/panel-raydium-rm67200.c @@ -36,12 +36,14 @@ static inline struct raydium_rm67200 *to_raydium_rm67200(struct drm_panel *panel static void raydium_rm67200_reset(struct raydium_rm67200 *ctx) { - gpiod_set_value_cansleep(ctx->reset_gpio, 0); - msleep(60); - gpiod_set_value_cansleep(ctx->reset_gpio, 1); - msleep(60); - gpiod_set_value_cansleep(ctx->reset_gpio, 0); - msleep(60); + if (ctx->reset_gpio) { + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + msleep(60); + gpiod_set_value_cansleep(ctx->reset_gpio, 1); + msleep(60); + gpiod_set_value_cansleep(ctx->reset_gpio, 0); + msleep(60); + } } static void raydium_rm67200_write(struct mipi_dsi_multi_context *ctx, @@ -318,6 +320,7 @@ static void w552793baa_setup(struct mipi_dsi_multi_context *ctx) static int raydium_rm67200_prepare(struct drm_panel *panel) { struct raydium_rm67200 *ctx = to_raydium_rm67200(panel); + struct mipi_dsi_multi_context mctx = { .dsi = ctx->dsi }; int ret; ret = regulator_bulk_enable(ctx->num_supplies, ctx->supplies); @@ -328,6 +331,12 @@ static int raydium_rm67200_prepare(struct drm_panel *panel) msleep(60); + ctx->panel_info->panel_setup(&mctx); + mipi_dsi_dcs_exit_sleep_mode_multi(&mctx); + mipi_dsi_msleep(&mctx, 120); + mipi_dsi_dcs_set_display_on_multi(&mctx); + mipi_dsi_msleep(&mctx, 30); + return 0; } @@ -343,20 +352,6 @@ static int raydium_rm67200_unprepare(struct drm_panel *panel) return 0; } -static int raydium_rm67200_enable(struct drm_panel *panel) -{ - struct raydium_rm67200 *rm67200 = to_raydium_rm67200(panel); - struct mipi_dsi_multi_context ctx = { .dsi = rm67200->dsi }; - - rm67200->panel_info->panel_setup(&ctx); - mipi_dsi_dcs_exit_sleep_mode_multi(&ctx); - mipi_dsi_msleep(&ctx, 120); - mipi_dsi_dcs_set_display_on_multi(&ctx); - mipi_dsi_msleep(&ctx, 30); - - return ctx.accum_err; -} - static int raydium_rm67200_disable(struct drm_panel *panel) { struct raydium_rm67200 *rm67200 = to_raydium_rm67200(panel); @@ -381,7 +376,6 @@ static const struct drm_panel_funcs raydium_rm67200_funcs = { .prepare = raydium_rm67200_prepare, .unprepare = raydium_rm67200_unprepare, .get_modes = raydium_rm67200_get_modes, - .enable = raydium_rm67200_enable, .disable = raydium_rm67200_disable, }; @@ -409,7 +403,7 @@ static int raydium_rm67200_probe(struct mipi_dsi_device *dsi) if (ret < 0) return ret; - ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); + ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ctx->reset_gpio)) return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), 
"Failed to get reset-gpios\n"); @@ -470,6 +464,7 @@ static const struct raydium_rm67200_panel_info w552793baa_info = { .vtotal = 1952, .width_mm = 68, /* 68.04mm */ .height_mm = 121, /* 120.96mm */ + .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC, .type = DRM_MODE_TYPE_DRIVER, }, .regulators = w552793baa_regulators, diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c index b5b9e80690f6..692020081524 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c @@ -244,7 +244,7 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = { .init_func = s6d7aa0_lsl080al02_init, .off_func = s6d7aa0_lsl080al02_off, .drm_mode = &s6d7aa0_lsl080al02_mode, - .mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP, + .mode_flags = MIPI_DSI_MODE_VIDEO_NO_HFP, .bus_flags = 0, .has_backlight = false, diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c index 897df195f2f3..1b5c500d4f4e 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c @@ -992,7 +992,7 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi) dsi->lanes = 4; dsi->format = MIPI_DSI_FMT_RGB888; dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST - | MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_AUTO_VERT; + | MIPI_DSI_MODE_VIDEO_AUTO_VERT; ret = s6e8aa0_parse_dt(ctx); if (ret < 0) diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c index a0d76d588da1..d159b0e4fdb6 100644 --- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c +++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c @@ -279,9 +279,6 @@ static int sharp_panel_add(struct sharp_panel *sharp) if (IS_ERR(sharp->supply)) return PTR_ERR(sharp->supply); - drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs, - DRM_MODE_CONNECTOR_DSI); - ret = drm_panel_of_backlight(&sharp->base); if (ret) return ret; @@ -323,10 +320,12 @@ static int sharp_panel_probe(struct mipi_dsi_device *dsi) /* register a panel for only the DSI-LINK1 interface */ if (secondary) { - sharp = devm_kzalloc(&dsi->dev, sizeof(*sharp), GFP_KERNEL); - if (!sharp) { + sharp = devm_drm_panel_alloc(&dsi->dev, __typeof(*sharp), base, + &sharp_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + if (IS_ERR(sharp)) { put_device(&secondary->dev); - return -ENOMEM; + return PTR_ERR(sharp); } mipi_dsi_set_drvdata(dsi, sharp); diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 47222d2d8129..3333d4a07504 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -26,6 +26,7 @@ #include <linux/i2c.h> #include <linux/media-bus-format.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> @@ -136,6 +137,14 @@ struct panel_desc { int connector_type; }; +struct panel_desc_dsi { + struct panel_desc desc; + + unsigned long flags; + enum mipi_dsi_pixel_format format; + unsigned int lanes; +}; + struct panel_simple { struct drm_panel base; @@ -430,10 +439,7 @@ static const struct drm_panel_funcs panel_simple_funcs = { .get_timings = panel_simple_get_timings, }; -static struct panel_desc panel_dpi; - -static int panel_dpi_probe(struct device *dev, - struct panel_simple *panel) +static struct panel_desc *panel_dpi_probe(struct device *dev) { struct 
display_timing *timing; const struct device_node *np; @@ -445,17 +451,17 @@ static int panel_dpi_probe(struct device *dev, np = dev->of_node; desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); if (!desc) - return -ENOMEM; + return ERR_PTR(-ENOMEM); timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL); if (!timing) - return -ENOMEM; + return ERR_PTR(-ENOMEM); ret = of_get_display_timing(np, "panel-timing", timing); if (ret < 0) { dev_err(dev, "%pOF: no panel-timing node found for \"panel-dpi\" binding\n", np); - return ret; + return ERR_PTR(ret); } desc->timings = timing; @@ -473,9 +479,7 @@ static int panel_dpi_probe(struct device *dev, /* We do not know the connector for the DT node, so guess it */ desc->connector_type = DRM_MODE_CONNECTOR_DPI; - panel->desc = desc; - - return 0; + return desc; } #define PANEL_SIMPLE_BOUNDS_CHECK(to_check, bounds, field) \ @@ -570,8 +574,44 @@ static int panel_simple_override_nondefault_lvds_datamapping(struct device *dev, return 0; } -static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) +static const struct panel_desc *panel_simple_get_desc(struct device *dev) { + if (IS_ENABLED(CONFIG_DRM_MIPI_DSI) && + dev_is_mipi_dsi(dev)) { + const struct panel_desc_dsi *dsi_desc; + + dsi_desc = of_device_get_match_data(dev); + if (!dsi_desc) + return ERR_PTR(-ENODEV); + + return &dsi_desc->desc; + } + + if (dev_is_platform(dev)) { + const struct panel_desc *desc; + + desc = of_device_get_match_data(dev); + if (!desc) { + /* + * panel-dpi probes without a descriptor and + * panel_dpi_probe() will initialize one for us + * based on the device tree. + */ + if (of_device_is_compatible(dev->of_node, "panel-dpi")) + return panel_dpi_probe(dev); + else + return ERR_PTR(-ENODEV); + } + + return desc; + } + + return ERR_PTR(-ENODEV); +} + +static struct panel_simple *panel_simple_probe(struct device *dev) +{ + const struct panel_desc *desc; struct panel_simple *panel; struct display_timing dt; struct device_node *ddc; @@ -579,27 +619,31 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) u32 bus_flags; int err; + desc = panel_simple_get_desc(dev); + if (IS_ERR(desc)) + return ERR_CAST(desc); + panel = devm_drm_panel_alloc(dev, struct panel_simple, base, &panel_simple_funcs, desc->connector_type); if (IS_ERR(panel)) - return PTR_ERR(panel); + return ERR_CAST(panel); panel->desc = desc; panel->supply = devm_regulator_get(dev, "power"); if (IS_ERR(panel->supply)) - return PTR_ERR(panel->supply); + return ERR_CAST(panel->supply); panel->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(panel->enable_gpio)) - return dev_err_probe(dev, PTR_ERR(panel->enable_gpio), - "failed to request GPIO\n"); + return dev_err_cast_probe(dev, panel->enable_gpio, + "failed to request GPIO\n"); err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation); if (err) { dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err); - return err; + return ERR_PTR(err); } ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0); @@ -608,19 +652,12 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) of_node_put(ddc); if (!panel->ddc) - return -EPROBE_DEFER; + return ERR_PTR(-EPROBE_DEFER); } - if (desc == &panel_dpi) { - /* Handle the generic panel-dpi binding */ - err = panel_dpi_probe(dev, panel); - if (err) - goto free_ddc; - desc = panel->desc; - } else { - if (!of_get_display_timing(dev->of_node, "panel-timing", &dt)) - 
panel_simple_parse_panel_timing_node(dev, panel, &dt); - } + if (!of_device_is_compatible(dev->of_node, "panel-dpi") && + !of_get_display_timing(dev->of_node, "panel-timing", &dt)) + panel_simple_parse_panel_timing_node(dev, panel, &dt); if (desc->connector_type == DRM_MODE_CONNECTOR_LVDS) { /* Optional data-mapping property for overriding bus format */ @@ -703,7 +740,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) drm_panel_add(&panel->base); - return 0; + return panel; disable_pm_runtime: pm_runtime_dont_use_autosuspend(dev); @@ -712,7 +749,7 @@ free_ddc: if (panel->ddc) put_device(&panel->ddc->dev); - return err; + return ERR_PTR(err); } static void panel_simple_shutdown(struct device *dev) @@ -5394,7 +5431,12 @@ static const struct of_device_id platform_of_match[] = { }, { /* Must be the last entry */ .compatible = "panel-dpi", - .data = &panel_dpi, + + /* + * Explicitly NULL; the panel_desc structure will be + * allocated by panel_dpi_probe(). + */ + .data = NULL, }, { /* sentinel */ } @@ -5403,13 +5445,13 @@ MODULE_DEVICE_TABLE(of, platform_of_match); static int panel_simple_platform_probe(struct platform_device *pdev) { - const struct panel_desc *desc; + struct panel_simple *panel; - desc = of_device_get_match_data(&pdev->dev); - if (!desc) - return -ENODEV; + panel = panel_simple_probe(&pdev->dev); + if (IS_ERR(panel)) + return PTR_ERR(panel); - return panel_simple_probe(&pdev->dev, desc); + return 0; } static void panel_simple_platform_remove(struct platform_device *pdev) @@ -5439,14 +5481,6 @@ static struct platform_driver panel_simple_platform_driver = { .shutdown = panel_simple_platform_shutdown, }; -struct panel_desc_dsi { - struct panel_desc desc; - - unsigned long flags; - enum mipi_dsi_pixel_format format; - unsigned int lanes; -}; - static const struct drm_display_mode auo_b080uan01_mode = { .clock = 154500, .hdisplay = 1200, @@ -5680,16 +5714,14 @@ MODULE_DEVICE_TABLE(of, dsi_of_match); static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi) { const struct panel_desc_dsi *desc; + struct panel_simple *panel; int err; - desc = of_device_get_match_data(&dsi->dev); - if (!desc) - return -ENODEV; - - err = panel_simple_probe(&dsi->dev, &desc->desc); - if (err < 0) - return err; + panel = panel_simple_probe(&dsi->dev); + if (IS_ERR(panel)) + return PTR_ERR(panel); + desc = container_of(panel->desc, struct panel_desc_dsi, desc); dsi->mode_flags = desc->flags; dsi->format = desc->format; dsi->lanes = desc->lanes; diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index bb73f2a68a12..85d6289a6eda 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -432,7 +432,7 @@ static void panfrost_gem_debugfs_bo_print(struct panfrost_gem_object *bo, if (!refcount) return; - resident_size = bo->base.pages ? bo->base.base.size : 0; + resident_size = panfrost_gem_rss(&bo->base.base); snprintf(creator_info, sizeof(creator_info), "%s/%d", bo->debugfs.creator.process_name, bo->debugfs.creator.tgid); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 5657106c2f7d..82acabb21b27 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -751,11 +751,11 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job int js = panfrost_job_get_slot(job); /* - * If the GPU managed to complete this jobs fence, the timeout is - * spurious. Bail out. 
+ * If the GPU managed to complete this job's fence, the timeout has + fired before the free-job worker. The timeout is spurious, so bail out. */ if (dma_fence_is_signaled(job->done_fence)) - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_NO_HANG; /* * Panfrost IRQ handler may take a long time to process an interrupt @@ -770,7 +770,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job if (dma_fence_is_signaled(job->done_fence)) { dev_warn(pfdev->dev, "unexpectedly high interrupt latency\n"); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_NO_HANG; } dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p", @@ -786,7 +786,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job atomic_set(&pfdev->reset.pending, 1); panfrost_reset(pfdev, sched_job); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } static void panfrost_reset_work(struct work_struct *work) @@ -841,7 +841,6 @@ int panfrost_job_init(struct panfrost_device *pfdev) .num_rqs = DRM_SCHED_PRIORITY_COUNT, .credit_limit = 2, .timeout = msecs_to_jiffies(JOB_TIMEOUT_MS), - .timeout_wq = pfdev->reset.wq, .name = "pan_js", .dev = pfdev->dev, }; @@ -879,6 +878,7 @@ int panfrost_job_init(struct panfrost_device *pfdev) pfdev->reset.wq = alloc_ordered_workqueue("panfrost-reset", 0); if (!pfdev->reset.wq) return -ENOMEM; + args.timeout_wq = pfdev->reset.wq; for (j = 0; j < NUM_JOB_SLOTS; j++) { js->queue[j].fence_context = dma_fence_context_alloc(1); diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c index 7c00fd77758b..a123bc740ba1 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.c +++ b/drivers/gpu/drm/panthor/panthor_gem.c @@ -16,10 +16,15 @@ #include "panthor_mmu.h" #ifdef CONFIG_DEBUG_FS -static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev, - struct panthor_gem_object *bo) +static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo) { INIT_LIST_HEAD(&bo->debugfs.node); +} + +static void panthor_gem_debugfs_bo_add(struct panthor_gem_object *bo) +{ + struct panthor_device *ptdev = container_of(bo->base.base.dev, + struct panthor_device, base); bo->debugfs.creator.tgid = current->group_leader->pid; get_task_comm(bo->debugfs.creator.process_name, current->group_leader); @@ -44,14 +49,13 @@ static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) { - bo->debugfs.flags = usage_flags | PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED; + bo->debugfs.flags = usage_flags; + panthor_gem_debugfs_bo_add(bo); } #else -static void panthor_gem_debugfs_bo_add(struct panthor_device *ptdev, - struct panthor_gem_object *bo) -{} static void panthor_gem_debugfs_bo_rm(struct panthor_gem_object *bo) {} static void panthor_gem_debugfs_set_usage_flags(struct panthor_gem_object *bo, u32 usage_flags) {} +static void panthor_gem_debugfs_bo_init(struct panthor_gem_object *bo) {} #endif static void panthor_gem_free_object(struct drm_gem_object *obj) @@ -246,7 +250,7 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock); mutex_init(&obj->label.lock); - panthor_gem_debugfs_bo_add(ptdev, obj); + panthor_gem_debugfs_bo_init(obj); return &obj->base.base; } @@ -285,6 +289,8 @@ panthor_gem_create_with_handle(struct drm_file *file, bo->base.base.resv =
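/*
 * Illustrative sketch, not from the patch: the drm_sched timeout
 * contract the hunks above switch to. A signalled fence means the
 * timeout was spurious (DRM_GPU_SCHED_STAT_NO_HANG); otherwise the
 * driver triggers recovery and reports DRM_GPU_SCHED_STAT_RESET.
 * struct example_job is a hypothetical stand-in.
 */
#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

struct example_job {
	struct drm_sched_job base;
	struct dma_fence *done_fence;
};

static enum drm_gpu_sched_stat
example_timedout_job(struct drm_sched_job *sched_job)
{
	struct example_job *job = container_of(sched_job, struct example_job, base);

	if (dma_fence_is_signaled(job->done_fence))
		return DRM_GPU_SCHED_STAT_NO_HANG;	/* spurious timeout */

	/* ...queue the driver's reset work here... */
	return DRM_GPU_SCHED_STAT_RESET;
}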
bo->exclusive_vm_root_gem->resv; } + panthor_gem_debugfs_set_usage_flags(bo, 0); + /* * Allocate an id of idr table where the obj is registered * and handle has the id what user can see. @@ -296,12 +302,6 @@ panthor_gem_create_with_handle(struct drm_file *file, /* drop reference from allocate - handle holds it now. */ drm_gem_object_put(&shmem->base); - /* - * No explicit flags are needed in the call below, since the - * function internally sets the INITIALIZED bit for us. - */ - panthor_gem_debugfs_set_usage_flags(bo, 0); - return ret; } @@ -387,7 +387,7 @@ static void panthor_gem_debugfs_bo_print(struct panthor_gem_object *bo, unsigned int refcount = kref_read(&bo->base.base.refcount); char creator_info[32] = {}; size_t resident_size; - u32 gem_usage_flags = bo->debugfs.flags & (u32)~PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED; + u32 gem_usage_flags = bo->debugfs.flags; u32 gem_state_flags = 0; /* Skip BOs being destroyed. */ @@ -436,8 +436,7 @@ void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev, scoped_guard(mutex, &ptdev->gems.lock) { list_for_each_entry(bo, &ptdev->gems.node, debugfs.node) { - if (bo->debugfs.flags & PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED) - panthor_gem_debugfs_bo_print(bo, m, &totals); + panthor_gem_debugfs_bo_print(bo, m, &totals); } } diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h index 4dd732dcd59f..8fc7215e9b90 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.h +++ b/drivers/gpu/drm/panthor/panthor_gem.h @@ -35,9 +35,6 @@ enum panthor_debugfs_gem_usage_flags { /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED: BO is mapped on the FW VM. */ PANTHOR_DEBUGFS_GEM_USAGE_FLAG_FW_MAPPED = BIT(PANTHOR_DEBUGFS_GEM_USAGE_FW_MAPPED_BIT), - - /** @PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED: BO is ready for DebugFS display. */ - PANTHOR_DEBUGFS_GEM_USAGE_FLAG_INITIALIZED = BIT(31), }; /** diff --git a/drivers/gpu/drm/panthor/panthor_gpu.c b/drivers/gpu/drm/panthor/panthor_gpu.c index 534735518824..cb7a335e07d7 100644 --- a/drivers/gpu/drm/panthor/panthor_gpu.c +++ b/drivers/gpu/drm/panthor/panthor_gpu.c @@ -297,8 +297,9 @@ int panthor_gpu_block_power_on(struct panthor_device *ptdev, gpu_write64(ptdev, pwron_reg, mask); - ret = gpu_read64_relaxed_poll_timeout(ptdev, pwrtrans_reg, val, - !(mask & val), 100, timeout_us); + ret = gpu_read64_relaxed_poll_timeout(ptdev, rdy_reg, val, + (mask & val) == val, + 100, timeout_us); if (ret) { drm_err(&ptdev->base, "timeout waiting on %s:%llx readiness", blk_name, mask); diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index b39ea6acc6a9..4140f697ba5a 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -885,17 +885,6 @@ static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size) return ret; } -/** - * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS - * @vm: VM whose cache to flush - * - * Return: 0 on success, a negative error code if flush failed. 
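/*
 * Illustrative sketch, not from the patch: the ordering the panthor
 * hunks above establish. The debugfs list head is initialized at object
 * creation, but the object is linked onto the device-wide list only once
 * its usage flags are known, so no INITIALIZED sentinel flag is needed
 * when dumping. Types and names here are trimmed stand-ins.
 */
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct example_dev { struct mutex lock; struct list_head bos; };
struct example_bo { struct list_head node; u32 flags; };

static void example_bo_init(struct example_bo *bo)
{
	INIT_LIST_HEAD(&bo->node);		/* safe to unlink even if never added */
}

static void example_bo_publish(struct example_dev *dev, struct example_bo *bo,
			       u32 flags)
{
	bo->flags = flags;			/* fully initialized... */
	mutex_lock(&dev->lock);
	list_add_tail(&bo->node, &dev->bos);	/* ...only now made visible */
	mutex_unlock(&dev->lock);
}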
- */ -int panthor_vm_flush_all(struct panthor_vm *vm) -{ - return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range); -} - static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size) { struct panthor_device *ptdev = vm->ptdev; @@ -2270,7 +2259,7 @@ static enum drm_gpu_sched_stat panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job) { WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!"); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } static const struct drm_sched_backend_ops panthor_vm_bind_ops = { diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h index fc274637114e..0e268fdfdb2f 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.h +++ b/drivers/gpu/drm/panthor/panthor_mmu.h @@ -33,7 +33,6 @@ int panthor_vm_active(struct panthor_vm *vm); void panthor_vm_idle(struct panthor_vm *vm); u32 panthor_vm_page_size(struct panthor_vm *vm); int panthor_vm_as(struct panthor_vm *vm); -int panthor_vm_flush_all(struct panthor_vm *vm); struct panthor_heap_pool * panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create); diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index a2248f692a03..8f17394cc82a 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -3241,7 +3241,7 @@ queue_timedout_job(struct drm_sched_job *sched_job) queue_start(queue); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } static void queue_free_job(struct drm_sched_job *sched_job) diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index 70aff64ced87..ae7e572b1b4a 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -1176,9 +1176,10 @@ err_drm_connector_cleanup: static struct drm_framebuffer * qxl_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { - return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd, + return drm_gem_fb_create_with_funcs(dev, file_priv, info, mode_cmd, &qxl_fb_funcs); } diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index a46613283393..266c57733136 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -211,7 +211,7 @@ static int evergreen_surface_check_linear_aligned(struct radeon_cs_parser *p, surf->base_align = track->group_size; surf->palign = palign; surf->halign = 1; - if (surf->nbx & (palign - 1)) { + if ((surf->nbx & (palign - 1)) && !(palign == 64 && surf->nbx == 32)) { if (prefix) { dev_warn(p->dev, "%s:%d %s pitch %d invalid must be aligned with %d\n", __func__, __LINE__, prefix, surf->nbx, palign); @@ -2661,6 +2661,95 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } break; } + case PACKET3_COND_EXEC: + { + u64 offset; + + if (pkt->count != 2) { + DRM_ERROR("bad COND_EXEC (invalid count)\n"); + return -EINVAL; + } + r = radeon_cs_packet_next_reloc(p, &reloc, 0); + if (r) { + DRM_ERROR("bad COND_EXEC (missing reloc)\n"); + return -EINVAL; + } + offset = radeon_get_ib_value(p, idx + 0); + offset += ((u64)(radeon_get_ib_value(p, idx + 1) & 0xff)) << 32UL; + if (offset & 0x7) { + DRM_ERROR("bad COND_EXEC (address not qwords aligned)\n"); + return -EINVAL; + } + if ((offset + 8) > radeon_bo_size(reloc->robj)) { + DRM_ERROR("bad COND_EXEC bo too small: 0x%llx, 0x%lx\n", + offset + 8, 
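/*
 * Illustrative sketch, not from the patch: the corrected wait in
 * panthor_gpu_block_power_on() above polls the READY register until
 * every requested bit is set, instead of polling PWRTRANS. Shown with
 * the generic read_poll_timeout() from <linux/iopoll.h> as a stand-in
 * for the driver's gpu_read64_relaxed_poll_timeout() wrapper; readq
 * needs <linux/io-64-nonatomic-lo-hi.h> on 32-bit builds.
 */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>

static int example_wait_ready(void __iomem *rdy_reg, u64 mask,
			      u64 timeout_us)
{
	u64 val;

	/* sleep 100us between reads, give up after timeout_us */
	return read_poll_timeout(readq, val, (val & mask) == mask,
				 100, timeout_us, false, rdy_reg);
}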
radeon_bo_size(reloc->robj)); + return -EINVAL; + } + offset += reloc->gpu_offset; + ib[idx + 0] = offset; + ib[idx + 1] = upper_32_bits(offset) & 0xff; + break; + } + case PACKET3_COND_WRITE: + if (pkt->count != 7) { + DRM_ERROR("bad COND_WRITE (invalid count)\n"); + return -EINVAL; + } + if (idx_value & 0x10) { + u64 offset; + /* POLL is memory. */ + r = radeon_cs_packet_next_reloc(p, &reloc, 0); + if (r) { + DRM_ERROR("bad COND_WRITE (missing src reloc)\n"); + return -EINVAL; + } + offset = radeon_get_ib_value(p, idx + 1); + offset += ((u64)(radeon_get_ib_value(p, idx + 2) & 0xff)) << 32; + if ((offset + 8) > radeon_bo_size(reloc->robj)) { + DRM_ERROR("bad COND_WRITE src bo too small: 0x%llx, 0x%lx\n", + offset + 8, radeon_bo_size(reloc->robj)); + return -EINVAL; + } + offset += reloc->gpu_offset; + ib[idx + 1] = offset; + ib[idx + 2] = upper_32_bits(offset) & 0xff; + } else { + /* POLL is a reg. */ + reg = radeon_get_ib_value(p, idx + 1) << 2; + if (!evergreen_is_safe_reg(p, reg)) { + dev_warn(p->dev, "forbidden register 0x%08x at %d\n", + reg, idx + 1); + return -EINVAL; + } + } + if (idx_value & 0x100) { + u64 offset; + /* WRITE is memory. */ + r = radeon_cs_packet_next_reloc(p, &reloc, 0); + if (r) { + DRM_ERROR("bad COND_WRITE (missing dst reloc)\n"); + return -EINVAL; + } + offset = radeon_get_ib_value(p, idx + 5); + offset += ((u64)(radeon_get_ib_value(p, idx + 6) & 0xff)) << 32; + if ((offset + 8) > radeon_bo_size(reloc->robj)) { + DRM_ERROR("bad COND_WRITE dst bo too small: 0x%llx, 0x%lx\n", + offset + 8, radeon_bo_size(reloc->robj)); + return -EINVAL; + } + offset += reloc->gpu_offset; + ib[idx + 5] = offset; + ib[idx + 6] = upper_32_bits(offset) & 0xff; + } else { + /* WRITE is a reg. */ + reg = radeon_get_ib_value(p, idx + 5) << 2; + if (!evergreen_is_safe_reg(p, reg)) { + dev_warn(p->dev, "forbidden register 0x%08x at %d\n", + reg, idx + 5); + return -EINVAL; + } + } + break; case PACKET3_NOP: break; default: @@ -3406,7 +3495,12 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, case CAYMAN_PACKET3_DEALLOC_STATE: break; case PACKET3_COND_WRITE: - if (idx_value & 0x100) { + if (!(idx_value & 0x10)) { + reg = ib[idx + 1] * 4; + if (!evergreen_vm_reg_valid(reg)) + return -EINVAL; + } + if (!(idx_value & 0x100)) { reg = ib[idx + 5] * 4; if (!evergreen_vm_reg_valid(reg)) return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index bbd39348a7ab..7a3e510327b7 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -26,7 +26,6 @@ * Jerome Glisse */ -#include <linux/console.h> #include <linux/efi.h> #include <linux/pci.h> #include <linux/pm_runtime.h> @@ -1635,11 +1634,9 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, pci_set_power_state(pdev, PCI_D3hot); } - if (notify_clients) { - console_lock(); - drm_client_dev_suspend(dev, true); - console_unlock(); - } + if (notify_clients) + drm_client_dev_suspend(dev, false); + return 0; } @@ -1661,17 +1658,11 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool notify_clients) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (notify_clients) { - console_lock(); - } if (resume) { pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); - if (pci_enable_device(pdev)) { - if (notify_clients) - console_unlock(); + if (pci_enable_device(pdev)) return -1; - } } /* resume AGP if in use */ radeon_agp_resume(rdev); @@ -1747,10 +1738,8 @@ int radeon_resume_kms(struct drm_device *dev, 
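/*
 * Illustrative sketch, not from the patch: the validation pattern the
 * new COND_EXEC/COND_WRITE checks above apply. A 40-bit GPU address is
 * reassembled from two dwords, then rejected if it is not qword aligned
 * or if the 8-byte access would overrun the buffer object.
 */
#include <linux/types.h>

static int example_check_cond_addr(u32 lo, u32 hi, u64 bo_size)
{
	u64 offset = lo | ((u64)(hi & 0xff) << 32);

	if (offset & 0x7)
		return -EINVAL;		/* must be 8-byte aligned */

	if (offset + 8 > bo_size)
		return -EINVAL;		/* access must fit in the BO */

	return 0;
}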
bool resume, bool notify_clients) if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) radeon_pm_compute_clocks(rdev); - if (notify_clients) { - drm_client_dev_resume(dev, true); - console_unlock(); - } + if (notify_clients) + drm_client_dev_resume(dev, false); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 8f5f8abcb1b4..4dc77c398617 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1297,12 +1297,13 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = { int radeon_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { int ret; fb->obj[0] = obj; - drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, info, mode_cmd); ret = drm_framebuffer_init(dev, fb, &radeon_fb_funcs); if (ret) { fb->obj[0] = NULL; @@ -1314,6 +1315,7 @@ radeon_framebuffer_init(struct drm_device *dev, static struct drm_framebuffer * radeon_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj; @@ -1340,7 +1342,7 @@ radeon_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOMEM); } - ret = radeon_framebuffer_init(dev, fb, mode_cmd, obj); + ret = radeon_framebuffer_init(dev, fb, info, mode_cmd, obj); if (ret) { kfree(fb); drm_gem_object_put(obj); diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 267f082bc430..88e821d67af7 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -110,9 +110,10 @@ * 2.48.0 - TA_CS_BC_BASE_ADDR allowed on SI * 2.49.0 - DRM_RADEON_GEM_INFO ioctl returns correct vram_size/visible values * 2.50.0 - Allows unaligned shader loads on CIK. 
(needed by OpenGL) + * 2.51.0 - Add evergreen/cayman OpenGL 4.6 compatibility */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 50 +#define KMS_DRIVER_MINOR 51 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_no_wb; diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c index d4a58bd679db..dc81b0c2dbff 100644 --- a/drivers/gpu/drm/radeon/radeon_fbdev.c +++ b/drivers/gpu/drm/radeon/radeon_fbdev.c @@ -53,10 +53,10 @@ static void radeon_fbdev_destroy_pinned_object(struct drm_gem_object *gobj) } static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper, + const struct drm_format_info *info, struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { - const struct drm_format_info *info; struct radeon_device *rdev = fb_helper->dev->dev_private; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; @@ -67,7 +67,6 @@ static int radeon_fbdev_create_pinned_object(struct drm_fb_helper *fb_helper, int height = mode_cmd->height; u32 cpp; - info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd); cpp = info->cpp[0]; /* need to align pitch with crtc limits */ @@ -205,6 +204,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, struct drm_fb_helper_surface_size *sizes) { struct radeon_device *rdev = fb_helper->dev->dev_private; + const struct drm_format_info *format_info; struct drm_mode_fb_cmd2 mode_cmd = { }; struct fb_info *info; struct drm_gem_object *gobj; @@ -223,7 +223,9 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); - ret = radeon_fbdev_create_pinned_object(fb_helper, &mode_cmd, &gobj); + format_info = drm_get_format_info(rdev_to_drm(rdev), mode_cmd.pixel_format, + mode_cmd.modifier[0]); + ret = radeon_fbdev_create_pinned_object(fb_helper, format_info, &mode_cmd, &gobj); if (ret) { DRM_ERROR("failed to create fbcon object %d\n", ret); return ret; @@ -235,7 +237,7 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, ret = -ENOMEM; goto err_radeon_fbdev_destroy_pinned_object; } - ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, &mode_cmd, gobj); + ret = radeon_framebuffer_init(rdev_to_drm(rdev), fb, format_info, &mode_cmd, gobj); if (ret) { DRM_ERROR("failed to initialize framebuffer %d\n", ret); goto err_kfree; diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 3102f6c2d055..9e34da2cacef 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h @@ -40,6 +40,7 @@ struct drm_fb_helper; struct drm_fb_helper_surface_size; +struct drm_format_info; struct edid; struct drm_edid; @@ -890,6 +891,7 @@ extern void radeon_combios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on); int radeon_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *rfb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj); diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c index 4c8fe83dd610..216219accfd9 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_du_kms.c @@ -426,6 +426,7 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, static struct drm_framebuffer * rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { 
struct rcar_du_device *rcdu = to_rcar_du_device(dev); @@ -490,7 +491,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, } } - return drm_gem_fb_create(dev, file_priv, mode_cmd); + return drm_gem_fb_create(dev, file_priv, info, mode_cmd); } /* ----------------------------------------------------------------------------- diff --git a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c index 55a97691e9b2..87f171145a23 100644 --- a/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c +++ b/drivers/gpu/drm/renesas/rz-du/rzg2l_du_kms.c @@ -191,6 +191,7 @@ int rzg2l_du_dumb_create(struct drm_file *file, struct drm_device *dev, static struct drm_framebuffer * rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { const struct rzg2l_du_format_info *format; @@ -214,7 +215,7 @@ rzg2l_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, return ERR_PTR(-EINVAL); } - return drm_gem_fb_create(dev, file_priv, mode_cmd); + return drm_gem_fb_create(dev, file_priv, info, mode_cmd); } /* ----------------------------------------------------------------------------- diff --git a/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c b/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c index 4202ab00fb0c..fd9460da1789 100644 --- a/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c +++ b/drivers/gpu/drm/renesas/shmobile/shmob_drm_kms.c @@ -117,6 +117,7 @@ const struct shmob_drm_format_info *shmob_drm_format_info(u32 fourcc) static struct drm_framebuffer * shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { const struct shmob_drm_format_info *format; @@ -144,7 +145,7 @@ shmob_drm_fb_create(struct drm_device *dev, struct drm_file *file_priv, } } - return drm_gem_fb_create(dev, file_priv, mode_cmd); + return drm_gem_fb_create(dev, file_priv, info, mode_cmd); } static const struct drm_mode_config_funcs shmob_drm_mode_config_funcs = { diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index ab525668939a..faf50d872be3 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -53,6 +53,7 @@ config ROCKCHIP_CDN_DP bool "Rockchip cdn DP" depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m) select DRM_DISPLAY_HELPER + select DRM_BRIDGE_CONNECTOR select DRM_DISPLAY_DP_HELPER help This selects support for Rockchip SoC specific extensions diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 292c31de18f1..b7e3f5dcf8d5 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -16,7 +16,9 @@ #include <sound/hdmi-codec.h> #include <drm/display/drm_dp_helper.h> +#include <drm/display/drm_hdmi_audio_helper.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge_connector.h> #include <drm/drm_edid.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> @@ -25,9 +27,9 @@ #include "cdn-dp-core.h" #include "cdn-dp-reg.h" -static inline struct cdn_dp_device *connector_to_dp(struct drm_connector *connector) +static inline struct cdn_dp_device *bridge_to_dp(struct drm_bridge *bridge) { - return container_of(connector, struct cdn_dp_device, connector); + return container_of(bridge, struct cdn_dp_device, bridge); } static inline struct cdn_dp_device *encoder_to_dp(struct drm_encoder *encoder) @@ -231,9 +233,9 @@ static bool 
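/*
 * Illustrative sketch, not from the patch: the shape shared by all of
 * the .fb_create hooks converted above. The core now passes the
 * pre-resolved drm_format_info, so drivers validate against it and
 * forward it instead of calling drm_get_format_info() themselves.
 * The num_planes check is an invented driver-specific policy.
 */
static struct drm_framebuffer *
example_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		  const struct drm_format_info *info,
		  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (info->num_planes != 1)
		return ERR_PTR(-EINVAL);	/* hypothetical hardware limit */

	return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}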
cdn_dp_check_sink_connection(struct cdn_dp_device *dp) } static enum drm_connector_status -cdn_dp_connector_detect(struct drm_connector *connector, bool force) +cdn_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { - struct cdn_dp_device *dp = connector_to_dp(connector); + struct cdn_dp_device *dp = bridge_to_dp(bridge); enum drm_connector_status status = connector_status_disconnected; mutex_lock(&dp->lock); @@ -244,41 +246,25 @@ cdn_dp_connector_detect(struct drm_connector *connector, bool force) return status; } -static void cdn_dp_connector_destroy(struct drm_connector *connector) +static const struct drm_edid * +cdn_dp_bridge_edid_read(struct drm_bridge *bridge, struct drm_connector *connector) { - drm_connector_unregister(connector); - drm_connector_cleanup(connector); -} - -static const struct drm_connector_funcs cdn_dp_atomic_connector_funcs = { - .detect = cdn_dp_connector_detect, - .destroy = cdn_dp_connector_destroy, - .fill_modes = drm_helper_probe_single_connector_modes, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int cdn_dp_connector_get_modes(struct drm_connector *connector) -{ - struct cdn_dp_device *dp = connector_to_dp(connector); - int ret = 0; + struct cdn_dp_device *dp = bridge_to_dp(bridge); + const struct drm_edid *drm_edid; mutex_lock(&dp->lock); - - ret = drm_edid_connector_add_modes(connector); - + drm_edid = drm_edid_read_custom(connector, cdn_dp_get_edid_block, dp); mutex_unlock(&dp->lock); - return ret; + return drm_edid; } static enum drm_mode_status -cdn_dp_connector_mode_valid(struct drm_connector *connector, - const struct drm_display_mode *mode) +cdn_dp_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *display_info, + const struct drm_display_mode *mode) { - struct cdn_dp_device *dp = connector_to_dp(connector); - struct drm_display_info *display_info = &dp->connector.display_info; + struct cdn_dp_device *dp = bridge_to_dp(bridge); u32 requested, actual, rate, sink_max, source_max = 0; u8 lanes, bpc; @@ -323,11 +309,6 @@ cdn_dp_connector_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_connector_helper_funcs cdn_dp_connector_helper_funcs = { - .get_modes = cdn_dp_connector_get_modes, - .mode_valid = cdn_dp_connector_mode_valid, -}; - static int cdn_dp_firmware_init(struct cdn_dp_device *dp) { int ret; @@ -360,7 +341,6 @@ static int cdn_dp_firmware_init(struct cdn_dp_device *dp) static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp) { - const struct drm_display_info *info = &dp->connector.display_info; int ret; if (!cdn_dp_check_sink_connection(dp)) @@ -373,17 +353,6 @@ static int cdn_dp_get_sink_capability(struct cdn_dp_device *dp) return ret; } - drm_edid_free(dp->drm_edid); - dp->drm_edid = drm_edid_read_custom(&dp->connector, - cdn_dp_get_edid_block, dp); - drm_edid_connector_update(&dp->connector, dp->drm_edid); - - dp->sink_has_audio = info->has_audio; - - if (dp->drm_edid) - DRM_DEV_DEBUG_KMS(dp->dev, "got edid: width[%d] x height[%d]\n", - info->width_mm / 10, info->height_mm / 10); - return 0; } @@ -488,10 +457,6 @@ static int cdn_dp_disable(struct cdn_dp_device *dp) dp->active = false; dp->max_lanes = 0; dp->max_rate = 0; - if (!dp->connected) { - drm_edid_free(dp->drm_edid); - dp->drm_edid = NULL; - } return 0; } @@ -546,26 +511,13 @@ err_clk_disable: return ret; } -static void 
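/*
 * Illustrative sketch, not from the patch: the bridge-based EDID read
 * used above. drm_edid_read_custom() drives a driver-supplied block
 * reader, and the device lock held around it mirrors the cdn-dp code.
 * struct example_dp and example_read_block() are hypothetical.
 */
struct example_dp {
	struct drm_bridge bridge;
	struct mutex lock;
};

static int example_read_block(void *data, u8 *buf, unsigned int block,
			      size_t len)
{
	/* fetch one 128-byte EDID block over the vendor mailbox/DDC */
	return 0;
}

static const struct drm_edid *
example_edid_read(struct drm_bridge *bridge, struct drm_connector *connector)
{
	struct example_dp *dp = container_of(bridge, struct example_dp, bridge);
	const struct drm_edid *edid;

	mutex_lock(&dp->lock);
	edid = drm_edid_read_custom(connector, example_read_block, dp);
	mutex_unlock(&dp->lock);

	return edid;
}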
cdn_dp_encoder_mode_set(struct drm_encoder *encoder, - struct drm_display_mode *mode, - struct drm_display_mode *adjusted) +static void cdn_dp_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted) { - struct cdn_dp_device *dp = encoder_to_dp(encoder); - struct drm_display_info *display_info = &dp->connector.display_info; + struct cdn_dp_device *dp = bridge_to_dp(bridge); struct video_info *video = &dp->video_info; - switch (display_info->bpc) { - case 10: - video->color_depth = 10; - break; - case 6: - video->color_depth = 6; - break; - default: - video->color_depth = 8; - break; - } - video->color_fmt = PXL_RGB; video->v_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); video->h_sync_polarity = !!(mode->flags & DRM_MODE_FLAG_NHSYNC); @@ -592,19 +544,37 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp) return drm_dp_channel_eq_ok(link_status, min(port->lanes, sink_lanes)); } -static void cdn_dp_audio_handle_plugged_change(struct cdn_dp_device *dp, - bool plugged) +static void cdn_dp_display_info_update(struct cdn_dp_device *dp, + struct drm_display_info *display_info) { - if (dp->codec_dev) - dp->plugged_cb(dp->codec_dev, plugged); + struct video_info *video = &dp->video_info; + + switch (display_info->bpc) { + case 10: + video->color_depth = 10; + break; + case 6: + video->color_depth = 6; + break; + default: + video->color_depth = 8; + break; + } } -static void cdn_dp_encoder_enable(struct drm_encoder *encoder) +static void cdn_dp_bridge_atomic_enable(struct drm_bridge *bridge, struct drm_atomic_state *state) { - struct cdn_dp_device *dp = encoder_to_dp(encoder); + struct cdn_dp_device *dp = bridge_to_dp(bridge); + struct drm_connector *connector; int ret, val; - ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, encoder); + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + if (!connector) + return; + + cdn_dp_display_info_update(dp, &connector->display_info); + + ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node, &dp->encoder.encoder); if (ret < 0) { DRM_DEV_ERROR(dp->dev, "Could not get vop id, %d", ret); return; @@ -625,7 +595,7 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder) ret = cdn_dp_enable(dp); if (ret) { - DRM_DEV_ERROR(dp->dev, "Failed to enable encoder %d\n", + DRM_DEV_ERROR(dp->dev, "Failed to enable bridge %d\n", ret); goto out; } @@ -655,24 +625,21 @@ static void cdn_dp_encoder_enable(struct drm_encoder *encoder) goto out; } - cdn_dp_audio_handle_plugged_change(dp, true); - out: mutex_unlock(&dp->lock); } -static void cdn_dp_encoder_disable(struct drm_encoder *encoder) +static void cdn_dp_bridge_atomic_disable(struct drm_bridge *bridge, struct drm_atomic_state *state) { - struct cdn_dp_device *dp = encoder_to_dp(encoder); + struct cdn_dp_device *dp = bridge_to_dp(bridge); int ret; mutex_lock(&dp->lock); - cdn_dp_audio_handle_plugged_change(dp, false); if (dp->active) { ret = cdn_dp_disable(dp); if (ret) { - DRM_DEV_ERROR(dp->dev, "Failed to disable encoder %d\n", + DRM_DEV_ERROR(dp->dev, "Failed to disable bridge %d\n", ret); } } @@ -704,9 +671,6 @@ static int cdn_dp_encoder_atomic_check(struct drm_encoder *encoder, } static const struct drm_encoder_helper_funcs cdn_dp_encoder_helper_funcs = { - .mode_set = cdn_dp_encoder_mode_set, - .enable = cdn_dp_encoder_enable, - .disable = cdn_dp_encoder_disable, .atomic_check = cdn_dp_encoder_atomic_check, }; @@ -779,11 +743,12 @@ static int cdn_dp_parse_dt(struct cdn_dp_device 
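/*
 * Illustrative sketch, not from the patch: with no driver-owned
 * connector, per-sink data is looked up from the atomic state at
 * enable time, as cdn_dp_bridge_atomic_enable() does above.
 */
static void example_bridge_atomic_enable(struct drm_bridge *bridge,
					 struct drm_atomic_state *state)
{
	struct drm_connector *connector;

	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	if (!connector)
		return;

	/* consume connector->display_info (bpc, sink limits, ...) */
}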
*dp) return 0; } -static int cdn_dp_audio_hw_params(struct device *dev, void *data, - struct hdmi_codec_daifmt *daifmt, - struct hdmi_codec_params *params) +static int cdn_dp_audio_prepare(struct drm_bridge *bridge, + struct drm_connector *connector, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) { - struct cdn_dp_device *dp = dev_get_drvdata(dev); + struct cdn_dp_device *dp = bridge_to_dp(bridge); struct audio_info audio = { .sample_width = params->sample_width, .sample_rate = params->sample_rate, @@ -805,7 +770,7 @@ static int cdn_dp_audio_hw_params(struct device *dev, void *data, audio.format = AFMT_SPDIF; break; default: - DRM_DEV_ERROR(dev, "Invalid format %d\n", daifmt->fmt); + drm_err(bridge->dev, "Invalid format %d\n", daifmt->fmt); ret = -EINVAL; goto out; } @@ -819,9 +784,10 @@ out: return ret; } -static void cdn_dp_audio_shutdown(struct device *dev, void *data) +static void cdn_dp_audio_shutdown(struct drm_bridge *bridge, + struct drm_connector *connector) { - struct cdn_dp_device *dp = dev_get_drvdata(dev); + struct cdn_dp_device *dp = bridge_to_dp(bridge); int ret; mutex_lock(&dp->lock); @@ -835,10 +801,11 @@ out: mutex_unlock(&dp->lock); } -static int cdn_dp_audio_mute_stream(struct device *dev, void *data, +static int cdn_dp_audio_mute_stream(struct drm_bridge *bridge, + struct drm_connector *connector, bool enable, int direction) { - struct cdn_dp_device *dp = dev_get_drvdata(dev); + struct cdn_dp_device *dp = bridge_to_dp(bridge); int ret; mutex_lock(&dp->lock); @@ -854,57 +821,22 @@ out: return ret; } -static int cdn_dp_audio_get_eld(struct device *dev, void *data, - u8 *buf, size_t len) -{ - struct cdn_dp_device *dp = dev_get_drvdata(dev); - - memcpy(buf, dp->connector.eld, min(sizeof(dp->connector.eld), len)); - - return 0; -} - -static int cdn_dp_audio_hook_plugged_cb(struct device *dev, void *data, - hdmi_codec_plugged_cb fn, - struct device *codec_dev) -{ - struct cdn_dp_device *dp = dev_get_drvdata(dev); - - mutex_lock(&dp->lock); - dp->plugged_cb = fn; - dp->codec_dev = codec_dev; - cdn_dp_audio_handle_plugged_change(dp, dp->connected); - mutex_unlock(&dp->lock); +static const struct drm_bridge_funcs cdn_dp_bridge_funcs = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .detect = cdn_dp_bridge_detect, + .edid_read = cdn_dp_bridge_edid_read, + .atomic_enable = cdn_dp_bridge_atomic_enable, + .atomic_disable = cdn_dp_bridge_atomic_disable, + .mode_valid = cdn_dp_bridge_mode_valid, + .mode_set = cdn_dp_bridge_mode_set, - return 0; -} - -static const struct hdmi_codec_ops audio_codec_ops = { - .hw_params = cdn_dp_audio_hw_params, - .audio_shutdown = cdn_dp_audio_shutdown, - .mute_stream = cdn_dp_audio_mute_stream, - .get_eld = cdn_dp_audio_get_eld, - .hook_plugged_cb = cdn_dp_audio_hook_plugged_cb, + .dp_audio_prepare = cdn_dp_audio_prepare, + .dp_audio_mute_stream = cdn_dp_audio_mute_stream, + .dp_audio_shutdown = cdn_dp_audio_shutdown, }; -static int cdn_dp_audio_codec_init(struct cdn_dp_device *dp, - struct device *dev) -{ - struct hdmi_codec_pdata codec_data = { - .i2s = 1, - .spdif = 1, - .ops = &audio_codec_ops, - .max_i2s_channels = 8, - .no_capture_mute = 1, - }; - - dp->audio_pdev = platform_device_register_data( - dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO, - &codec_data, sizeof(codec_data)); - - return PTR_ERR_OR_ZERO(dp->audio_pdev); -} - static int cdn_dp_request_firmware(struct 
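/*
 * Illustrative sketch, not from the patch: registering DisplayPort
 * audio through drm_bridge_funcs, matching the dp_audio_* callbacks
 * wired up above. The example_audio_* handlers are assumed to be
 * defined elsewhere with the signatures shown in the hunk.
 */
static int example_audio_prepare(struct drm_bridge *bridge,
				 struct drm_connector *connector,
				 struct hdmi_codec_daifmt *daifmt,
				 struct hdmi_codec_params *params);
static int example_audio_mute_stream(struct drm_bridge *bridge,
				     struct drm_connector *connector,
				     bool enable, int direction);
static void example_audio_shutdown(struct drm_bridge *bridge,
				   struct drm_connector *connector);

static const struct drm_bridge_funcs example_bridge_funcs = {
	.atomic_duplicate_state	= drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_bridge_destroy_state,
	.atomic_reset		= drm_atomic_helper_bridge_reset,
	.dp_audio_prepare	= example_audio_prepare,
	.dp_audio_mute_stream	= example_audio_mute_stream,
	.dp_audio_shutdown	= example_audio_shutdown,
};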
cdn_dp_device *dp) { int ret; @@ -1006,7 +938,9 @@ static void cdn_dp_pd_event_work(struct work_struct *work) out: mutex_unlock(&dp->lock); - drm_connector_helper_hpd_irq_event(&dp->connector); + drm_bridge_hpd_notify(&dp->bridge, + dp->connected ? connector_status_connected + : connector_status_disconnected); } static int cdn_dp_pd_event(struct notifier_block *nb, @@ -1062,26 +996,35 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data) drm_encoder_helper_add(encoder, &cdn_dp_encoder_helper_funcs); - connector = &dp->connector; - connector->polled = DRM_CONNECTOR_POLL_HPD; - connector->dpms = DRM_MODE_DPMS_OFF; + dp->bridge.ops = + DRM_BRIDGE_OP_DETECT | + DRM_BRIDGE_OP_EDID | + DRM_BRIDGE_OP_HPD | + DRM_BRIDGE_OP_DP_AUDIO; + dp->bridge.of_node = dp->dev->of_node; + dp->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; + dp->bridge.hdmi_audio_dev = dp->dev; + dp->bridge.hdmi_audio_max_i2s_playback_channels = 8; + dp->bridge.hdmi_audio_spdif_playback = 1; + dp->bridge.hdmi_audio_dai_port = -1; - ret = drm_connector_init(drm_dev, connector, - &cdn_dp_atomic_connector_funcs, - DRM_MODE_CONNECTOR_DisplayPort); - if (ret) { - DRM_ERROR("failed to initialize connector with drm\n"); - goto err_free_encoder; - } + ret = devm_drm_bridge_add(dev, &dp->bridge); + if (ret) + return ret; - drm_connector_helper_add(connector, &cdn_dp_connector_helper_funcs); + ret = drm_bridge_attach(encoder, &dp->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + return ret; - ret = drm_connector_attach_encoder(connector, encoder); - if (ret) { - DRM_ERROR("failed to attach connector and encoder\n"); - goto err_free_connector; + connector = drm_bridge_connector_init(drm_dev, encoder); + if (IS_ERR(connector)) { + ret = PTR_ERR(connector); + dev_err(dp->dev, "failed to init bridge connector: %d\n", ret); + return ret; } + drm_connector_attach_encoder(connector, encoder); + for (i = 0; i < dp->ports; i++) { port = dp->port[i]; @@ -1092,7 +1035,7 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data) if (ret) { DRM_DEV_ERROR(dev, "register EXTCON_DISP_DP notifier err\n"); - goto err_free_connector; + return ret; } } @@ -1101,30 +1044,19 @@ static int cdn_dp_bind(struct device *dev, struct device *master, void *data) schedule_work(&dp->event_work); return 0; - -err_free_connector: - drm_connector_cleanup(connector); -err_free_encoder: - drm_encoder_cleanup(encoder); - return ret; } static void cdn_dp_unbind(struct device *dev, struct device *master, void *data) { struct cdn_dp_device *dp = dev_get_drvdata(dev); struct drm_encoder *encoder = &dp->encoder.encoder; - struct drm_connector *connector = &dp->connector; cancel_work_sync(&dp->event_work); - cdn_dp_encoder_disable(encoder); encoder->funcs->destroy(encoder); - connector->funcs->destroy(connector); pm_runtime_disable(dev); if (dp->fw_loaded) release_firmware(dp->fw); - drm_edid_free(dp->drm_edid); - dp->drm_edid = NULL; } static const struct component_ops cdn_dp_component_ops = { @@ -1171,9 +1103,10 @@ static int cdn_dp_probe(struct platform_device *pdev) int ret; int i; - dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); - if (!dp) - return -ENOMEM; + dp = devm_drm_bridge_alloc(dev, struct cdn_dp_device, bridge, + &cdn_dp_bridge_funcs); + if (IS_ERR(dp)) + return PTR_ERR(dp); dp->dev = dev; match = of_match_node(cdn_dp_dt_ids, pdev->dev.of_node); @@ -1209,19 +1142,11 @@ static int cdn_dp_probe(struct platform_device *pdev) mutex_init(&dp->lock); dev_set_drvdata(dev, dp); - ret = cdn_dp_audio_codec_init(dp, 
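/*
 * Illustrative sketch, not from the patch: the attach sequence the bind
 * hunk above adopts. The bridge is attached without creating its own
 * connector, then drm_bridge_connector_init() builds a generic
 * connector from the bridge chain.
 */
static int example_bind(struct drm_device *drm, struct drm_encoder *encoder,
			struct drm_bridge *bridge)
{
	struct drm_connector *connector;
	int ret;

	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	return drm_connector_attach_encoder(connector, encoder);
}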
dev); - if (ret) - return ret; - ret = component_add(dev, &cdn_dp_component_ops); if (ret) - goto err_audio_deinit; + return ret; return 0; - -err_audio_deinit: - platform_device_unregister(dp->audio_pdev); - return ret; } static void cdn_dp_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h index 17498f576ce7..e9c30b9fd543 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.h +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h @@ -8,6 +8,7 @@ #define _CDN_DP_CORE_H #include <drm/display/drm_dp_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> #include <sound/hdmi-codec.h> @@ -65,12 +66,11 @@ struct cdn_dp_port { struct cdn_dp_device { struct device *dev; struct drm_device *drm_dev; - struct drm_connector connector; + struct drm_bridge bridge; struct rockchip_encoder encoder; struct drm_display_mode mode; struct platform_device *audio_pdev; struct work_struct event_work; - const struct drm_edid *drm_edid; struct mutex lock; bool connected; @@ -101,9 +101,5 @@ struct cdn_dp_device { int active_port; u8 dpcd[DP_RECEIVER_CAP_SIZE]; - bool sink_has_audio; - - hdmi_codec_plugged_cb plugged_cb; - struct device *codec_dev; }; #endif /* _CDN_DP_CORE_H */ diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index f737e7d46e66..acb59b25d928 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -213,17 +213,13 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi) if (IS_ERR(hdmi->ref_clk)) { ret = PTR_ERR(hdmi->ref_clk); - if (ret != -EPROBE_DEFER) - dev_err(hdmi->dev, "failed to get reference clock\n"); - return ret; + return dev_err_probe(hdmi->dev, ret, "failed to get reference clock\n"); } hdmi->grf_clk = devm_clk_get_optional(hdmi->dev, "grf"); if (IS_ERR(hdmi->grf_clk)) { ret = PTR_ERR(hdmi->grf_clk); - if (ret != -EPROBE_DEFER) - dev_err(hdmi->dev, "failed to get grf clock\n"); - return ret; + return dev_err_probe(hdmi->dev, ret, "failed to get grf clock\n"); } ret = devm_regulator_get_enable(hdmi->dev, "avdd-0v9"); @@ -573,17 +569,13 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, ret = rockchip_hdmi_parse_dt(hdmi); if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(hdmi->dev, "Unable to parse OF data\n"); - return ret; + return dev_err_probe(hdmi->dev, ret, "Unable to parse OF data\n"); } hdmi->phy = devm_phy_optional_get(dev, "hdmi"); if (IS_ERR(hdmi->phy)) { ret = PTR_ERR(hdmi->phy); - if (ret != -EPROBE_DEFER) - dev_err(hdmi->dev, "failed to get phy\n"); - return ret; + return dev_err_probe(hdmi->dev, ret, "failed to get phy\n"); } if (hdmi->phy) { diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c index db4b4038e51d..1ab3ad4bde9e 100644 --- a/drivers/gpu/drm/rockchip/inno_hdmi.c +++ b/drivers/gpu/drm/rockchip/inno_hdmi.c @@ -29,11 +29,360 @@ #include "rockchip_drm_drv.h" -#include "inno_hdmi.h" +#define INNO_HDMI_MIN_TMDS_CLOCK 25000000U -#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16) +#define DDC_SEGMENT_ADDR 0x30 -#define INNO_HDMI_MIN_TMDS_CLOCK 25000000U +#define HDMI_SCL_RATE (100 * 1000) + +#define DDC_BUS_FREQ_L 0x4b +#define DDC_BUS_FREQ_H 0x4c + +#define HDMI_SYS_CTRL 0x00 +#define m_RST_ANALOG BIT(6) +#define v_RST_ANALOG (0 << 6) +#define v_NOT_RST_ANALOG BIT(6) +#define m_RST_DIGITAL BIT(5) +#define v_RST_DIGITAL (0 << 5) +#define v_NOT_RST_DIGITAL BIT(5) +#define 
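/*
 * Illustrative sketch, not from the patch: the dev_err_probe()
 * conversion applied in the dw_hdmi-rockchip hunks above. It stays
 * silent on -EPROBE_DEFER (recording the deferral reason instead),
 * logs real errors, and returns the error code in one statement.
 */
static int example_get_ref_clk(struct device *dev)
{
	struct clk *clk = devm_clk_get(dev, "ref");

	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk),
				     "failed to get reference clock\n");

	return 0;
}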
m_REG_CLK_INV BIT(4) +#define v_REG_CLK_NOT_INV (0 << 4) +#define v_REG_CLK_INV BIT(4) +#define m_VCLK_INV BIT(3) +#define v_VCLK_NOT_INV (0 << 3) +#define v_VCLK_INV BIT(3) +#define m_REG_CLK_SOURCE BIT(2) +#define v_REG_CLK_SOURCE_TMDS (0 << 2) +#define v_REG_CLK_SOURCE_SYS BIT(2) +#define m_POWER BIT(1) +#define v_PWR_ON (0 << 1) +#define v_PWR_OFF BIT(1) +#define m_INT_POL BIT(0) +#define v_INT_POL_HIGH 1 +#define v_INT_POL_LOW 0 + +#define HDMI_VIDEO_CONTRL1 0x01 +#define m_VIDEO_INPUT_FORMAT (7 << 1) +#define m_DE_SOURCE BIT(0) +#define v_VIDEO_INPUT_FORMAT(n) ((n) << 1) +#define v_DE_EXTERNAL 1 +#define v_DE_INTERNAL 0 +enum { + VIDEO_INPUT_SDR_RGB444 = 0, + VIDEO_INPUT_DDR_RGB444 = 5, + VIDEO_INPUT_DDR_YCBCR422 = 6 +}; + +#define HDMI_VIDEO_CONTRL2 0x02 +#define m_VIDEO_OUTPUT_COLOR (3 << 6) +#define m_VIDEO_INPUT_BITS (3 << 4) +#define m_VIDEO_INPUT_CSP BIT(0) +#define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6) +#define v_VIDEO_INPUT_BITS(n) ((n) << 4) +#define v_VIDEO_INPUT_CSP(n) ((n) << 0) +enum { + VIDEO_INPUT_12BITS = 0, + VIDEO_INPUT_10BITS = 1, + VIDEO_INPUT_REVERT = 2, + VIDEO_INPUT_8BITS = 3, +}; + +#define HDMI_VIDEO_CONTRL 0x03 +#define m_VIDEO_AUTO_CSC BIT(7) +#define v_VIDEO_AUTO_CSC(n) ((n) << 7) +#define m_VIDEO_C0_C2_SWAP BIT(0) +#define v_VIDEO_C0_C2_SWAP(n) ((n) << 0) +enum { + C0_C2_CHANGE_ENABLE = 0, + C0_C2_CHANGE_DISABLE = 1, + AUTO_CSC_DISABLE = 0, + AUTO_CSC_ENABLE = 1, +}; + +#define HDMI_VIDEO_CONTRL3 0x04 +#define m_COLOR_DEPTH_NOT_INDICATED BIT(4) +#define m_SOF BIT(3) +#define m_COLOR_RANGE BIT(2) +#define m_CSC BIT(0) +#define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4) +#define v_SOF_ENABLE (0 << 3) +#define v_SOF_DISABLE BIT(3) +#define v_COLOR_RANGE_FULL BIT(2) +#define v_COLOR_RANGE_LIMITED (0 << 2) +#define v_CSC_ENABLE 1 +#define v_CSC_DISABLE 0 + +#define HDMI_AV_MUTE 0x05 +#define m_AVMUTE_CLEAR BIT(7) +#define m_AVMUTE_ENABLE BIT(6) +#define m_AUDIO_MUTE BIT(1) +#define m_VIDEO_BLACK BIT(0) +#define v_AVMUTE_CLEAR(n) ((n) << 7) +#define v_AVMUTE_ENABLE(n) ((n) << 6) +#define v_AUDIO_MUTE(n) ((n) << 1) +#define v_VIDEO_MUTE(n) ((n) << 0) + +#define HDMI_VIDEO_TIMING_CTL 0x08 +#define v_HSYNC_POLARITY(n) ((n) << 3) +#define v_VSYNC_POLARITY(n) ((n) << 2) +#define v_INETLACE(n) ((n) << 1) +#define v_EXTERANL_VIDEO(n) ((n) << 0) + +#define HDMI_VIDEO_EXT_HTOTAL_L 0x09 +#define HDMI_VIDEO_EXT_HTOTAL_H 0x0a +#define HDMI_VIDEO_EXT_HBLANK_L 0x0b +#define HDMI_VIDEO_EXT_HBLANK_H 0x0c +#define HDMI_VIDEO_EXT_HDELAY_L 0x0d +#define HDMI_VIDEO_EXT_HDELAY_H 0x0e +#define HDMI_VIDEO_EXT_HDURATION_L 0x0f +#define HDMI_VIDEO_EXT_HDURATION_H 0x10 +#define HDMI_VIDEO_EXT_VTOTAL_L 0x11 +#define HDMI_VIDEO_EXT_VTOTAL_H 0x12 +#define HDMI_VIDEO_EXT_VBLANK 0x13 +#define HDMI_VIDEO_EXT_VDELAY 0x14 +#define HDMI_VIDEO_EXT_VDURATION 0x15 + +#define HDMI_VIDEO_CSC_COEF 0x18 + +#define HDMI_AUDIO_CTRL1 0x35 +enum { + CTS_SOURCE_INTERNAL = 0, + CTS_SOURCE_EXTERNAL = 1, +}; + +#define v_CTS_SOURCE(n) ((n) << 7) + +enum { + DOWNSAMPLE_DISABLE = 0, + DOWNSAMPLE_1_2 = 1, + DOWNSAMPLE_1_4 = 2, +}; + +#define v_DOWN_SAMPLE(n) ((n) << 5) + +enum { + AUDIO_SOURCE_IIS = 0, + AUDIO_SOURCE_SPDIF = 1, +}; + +#define v_AUDIO_SOURCE(n) ((n) << 3) + +#define v_MCLK_ENABLE(n) ((n) << 2) + +enum { + MCLK_128FS = 0, + MCLK_256FS = 1, + MCLK_384FS = 2, + MCLK_512FS = 3, +}; + +#define v_MCLK_RATIO(n) (n) + +#define AUDIO_SAMPLE_RATE 0x37 + +enum { + AUDIO_32K = 0x3, + AUDIO_441K = 0x0, + AUDIO_48K = 0x2, + AUDIO_882K = 0x8, + AUDIO_96K = 0xa, + AUDIO_1764K = 0xc, + AUDIO_192K = 0xe, +}; + 
+#define AUDIO_I2S_MODE 0x38 + +enum { + I2S_CHANNEL_1_2 = 1, + I2S_CHANNEL_3_4 = 3, + I2S_CHANNEL_5_6 = 7, + I2S_CHANNEL_7_8 = 0xf +}; + +#define v_I2S_CHANNEL(n) ((n) << 2) + +enum { + I2S_STANDARD = 0, + I2S_LEFT_JUSTIFIED = 1, + I2S_RIGHT_JUSTIFIED = 2, +}; + +#define v_I2S_MODE(n) (n) + +#define AUDIO_I2S_MAP 0x39 +#define AUDIO_I2S_SWAPS_SPDIF 0x3a +#define v_SPIDF_FREQ(n) (n) + +#define N_32K 0x1000 +#define N_441K 0x1880 +#define N_882K 0x3100 +#define N_1764K 0x6200 +#define N_48K 0x1800 +#define N_96K 0x3000 +#define N_192K 0x6000 + +#define HDMI_AUDIO_CHANNEL_STATUS 0x3e +#define m_AUDIO_STATUS_NLPCM BIT(7) +#define m_AUDIO_STATUS_USE BIT(6) +#define m_AUDIO_STATUS_COPYRIGHT BIT(5) +#define m_AUDIO_STATUS_ADDITION (3 << 2) +#define m_AUDIO_STATUS_CLK_ACCURACY (2 << 0) +#define v_AUDIO_STATUS_NLPCM(n) (((n) & 1) << 7) +#define AUDIO_N_H 0x3f +#define AUDIO_N_M 0x40 +#define AUDIO_N_L 0x41 + +#define HDMI_AUDIO_CTS_H 0x45 +#define HDMI_AUDIO_CTS_M 0x46 +#define HDMI_AUDIO_CTS_L 0x47 + +#define HDMI_DDC_CLK_L 0x4b +#define HDMI_DDC_CLK_H 0x4c + +#define HDMI_EDID_SEGMENT_POINTER 0x4d +#define HDMI_EDID_WORD_ADDR 0x4e +#define HDMI_EDID_FIFO_OFFSET 0x4f +#define HDMI_EDID_FIFO_ADDR 0x50 + +#define HDMI_PACKET_SEND_MANUAL 0x9c +#define HDMI_PACKET_SEND_AUTO 0x9d +#define m_PACKET_GCP_EN BIT(7) +#define m_PACKET_MSI_EN BIT(6) +#define m_PACKET_SDI_EN BIT(5) +#define m_PACKET_VSI_EN BIT(4) +#define v_PACKET_GCP_EN(n) (((n) & 1) << 7) +#define v_PACKET_MSI_EN(n) (((n) & 1) << 6) +#define v_PACKET_SDI_EN(n) (((n) & 1) << 5) +#define v_PACKET_VSI_EN(n) (((n) & 1) << 4) + +#define HDMI_CONTROL_PACKET_BUF_INDEX 0x9f + +enum { + INFOFRAME_VSI = 0x05, + INFOFRAME_AVI = 0x06, + INFOFRAME_AAI = 0x08, +}; + +#define HDMI_CONTROL_PACKET_ADDR 0xa0 +#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11 + +enum { + AVI_COLOR_MODE_RGB = 0, + AVI_COLOR_MODE_YCBCR422 = 1, + AVI_COLOR_MODE_YCBCR444 = 2, + AVI_COLORIMETRY_NO_DATA = 0, + + AVI_COLORIMETRY_SMPTE_170M = 1, + AVI_COLORIMETRY_ITU709 = 2, + AVI_COLORIMETRY_EXTENDED = 3, + + AVI_CODED_FRAME_ASPECT_NO_DATA = 0, + AVI_CODED_FRAME_ASPECT_4_3 = 1, + AVI_CODED_FRAME_ASPECT_16_9 = 2, + + ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08, + ACTIVE_ASPECT_RATE_4_3 = 0x09, + ACTIVE_ASPECT_RATE_16_9 = 0x0A, + ACTIVE_ASPECT_RATE_14_9 = 0x0B, +}; + +#define HDMI_HDCP_CTRL 0x52 +#define m_HDMI_DVI BIT(1) +#define v_HDMI_DVI(n) ((n) << 1) + +#define HDMI_INTERRUPT_MASK1 0xc0 +#define HDMI_INTERRUPT_STATUS1 0xc1 +#define m_INT_ACTIVE_VSYNC BIT(5) +#define m_INT_EDID_READY BIT(2) + +#define HDMI_INTERRUPT_MASK2 0xc2 +#define HDMI_INTERRUPT_STATUS2 0xc3 +#define m_INT_HDCP_ERR BIT(7) +#define m_INT_BKSV_FLAG BIT(6) +#define m_INT_HDCP_OK BIT(4) + +#define HDMI_STATUS 0xc8 +#define m_HOTPLUG BIT(7) +#define m_MASK_INT_HOTPLUG BIT(5) +#define m_INT_HOTPLUG BIT(1) +#define v_MASK_INT_HOTPLUG(n) (((n) & 0x1) << 5) + +#define HDMI_COLORBAR 0xc9 + +#define HDMI_PHY_SYNC 0xce +#define HDMI_PHY_SYS_CTL 0xe0 +#define m_TMDS_CLK_SOURCE BIT(5) +#define v_TMDS_FROM_PLL (0 << 5) +#define v_TMDS_FROM_GEN BIT(5) +#define m_PHASE_CLK BIT(4) +#define v_DEFAULT_PHASE (0 << 4) +#define v_SYNC_PHASE BIT(4) +#define m_TMDS_CURRENT_PWR BIT(3) +#define v_TURN_ON_CURRENT (0 << 3) +#define v_CAT_OFF_CURRENT BIT(3) +#define m_BANDGAP_PWR BIT(2) +#define v_BANDGAP_PWR_UP (0 << 2) +#define v_BANDGAP_PWR_DOWN BIT(2) +#define m_PLL_PWR BIT(1) +#define v_PLL_PWR_UP (0 << 1) +#define v_PLL_PWR_DOWN BIT(1) +#define m_TMDS_CHG_PWR BIT(0) +#define v_TMDS_CHG_PWR_UP (0 << 0) +#define v_TMDS_CHG_PWR_DOWN BIT(0) + 
+#define HDMI_PHY_CHG_PWR 0xe1 +#define v_CLK_CHG_PWR(n) (((n) & 1) << 3) +#define v_DATA_CHG_PWR(n) (((n) & 7) << 0) + +#define HDMI_PHY_DRIVER 0xe2 +#define v_CLK_MAIN_DRIVER(n) ((n) << 4) +#define v_DATA_MAIN_DRIVER(n) ((n) << 0) + +#define HDMI_PHY_PRE_EMPHASIS 0xe3 +#define v_PRE_EMPHASIS(n) (((n) & 7) << 4) +#define v_CLK_PRE_DRIVER(n) (((n) & 3) << 2) +#define v_DATA_PRE_DRIVER(n) (((n) & 3) << 0) + +#define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW 0xe7 +#define v_FEEDBACK_DIV_LOW(n) ((n) & 0xff) +#define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH 0xe8 +#define v_FEEDBACK_DIV_HIGH(n) ((n) & 1) + +#define HDMI_PHY_PRE_DIV_RATIO 0xed +#define v_PRE_DIV_RATIO(n) ((n) & 0x1f) + +#define HDMI_CEC_CTRL 0xd0 +#define m_ADJUST_FOR_HISENSE BIT(6) +#define m_REJECT_RX_BROADCAST BIT(5) +#define m_BUSFREETIME_ENABLE BIT(2) +#define m_REJECT_RX BIT(1) +#define m_START_TX BIT(0) + +#define HDMI_CEC_DATA 0xd1 +#define HDMI_CEC_TX_OFFSET 0xd2 +#define HDMI_CEC_RX_OFFSET 0xd3 +#define HDMI_CEC_CLK_H 0xd4 +#define HDMI_CEC_CLK_L 0xd5 +#define HDMI_CEC_TX_LENGTH 0xd6 +#define HDMI_CEC_RX_LENGTH 0xd7 +#define HDMI_CEC_TX_INT_MASK 0xd8 +#define m_TX_DONE BIT(3) +#define m_TX_NOACK BIT(2) +#define m_TX_BROADCAST_REJ BIT(1) +#define m_TX_BUSNOTFREE BIT(0) + +#define HDMI_CEC_RX_INT_MASK 0xd9 +#define m_RX_LA_ERR BIT(4) +#define m_RX_GLITCH BIT(3) +#define m_RX_DONE BIT(0) + +#define HDMI_CEC_TX_INT 0xda +#define HDMI_CEC_RX_INT 0xdb +#define HDMI_CEC_BUSFREETIME_L 0xdc +#define HDMI_CEC_BUSFREETIME_H 0xdd +#define HDMI_CEC_LOGICADDR 0xde + +#define HIWORD_UPDATE(val, mask) ((val) | (mask) << 16) #define RK3036_GRF_SOC_CON2 0x148 #define RK3036_HDMI_PHSYNC BIT(4) @@ -255,22 +604,37 @@ static void inno_hdmi_power_up(struct inno_hdmi *hdmi, inno_hdmi_sys_power(hdmi, true); }; -static void inno_hdmi_reset(struct inno_hdmi *hdmi) +static void inno_hdmi_init_hw(struct inno_hdmi *hdmi) { u32 val; u32 msk; hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_DIGITAL, v_NOT_RST_DIGITAL); - udelay(100); + usleep_range(100, 150); hdmi_modb(hdmi, HDMI_SYS_CTRL, m_RST_ANALOG, v_NOT_RST_ANALOG); - udelay(100); + usleep_range(100, 150); msk = m_REG_CLK_INV | m_REG_CLK_SOURCE | m_POWER | m_INT_POL; val = v_REG_CLK_INV | v_REG_CLK_SOURCE_SYS | v_PWR_ON | v_INT_POL_HIGH; hdmi_modb(hdmi, HDMI_SYS_CTRL, msk, val); inno_hdmi_standby(hdmi); + + /* + * When the controller isn't configured to an accurate + * video timing and there is no reference clock available, + * then the TMDS clock source would be switched to PCLK_HDMI, + * so we need to init the TMDS rate to PCLK rate, and + * reconfigure the DDC clock. + */ + if (hdmi->refclk) + inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->refclk)); + else + inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->pclk)); + + /* Unmute hotplug interrupt */ + hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1)); } static int inno_hdmi_disable_frame(struct drm_connector *connector, @@ -775,8 +1139,7 @@ static int inno_hdmi_i2c_write(struct inno_hdmi *hdmi, struct i2c_msg *msgs) * we assume that each word write to this i2c adapter * should be the offset of EDID word address. 
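/*
 * Illustrative sketch, not from the patch: the Rockchip "hiword mask"
 * write convention behind HIWORD_UPDATE() above. The top 16 bits of the
 * written word select which of the low 16 bits the GRF actually
 * latches, so no read-modify-write cycle is needed. The register and
 * bit used below are hypothetical.
 */
#define EXAMPLE_HIWORD_UPDATE(val, mask)	((val) | (mask) << 16)

static void example_grf_set_bit4(struct regmap *grf, u32 reg, bool on)
{
	/* touches only bit 4; every other bit keeps its current value */
	regmap_write(grf, reg, EXAMPLE_HIWORD_UPDATE(on ? BIT(4) : 0, BIT(4)));
}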
*/ - if ((msgs->len != 1) || - ((msgs->addr != DDC_ADDR) && (msgs->addr != DDC_SEGMENT_ADDR))) + if (msgs->len != 1 || (msgs->addr != DDC_ADDR && msgs->addr != DDC_SEGMENT_ADDR)) return -EINVAL; reinit_completion(&hdmi->i2c->cmp); @@ -867,10 +1230,9 @@ static struct i2c_adapter *inno_hdmi_i2c_adapter(struct inno_hdmi *hdmi) strscpy(adap->name, "Inno HDMI", sizeof(adap->name)); i2c_set_adapdata(adap, hdmi); - ret = i2c_add_adapter(adap); + ret = devm_i2c_add_adapter(hdmi->dev, adap); if (ret) { dev_warn(hdmi->dev, "cannot add %s I2C adapter\n", adap->name); - devm_kfree(hdmi->dev, i2c); return ERR_PTR(ret); } @@ -907,71 +1269,37 @@ static int inno_hdmi_bind(struct device *dev, struct device *master, if (IS_ERR(hdmi->regs)) return PTR_ERR(hdmi->regs); - hdmi->pclk = devm_clk_get(hdmi->dev, "pclk"); + hdmi->pclk = devm_clk_get_enabled(hdmi->dev, "pclk"); if (IS_ERR(hdmi->pclk)) return dev_err_probe(dev, PTR_ERR(hdmi->pclk), "Unable to get HDMI pclk\n"); - ret = clk_prepare_enable(hdmi->pclk); - if (ret) - return dev_err_probe(dev, ret, "Cannot enable HDMI pclk: %d\n", ret); - - hdmi->refclk = devm_clk_get_optional(hdmi->dev, "ref"); - if (IS_ERR(hdmi->refclk)) { - ret = dev_err_probe(dev, PTR_ERR(hdmi->refclk), "Unable to get HDMI refclk\n"); - goto err_disable_pclk; - } - - ret = clk_prepare_enable(hdmi->refclk); - if (ret) { - ret = dev_err_probe(dev, ret, "Cannot enable HDMI refclk: %d\n", ret); - goto err_disable_pclk; - } + hdmi->refclk = devm_clk_get_optional_enabled(hdmi->dev, "ref"); + if (IS_ERR(hdmi->refclk)) + return dev_err_probe(dev, PTR_ERR(hdmi->refclk), "Unable to get HDMI refclk\n"); if (hdmi->variant->dev_type == RK3036_HDMI) { hdmi->grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); - if (IS_ERR(hdmi->grf)) { - ret = dev_err_probe(dev, PTR_ERR(hdmi->grf), - "Unable to get rockchip,grf\n"); - goto err_disable_clk; - } + if (IS_ERR(hdmi->grf)) + return dev_err_probe(dev, + PTR_ERR(hdmi->grf), "Unable to get rockchip,grf\n"); } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - ret = irq; - goto err_disable_clk; - } + if (irq < 0) + return irq; - inno_hdmi_reset(hdmi); + inno_hdmi_init_hw(hdmi); hdmi->ddc = inno_hdmi_i2c_adapter(hdmi); - if (IS_ERR(hdmi->ddc)) { - ret = PTR_ERR(hdmi->ddc); - hdmi->ddc = NULL; - goto err_disable_clk; - } - - /* - * When the controller isn't configured to an accurate - * video timing and there is no reference clock available, - * then the TMDS clock source would be switched to PCLK_HDMI, - * so we need to init the TMDS rate to PCLK rate, and - * reconfigure the DDC clock. 
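/*
 * Illustrative sketch, not from the patch: the devm conversions in the
 * inno_hdmi hunks above. devm_clk_get_enabled() folds devm_clk_get() +
 * clk_prepare_enable() plus all the error-path and unbind cleanup into
 * one call; devm_i2c_add_adapter() does the same for the DDC adapter.
 */
static int example_get_pclk(struct device *dev)
{
	struct clk *pclk = devm_clk_get_enabled(dev, "pclk");

	if (IS_ERR(pclk))
		return dev_err_probe(dev, PTR_ERR(pclk),
				     "Unable to get pclk\n");

	/* disabled, unprepared and put automatically on driver detach */
	return 0;
}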
- */ - if (hdmi->refclk) - inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->refclk)); - else - inno_hdmi_i2c_init(hdmi, clk_get_rate(hdmi->pclk)); + if (IS_ERR(hdmi->ddc)) + return PTR_ERR(hdmi->ddc); ret = inno_hdmi_register(drm, hdmi); if (ret) - goto err_put_adapter; + return ret; dev_set_drvdata(dev, hdmi); - /* Unmute hotplug interrupt */ - hdmi_modb(hdmi, HDMI_STATUS, m_MASK_INT_HOTPLUG, v_MASK_INT_HOTPLUG(1)); - ret = devm_request_threaded_irq(dev, irq, inno_hdmi_hardirq, inno_hdmi_irq, IRQF_SHARED, dev_name(dev), hdmi); @@ -982,12 +1310,6 @@ static int inno_hdmi_bind(struct device *dev, struct device *master, err_cleanup_hdmi: hdmi->connector.funcs->destroy(&hdmi->connector); hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder); -err_put_adapter: - i2c_put_adapter(hdmi->ddc); -err_disable_clk: - clk_disable_unprepare(hdmi->refclk); -err_disable_pclk: - clk_disable_unprepare(hdmi->pclk); return ret; } @@ -998,10 +1320,6 @@ static void inno_hdmi_unbind(struct device *dev, struct device *master, hdmi->connector.funcs->destroy(&hdmi->connector); hdmi->encoder.encoder.funcs->destroy(&hdmi->encoder.encoder); - - i2c_put_adapter(hdmi->ddc); - clk_disable_unprepare(hdmi->refclk); - clk_disable_unprepare(hdmi->pclk); } static const struct component_ops inno_hdmi_ops = { diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.h b/drivers/gpu/drm/rockchip/inno_hdmi.h deleted file mode 100644 index 8b7ef3fac485..000000000000 --- a/drivers/gpu/drm/rockchip/inno_hdmi.h +++ /dev/null @@ -1,349 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) Rockchip Electronics Co., Ltd. - * Zheng Yang <[email protected]> - * Yakir Yang <[email protected]> - */ - -#ifndef __INNO_HDMI_H__ -#define __INNO_HDMI_H__ - -#define DDC_SEGMENT_ADDR 0x30 - -#define HDMI_SCL_RATE (100*1000) -#define DDC_BUS_FREQ_L 0x4b -#define DDC_BUS_FREQ_H 0x4c - -#define HDMI_SYS_CTRL 0x00 -#define m_RST_ANALOG (1 << 6) -#define v_RST_ANALOG (0 << 6) -#define v_NOT_RST_ANALOG (1 << 6) -#define m_RST_DIGITAL (1 << 5) -#define v_RST_DIGITAL (0 << 5) -#define v_NOT_RST_DIGITAL (1 << 5) -#define m_REG_CLK_INV (1 << 4) -#define v_REG_CLK_NOT_INV (0 << 4) -#define v_REG_CLK_INV (1 << 4) -#define m_VCLK_INV (1 << 3) -#define v_VCLK_NOT_INV (0 << 3) -#define v_VCLK_INV (1 << 3) -#define m_REG_CLK_SOURCE (1 << 2) -#define v_REG_CLK_SOURCE_TMDS (0 << 2) -#define v_REG_CLK_SOURCE_SYS (1 << 2) -#define m_POWER (1 << 1) -#define v_PWR_ON (0 << 1) -#define v_PWR_OFF (1 << 1) -#define m_INT_POL (1 << 0) -#define v_INT_POL_HIGH 1 -#define v_INT_POL_LOW 0 - -#define HDMI_VIDEO_CONTRL1 0x01 -#define m_VIDEO_INPUT_FORMAT (7 << 1) -#define m_DE_SOURCE (1 << 0) -#define v_VIDEO_INPUT_FORMAT(n) (n << 1) -#define v_DE_EXTERNAL 1 -#define v_DE_INTERNAL 0 -enum { - VIDEO_INPUT_SDR_RGB444 = 0, - VIDEO_INPUT_DDR_RGB444 = 5, - VIDEO_INPUT_DDR_YCBCR422 = 6 -}; - -#define HDMI_VIDEO_CONTRL2 0x02 -#define m_VIDEO_OUTPUT_COLOR (3 << 6) -#define m_VIDEO_INPUT_BITS (3 << 4) -#define m_VIDEO_INPUT_CSP (1 << 0) -#define v_VIDEO_OUTPUT_COLOR(n) (((n) & 0x3) << 6) -#define v_VIDEO_INPUT_BITS(n) (n << 4) -#define v_VIDEO_INPUT_CSP(n) (n << 0) -enum { - VIDEO_INPUT_12BITS = 0, - VIDEO_INPUT_10BITS = 1, - VIDEO_INPUT_REVERT = 2, - VIDEO_INPUT_8BITS = 3, -}; - -#define HDMI_VIDEO_CONTRL 0x03 -#define m_VIDEO_AUTO_CSC (1 << 7) -#define v_VIDEO_AUTO_CSC(n) (n << 7) -#define m_VIDEO_C0_C2_SWAP (1 << 0) -#define v_VIDEO_C0_C2_SWAP(n) (n << 0) -enum { - C0_C2_CHANGE_ENABLE = 0, - C0_C2_CHANGE_DISABLE = 1, - AUTO_CSC_DISABLE = 0, - AUTO_CSC_ENABLE = 1, -}; - 
-#define HDMI_VIDEO_CONTRL3 0x04 -#define m_COLOR_DEPTH_NOT_INDICATED (1 << 4) -#define m_SOF (1 << 3) -#define m_COLOR_RANGE (1 << 2) -#define m_CSC (1 << 0) -#define v_COLOR_DEPTH_NOT_INDICATED(n) ((n) << 4) -#define v_SOF_ENABLE (0 << 3) -#define v_SOF_DISABLE (1 << 3) -#define v_COLOR_RANGE_FULL (1 << 2) -#define v_COLOR_RANGE_LIMITED (0 << 2) -#define v_CSC_ENABLE 1 -#define v_CSC_DISABLE 0 - -#define HDMI_AV_MUTE 0x05 -#define m_AVMUTE_CLEAR (1 << 7) -#define m_AVMUTE_ENABLE (1 << 6) -#define m_AUDIO_MUTE (1 << 1) -#define m_VIDEO_BLACK (1 << 0) -#define v_AVMUTE_CLEAR(n) (n << 7) -#define v_AVMUTE_ENABLE(n) (n << 6) -#define v_AUDIO_MUTE(n) (n << 1) -#define v_VIDEO_MUTE(n) (n << 0) - -#define HDMI_VIDEO_TIMING_CTL 0x08 -#define v_HSYNC_POLARITY(n) (n << 3) -#define v_VSYNC_POLARITY(n) (n << 2) -#define v_INETLACE(n) (n << 1) -#define v_EXTERANL_VIDEO(n) (n << 0) - -#define HDMI_VIDEO_EXT_HTOTAL_L 0x09 -#define HDMI_VIDEO_EXT_HTOTAL_H 0x0a -#define HDMI_VIDEO_EXT_HBLANK_L 0x0b -#define HDMI_VIDEO_EXT_HBLANK_H 0x0c -#define HDMI_VIDEO_EXT_HDELAY_L 0x0d -#define HDMI_VIDEO_EXT_HDELAY_H 0x0e -#define HDMI_VIDEO_EXT_HDURATION_L 0x0f -#define HDMI_VIDEO_EXT_HDURATION_H 0x10 -#define HDMI_VIDEO_EXT_VTOTAL_L 0x11 -#define HDMI_VIDEO_EXT_VTOTAL_H 0x12 -#define HDMI_VIDEO_EXT_VBLANK 0x13 -#define HDMI_VIDEO_EXT_VDELAY 0x14 -#define HDMI_VIDEO_EXT_VDURATION 0x15 - -#define HDMI_VIDEO_CSC_COEF 0x18 - -#define HDMI_AUDIO_CTRL1 0x35 -enum { - CTS_SOURCE_INTERNAL = 0, - CTS_SOURCE_EXTERNAL = 1, -}; -#define v_CTS_SOURCE(n) (n << 7) - -enum { - DOWNSAMPLE_DISABLE = 0, - DOWNSAMPLE_1_2 = 1, - DOWNSAMPLE_1_4 = 2, -}; -#define v_DOWN_SAMPLE(n) (n << 5) - -enum { - AUDIO_SOURCE_IIS = 0, - AUDIO_SOURCE_SPDIF = 1, -}; -#define v_AUDIO_SOURCE(n) (n << 3) - -#define v_MCLK_ENABLE(n) (n << 2) -enum { - MCLK_128FS = 0, - MCLK_256FS = 1, - MCLK_384FS = 2, - MCLK_512FS = 3, -}; -#define v_MCLK_RATIO(n) (n) - -#define AUDIO_SAMPLE_RATE 0x37 -enum { - AUDIO_32K = 0x3, - AUDIO_441K = 0x0, - AUDIO_48K = 0x2, - AUDIO_882K = 0x8, - AUDIO_96K = 0xa, - AUDIO_1764K = 0xc, - AUDIO_192K = 0xe, -}; - -#define AUDIO_I2S_MODE 0x38 -enum { - I2S_CHANNEL_1_2 = 1, - I2S_CHANNEL_3_4 = 3, - I2S_CHANNEL_5_6 = 7, - I2S_CHANNEL_7_8 = 0xf -}; -#define v_I2S_CHANNEL(n) ((n) << 2) -enum { - I2S_STANDARD = 0, - I2S_LEFT_JUSTIFIED = 1, - I2S_RIGHT_JUSTIFIED = 2, -}; -#define v_I2S_MODE(n) (n) - -#define AUDIO_I2S_MAP 0x39 -#define AUDIO_I2S_SWAPS_SPDIF 0x3a -#define v_SPIDF_FREQ(n) (n) - -#define N_32K 0x1000 -#define N_441K 0x1880 -#define N_882K 0x3100 -#define N_1764K 0x6200 -#define N_48K 0x1800 -#define N_96K 0x3000 -#define N_192K 0x6000 - -#define HDMI_AUDIO_CHANNEL_STATUS 0x3e -#define m_AUDIO_STATUS_NLPCM (1 << 7) -#define m_AUDIO_STATUS_USE (1 << 6) -#define m_AUDIO_STATUS_COPYRIGHT (1 << 5) -#define m_AUDIO_STATUS_ADDITION (3 << 2) -#define m_AUDIO_STATUS_CLK_ACCURACY (2 << 0) -#define v_AUDIO_STATUS_NLPCM(n) ((n & 1) << 7) -#define AUDIO_N_H 0x3f -#define AUDIO_N_M 0x40 -#define AUDIO_N_L 0x41 - -#define HDMI_AUDIO_CTS_H 0x45 -#define HDMI_AUDIO_CTS_M 0x46 -#define HDMI_AUDIO_CTS_L 0x47 - -#define HDMI_DDC_CLK_L 0x4b -#define HDMI_DDC_CLK_H 0x4c - -#define HDMI_EDID_SEGMENT_POINTER 0x4d -#define HDMI_EDID_WORD_ADDR 0x4e -#define HDMI_EDID_FIFO_OFFSET 0x4f -#define HDMI_EDID_FIFO_ADDR 0x50 - -#define HDMI_PACKET_SEND_MANUAL 0x9c -#define HDMI_PACKET_SEND_AUTO 0x9d -#define m_PACKET_GCP_EN (1 << 7) -#define m_PACKET_MSI_EN (1 << 6) -#define m_PACKET_SDI_EN (1 << 5) -#define m_PACKET_VSI_EN (1 << 4) -#define v_PACKET_GCP_EN(n) 
((n & 1) << 7) -#define v_PACKET_MSI_EN(n) ((n & 1) << 6) -#define v_PACKET_SDI_EN(n) ((n & 1) << 5) -#define v_PACKET_VSI_EN(n) ((n & 1) << 4) - -#define HDMI_CONTROL_PACKET_BUF_INDEX 0x9f -enum { - INFOFRAME_VSI = 0x05, - INFOFRAME_AVI = 0x06, - INFOFRAME_AAI = 0x08, -}; - -#define HDMI_CONTROL_PACKET_ADDR 0xa0 -#define HDMI_MAXIMUM_INFO_FRAME_SIZE 0x11 -enum { - AVI_COLOR_MODE_RGB = 0, - AVI_COLOR_MODE_YCBCR422 = 1, - AVI_COLOR_MODE_YCBCR444 = 2, - AVI_COLORIMETRY_NO_DATA = 0, - - AVI_COLORIMETRY_SMPTE_170M = 1, - AVI_COLORIMETRY_ITU709 = 2, - AVI_COLORIMETRY_EXTENDED = 3, - - AVI_CODED_FRAME_ASPECT_NO_DATA = 0, - AVI_CODED_FRAME_ASPECT_4_3 = 1, - AVI_CODED_FRAME_ASPECT_16_9 = 2, - - ACTIVE_ASPECT_RATE_SAME_AS_CODED_FRAME = 0x08, - ACTIVE_ASPECT_RATE_4_3 = 0x09, - ACTIVE_ASPECT_RATE_16_9 = 0x0A, - ACTIVE_ASPECT_RATE_14_9 = 0x0B, -}; - -#define HDMI_HDCP_CTRL 0x52 -#define m_HDMI_DVI (1 << 1) -#define v_HDMI_DVI(n) (n << 1) - -#define HDMI_INTERRUPT_MASK1 0xc0 -#define HDMI_INTERRUPT_STATUS1 0xc1 -#define m_INT_ACTIVE_VSYNC (1 << 5) -#define m_INT_EDID_READY (1 << 2) - -#define HDMI_INTERRUPT_MASK2 0xc2 -#define HDMI_INTERRUPT_STATUS2 0xc3 -#define m_INT_HDCP_ERR (1 << 7) -#define m_INT_BKSV_FLAG (1 << 6) -#define m_INT_HDCP_OK (1 << 4) - -#define HDMI_STATUS 0xc8 -#define m_HOTPLUG (1 << 7) -#define m_MASK_INT_HOTPLUG (1 << 5) -#define m_INT_HOTPLUG (1 << 1) -#define v_MASK_INT_HOTPLUG(n) ((n & 0x1) << 5) - -#define HDMI_COLORBAR 0xc9 - -#define HDMI_PHY_SYNC 0xce -#define HDMI_PHY_SYS_CTL 0xe0 -#define m_TMDS_CLK_SOURCE (1 << 5) -#define v_TMDS_FROM_PLL (0 << 5) -#define v_TMDS_FROM_GEN (1 << 5) -#define m_PHASE_CLK (1 << 4) -#define v_DEFAULT_PHASE (0 << 4) -#define v_SYNC_PHASE (1 << 4) -#define m_TMDS_CURRENT_PWR (1 << 3) -#define v_TURN_ON_CURRENT (0 << 3) -#define v_CAT_OFF_CURRENT (1 << 3) -#define m_BANDGAP_PWR (1 << 2) -#define v_BANDGAP_PWR_UP (0 << 2) -#define v_BANDGAP_PWR_DOWN (1 << 2) -#define m_PLL_PWR (1 << 1) -#define v_PLL_PWR_UP (0 << 1) -#define v_PLL_PWR_DOWN (1 << 1) -#define m_TMDS_CHG_PWR (1 << 0) -#define v_TMDS_CHG_PWR_UP (0 << 0) -#define v_TMDS_CHG_PWR_DOWN (1 << 0) - -#define HDMI_PHY_CHG_PWR 0xe1 -#define v_CLK_CHG_PWR(n) ((n & 1) << 3) -#define v_DATA_CHG_PWR(n) ((n & 7) << 0) - -#define HDMI_PHY_DRIVER 0xe2 -#define v_CLK_MAIN_DRIVER(n) (n << 4) -#define v_DATA_MAIN_DRIVER(n) (n << 0) - -#define HDMI_PHY_PRE_EMPHASIS 0xe3 -#define v_PRE_EMPHASIS(n) ((n & 7) << 4) -#define v_CLK_PRE_DRIVER(n) ((n & 3) << 2) -#define v_DATA_PRE_DRIVER(n) ((n & 3) << 0) - -#define HDMI_PHY_FEEDBACK_DIV_RATIO_LOW 0xe7 -#define v_FEEDBACK_DIV_LOW(n) (n & 0xff) -#define HDMI_PHY_FEEDBACK_DIV_RATIO_HIGH 0xe8 -#define v_FEEDBACK_DIV_HIGH(n) (n & 1) - -#define HDMI_PHY_PRE_DIV_RATIO 0xed -#define v_PRE_DIV_RATIO(n) (n & 0x1f) - -#define HDMI_CEC_CTRL 0xd0 -#define m_ADJUST_FOR_HISENSE (1 << 6) -#define m_REJECT_RX_BROADCAST (1 << 5) -#define m_BUSFREETIME_ENABLE (1 << 2) -#define m_REJECT_RX (1 << 1) -#define m_START_TX (1 << 0) - -#define HDMI_CEC_DATA 0xd1 -#define HDMI_CEC_TX_OFFSET 0xd2 -#define HDMI_CEC_RX_OFFSET 0xd3 -#define HDMI_CEC_CLK_H 0xd4 -#define HDMI_CEC_CLK_L 0xd5 -#define HDMI_CEC_TX_LENGTH 0xd6 -#define HDMI_CEC_RX_LENGTH 0xd7 -#define HDMI_CEC_TX_INT_MASK 0xd8 -#define m_TX_DONE (1 << 3) -#define m_TX_NOACK (1 << 2) -#define m_TX_BROADCAST_REJ (1 << 1) -#define m_TX_BUSNOTFREE (1 << 0) - -#define HDMI_CEC_RX_INT_MASK 0xd9 -#define m_RX_LA_ERR (1 << 4) -#define m_RX_GLITCH (1 << 3) -#define m_RX_DONE (1 << 0) - -#define HDMI_CEC_TX_INT 0xda -#define 
HDMI_CEC_RX_INT 0xdb -#define HDMI_CEC_BUSFREETIME_L 0xdc -#define HDMI_CEC_BUSFREETIME_H 0xdd -#define HDMI_CEC_LOGICADDR 0xde - -#endif /* __INNO_HDMI_H__ */ diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c index e7875b52f298..ae4a5ac2299a 100644 --- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c +++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c @@ -450,7 +450,7 @@ struct drm_encoder_helper_funcs rk3066_hdmi_encoder_helper_funcs = { }; static enum drm_connector_status -rk3066_hdmi_bridge_detect(struct drm_bridge *bridge) +rk3066_hdmi_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector) { struct rk3066_hdmi *hdmi = bridge_to_rk3066_hdmi(bridge); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c index 5829ee061c61..2f469d370021 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c @@ -30,21 +30,18 @@ static const struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = static struct drm_framebuffer * rockchip_fb_create(struct drm_device *dev, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_afbc_framebuffer *afbc_fb; - const struct drm_format_info *info; int ret; - info = drm_get_format_info(dev, mode_cmd); - if (!info) - return ERR_PTR(-ENOMEM); - afbc_fb = kzalloc(sizeof(*afbc_fb), GFP_KERNEL); if (!afbc_fb) return ERR_PTR(-ENOMEM); - ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, file, mode_cmd, + ret = drm_gem_fb_init_with_funcs(dev, &afbc_fb->base, + file, info, mode_cmd, &rockchip_drm_fb_funcs); if (ret) { kfree(afbc_fb); @@ -52,7 +49,7 @@ rockchip_fb_create(struct drm_device *dev, struct drm_file *file, } if (drm_is_afbc(mode_cmd->modifier[0])) { - ret = drm_gem_fb_afbc_init(dev, mode_cmd, afbc_fb); + ret = drm_gem_fb_afbc_init(dev, info, mode_cmd, afbc_fb); if (ret) { drm_framebuffer_put(&afbc_fb->base); return ERR_PTR(ret); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index d0f5fea15e21..b50927a824b4 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -146,25 +146,6 @@ static void vop2_unlock(struct vop2 *vop2) mutex_unlock(&vop2->vop2_lock); } -/* - * Note: - * The write mask function is documented but missing on rk3566/8, writes - * to these bits have no effect. For newer soc(rk3588 and following) the - * write mask is needed for register writes. - * - * GLB_CFG_DONE_EN has no write mask bit. 
- * - */ -static void vop2_cfg_done(struct vop2_video_port *vp) -{ - struct vop2 *vop2 = vp->vop2; - u32 val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN; - - val |= BIT(vp->id) | (BIT(vp->id) << 16); - - regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val); -} - static void vop2_win_disable(struct vop2_win *win) { vop2_win_write(win, VOP2_WIN_ENABLE, 0); @@ -854,6 +835,11 @@ static void vop2_enable(struct vop2 *vop2) if (vop2->version == VOP_VERSION_RK3588) rk3588_vop2_power_domain_enable_all(vop2); + if (vop2->version <= VOP_VERSION_RK3588) { + vop2->old_layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL); + vop2->old_port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL); + } + vop2_writel(vop2, RK3568_REG_CFG_DONE, RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN); /* @@ -2422,6 +2408,10 @@ static int vop2_create_crtcs(struct vop2 *vop2) break; } } + + if (!vp->primary_plane) + return dev_err_probe(drm->dev, -ENOENT, + "no primary plane for vp %d\n", i); } /* Register all unused window as overlay plane */ @@ -2589,12 +2579,13 @@ static int vop2_win_init(struct vop2 *vop2) } /* - * The window registers are only updated when config done is written. - * Until that they read back the old value. As we read-modify-write - * these registers mark them as non-volatile. This makes sure we read - * the new values from the regmap register cache. + * The window and video port registers are only updated when config + * done is written. Until then, they read back the old value. As we + * read-modify-write these registers, mark them as non-volatile. This + * makes sure we read the new values from the regmap register cache. */ static const struct regmap_range vop2_nonvolatile_range[] = { + regmap_reg_range(RK3568_VP0_CTRL_BASE, RK3588_VP3_CTRL_BASE + 255), regmap_reg_range(0x1000, 0x23ff), }; @@ -2724,6 +2715,7 @@ static int vop2_bind(struct device *dev, struct device *master, void *data) return dev_err_probe(drm->dev, vop2->irq, "cannot find irq for vop2\n"); mutex_init(&vop2->vop2_lock); + mutex_init(&vop2->ovl_lock); ret = devm_request_irq(dev, vop2->irq, vop2_isr, IRQF_SHARED, dev_name(dev), vop2); if (ret) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h index fc3ecb9fcd95..fa5c56f16047 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.h @@ -334,6 +334,19 @@ struct vop2 { /* optional internal rgb encoder */ struct rockchip_rgb *rgb; + /* + * Used to record the layer selection configuration on rk356x/rk3588, + * as the registers RK3568_OVL_LAYER_SEL and RK3568_OVL_PORT_SEL are + * shared by all the Video Ports. + */ + u32 old_layer_sel; + u32 old_port_sel; + /* + * Ensure that the updates to these two registers (RK3568_OVL_LAYER_SEL/RK3568_OVL_PORT_SEL) + * take effect in sequence. 
+ */ + struct mutex ovl_lock; + /* must be put at the end of the struct */ struct vop2_win win[]; }; @@ -727,6 +740,7 @@ enum dst_factor_mode { #define RK3588_OVL_PORT_SEL__CLUSTER2 GENMASK(21, 20) #define RK3568_OVL_PORT_SEL__CLUSTER1 GENMASK(19, 18) #define RK3568_OVL_PORT_SEL__CLUSTER0 GENMASK(17, 16) +#define RK3588_OVL_PORT_SET__PORT3_MUX GENMASK(15, 12) #define RK3568_OVL_PORT_SET__PORT2_MUX GENMASK(11, 8) #define RK3568_OVL_PORT_SET__PORT1_MUX GENMASK(7, 4) #define RK3568_OVL_PORT_SET__PORT0_MUX GENMASK(3, 0) @@ -831,4 +845,23 @@ static inline struct vop2_win *to_vop2_win(struct drm_plane *p) return container_of(p, struct vop2_win, base); } +/* + * Note: + * The write mask function is documented but missing on rk3566/8; writes + * to these bits have no effect. For newer SoCs (rk3588 and following) the + * write mask is needed for register writes. + * + * GLB_CFG_DONE_EN has no write mask bit. + * + */ +static inline void vop2_cfg_done(struct vop2_video_port *vp) +{ + struct vop2 *vop2 = vp->vop2; + u32 val = RK3568_REG_CFG_DONE__GLB_CFG_DONE_EN; + + val |= BIT(vp->id) | (BIT(vp->id) << 16); + + regmap_set_bits(vop2->map, RK3568_REG_CFG_DONE, val); +} + #endif /* _ROCKCHIP_DRM_VOP2_H */ diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index a673779de3d2..2411260db51d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -56,14 +56,13 @@ struct rockchip_lvds { struct drm_device *drm_dev; struct drm_panel *panel; struct drm_bridge *bridge; - struct drm_connector connector; struct rockchip_encoder encoder; struct dev_pin_info *pins; }; -static inline struct rockchip_lvds *connector_to_lvds(struct drm_connector *connector) +static inline struct rockchip_lvds *bridge_to_lvds(struct drm_bridge *bridge) { - return container_of(connector, struct rockchip_lvds, connector); + return (struct rockchip_lvds *)bridge->driver_private; } static inline struct rockchip_lvds *encoder_to_lvds(struct drm_encoder *encoder) @@ -106,25 +105,21 @@ static inline int rockchip_lvds_name_to_output(const char *s) return -EINVAL; } -static const struct drm_connector_funcs rockchip_lvds_connector_funcs = { - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = drm_connector_cleanup, - .reset = drm_atomic_helper_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static int rockchip_lvds_connector_get_modes(struct drm_connector *connector) +static int +rockchip_lvds_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector *connector) { - struct rockchip_lvds *lvds = connector_to_lvds(connector); + struct rockchip_lvds *lvds = bridge_to_lvds(bridge); struct drm_panel *panel = lvds->panel; return drm_panel_get_modes(panel, connector); } static const -struct drm_connector_helper_funcs rockchip_lvds_connector_helper_funcs = { - .get_modes = rockchip_lvds_connector_get_modes, +struct drm_bridge_funcs rockchip_lvds_bridge_funcs = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .get_modes = rockchip_lvds_bridge_get_modes, }; static int @@ -606,26 +601,23 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, } drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs); - connector = &lvds->connector; if (lvds->panel) { - 
connector->dpms = DRM_MODE_DPMS_OFF; - ret = drm_connector_init(drm_dev, connector, - &rockchip_lvds_connector_funcs, - DRM_MODE_CONNECTOR_LVDS); - if (ret < 0) { - drm_err(drm_dev, - "failed to initialize connector: %d\n", ret); + lvds->bridge = drm_panel_bridge_add_typed(lvds->panel, DRM_MODE_CONNECTOR_LVDS); + if (IS_ERR(lvds->bridge)) { + ret = PTR_ERR(lvds->bridge); goto err_free_encoder; } + } - drm_connector_helper_add(connector, - &rockchip_lvds_connector_helper_funcs); - } else { - ret = drm_bridge_attach(encoder, lvds->bridge, NULL, - DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (lvds->bridge) { + lvds->bridge->driver_private = lvds; + lvds->bridge->ops = DRM_BRIDGE_OP_MODES; + lvds->bridge->funcs = &rockchip_lvds_bridge_funcs; + + ret = drm_bridge_attach(encoder, lvds->bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); if (ret) - goto err_free_encoder; + goto err_free_bridge; connector = drm_bridge_connector_init(lvds->drm_dev, encoder); if (IS_ERR(connector)) { @@ -633,14 +625,14 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, "failed to initialize bridge connector: %pe\n", connector); ret = PTR_ERR(connector); - goto err_free_encoder; + goto err_free_bridge; } - } - ret = drm_connector_attach_encoder(connector, encoder); - if (ret < 0) { - drm_err(drm_dev, "failed to attach encoder: %d\n", ret); - goto err_free_connector; + ret = drm_connector_attach_encoder(connector, encoder); + if (ret < 0) { + drm_err(drm_dev, "failed to attach encoder: %d\n", ret); + goto err_free_bridge; + } } pm_runtime_enable(dev); @@ -649,8 +641,8 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, return 0; -err_free_connector: - drm_connector_cleanup(connector); +err_free_bridge: + drm_panel_bridge_remove(lvds->bridge); err_free_encoder: drm_encoder_cleanup(encoder); err_put_remote: @@ -670,8 +662,6 @@ static void rockchip_lvds_unbind(struct device *dev, struct device *master, encoder_funcs = lvds->soc_data->helper_funcs; encoder_funcs->disable(&lvds->encoder.encoder); pm_runtime_disable(dev); - drm_connector_cleanup(&lvds->connector); - drm_encoder_cleanup(&lvds->encoder.encoder); } static const struct component_ops rockchip_lvds_component_ops = { diff --git a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c index 32c4ed685739..45c5e3987813 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop2_reg.c @@ -2052,12 +2052,55 @@ static void vop2_setup_alpha(struct vop2_video_port *vp) } } +static u32 rk3568_vop2_read_port_mux(struct vop2 *vop2) +{ + return vop2_readl(vop2, RK3568_OVL_PORT_SEL); +} + +static void rk3568_vop2_wait_for_port_mux_done(struct vop2 *vop2) +{ + u32 port_mux_sel; + int ret; + + /* + * Spin until the previous port_mux configuration is done. + */ + ret = readx_poll_timeout_atomic(rk3568_vop2_read_port_mux, vop2, port_mux_sel, + port_mux_sel == vop2->old_port_sel, 0, 50 * 1000); + if (ret) + DRM_DEV_ERROR(vop2->dev, "wait port_mux done timeout: 0x%x--0x%x\n", + port_mux_sel, vop2->old_port_sel); +} + +static u32 rk3568_vop2_read_layer_cfg(struct vop2 *vop2) +{ + return vop2_readl(vop2, RK3568_OVL_LAYER_SEL); +} + +static void rk3568_vop2_wait_for_layer_cfg_done(struct vop2 *vop2, u32 cfg) +{ + u32 atv_layer_cfg; + int ret; + + /* + * Spin until the previous layer configuration is done. 
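+ * + * readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) + * busy-polls op(addr) into val until cond holds or timeout_us expires; + * both waiters here poll with no delay between reads and a 50 ms timeout.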
+ */ + ret = readx_poll_timeout_atomic(rk3568_vop2_read_layer_cfg, vop2, atv_layer_cfg, + atv_layer_cfg == cfg, 0, 50 * 1000); + if (ret) + DRM_DEV_ERROR(vop2->dev, "wait layer cfg done timeout: 0x%x--0x%x\n", + atv_layer_cfg, cfg); +} + static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp) { struct vop2 *vop2 = vp->vop2; struct drm_plane *plane; u32 layer_sel = 0; u32 port_sel; + u32 old_layer_sel = 0; + u32 atv_layer_sel = 0; + u32 old_port_sel = 0; u8 layer_id; u8 old_layer_id; u8 layer_sel_id; @@ -2069,19 +2112,18 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp) struct vop2_video_port *vp2 = &vop2->vps[2]; struct rockchip_crtc_state *vcstate = to_rockchip_crtc_state(vp->crtc.state); + mutex_lock(&vop2->ovl_lock); ovl_ctrl = vop2_readl(vop2, RK3568_OVL_CTRL); ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_IMD; ovl_ctrl &= ~RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL; - ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id); if (vcstate->yuv_overlay) ovl_ctrl |= RK3568_OVL_CTRL__YUV_MODE(vp->id); else ovl_ctrl &= ~RK3568_OVL_CTRL__YUV_MODE(vp->id); - vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl); - - port_sel = vop2_readl(vop2, RK3568_OVL_PORT_SEL); + old_port_sel = vop2->old_port_sel; + port_sel = old_port_sel; port_sel &= RK3568_OVL_PORT_SEL__SEL_PORT; if (vp0->nlayers) @@ -2102,7 +2144,13 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp) else port_sel |= FIELD_PREP(RK3568_OVL_PORT_SET__PORT2_MUX, 8); - layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL); + /* Fixed value for rk3588 */ + if (vop2->version == VOP_VERSION_RK3588) + port_sel |= FIELD_PREP(RK3588_OVL_PORT_SET__PORT3_MUX, 7); + + atv_layer_sel = vop2_readl(vop2, RK3568_OVL_LAYER_SEL); + old_layer_sel = vop2->old_layer_sel; + layer_sel = old_layer_sel; ofs = 0; for (i = 0; i < vp->id; i++) @@ -2186,8 +2234,37 @@ static void rk3568_vop2_setup_layer_mixer(struct vop2_video_port *vp) old_win->data->layer_sel_id[vp->id]); } + vop2->old_layer_sel = layer_sel; + vop2->old_port_sel = port_sel; + /* + * The RK3568_OVL_LAYER_SEL and RK3568_OVL_PORT_SEL registers are shared by all Video Ports, + * and the configuration takes effect on one Video Port's vsync. + * When performing layer migration or changing the zpos of layers, there are two things + * to be observed and followed: + * 1. When a layer is migrated from one VP to another, the configuration of the layer + * can only take effect after the Port mux configuration is enabled. + * + * 2. When we change the zpos of layers, we must ensure that the change for the previous + * VP takes effect before we proceed to change the next VP. Otherwise, the new + * configuration might overwrite the previous one for the previous VP, or it could + * lead to the configuration of the previous VP taking effect along with the VSYNC + * of the new VP. 
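+ * + * A simplified sketch of the ordering enforced below (the real code also + * computes layer_sel/port_sel and skips writes when nothing changed): + * + * mutex_lock(&vop2->ovl_lock); + * if (port_sel != old_port_sel) { + * vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel); + * vop2_cfg_done(vp); + * rk3568_vop2_wait_for_port_mux_done(vop2); + * } + * vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel); + * mutex_unlock(&vop2->ovl_lock);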
+ */ + if (layer_sel != old_layer_sel || port_sel != old_port_sel) + ovl_ctrl |= FIELD_PREP(RK3568_OVL_CTRL__LAYERSEL_REGDONE_SEL, vp->id); + vop2_writel(vop2, RK3568_OVL_CTRL, ovl_ctrl); + + if (port_sel != old_port_sel) { + vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel); + vop2_cfg_done(vp); + rk3568_vop2_wait_for_port_mux_done(vop2); + } + + if (layer_sel != old_layer_sel && atv_layer_sel != old_layer_sel) + rk3568_vop2_wait_for_layer_cfg_done(vop2, vop2->old_layer_sel); + vop2_writel(vop2, RK3568_OVL_LAYER_SEL, layer_sel); - vop2_writel(vop2, RK3568_OVL_PORT_SEL, port_sel); + mutex_unlock(&vop2->ovl_lock); } static void rk3568_vop2_setup_dly_for_windows(struct vop2_video_port *vp) diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 5635b3a826d8..8867b95ab089 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -355,17 +355,6 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity) } EXPORT_SYMBOL(drm_sched_entity_destroy); -/* drm_sched_entity_clear_dep - callback to clear the entities dependency */ -static void drm_sched_entity_clear_dep(struct dma_fence *f, - struct dma_fence_cb *cb) -{ - struct drm_sched_entity *entity = - container_of(cb, struct drm_sched_entity, cb); - - entity->dependency = NULL; - dma_fence_put(f); -} - /* * drm_sched_entity_wakeup - callback to clear the entity's dependency and * wake up the scheduler @@ -376,7 +365,8 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct drm_sched_entity *entity = container_of(cb, struct drm_sched_entity, cb); - drm_sched_entity_clear_dep(f, cb); + entity->dependency = NULL; + dma_fence_put(f); drm_sched_wakeup(entity->rq->sched); } @@ -429,13 +419,6 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) fence = dma_fence_get(&s_fence->scheduled); dma_fence_put(entity->dependency); entity->dependency = fence; - if (!dma_fence_add_callback(fence, &entity->cb, - drm_sched_entity_clear_dep)) - return true; - - /* Ignore it when it is already scheduled */ - dma_fence_put(fence); - return false; } if (!dma_fence_add_callback(entity->dependency, &entity->cb, diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index c63543132f9d..e2cda28a1af4 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -84,12 +84,6 @@ #define CREATE_TRACE_POINTS #include "gpu_scheduler_trace.h" -#ifdef CONFIG_LOCKDEP -static struct lockdep_map drm_sched_lockdep_map = { - .name = "drm_sched_lockdep_map" -}; -#endif - int drm_sched_policy = DRM_SCHED_POLICY_FIFO; /** @@ -269,38 +263,14 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched, entity = rq->current_entity; if (entity) { list_for_each_entry_continue(entity, &rq->entities, list) { - if (drm_sched_entity_is_ready(entity)) { - /* If we can't queue yet, preserve the current - * entity in terms of fairness. - */ - if (!drm_sched_can_queue(sched, entity)) { - spin_unlock(&rq->lock); - return ERR_PTR(-ENOSPC); - } - - rq->current_entity = entity; - reinit_completion(&entity->entity_idle); - spin_unlock(&rq->lock); - return entity; - } + if (drm_sched_entity_is_ready(entity)) + goto found; } } list_for_each_entry(entity, &rq->entities, list) { - if (drm_sched_entity_is_ready(entity)) { - /* If we can't queue yet, preserve the current entity in - * terms of fairness. 
- */ - if (!drm_sched_can_queue(sched, entity)) { - spin_unlock(&rq->lock); - return ERR_PTR(-ENOSPC); - } - - rq->current_entity = entity; - reinit_completion(&entity->entity_idle); - spin_unlock(&rq->lock); - return entity; - } + if (drm_sched_entity_is_ready(entity)) + goto found; if (entity == rq->current_entity) break; @@ -309,6 +279,22 @@ drm_sched_rq_select_entity_rr(struct drm_gpu_scheduler *sched, spin_unlock(&rq->lock); return NULL; + +found: + if (!drm_sched_can_queue(sched, entity)) { + /* + * If the scheduler cannot take more jobs, signal the caller + * not to consider lower priority queues. + */ + entity = ERR_PTR(-ENOSPC); + } else { + rq->current_entity = entity; + reinit_completion(&entity->entity_idle); + } + + spin_unlock(&rq->lock); + + return entity; } /** @@ -380,11 +366,16 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched) { struct drm_sched_job *job; - spin_lock(&sched->job_list_lock); job = list_first_entry_or_null(&sched->pending_list, struct drm_sched_job, list); if (job && dma_fence_is_signaled(&job->s_fence->finished)) __drm_sched_run_free_queue(sched); +} + +static void drm_sched_run_free_queue_unlocked(struct drm_gpu_scheduler *sched) +{ + spin_lock(&sched->job_list_lock); + drm_sched_run_free_queue(sched); spin_unlock(&sched->job_list_lock); } @@ -537,11 +528,37 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job) spin_unlock(&sched->job_list_lock); } +/** + * drm_sched_job_reinsert_on_false_timeout - reinsert the job on a false timeout + * @sched: scheduler instance + * @job: job to be reinserted on the pending list + * + * In the case of a "false timeout" - when a timeout occurs but the GPU isn't + * hung and is making progress - the scheduler must reinsert the job back into + * @sched->pending_list. Otherwise, the job and its resources won't be freed + * through the &struct drm_sched_backend_ops.free_job callback. + * + * This function must be used in "false timeout" cases only. + */ +static void drm_sched_job_reinsert_on_false_timeout(struct drm_gpu_scheduler *sched, + struct drm_sched_job *job) +{ + spin_lock(&sched->job_list_lock); + list_add(&job->list, &sched->pending_list); + + /* After reinserting the job, the scheduler enqueues the free-job work + * again if ready. Otherwise, a signaled job could be added to the + * pending list, but never freed. + */ + drm_sched_run_free_queue(sched); + spin_unlock(&sched->job_list_lock); +} + static void drm_sched_job_timedout(struct work_struct *work) { struct drm_gpu_scheduler *sched; struct drm_sched_job *job; - enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_NOMINAL; + enum drm_gpu_sched_stat status = DRM_GPU_SCHED_STAT_RESET; sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work); @@ -570,6 +587,9 @@ static void drm_sched_job_timedout(struct work_struct *work) job->sched->ops->free_job(job); sched->free_guilty = false; } + + if (status == DRM_GPU_SCHED_STAT_NO_HANG) + drm_sched_job_reinsert_on_false_timeout(sched, job); } else { spin_unlock(&sched->job_list_lock); } @@ -592,6 +612,10 @@ static void drm_sched_job_timedout(struct work_struct *work) * This function is typically used for reset recovery (see the docu of * drm_sched_backend_ops.timedout_job() for details). Do not call it for * scheduler teardown, i.e., before calling drm_sched_fini(). 
+ * + * As it's only used for reset recovery, drivers must not call this function + * in their &struct drm_sched_backend_ops.timedout_job callback when they + * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG. */ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) { @@ -677,6 +701,10 @@ EXPORT_SYMBOL(drm_sched_stop); * drm_sched_backend_ops.timedout_job() for details). Do not call it for * scheduler startup. The scheduler itself is fully operational after * drm_sched_init() succeeded. + * + * As it's only used for reset recovery, drivers must not call this function + * in their &struct drm_sched_backend_ops.timedout_job callback when they + * skip a reset using &enum drm_gpu_sched_stat.DRM_GPU_SCHED_STAT_NO_HANG. */ void drm_sched_start(struct drm_gpu_scheduler *sched, int errno) { @@ -1198,7 +1226,7 @@ static void drm_sched_free_job_work(struct work_struct *w) if (job) sched->ops->free_job(job); - drm_sched_run_free_queue(sched); + drm_sched_run_free_queue_unlocked(sched); drm_sched_run_job_queue(sched); } @@ -1261,6 +1289,25 @@ static void drm_sched_run_job_work(struct work_struct *w) drm_sched_run_job_queue(sched); } +static struct workqueue_struct *drm_sched_alloc_wq(const char *name) +{ +#if (IS_ENABLED(CONFIG_LOCKDEP)) + static struct lockdep_map map = { + .name = "drm_sched_lockdep_map" + }; + + /* + * Avoid leaking a lockdep map on each drm sched creation and + * destruction by using a single lockdep map for all drm sched + * allocated submit_wq. + */ + + return alloc_ordered_workqueue_lockdep_map(name, WQ_MEM_RECLAIM, &map); +#else + return alloc_ordered_workqueue(name, WQ_MEM_RECLAIM); +#endif +} + /** * drm_sched_init - Init a gpu scheduler instance * @@ -1301,13 +1348,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, const struct drm_sched_init_ sched->submit_wq = args->submit_wq; sched->own_submit_wq = false; } else { -#ifdef CONFIG_LOCKDEP - sched->submit_wq = alloc_ordered_workqueue_lockdep_map(args->name, - WQ_MEM_RECLAIM, - &drm_sched_lockdep_map); -#else - sched->submit_wq = alloc_ordered_workqueue(args->name, WQ_MEM_RECLAIM); -#endif + sched->submit_wq = drm_sched_alloc_wq(args->name); if (!sched->submit_wq) return -ENOMEM; @@ -1353,6 +1394,18 @@ Out_check_own: } EXPORT_SYMBOL(drm_sched_init); +static void drm_sched_cancel_remaining_jobs(struct drm_gpu_scheduler *sched) +{ + struct drm_sched_job *job, *tmp; + + /* All other accessors are stopped. No locking necessary. */ + list_for_each_entry_safe_reverse(job, tmp, &sched->pending_list, list) { + sched->ops->cancel_job(job); + list_del(&job->list); + sched->ops->free_job(job); + } +} + /** * drm_sched_fini - Destroy a gpu scheduler * @@ -1360,19 +1413,11 @@ EXPORT_SYMBOL(drm_sched_init); * * Tears down and cleans up the scheduler. * - * This stops submission of new jobs to the hardware through - * drm_sched_backend_ops.run_job(). Consequently, drm_sched_backend_ops.free_job() - * will not be called for all jobs still in drm_gpu_scheduler.pending_list. - * There is no solution for this currently. Thus, it is up to the driver to make - * sure that: - * - * a) drm_sched_fini() is only called after for all submitted jobs - * drm_sched_backend_ops.free_job() has been called or that - * b) the jobs for which drm_sched_backend_ops.free_job() has not been called - * after drm_sched_fini() ran are freed manually. - * - * FIXME: Take care of the above problem and prevent this function from leaking - * the jobs in drm_gpu_scheduler.pending_list under any circumstances. 
+ * This stops submission of new jobs to the hardware through &struct + * drm_sched_backend_ops.run_job. If &struct drm_sched_backend_ops.cancel_job + * is implemented, all jobs will be canceled through it and afterwards cleaned + * up through &struct drm_sched_backend_ops.free_job. If cancel_job is not + * implemented, memory could leak. */ void drm_sched_fini(struct drm_gpu_scheduler *sched) { @@ -1402,11 +1447,18 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) /* Confirm no work left behind accessing device structures */ cancel_delayed_work_sync(&sched->work_tdr); + /* Avoid memory leaks if supported by the driver. */ + if (sched->ops->cancel_job) + drm_sched_cancel_remaining_jobs(sched); + if (sched->own_submit_wq) destroy_workqueue(sched->submit_wq); sched->ready = false; kfree(sched->sched_rq); sched->sched_rq = NULL; + + if (!list_empty(&sched->pending_list)) + dev_warn(sched->dev, "Tearing down scheduler while jobs are pending!\n"); } EXPORT_SYMBOL(drm_sched_fini); diff --git a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c index 7f947ab9d322..65acffc3fea8 100644 --- a/drivers/gpu/drm/scheduler/tests/mock_scheduler.c +++ b/drivers/gpu/drm/scheduler/tests/mock_scheduler.c @@ -63,7 +63,7 @@ static void drm_mock_sched_job_complete(struct drm_mock_sched_job *job) lockdep_assert_held(&sched->lock); job->flags |= DRM_MOCK_SCHED_JOB_DONE; - list_move_tail(&job->link, &sched->done_list); + list_del(&job->link); dma_fence_signal_locked(&job->hw_fence); complete(&job->done); } @@ -200,38 +200,82 @@ static struct dma_fence *mock_sched_run_job(struct drm_sched_job *sched_job) return &job->hw_fence; } +/* + * Normally, drivers would take appropriate measures in this callback, such as + * killing the entity the faulty job is associated with, resetting the hardware + * and / or resubmitting non-faulty jobs. + * + * For the mock scheduler, there are no hardware rings to be reset nor jobs + * to be resubmitted. Thus, this function merely ensures that + * a) timed-out fences get signaled properly and removed from the pending list + * b) the mock scheduler framework gets informed about the timeout via a flag + * c) the drm_sched_job, no longer needed, gets freed + */ static enum drm_gpu_sched_stat mock_sched_timedout_job(struct drm_sched_job *sched_job) { + struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched); struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); + unsigned long flags; + + if (job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET) { + job->flags &= ~DRM_MOCK_SCHED_JOB_DONT_RESET; + return DRM_GPU_SCHED_STAT_NO_HANG; + } - job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT; + spin_lock_irqsave(&sched->lock, flags); + if (!dma_fence_is_signaled_locked(&job->hw_fence)) { + list_del(&job->link); + job->flags |= DRM_MOCK_SCHED_JOB_TIMEDOUT; + dma_fence_set_error(&job->hw_fence, -ETIMEDOUT); + dma_fence_signal_locked(&job->hw_fence); + } + spin_unlock_irqrestore(&sched->lock, flags); + + dma_fence_put(&job->hw_fence); + drm_sched_job_cleanup(sched_job); + /* Mock job itself is freed by the kunit framework. */ - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; } static void mock_sched_free_job(struct drm_sched_job *sched_job) { - struct drm_mock_scheduler *sched = - drm_sched_to_mock_sched(sched_job->sched); struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); - unsigned long flags; - /* Remove from the scheduler done list. 
*/ - spin_lock_irqsave(&sched->lock, flags); - list_del(&job->link); - spin_unlock_irqrestore(&sched->lock, flags); dma_fence_put(&job->hw_fence); - drm_sched_job_cleanup(sched_job); /* Mock job itself is freed by the kunit framework. */ } +static void mock_sched_cancel_job(struct drm_sched_job *sched_job) +{ + struct drm_mock_scheduler *sched = drm_sched_to_mock_sched(sched_job->sched); + struct drm_mock_sched_job *job = drm_sched_job_to_mock_job(sched_job); + unsigned long flags; + + hrtimer_cancel(&job->timer); + + spin_lock_irqsave(&sched->lock, flags); + if (!dma_fence_is_signaled_locked(&job->hw_fence)) { + list_del(&job->link); + dma_fence_set_error(&job->hw_fence, -ECANCELED); + dma_fence_signal_locked(&job->hw_fence); + } + spin_unlock_irqrestore(&sched->lock, flags); + + /* + * The GPU scheduler will still call drm_sched_backend_ops.free_job(). + * Mock job itself is freed by the kunit framework. + */ +} + static const struct drm_sched_backend_ops drm_mock_scheduler_ops = { .run_job = mock_sched_run_job, .timedout_job = mock_sched_timedout_job, - .free_job = mock_sched_free_job + .free_job = mock_sched_free_job, + .cancel_job = mock_sched_cancel_job, }; /** @@ -265,7 +309,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout) sched->hw_timeline.context = dma_fence_context_alloc(1); atomic_set(&sched->hw_timeline.next_seqno, 0); INIT_LIST_HEAD(&sched->job_list); - INIT_LIST_HEAD(&sched->done_list); spin_lock_init(&sched->lock); return sched; @@ -280,38 +323,6 @@ struct drm_mock_scheduler *drm_mock_sched_new(struct kunit *test, long timeout) */ void drm_mock_sched_fini(struct drm_mock_scheduler *sched) { - struct drm_mock_sched_job *job, *next; - unsigned long flags; - LIST_HEAD(list); - - drm_sched_wqueue_stop(&sched->base); - - /* Force complete all unfinished jobs. */ - spin_lock_irqsave(&sched->lock, flags); - list_for_each_entry_safe(job, next, &sched->job_list, link) - list_move_tail(&job->link, &list); - spin_unlock_irqrestore(&sched->lock, flags); - - list_for_each_entry(job, &list, link) - hrtimer_cancel(&job->timer); - - spin_lock_irqsave(&sched->lock, flags); - list_for_each_entry_safe(job, next, &list, link) - drm_mock_sched_job_complete(job); - spin_unlock_irqrestore(&sched->lock, flags); - - /* - * Free completed jobs and jobs not yet processed by the DRM scheduler - * free worker. 
- */ - spin_lock_irqsave(&sched->lock, flags); - list_for_each_entry_safe(job, next, &sched->done_list, link) - list_move_tail(&job->link, &list); - spin_unlock_irqrestore(&sched->lock, flags); - - list_for_each_entry_safe(job, next, &list, link) - mock_sched_free_job(&job->base); - drm_sched_fini(&sched->base); } diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h index fbba38137f0c..63d4f2ac7074 100644 --- a/drivers/gpu/drm/scheduler/tests/sched_tests.h +++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h @@ -49,7 +49,6 @@ struct drm_mock_scheduler { spinlock_t lock; struct list_head job_list; - struct list_head done_list; struct { u64 context; @@ -98,6 +97,7 @@ struct drm_mock_sched_job { #define DRM_MOCK_SCHED_JOB_DONE 0x1 #define DRM_MOCK_SCHED_JOB_TIMEDOUT 0x2 +#define DRM_MOCK_SCHED_JOB_DONT_RESET 0x4 unsigned long flags; struct list_head link; diff --git a/drivers/gpu/drm/scheduler/tests/tests_basic.c b/drivers/gpu/drm/scheduler/tests/tests_basic.c index 7230057e0594..55eb142bd7c5 100644 --- a/drivers/gpu/drm/scheduler/tests/tests_basic.c +++ b/drivers/gpu/drm/scheduler/tests/tests_basic.c @@ -5,6 +5,8 @@ #include "sched_tests.h" +#define MOCK_TIMEOUT (HZ / 5) + /* * DRM scheduler basic tests should check the basic functional correctness of * the scheduler, including some very light smoke testing. More targeted tests, @@ -28,7 +30,7 @@ static void drm_sched_basic_exit(struct kunit *test) static int drm_sched_timeout_init(struct kunit *test) { - test->priv = drm_mock_sched_new(test, HZ); + test->priv = drm_mock_sched_new(test, MOCK_TIMEOUT); return 0; } @@ -204,6 +206,47 @@ static struct kunit_suite drm_sched_basic = { .test_cases = drm_sched_basic_tests, }; +static void drm_sched_basic_cancel(struct kunit *test) +{ + struct drm_mock_sched_entity *entity; + struct drm_mock_scheduler *sched; + struct drm_mock_sched_job *job; + bool done; + + /* + * Check that drm_sched_fini() uses the cancel_job() callback to cancel + * jobs that are still pending. 
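+ * + * A driver's cancel_job() is expected to do what mock_sched_cancel_job() + * in the mock scheduler above models: prevent the hardware from completing + * the job and fail its fence, roughly (a sketch, not a fixed recipe): + * + * dma_fence_set_error(&job->hw_fence, -ECANCELED); + * dma_fence_signal(&job->hw_fence);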
+ */ + + sched = drm_mock_sched_new(test, MAX_SCHEDULE_TIMEOUT); + entity = drm_mock_sched_entity_new(test, DRM_SCHED_PRIORITY_NORMAL, + sched); + + job = drm_mock_sched_job_new(test, entity); + + drm_mock_sched_job_submit(job); + + done = drm_mock_sched_job_wait_scheduled(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + drm_mock_sched_entity_free(entity); + drm_mock_sched_fini(sched); + + KUNIT_ASSERT_EQ(test, job->hw_fence.error, -ECANCELED); +} + +static struct kunit_case drm_sched_cancel_tests[] = { + KUNIT_CASE(drm_sched_basic_cancel), + {} +}; + +static struct kunit_suite drm_sched_cancel = { + .name = "drm_sched_basic_cancel_tests", + .init = drm_sched_basic_init, + .exit = drm_sched_basic_exit, + .test_cases = drm_sched_cancel_tests, +}; + static void drm_sched_basic_timeout(struct kunit *test) { struct drm_mock_scheduler *sched = test->priv; @@ -227,14 +270,14 @@ static void drm_sched_basic_timeout(struct kunit *test) done = drm_mock_sched_job_wait_scheduled(job, HZ); KUNIT_ASSERT_TRUE(test, done); - done = drm_mock_sched_job_wait_finished(job, HZ / 2); + done = drm_mock_sched_job_wait_finished(job, MOCK_TIMEOUT / 2); KUNIT_ASSERT_FALSE(test, done); KUNIT_ASSERT_EQ(test, job->flags & DRM_MOCK_SCHED_JOB_TIMEDOUT, 0); - done = drm_mock_sched_job_wait_finished(job, HZ); + done = drm_mock_sched_job_wait_finished(job, MOCK_TIMEOUT); KUNIT_ASSERT_FALSE(test, done); KUNIT_ASSERT_EQ(test, @@ -244,8 +287,51 @@ static void drm_sched_basic_timeout(struct kunit *test) drm_mock_sched_entity_free(entity); } +static void drm_sched_skip_reset(struct kunit *test) +{ + struct drm_mock_scheduler *sched = test->priv; + struct drm_mock_sched_entity *entity; + struct drm_mock_sched_job *job; + unsigned int i; + bool done; + + /* + * Submit a single job against a scheduler with the timeout configured + * and verify that if the job is still running, the timeout handler + * will skip the reset and allow the job to complete. 
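+ * + * This exercises DRM_GPU_SCHED_STAT_NO_HANG; the driver-side pattern is + * roughly (is_job_still_running() and reset_hw() are hypothetical helpers): + * + * if (is_job_still_running(job)) + * return DRM_GPU_SCHED_STAT_NO_HANG; + * reset_hw(); + * return DRM_GPU_SCHED_STAT_RESET;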
+ */ + + entity = drm_mock_sched_entity_new(test, + DRM_SCHED_PRIORITY_NORMAL, + sched); + job = drm_mock_sched_job_new(test, entity); + + job->flags = DRM_MOCK_SCHED_JOB_DONT_RESET; + + drm_mock_sched_job_submit(job); + + done = drm_mock_sched_job_wait_scheduled(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + done = drm_mock_sched_job_wait_finished(job, 2 * MOCK_TIMEOUT); + KUNIT_ASSERT_FALSE(test, done); + + KUNIT_ASSERT_EQ(test, + job->flags & DRM_MOCK_SCHED_JOB_DONT_RESET, + 0); + + i = drm_mock_sched_advance(sched, 1); + KUNIT_ASSERT_EQ(test, i, 1); + + done = drm_mock_sched_job_wait_finished(job, HZ); + KUNIT_ASSERT_TRUE(test, done); + + drm_mock_sched_entity_free(entity); +} + static struct kunit_case drm_sched_timeout_tests[] = { KUNIT_CASE(drm_sched_basic_timeout), + KUNIT_CASE(drm_sched_skip_reset), {} }; @@ -471,6 +557,7 @@ static struct kunit_suite drm_sched_credits = { kunit_test_suites(&drm_sched_basic, &drm_sched_timeout, + &drm_sched_cancel, &drm_sched_priority, &drm_sched_modify_sched, &drm_sched_credits); diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c index eec846892962..453eb7e045e5 100644 --- a/drivers/gpu/drm/sitronix/st7571-i2c.c +++ b/drivers/gpu/drm/sitronix/st7571-i2c.c @@ -68,6 +68,9 @@ #define ST7571_SET_COLOR_MODE(c) (0x10 | FIELD_PREP(GENMASK(0, 0), (c))) #define ST7571_COMMAND_SET_NORMAL (0x00) +/* ST7567 commands */ +#define ST7567_SET_LCD_BIAS(m) (0xa2 | FIELD_PREP(GENMASK(0, 0), (m))) + #define ST7571_PAGE_HEIGHT 8 #define DRIVER_NAME "st7571" @@ -92,6 +95,7 @@ struct st7571_panel_constraints { struct st7571_panel_data { int (*init)(struct st7571_device *st7571); + int (*parse_dt)(struct st7571_device *st7571); struct st7571_panel_constraints constraints; }; @@ -550,8 +554,8 @@ static const struct drm_crtc_funcs st7571_crtc_funcs = { * Encoder */ -static void ssd130x_encoder_atomic_enable(struct drm_encoder *encoder, - struct drm_atomic_state *state) +static void st7571_encoder_atomic_enable(struct drm_encoder *encoder, + struct drm_atomic_state *state) { struct drm_device *drm = encoder->dev; struct st7571_device *st7571 = drm_to_st7571(drm); @@ -565,8 +569,8 @@ static void ssd130x_encoder_atomic_enable(struct drm_encoder *encoder, st7571_send_command_list(st7571, &command, 1); } -static void ssd130x_encoder_atomic_disable(struct drm_encoder *encoder, - struct drm_atomic_state *state) +static void st7571_encoder_atomic_disable(struct drm_encoder *encoder, + struct drm_atomic_state *state) { struct drm_device *drm = encoder->dev; struct st7571_device *st7571 = drm_to_st7571(drm); @@ -581,8 +585,8 @@ static const struct drm_encoder_funcs st7571_encoder_funcs = { }; static const struct drm_encoder_helper_funcs st7571_encoder_helper_funcs = { - .atomic_enable = ssd130x_encoder_atomic_enable, - .atomic_disable = ssd130x_encoder_atomic_disable, + .atomic_enable = st7571_encoder_atomic_enable, + .atomic_disable = st7571_encoder_atomic_disable, }; /* @@ -773,6 +777,32 @@ static int st7571_validate_parameters(struct st7571_device *st7571) return 0; } +static int st7567_parse_dt(struct st7571_device *st7567) +{ + struct device *dev = &st7567->client->dev; + struct device_node *np = dev->of_node; + struct display_timing dt; + int ret; + + ret = of_get_display_timing(np, "panel-timing", &dt); + if (ret) { + dev_err(dev, "Failed to get display timing from DT\n"); + return ret; + } + + of_property_read_u32(np, "width-mm", &st7567->width_mm); + of_property_read_u32(np, "height-mm", &st7567->height_mm); + + st7567->pformat = 
&st7571_monochrome; + st7567->bpp = 1; + + st7567->startline = dt.vfront_porch.typ; + st7567->nlines = dt.vactive.typ; + st7567->ncols = dt.hactive.typ; + + return 0; +} + static int st7571_parse_dt(struct st7571_device *st7571) { struct device *dev = &st7571->client->dev; @@ -804,7 +834,9 @@ static int st7571_parse_dt(struct st7571_device *st7571) st7571->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(st7571->reset)) - return PTR_ERR(st7571->reset); + return dev_err_probe(dev, PTR_ERR(st7571->reset), + "Failed to get reset gpio\n"); + return 0; } @@ -816,6 +848,38 @@ static void st7571_reset(struct st7571_device *st7571) gpiod_set_value_cansleep(st7571->reset, 0); } +static int st7567_lcd_init(struct st7571_device *st7567) +{ + /* + * Most of the initialization sequence is taken directly from the + * reference initialization code in the ST7567 datasheet. + */ + u8 commands[] = { + ST7571_DISPLAY_OFF, + + ST7567_SET_LCD_BIAS(1), + + ST7571_SET_SEG_SCAN_DIR(0), + ST7571_SET_COM_SCAN_DIR(1), + + ST7571_SET_REGULATOR_REG(4), + ST7571_SET_CONTRAST_MSB, + ST7571_SET_CONTRAST_LSB(0x20), + + ST7571_SET_START_LINE_MSB, + ST7571_SET_START_LINE_LSB(st7567->startline), + + ST7571_SET_POWER(0x4), /* Power Control, VC: ON, VR: OFF, VF: OFF */ + ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */ + ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */ + + ST7571_SET_REVERSE(0), + ST7571_SET_ENTIRE_DISPLAY_ON(0), + }; + + return st7571_send_command_list(st7567, commands, ARRAY_SIZE(commands)); +} + static int st7571_lcd_init(struct st7571_device *st7571) { /* @@ -879,7 +943,7 @@ static int st7571_probe(struct i2c_client *client) i2c_set_clientdata(client, st7571); st7571->pdata = device_get_match_data(&client->dev); - ret = st7571_parse_dt(st7571); + ret = st7571->pdata->parse_dt(st7571); if (ret) return ret; @@ -960,8 +1024,21 @@ static void st7571_remove(struct i2c_client *client) drm_dev_unplug(&st7571->dev); } +struct st7571_panel_data st7567_config = { + .init = st7567_lcd_init, + .parse_dt = st7567_parse_dt, + .constraints = { + .min_nlines = 1, + .max_nlines = 64, + .min_ncols = 128, + .max_ncols = 128, + .support_grayscale = false, + }, +}; + struct st7571_panel_data st7571_config = { .init = st7571_lcd_init, + .parse_dt = st7571_parse_dt, .constraints = { .min_nlines = 1, .max_nlines = 128, @@ -972,12 +1049,14 @@ struct st7571_panel_data st7571_config = { }; static const struct of_device_id st7571_of_match[] = { + { .compatible = "sitronix,st7567", .data = &st7567_config }, { .compatible = "sitronix,st7571", .data = &st7571_config }, {}, }; MODULE_DEVICE_TABLE(of, st7571_of_match); static const struct i2c_device_id st7571_id[] = { + { "st7567", 0 }, { "st7571", 0 }, { } }; diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c index dd2006d51c7a..eec43d1a5595 100644 --- a/drivers/gpu/drm/solomon/ssd130x.c +++ b/drivers/gpu/drm/solomon/ssd130x.c @@ -974,7 +974,7 @@ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array) static void ssd132x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array) { - unsigned int columns = DIV_ROUND_UP(ssd130x->height, SSD132X_SEGMENT_WIDTH); + unsigned int columns = DIV_ROUND_UP(ssd130x->width, SSD132X_SEGMENT_WIDTH); unsigned int height = ssd130x->height; memset(data_array, 0, columns * height); diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index d202b6c1eb8f..2c015f563de9 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ 
b/drivers/gpu/drm/sti/sti_hda.c @@ -246,6 +246,7 @@ struct sti_hda { struct device dev; struct drm_device *drm_dev; struct drm_display_mode mode; + struct drm_bridge bridge; void __iomem *regs; void __iomem *video_dacs_ctrl; struct clk *clk_pix; @@ -262,6 +263,11 @@ struct sti_hda_connector { #define to_sti_hda_connector(x) \ container_of(x, struct sti_hda_connector, drm_connector) +static struct sti_hda *drm_bridge_to_sti_hda(struct drm_bridge *bridge) +{ + return container_of(bridge, struct sti_hda, bridge); +} + static u32 hda_read(struct sti_hda *hda, int offset) { return readl(hda->regs + offset); @@ -401,7 +407,7 @@ static void sti_hda_configure_awg(struct sti_hda *hda, u32 *awg_instr, int nb) static void sti_hda_disable(struct drm_bridge *bridge) { - struct sti_hda *hda = bridge->driver_private; + struct sti_hda *hda = drm_bridge_to_sti_hda(bridge); u32 val; if (!hda->enabled) @@ -426,7 +432,7 @@ static void sti_hda_disable(struct drm_bridge *bridge) static void sti_hda_pre_enable(struct drm_bridge *bridge) { - struct sti_hda *hda = bridge->driver_private; + struct sti_hda *hda = drm_bridge_to_sti_hda(bridge); u32 val, i, mode_idx; u32 src_filter_y, src_filter_c; u32 *coef_y, *coef_c; @@ -517,7 +523,7 @@ static void sti_hda_set_mode(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { - struct sti_hda *hda = bridge->driver_private; + struct sti_hda *hda = drm_bridge_to_sti_hda(bridge); u32 mode_idx; int hddac_rate; int ret; @@ -677,7 +683,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) struct drm_encoder *encoder; struct sti_hda_connector *connector; struct drm_connector *drm_connector; - struct drm_bridge *bridge; int err; /* Set the drm device handle */ @@ -693,13 +698,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data) connector->hda = hda; - bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL); - if (!bridge) - return -ENOMEM; - - bridge->driver_private = hda; - bridge->funcs = &sti_hda_bridge_funcs; - drm_bridge_attach(encoder, bridge, NULL, 0); + drm_bridge_attach(encoder, &hda->bridge, NULL, 0); connector->encoder = encoder; @@ -745,9 +744,9 @@ static int sti_hda_probe(struct platform_device *pdev) DRM_INFO("%s\n", __func__); - hda = devm_kzalloc(dev, sizeof(*hda), GFP_KERNEL); - if (!hda) - return -ENOMEM; + hda = devm_drm_bridge_alloc(dev, struct sti_hda, bridge, &sti_hda_bridge_funcs); + if (IS_ERR(hda)) + return PTR_ERR(hda); hda->dev = pdev->dev; hda->regs = devm_platform_ioremap_resource_byname(pdev, "hda-reg"); diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 37b8d619066e..4e7c3d78b2b9 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -168,6 +168,11 @@ struct sti_hdmi_connector { #define to_sti_hdmi_connector(x) \ container_of(x, struct sti_hdmi_connector, drm_connector) +static struct sti_hdmi *drm_bridge_to_sti_hdmi(struct drm_bridge *bridge) +{ + return container_of(bridge, struct sti_hdmi, bridge); +} + static const struct drm_prop_enum_list colorspace_mode_names[] = { { HDMI_COLORSPACE_RGB, "rgb" }, { HDMI_COLORSPACE_YUV422, "yuv422" }, @@ -749,7 +754,7 @@ static void hdmi_debugfs_init(struct sti_hdmi *hdmi, struct drm_minor *minor) static void sti_hdmi_disable(struct drm_bridge *bridge) { - struct sti_hdmi *hdmi = bridge->driver_private; + struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge); u32 val = hdmi_read(hdmi, HDMI_CFG); @@ -881,7 +886,7 @@ static int 
hdmi_audio_configure(struct sti_hdmi *hdmi) static void sti_hdmi_pre_enable(struct drm_bridge *bridge) { - struct sti_hdmi *hdmi = bridge->driver_private; + struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge); DRM_DEBUG_DRIVER("\n"); @@ -936,7 +941,7 @@ static void sti_hdmi_set_mode(struct drm_bridge *bridge, const struct drm_display_mode *mode, const struct drm_display_mode *adjusted_mode) { - struct sti_hdmi *hdmi = bridge->driver_private; + struct sti_hdmi *hdmi = drm_bridge_to_sti_hdmi(bridge); int ret; DRM_DEBUG_DRIVER("\n"); @@ -1273,7 +1278,6 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) struct sti_hdmi_connector *connector; struct cec_connector_info conn_info; struct drm_connector *drm_connector; - struct drm_bridge *bridge; int err; /* Set the drm device handle */ @@ -1289,13 +1293,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) connector->hdmi = hdmi; - bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL); - if (!bridge) - return -EINVAL; - - bridge->driver_private = hdmi; - bridge->funcs = &sti_hdmi_bridge_funcs; - drm_bridge_attach(encoder, bridge, NULL, 0); + drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0); connector->encoder = encoder; @@ -1385,9 +1383,9 @@ static int sti_hdmi_probe(struct platform_device *pdev) DRM_INFO("%s\n", __func__); - hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL); - if (!hdmi) - return -ENOMEM; + hdmi = devm_drm_bridge_alloc(dev, struct sti_hdmi, bridge, &sti_hdmi_bridge_funcs); + if (IS_ERR(hdmi)) + return PTR_ERR(hdmi); ddc = of_parse_phandle(pdev->dev.of_node, "ddc", 0); if (ddc) { diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h index 6d4c3f57bc46..91d43dd46f13 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.h +++ b/drivers/gpu/drm/sti/sti_hdmi.h @@ -12,6 +12,7 @@ #include <media/cec-notifier.h> +#include <drm/drm_bridge.h> #include <drm/drm_modes.h> #include <drm/drm_property.h> @@ -86,6 +87,7 @@ struct sti_hdmi { struct hdmi_audio_params audio; struct drm_connector *drm_connector; struct cec_notifier *notifier; + struct drm_bridge bridge; }; u32 hdmi_read(struct sti_hdmi *hdmi, int offset); diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c index 2b107958942c..90615e9ac86b 100644 --- a/drivers/gpu/drm/sysfb/vesadrm.c +++ b/drivers/gpu/drm/sysfb/vesadrm.c @@ -334,14 +334,19 @@ static struct vesadrm_device *vesadrm_device_create(struct drm_driver *drv, if (!__screen_info_vbe_mode_nonvga(si)) { vesa->cmap_write = vesadrm_vga_cmap_write; -#if defined(CONFIG_X86_32) } else { +#if defined(CONFIG_X86_32) phys_addr_t pmi_base = __screen_info_vesapm_info_base(si); - const u16 *pmi_addr = phys_to_virt(pmi_base); - vesa->pmi.PrimaryPalette = (u8 *)pmi_addr + pmi_addr[2]; - vesa->cmap_write = vesadrm_pmi_cmap_write; + if (pmi_base) { + const u16 *pmi_addr = phys_to_virt(pmi_base); + + vesa->pmi.PrimaryPalette = (u8 *)pmi_addr + pmi_addr[2]; + vesa->cmap_write = vesadrm_pmi_cmap_write; + } else #endif + if (format->is_color_indexed) + drm_warn(dev, "hardware palette is unchangeable, colors may be incorrect\n"); } #if defined(CONFIG_FIRMWARE_EDID) diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 0b65e69f3a8a..1dd3670f37db 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -185,11 +185,13 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer); int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer, struct tegra_bo_tiling *tiling); struct drm_framebuffer 
*tegra_fb_alloc(struct drm_device *drm, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct tegra_bo **planes, unsigned int num_planes); struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *cmd); #ifdef CONFIG_DRM_FBDEV_EMULATION diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index 46170753699d..dd041089f797 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -102,6 +102,7 @@ static const struct drm_framebuffer_funcs tegra_fb_funcs = { }; struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct tegra_bo **planes, unsigned int num_planes) @@ -114,7 +115,7 @@ struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm, if (!fb) return ERR_PTR(-ENOMEM); - drm_helper_mode_fill_fb_struct(drm, fb, mode_cmd); + drm_helper_mode_fill_fb_struct(drm, fb, info, mode_cmd); for (i = 0; i < fb->format->num_planes; i++) fb->obj[i] = &planes[i]->gem; @@ -132,9 +133,9 @@ struct drm_framebuffer *tegra_fb_alloc(struct drm_device *drm, struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, struct drm_file *file, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *cmd) { - const struct drm_format_info *info = drm_get_format_info(drm, cmd); struct tegra_bo *planes[4]; struct drm_gem_object *gem; struct drm_framebuffer *fb; @@ -166,7 +167,7 @@ struct drm_framebuffer *tegra_fb_create(struct drm_device *drm, planes[i] = to_tegra_bo(gem); } - fb = tegra_fb_alloc(drm, cmd, planes, i); + fb = tegra_fb_alloc(drm, info, cmd, planes, i); if (IS_ERR(fb)) { err = PTR_ERR(fb); goto unreference; diff --git a/drivers/gpu/drm/tegra/fbdev.c b/drivers/gpu/drm/tegra/fbdev.c index cd9d798f8870..1b70f5e164af 100644 --- a/drivers/gpu/drm/tegra/fbdev.c +++ b/drivers/gpu/drm/tegra/fbdev.c @@ -106,7 +106,9 @@ int tegra_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper, return PTR_ERR(info); } - fb = tegra_fb_alloc(drm, &cmd, &bo, 1); + fb = tegra_fb_alloc(drm, + drm_get_format_info(drm, cmd.pixel_format, cmd.modifier[0]), + &cmd, &bo, 1); if (IS_ERR(fb)) { err = PTR_ERR(fb); dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n", diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index dbc1394f96b8..8ede07fb7a21 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -523,7 +523,7 @@ void tegra_bo_free_object(struct drm_gem_object *gem) if (tegra->domain) { tegra_bo_iommu_unmap(tegra, bo); - if (gem->import_attach) { + if (drm_gem_is_imported(gem)) { dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt, DMA_TO_DEVICE); dma_buf_detach(gem->import_attach->dmabuf, gem->import_attach); diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c index 2d9a0a3f6c38..7a38664e890e 100644 --- a/drivers/gpu/drm/tegra/nvdec.c +++ b/drivers/gpu/drm/tegra/nvdec.c @@ -261,10 +261,8 @@ static int nvdec_load_falcon_firmware(struct nvdec *nvdec) if (!client->group) { virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL); - - err = dma_mapping_error(nvdec->dev, iova); - if (err < 0) - return err; + if (!virt) + return -ENOMEM; } else { virt = tegra_drm_alloc(tegra, size, &iova); if (IS_ERR(virt)) diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c index ad06762db671..981dada8f3a8 100644 --- 
a/drivers/gpu/drm/tests/drm_format_helper_test.c +++ b/drivers/gpu/drm/tests/drm_format_helper_test.c @@ -735,13 +735,13 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test) NULL : &result->dst_pitch; drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip, - &fmtcnv_state, false); + &fmtcnv_state); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); buf = dst.vaddr; /* restore original value of buf */ - drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, - &fmtcnv_state, true); + drm_fb_xrgb8888_to_rgb565be(&dst, &result->dst_pitch, &src, &fb, &params->clip, + &fmtcnv_state); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected_swab, dst_size); @@ -749,7 +749,7 @@ static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test) memset(buf, 0, dst_size); drm_fb_xrgb8888_to_rgb565(&dst, dst_pitch, &src, &fb, &params->clip, - &fmtcnv_state, false); + &fmtcnv_state); buf = le16buf_to_cpu(test, (__force const __le16 *)buf, dst_size / sizeof(__le16)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } @@ -1033,13 +1033,14 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test) NULL : &result->dst_pitch; drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state); - buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32)); + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); buf = dst.vaddr; /* restore original value of buf */ memset(buf, 0, dst_size); drm_fb_xrgb8888_to_xrgb2101010(&dst, dst_pitch, &src, &fb, &params->clip, &fmtcnv_state); + buf = le32buf_to_cpu(test, (__force const __le32 *)buf, dst_size / sizeof(u32)); KUNIT_EXPECT_MEMEQ(test, buf, result->expected, dst_size); } diff --git a/drivers/gpu/drm/tests/drm_framebuffer_test.c b/drivers/gpu/drm/tests/drm_framebuffer_test.c index 6ea04cc8f324..9b8e01e8cd91 100644 --- a/drivers/gpu/drm/tests/drm_framebuffer_test.c +++ b/drivers/gpu/drm/tests/drm_framebuffer_test.c @@ -363,6 +363,7 @@ struct drm_framebuffer_test_priv { static struct drm_framebuffer *fb_create_mock(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_framebuffer_test_priv *priv = container_of(dev, typeof(*priv), dev); diff --git a/drivers/gpu/drm/tests/drm_kunit_edid.h b/drivers/gpu/drm/tests/drm_kunit_edid.h index 02e2761b3b1f..c59c8528a3f7 100644 --- a/drivers/gpu/drm/tests/drm_kunit_edid.h +++ b/drivers/gpu/drm/tests/drm_kunit_edid.h @@ -46,6 +46,13 @@ * Monitor ranges (GTF): 50-70 Hz V, 30-70 kHz H, max dotclock 150 MHz * Dummy Descriptor: * Checksum: 0xab + * + * ---------------- + * + * edid-decode 1.30.0-5367 + * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22 + * + * EDID conformity: PASS */ static const unsigned char test_edid_dvi_1080p[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00, @@ -62,6 +69,10 @@ static const unsigned char test_edid_dvi_1080p[] = { }; /* + * + * This EDID is intentionally broken with the 100 MHz limit. It's meant + * to be used only by tests exercising unusual situations. 
+ * * edid-decode (hex): * * 00 ff ff ff ff ff ff 00 31 d8 2a 00 00 00 00 00 @@ -73,14 +84,14 @@ static const unsigned char test_edid_dvi_1080p[] = { * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92 * - * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c - * 00 12 34 00 14 20 00 00 00 00 00 00 00 00 00 00 + * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c + * 00 12 34 00 14 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e4 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 10 * * ---------------- * @@ -135,8 +146,19 @@ static const unsigned char test_edid_dvi_1080p[] = { * Vendor-Specific Data Block (HDMI), OUI 00-0C-03: * Source physical address: 1.2.3.4 * Maximum TMDS clock: 100 MHz - * Extended HDMI video details: - * Checksum: 0xe4 Unused space in Extension Block: 100 bytes + * Checksum: 0x10 Unused space in Extension Block: 106 bytes + * + * ---------------- + * + * edid-decode 1.30.0-5367 + * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22 + * + * Failures: + * + * EDID: + * CTA-861: The maximum HDMI TMDS clock is 100000 kHz, but one or more video timings go up to 148500 kHz. + * + * EDID conformity: FAIL */ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00, @@ -147,11 +169,11 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = { 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44, 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, - 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81, - 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c, - 0x00, 0x12, 0x34, 0x00, 0x14, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81, + 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c, + 0x00, 0x12, 0x34, 0x00, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -160,7 +182,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0xe4 + 0x00, 0x00, 0x00, 0x10 }; /* @@ -175,14 +197,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = { * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92 * - * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c - * 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00 + * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c + * 00 12 34 00 28 00 00 00 00 00 00 00 00 00 00 
00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d0 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 fc * * ---------------- * @@ -237,8 +259,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_100mhz[] = { * Vendor-Specific Data Block (HDMI), OUI 00-0C-03: * Source physical address: 1.2.3.4 * Maximum TMDS clock: 200 MHz - * Extended HDMI video details: - * Checksum: 0xd0 Unused space in Extension Block: 100 bytes + * Checksum: 0xfc Unused space in Extension Block: 106 bytes + * + * ---------------- + * + * edid-decode 1.30.0-5367 + * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22 + * + * EDID conformity: PASS */ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00, @@ -249,11 +277,11 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = { 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44, 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, - 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81, - 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c, - 0x00, 0x12, 0x34, 0x00, 0x28, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81, + 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c, + 0x00, 0x12, 0x34, 0x00, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -262,7 +290,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0xd0 + 0x00, 0x00, 0x00, 0xfc }; /* @@ -277,14 +305,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = { * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 92 * - * 02 03 1b 81 e3 05 00 20 41 10 e2 00 4a 6d 03 0c - * 00 12 34 00 28 20 00 00 00 00 00 00 00 00 00 00 + * 02 03 15 81 e3 05 00 20 41 10 e2 00 4a 67 03 0c + * 00 12 34 00 44 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d0 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 e0 * * ---------------- * @@ -339,8 +367,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_200mhz[] = { * Vendor-Specific Data Block (HDMI), OUI 00-0C-03: * Source physical address: 1.2.3.4 * Maximum TMDS clock: 
340 MHz - * Extended HDMI video details: - * Checksum: 0xd0 Unused space in Extension Block: 100 bytes + * Checksum: 0xe0 Unused space in Extension Block: 106 bytes + * + * ---------------- + * + * edid-decode 1.30.0-5367 + * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22 + * + * EDID conformity: PASS */ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00, @@ -351,11 +385,11 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = { 0x2d, 0x40, 0x58, 0x2c, 0x45, 0x00, 0x40, 0x84, 0x63, 0x00, 0x00, 0x1e, 0x00, 0x00, 0x00, 0xfc, 0x00, 0x54, 0x65, 0x73, 0x74, 0x20, 0x45, 0x44, 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, - 0x46, 0x00, 0x00, 0xc4, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x41, 0x02, 0x03, 0x1b, 0x81, - 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x6d, 0x03, 0x0c, - 0x00, 0x12, 0x34, 0x00, 0x44, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x92, 0x02, 0x03, 0x15, 0x81, + 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0x4a, 0x67, 0x03, 0x0c, + 0x00, 0x12, 0x34, 0x00, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -364,7 +398,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0xd0 + 0x00, 0x00, 0x00, 0xe0 }; /* @@ -379,14 +413,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = { * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 7a * - * 02 03 1b b1 e3 05 00 20 41 10 e2 00 ca 6d 03 0c - * 00 12 34 78 28 20 00 00 00 00 00 00 00 00 00 00 + * 02 03 15 b1 e3 05 00 20 41 10 e2 00 ca 67 03 0c + * 00 12 34 78 28 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 a8 + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 d4 * * ---------------- * @@ -447,8 +481,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_max_340mhz[] = { * DC_30bit * DC_Y444 * Maximum TMDS clock: 200 MHz - * Extended HDMI video details: - * Checksum: 0xa8 Unused space in Extension Block: 100 bytes + * Checksum: 0xd4 Unused space in Extension Block: 106 bytes + * + * ---------------- + * + * edid-decode 1.30.0-5367 + * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22 + * + * EDID conformity: PASS */ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00, @@ -461,9 +501,9 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = { 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, 0x46, 0x1e, 0x46, 
0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7a, 0x02, 0x03, 0x1b, 0xb1, - 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x6d, 0x03, 0x0c, - 0x00, 0x12, 0x34, 0x78, 0x28, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x7a, 0x02, 0x03, 0x15, 0xb1, + 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x67, 0x03, 0x0c, + 0x00, 0x12, 0x34, 0x78, 0x28, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -472,7 +512,7 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0xa8 + 0x00, 0x00, 0x00, 0xd4 }; /* @@ -487,14 +527,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = { * 46 1e 46 0f 00 0a 20 20 20 20 20 20 00 00 00 10 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 01 8a * - * 02 03 1b b1 e3 05 00 20 41 10 e2 00 ca 6d 03 0c - * 00 12 34 78 44 20 00 00 00 00 00 00 00 00 00 00 + * 02 03 15 b1 e3 05 00 20 41 10 e2 00 ca 67 03 0c + * 00 12 34 78 44 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 - * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 8c + * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 b8 * * ---------------- * @@ -555,8 +595,14 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_200mhz[] = { * DC_30bit * DC_Y444 * Maximum TMDS clock: 340 MHz - * Extended HDMI video details: - * Checksum: 0x8c Unused space in Extension Block: 100 bytes + * Checksum: 0xb8 Unused space in Extension Block: 106 bytes + * + * ---------------- + * + * edid-decode 1.30.0-5367 + * edid-decode SHA: 41ebf7135691 2025-05-01 10:19:22 + * + * EDID conformity: PASS */ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x31, 0xd8, 0x2a, 0x00, @@ -569,9 +615,9 @@ static const unsigned char test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz[] = { 0x49, 0x44, 0x0a, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x32, 0x46, 0x1e, 0x46, 0x0f, 0x00, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8a, 0x02, 0x03, 0x1b, 0xb1, - 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x6d, 0x03, 0x0c, - 0x00, 0x12, 0x34, 0x78, 0x44, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x8a, 0x02, 0x03, 0x15, 0xb1, + 0xe3, 0x05, 0x00, 0x20, 0x41, 0x10, 0xe2, 0x00, 0xca, 0x67, 0x03, 0x0c, + 0x00, 0x12, 0x34, 0x78, 0x44, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -580,7 +626,7 @@ static const unsigned char 
test_edid_hdmi_1080p_rgb_yuv_dc_max_340mhz[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x8c + 0x00, 0x00, 0x00, 0xb8 }; /* diff --git a/drivers/gpu/drm/tidss/Makefile b/drivers/gpu/drm/tidss/Makefile index 312645271014..b6d6becf1683 100644 --- a/drivers/gpu/drm/tidss/Makefile +++ b/drivers/gpu/drm/tidss/Makefile @@ -7,6 +7,7 @@ tidss-y := tidss_crtc.o \ tidss_irq.o \ tidss_plane.o \ tidss_scale_coefs.o \ - tidss_dispc.o + tidss_dispc.o \ + tidss_oldi.o obj-$(CONFIG_DRM_TIDSS) += tidss.o diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index 21363ccbd763..c0277fa36425 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -146,7 +146,7 @@ static const u16 tidss_am65x_common_regs[DISPC_COMMON_REG_TABLE_LEN] = { const struct dispc_features dispc_am65x_feats = { .max_pclk_khz = { [DISPC_VP_DPI] = 165000, - [DISPC_VP_OLDI] = 165000, + [DISPC_VP_OLDI_AM65X] = 165000, }, .scaling = { @@ -176,7 +176,7 @@ const struct dispc_features dispc_am65x_feats = { .vp_name = { "vp1", "vp2" }, .ovr_name = { "ovr1", "ovr2" }, .vpclk_name = { "vp1", "vp2" }, - .vp_bus_type = { DISPC_VP_OLDI, DISPC_VP_DPI }, + .vp_bus_type = { DISPC_VP_OLDI_AM65X, DISPC_VP_DPI }, .vp_feat = { .color = { .has_ctm = true, @@ -491,7 +491,7 @@ struct dispc_device { void __iomem *base_ovr[TIDSS_MAX_PORTS]; void __iomem *base_vp[TIDSS_MAX_PORTS]; - struct regmap *oldi_io_ctrl; + struct regmap *am65x_oldi_io_ctrl; struct clk *vp_clk[TIDSS_MAX_PORTS]; @@ -566,6 +566,29 @@ static u32 dispc_vp_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg) return ioread32(base + reg); } +int tidss_configure_oldi(struct tidss_device *tidss, u32 hw_videoport, + u32 oldi_cfg) +{ + u32 count = 0; + u32 oldi_reset_bit = BIT(5 + hw_videoport); + + dispc_vp_write(tidss->dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, oldi_cfg); + + while (!(oldi_reset_bit & dispc_read(tidss->dispc, DSS_SYSSTATUS)) && + count < 10000) + count++; + + if (!(oldi_reset_bit & dispc_read(tidss->dispc, DSS_SYSSTATUS))) + return -ETIMEDOUT; + + return 0; +} + +void tidss_disable_oldi(struct tidss_device *tidss, u32 hw_videoport) +{ + dispc_vp_write(tidss->dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0); +} + /* * TRM gives bitfields as start:end, where start is the higher bit * number. 
For example 7:0 @@ -1016,13 +1039,11 @@ void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask) } } -enum dispc_oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 }; - struct dispc_bus_format { u32 bus_fmt; u32 data_width; bool is_oldi_fmt; - enum dispc_oldi_mode_reg_val oldi_mode_reg_val; + enum oldi_mode_reg_val am65x_oldi_mode_reg_val; }; static const struct dispc_bus_format dispc_bus_formats[] = { @@ -1066,7 +1087,7 @@ int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport, return -EINVAL; } - if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI && + if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI_AM65X && fmt->is_oldi_fmt) { dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n", __func__, dispc->feat->vp_name[hw_videoport]); @@ -1076,23 +1097,23 @@ int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport, return 0; } -static void dispc_oldi_tx_power(struct dispc_device *dispc, bool power) +static void dispc_am65x_oldi_tx_power(struct dispc_device *dispc, bool power) { - u32 val = power ? 0 : OLDI_PWRDN_TX; + u32 val = power ? 0 : AM65X_OLDI_PWRDN_TX; - if (WARN_ON(!dispc->oldi_io_ctrl)) + if (WARN_ON(!dispc->am65x_oldi_io_ctrl)) return; - regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT0_IO_CTRL, - OLDI_PWRDN_TX, val); - regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT1_IO_CTRL, - OLDI_PWRDN_TX, val); - regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT2_IO_CTRL, - OLDI_PWRDN_TX, val); - regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT3_IO_CTRL, - OLDI_PWRDN_TX, val); - regmap_update_bits(dispc->oldi_io_ctrl, OLDI_CLK_IO_CTRL, - OLDI_PWRDN_TX, val); + regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT0_IO_CTRL, + AM65X_OLDI_PWRDN_TX, val); + regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT1_IO_CTRL, + AM65X_OLDI_PWRDN_TX, val); + regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT2_IO_CTRL, + AM65X_OLDI_PWRDN_TX, val); + regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_DAT3_IO_CTRL, + AM65X_OLDI_PWRDN_TX, val); + regmap_update_bits(dispc->am65x_oldi_io_ctrl, AM65X_OLDI_CLK_IO_CTRL, + AM65X_OLDI_PWRDN_TX, val); } static void dispc_set_num_datalines(struct dispc_device *dispc, @@ -1121,8 +1142,8 @@ static void dispc_set_num_datalines(struct dispc_device *dispc, VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8); } -static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport, - const struct dispc_bus_format *fmt) +static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport, + const struct dispc_bus_format *fmt) { u32 oldi_cfg = 0; u32 oldi_reset_bit = BIT(5 + hw_videoport); @@ -1141,7 +1162,7 @@ static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport, oldi_cfg |= BIT(7); /* DEPOL */ - oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode_reg_val, 3, 1); + oldi_cfg = FLD_MOD(oldi_cfg, fmt->am65x_oldi_mode_reg_val, 3, 1); oldi_cfg |= BIT(12); /* SOFTRST */ @@ -1170,10 +1191,10 @@ void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport, if (WARN_ON(!fmt)) return; - if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) { - dispc_oldi_tx_power(dispc, true); + if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI_AM65X) { + dispc_am65x_oldi_tx_power(dispc, true); - dispc_enable_oldi(dispc, hw_videoport, fmt); + dispc_enable_am65x_oldi(dispc, hw_videoport, fmt); } } @@ -1229,7 +1250,7 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport, align = true; /* always use DE_HIGH for OLDI */ - if 
(dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) + if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI_AM65X) ieo = false; dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ, @@ -1255,10 +1276,10 @@ void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport) void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport) { - if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) { + if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI_AM65X) { dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0); - dispc_oldi_tx_power(dispc, false); + dispc_am65x_oldi_tx_power(dispc, false); } } @@ -1420,7 +1441,6 @@ void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport) * Calculate the percentage difference between the requested pixel clock rate * and the effective rate resulting from calculating the clock divider value. */ -static unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate) { int r = rate / 100, rr = real_rate / 100; @@ -2852,15 +2872,15 @@ static int dispc_iomap_resource(struct platform_device *pdev, const char *name, static int dispc_init_am65x_oldi_io_ctrl(struct device *dev, struct dispc_device *dispc) { - dispc->oldi_io_ctrl = + dispc->am65x_oldi_io_ctrl = syscon_regmap_lookup_by_phandle(dev->of_node, "ti,am65x-oldi-io-ctrl"); - if (PTR_ERR(dispc->oldi_io_ctrl) == -ENODEV) { - dispc->oldi_io_ctrl = NULL; - } else if (IS_ERR(dispc->oldi_io_ctrl)) { + if (PTR_ERR(dispc->am65x_oldi_io_ctrl) == -ENODEV) { + dispc->am65x_oldi_io_ctrl = NULL; + } else if (IS_ERR(dispc->am65x_oldi_io_ctrl)) { dev_err(dev, "%s: syscon_regmap_lookup_by_phandle failed %ld\n", - __func__, PTR_ERR(dispc->oldi_io_ctrl)); - return PTR_ERR(dispc->oldi_io_ctrl); + __func__, PTR_ERR(dispc->am65x_oldi_io_ctrl)); + return PTR_ERR(dispc->am65x_oldi_io_ctrl); } return 0; } diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h index 28958514b8f5..b8614f62186c 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.h +++ b/drivers/gpu/drm/tidss/tidss_dispc.h @@ -58,7 +58,7 @@ struct dispc_errata { enum dispc_vp_bus_type { DISPC_VP_DPI, /* DPI output */ - DISPC_VP_OLDI, /* OLDI (LVDS) output */ + DISPC_VP_OLDI_AM65X, /* OLDI (LVDS) output for AM65x DSS */ DISPC_VP_INTERNAL, /* SoC internal routing */ DISPC_VP_TIED_OFF, /* Tied off / Unavailable */ DISPC_VP_MAX_BUS_TYPE, @@ -101,6 +101,11 @@ extern const struct dispc_features dispc_am62l_feats; extern const struct dispc_features dispc_am65x_feats; extern const struct dispc_features dispc_j721e_feats; +int tidss_configure_oldi(struct tidss_device *tidss, u32 hw_videoport, + u32 oldi_cfg); +void tidss_disable_oldi(struct tidss_device *tidss, u32 hw_videoport); +unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate); + void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask); dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc); diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h index e88148e44937..50a3f28250ef 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc_regs.h +++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h @@ -226,18 +226,35 @@ enum dispc_common_regs { #define DISPC_VP_DSS_DMA_THREADSIZE 0x170 /* J721E */ #define DISPC_VP_DSS_DMA_THREADSIZE_STATUS 0x174 /* J721E */ +/* OLDI Config Bits (DISPC_VP_DSS_OLDI_CFG) */ +#define OLDI_ENABLE BIT(0) +#define OLDI_MAP (BIT(1) | BIT(2) | BIT(3)) +#define OLDI_SRC BIT(4) +#define OLDI_CLONE_MODE BIT(5) +#define OLDI_MASTERSLAVE 
BIT(6) +#define OLDI_DEPOL BIT(7) +#define OLDI_MSB BIT(8) +#define OLDI_LBEN BIT(9) +#define OLDI_LBDATA BIT(10) +#define OLDI_DUALMODESYNC BIT(11) +#define OLDI_SOFTRST BIT(12) +#define OLDI_TPATCFG BIT(13) + +/* LVDS Format values for OLDI_MAP field in DISPC_VP_OLDI_CFG register */ +enum oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 }; + /* * OLDI IO_CTRL register offsets. On AM654 the registers are found * from CTRL_MMR0, there the syscon regmap should map 0x14 bytes from * CTRLMMR0P1_OLDI_DAT0_IO_CTRL to CTRLMMR0P1_OLDI_CLK_IO_CTRL * register range. */ -#define OLDI_DAT0_IO_CTRL 0x00 -#define OLDI_DAT1_IO_CTRL 0x04 -#define OLDI_DAT2_IO_CTRL 0x08 -#define OLDI_DAT3_IO_CTRL 0x0C -#define OLDI_CLK_IO_CTRL 0x10 +#define AM65X_OLDI_DAT0_IO_CTRL 0x00 +#define AM65X_OLDI_DAT1_IO_CTRL 0x04 +#define AM65X_OLDI_DAT2_IO_CTRL 0x08 +#define AM65X_OLDI_DAT3_IO_CTRL 0x0C +#define AM65X_OLDI_CLK_IO_CTRL 0x10 -#define OLDI_PWRDN_TX BIT(8) +#define AM65X_OLDI_PWRDN_TX BIT(8) #endif /* __TIDSS_DISPC_REGS_H */ diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index f2a4f659f574..a1b12e52aca4 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -24,6 +24,7 @@ #include "tidss_drv.h" #include "tidss_kms.h" #include "tidss_irq.h" +#include "tidss_oldi.h" /* Power management */ @@ -147,6 +148,10 @@ static int tidss_probe(struct platform_device *pdev) return ret; } + ret = tidss_oldi_init(tidss); + if (ret) + return dev_err_probe(dev, ret, "failed to init OLDI\n"); + pm_runtime_enable(dev); pm_runtime_set_autosuspend_delay(dev, 1000); @@ -203,6 +208,8 @@ err_runtime_suspend: pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); + tidss_oldi_deinit(tidss); + return ret; } @@ -227,6 +234,8 @@ static void tidss_remove(struct platform_device *pdev) pm_runtime_dont_use_autosuspend(dev); pm_runtime_disable(dev); + tidss_oldi_deinit(tidss); + /* devm allocated dispc goes away with the dev so mark it NULL */ dispc_remove(tidss); diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h index 7f4f4282bc04..d14d5d28f0a3 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.h +++ b/drivers/gpu/drm/tidss/tidss_drv.h @@ -11,8 +11,10 @@ #define TIDSS_MAX_PORTS 4 #define TIDSS_MAX_PLANES 4 +#define TIDSS_MAX_OLDI_TXES 2 typedef u32 dispc_irq_t; +struct tidss_oldi; struct tidss_device { struct drm_device ddev; /* DRM device for DSS */ @@ -27,6 +29,9 @@ struct tidss_device { unsigned int num_planes; struct drm_plane *planes[TIDSS_MAX_PLANES]; + unsigned int num_oldis; + struct tidss_oldi *oldis[TIDSS_MAX_OLDI_TXES]; + unsigned int irq; /* protects the irq masks field and irqenable/irqstatus registers */ diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c index 95b4aeff2775..81a04f767770 100644 --- a/drivers/gpu/drm/tidss/tidss_encoder.c +++ b/drivers/gpu/drm/tidss/tidss_encoder.c @@ -90,14 +90,18 @@ int tidss_encoder_create(struct tidss_device *tidss, struct drm_connector *connector; int ret; - t_enc = drmm_simple_encoder_alloc(&tidss->ddev, struct tidss_encoder, - encoder, encoder_type); + t_enc = devm_drm_bridge_alloc(tidss->dev, struct tidss_encoder, + bridge, &tidss_bridge_funcs); if (IS_ERR(t_enc)) return PTR_ERR(t_enc); + ret = drm_simple_encoder_init(&tidss->ddev, &t_enc->encoder, + encoder_type); + if (ret) + return ret; + t_enc->tidss = tidss; t_enc->next_bridge = next_bridge; - t_enc->bridge.funcs = &tidss_bridge_funcs; enc = &t_enc->encoder; enc->possible_crtcs = 
possible_crtcs; diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c index 19432c08ec6b..c34eb90cddbe 100644 --- a/drivers/gpu/drm/tidss/tidss_kms.c +++ b/drivers/gpu/drm/tidss/tidss_kms.c @@ -144,7 +144,7 @@ static int tidss_dispc_modeset_init(struct tidss_device *tidss) dev_dbg(dev, "Setting up panel for port %d\n", i); switch (feat->vp_bus_type[i]) { - case DISPC_VP_OLDI: + case DISPC_VP_OLDI_AM65X: enc_type = DRM_MODE_ENCODER_LVDS; conn_type = DRM_MODE_CONNECTOR_LVDS; break; diff --git a/drivers/gpu/drm/tidss/tidss_oldi.c b/drivers/gpu/drm/tidss/tidss_oldi.c new file mode 100644 index 000000000000..8f25159d0666 --- /dev/null +++ b/drivers/gpu/drm/tidss/tidss_oldi.c @@ -0,0 +1,598 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2025 - Texas Instruments Incorporated + * + * Aradhya Bhatia <[email protected]> + */ + +#include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_graph.h> +#include <linux/mfd/syscon.h> +#include <linux/media-bus-format.h> +#include <linux/regmap.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_of.h> + +#include "tidss_dispc.h" +#include "tidss_dispc_regs.h" +#include "tidss_oldi.h" + +struct tidss_oldi { + struct tidss_device *tidss; + struct device *dev; + + struct drm_bridge bridge; + struct drm_bridge *next_bridge; + + enum tidss_oldi_link_type link_type; + const struct oldi_bus_format *bus_format; + u32 oldi_instance; + int companion_instance; /* -1 when OLDI TX operates in Single-Link */ + u32 parent_vp; + + struct clk *serial; + struct regmap *io_ctrl; +}; + +struct oldi_bus_format { + u32 bus_fmt; + u32 data_width; + enum oldi_mode_reg_val oldi_mode_reg_val; + u32 input_bus_fmt; +}; + +static const struct oldi_bus_format oldi_bus_formats[] = { + { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, SPWG_18, MEDIA_BUS_FMT_RGB666_1X18 }, + { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, SPWG_24, MEDIA_BUS_FMT_RGB888_1X24 }, + { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, JEIDA_24, MEDIA_BUS_FMT_RGB888_1X24 }, +}; + +#define OLDI_IDLE_CLK_HZ 25000000 /* 25 MHz */ + +static inline struct tidss_oldi * +drm_bridge_to_tidss_oldi(struct drm_bridge *bridge) +{ + return container_of(bridge, struct tidss_oldi, bridge); +} + +static int tidss_oldi_bridge_attach(struct drm_bridge *bridge, + struct drm_encoder *encoder, + enum drm_bridge_attach_flags flags) +{ + struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge); + + if (!oldi->next_bridge) { + dev_err(oldi->dev, + "%s: OLDI%u Failed to attach next bridge\n", + __func__, oldi->oldi_instance); + return -ENODEV; + } + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { + dev_err(oldi->dev, + "%s: OLDI%u DRM_BRIDGE_ATTACH_NO_CONNECTOR is mandatory.\n", + __func__, oldi->oldi_instance); + return -EINVAL; + } + + return drm_bridge_attach(encoder, oldi->next_bridge, bridge, flags); +} + +static int +tidss_oldi_set_serial_clk(struct tidss_oldi *oldi, unsigned long rate) +{ + unsigned long new_rate; + int ret; + + ret = clk_set_rate(oldi->serial, rate); + if (ret) { + dev_err(oldi->dev, + "OLDI%u: failed to set serial clk rate to %lu Hz\n", + oldi->oldi_instance, rate); + return ret; + } + + new_rate = clk_get_rate(oldi->serial); + + if (dispc_pclk_diff(rate, new_rate) > 5) + dev_warn(oldi->dev, + "OLDI%u Clock rate %lu differs over 5%% from requested %lu\n", + oldi->oldi_instance, new_rate, rate); + + dev_dbg(oldi->dev, "OLDI%u: new rate %lu Hz (requested %lu Hz)\n", + oldi->oldi_instance, clk_get_rate(oldi->serial), rate); + + return 0; +} + +static
void tidss_oldi_tx_power(struct tidss_oldi *oldi, bool enable) +{ + u32 mask; + + /* + * The power control bits are Active Low, and remain powered off by + * default. That is, the bits are set to 1. To power on the OLDI TXes, + * the bits must be cleared to 0. Since there are cases where not all + * OLDI TXes are being used, the power logic selectively powers them + * on. + * Setting the variable 'mask' to particular bit masks makes sure that + * the undesired OLDI TXes remain powered off. + */ + + if (enable) { + switch (oldi->link_type) { + case OLDI_MODE_SINGLE_LINK: + /* Power-on only the required OLDI TX's IO */ + mask = OLDI_PWRDOWN_TX(oldi->oldi_instance) | OLDI_PWRDN_BG; + break; + case OLDI_MODE_CLONE_SINGLE_LINK: + case OLDI_MODE_DUAL_LINK: + /* Power-on both the OLDI TXes' IOs */ + mask = OLDI_PWRDOWN_TX(oldi->oldi_instance) | + OLDI_PWRDOWN_TX(oldi->companion_instance) | + OLDI_PWRDN_BG; + break; + default: + /* + * This code execution should never reach here as any + * OLDI with an unsupported OLDI mode would never get + * registered in the first place. + * However, power-off the OLDI in concern just in case. + */ + mask = OLDI_PWRDOWN_TX(oldi->oldi_instance); + enable = false; + break; + } + } else { + switch (oldi->link_type) { + case OLDI_MODE_CLONE_SINGLE_LINK: + case OLDI_MODE_DUAL_LINK: + mask = OLDI_PWRDOWN_TX(oldi->oldi_instance) | + OLDI_PWRDOWN_TX(oldi->companion_instance) | + OLDI_PWRDN_BG; + break; + case OLDI_MODE_SINGLE_LINK: + default: + mask = OLDI_PWRDOWN_TX(oldi->oldi_instance); + break; + } + } + + regmap_update_bits(oldi->io_ctrl, OLDI_PD_CTRL, mask, enable ? 0 : mask); +} + +static int tidss_oldi_config(struct tidss_oldi *oldi) +{ + const struct oldi_bus_format *bus_fmt = NULL; + u32 oldi_cfg = 0; + int ret; + + bus_fmt = oldi->bus_format; + + /* + * MASTERSLAVE and SRC bits of OLDI Config are always set to 0. + */ + + if (bus_fmt->data_width == 24) + oldi_cfg |= OLDI_MSB; + else if (bus_fmt->data_width != 18) + dev_warn(oldi->dev, + "OLDI%u: DSS port width %d not supported\n", + oldi->oldi_instance, bus_fmt->data_width); + + oldi_cfg |= OLDI_DEPOL; + + oldi_cfg = (oldi_cfg & (~OLDI_MAP)) | (bus_fmt->oldi_mode_reg_val << 1); + + oldi_cfg |= OLDI_SOFTRST; + + oldi_cfg |= OLDI_ENABLE; + + switch (oldi->link_type) { + case OLDI_MODE_SINGLE_LINK: + /* All configuration is done for this mode.
*/ + break; + + case OLDI_MODE_CLONE_SINGLE_LINK: + oldi_cfg |= OLDI_CLONE_MODE; + break; + + case OLDI_MODE_DUAL_LINK: + /* data-mapping field also indicates dual-link mode */ + oldi_cfg |= BIT(3); + oldi_cfg |= OLDI_DUALMODESYNC; + break; + + default: + dev_err(oldi->dev, "OLDI%u: Unsupported mode.\n", + oldi->oldi_instance); + return -EINVAL; + } + + ret = tidss_configure_oldi(oldi->tidss, oldi->parent_vp, oldi_cfg); + if (ret == -ETIMEDOUT) + dev_warn(oldi->dev, "OLDI%u: timeout waiting for OLDI reset done.\n", + oldi->oldi_instance); + + return ret; +} + +static void tidss_oldi_atomic_pre_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge); + struct drm_connector *connector; + struct drm_connector_state *conn_state; + struct drm_crtc_state *crtc_state; + struct drm_display_mode *mode; + + if (oldi->link_type == OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK) + return; + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + if (WARN_ON(!connector)) + return; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (WARN_ON(!conn_state)) + return; + + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + if (WARN_ON(!crtc_state)) + return; + + mode = &crtc_state->adjusted_mode; + + /* Configure the OLDI params*/ + tidss_oldi_config(oldi); + + /* Set the OLDI serial clock (7 times the pixel clock) */ + tidss_oldi_set_serial_clk(oldi, mode->clock * 7 * 1000); + + /* Enable OLDI IO power */ + tidss_oldi_tx_power(oldi, true); +} + +static void tidss_oldi_atomic_post_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge); + + if (oldi->link_type == OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK) + return; + + /* Disable OLDI IO power */ + tidss_oldi_tx_power(oldi, false); + + /* Set the OLDI serial clock to IDLE Frequency */ + tidss_oldi_set_serial_clk(oldi, OLDI_IDLE_CLK_HZ); + + /* Clear OLDI Config */ + tidss_disable_oldi(oldi->tidss, oldi->parent_vp); +} + +#define MAX_INPUT_SEL_FORMATS 1 + +static u32 *tidss_oldi_atomic_get_input_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + u32 output_fmt, + unsigned int *num_input_fmts) +{ + struct tidss_oldi *oldi = drm_bridge_to_tidss_oldi(bridge); + u32 *input_fmts; + int i; + + *num_input_fmts = 0; + + for (i = 0; i < ARRAY_SIZE(oldi_bus_formats); i++) + if (oldi_bus_formats[i].bus_fmt == output_fmt) + break; + + if (i == ARRAY_SIZE(oldi_bus_formats)) + return NULL; + + input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts), + GFP_KERNEL); + if (!input_fmts) + return NULL; + + *num_input_fmts = 1; + input_fmts[0] = oldi_bus_formats[i].input_bus_fmt; + oldi->bus_format = &oldi_bus_formats[i]; + + return input_fmts; +} + +static const struct drm_bridge_funcs tidss_oldi_bridge_funcs = { + .attach = tidss_oldi_bridge_attach, + .atomic_pre_enable = tidss_oldi_atomic_pre_enable, + .atomic_post_disable = tidss_oldi_atomic_post_disable, + .atomic_get_input_bus_fmts = tidss_oldi_atomic_get_input_bus_fmts, + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, +}; + +static int get_oldi_mode(struct device_node *oldi_tx, int *companion_instance) +{ + struct device_node *companion; + struct device_node *port0, 
*port1; + u32 companion_reg; + bool secondary_oldi = false; + int pixel_order; + + /* + * Find if the OLDI is paired with another OLDI for combined OLDI + * operation (dual-link or clone). + */ + companion = of_parse_phandle(oldi_tx, "ti,companion-oldi", 0); + if (!companion) + /* + * The OLDI TX does not have a companion, nor is it a + * secondary OLDI. It will operate independently. + */ + return OLDI_MODE_SINGLE_LINK; + + if (of_property_read_u32(companion, "reg", &companion_reg)) + return OLDI_MODE_UNSUPPORTED; + + if (companion_reg > (TIDSS_MAX_OLDI_TXES - 1)) + /* Invalid companion OLDI reg value. */ + return OLDI_MODE_UNSUPPORTED; + + *companion_instance = (int)companion_reg; + + if (of_property_read_bool(oldi_tx, "ti,secondary-oldi")) + secondary_oldi = true; + + /* + * We need to work out if the sink is expecting us to function in + * dual-link mode. We do this by looking at the DT port nodes, the + * OLDI TX ports are connected to. If they are marked as expecting + * even pixels and odd pixels, then we need to enable dual-link. + */ + port0 = of_graph_get_port_by_id(oldi_tx, 1); + port1 = of_graph_get_port_by_id(companion, 1); + pixel_order = drm_of_lvds_get_dual_link_pixel_order(port0, port1); + of_node_put(port0); + of_node_put(port1); + of_node_put(companion); + + switch (pixel_order) { + case -EINVAL: + /* + * The dual-link properties were not found in at least + * one of the sink nodes. Since 2 OLDI ports are present + * in the DT, it can be safely assumed that the required + * configuration is Clone Mode. + */ + return (secondary_oldi ? OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK : + OLDI_MODE_CLONE_SINGLE_LINK); + + case DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS: + /* + * Primary OLDI can only support "ODD" pixels. So, from its + * perspective, the pixel order has to be ODD-EVEN. + */ + return (secondary_oldi ? OLDI_MODE_UNSUPPORTED : + OLDI_MODE_DUAL_LINK); + + case DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS: + /* + * Secondary OLDI can only support "EVEN" pixels. So, from its + * perspective, the pixel order has to be EVEN-ODD. + */ + return (secondary_oldi ? 
OLDI_MODE_SECONDARY_DUAL_LINK : + OLDI_MODE_UNSUPPORTED); + + default: + return OLDI_MODE_UNSUPPORTED; + } +} + +static int get_parent_dss_vp(struct device_node *oldi_tx, u32 *parent_vp) +{ + struct device_node *ep, *dss_port; + int ret; + + ep = of_graph_get_endpoint_by_regs(oldi_tx, OLDI_INPUT_PORT, -1); + if (ep) { + dss_port = of_graph_get_remote_port(ep); + if (!dss_port) { + ret = -ENODEV; + goto err_return_ep_port; + } + + ret = of_property_read_u32(dss_port, "reg", parent_vp); + + of_node_put(dss_port); +err_return_ep_port: + of_node_put(ep); + return ret; + } + + return -ENODEV; +} + +static const struct drm_bridge_timings default_tidss_oldi_timings = { + .input_bus_flags = DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE + | DRM_BUS_FLAG_DE_HIGH, +}; + +void tidss_oldi_deinit(struct tidss_device *tidss) +{ + for (int i = 0; i < tidss->num_oldis; i++) { + if (tidss->oldis[i]) { + drm_bridge_remove(&tidss->oldis[i]->bridge); + tidss->oldis[i] = NULL; + } + } +} + +int tidss_oldi_init(struct tidss_device *tidss) +{ + struct tidss_oldi *oldi; + struct device_node *child; + struct drm_bridge *bridge; + u32 parent_vp, oldi_instance; + int companion_instance = -1; + enum tidss_oldi_link_type link_type = OLDI_MODE_UNSUPPORTED; + struct device_node *oldi_parent; + int ret = 0; + + tidss->num_oldis = 0; + + oldi_parent = of_get_child_by_name(tidss->dev->of_node, "oldi-transmitters"); + if (!oldi_parent) + /* Return gracefully */ + return 0; + + for_each_available_child_of_node(oldi_parent, child) { + ret = get_parent_dss_vp(child, &parent_vp); + if (ret) { + if (ret == -ENODEV) { + /* + * ENODEV means that this particular OLDI node + * is not connected with the DSS, which is not + * a harmful case. There could be another OLDI + * which may still be connected. + * Continue to search for that. + */ + ret = 0; + continue; + } + goto err_put_node; + } + + ret = of_property_read_u32(child, "reg", &oldi_instance); + if (ret) + goto err_put_node; + + /* + * Now that it's confirmed that OLDI is connected with DSS, + * let's continue getting the OLDI sinks ahead and other OLDI + * properties. + */ + bridge = devm_drm_of_get_bridge(tidss->dev, child, + OLDI_OUTPUT_PORT, 0); + if (IS_ERR(bridge)) { + /* + * Either there was no OLDI sink in the devicetree, or + * the OLDI sink has not been added yet. In any case, + * return. + * We don't want to have an OLDI node connected to DSS + * but not to any sink. + */ + ret = dev_err_probe(tidss->dev, PTR_ERR(bridge), + "no panel/bridge for OLDI%u.\n", + oldi_instance); + goto err_put_node; + } + + link_type = get_oldi_mode(child, &companion_instance); + if (link_type == OLDI_MODE_UNSUPPORTED) { + ret = dev_err_probe(tidss->dev, -EINVAL, + "OLDI%u: Unsupported OLDI connection.\n", + oldi_instance); + goto err_put_node; + } else if ((link_type == OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK) || + (link_type == OLDI_MODE_CLONE_SINGLE_LINK)) { + /* + * The OLDI driver cannot support OLDI clone mode + * properly at present. + * The clone mode requires 2 working encoder-bridge + * pipelines, generating from the same crtc. The DRM + * framework does not support this at present. If + * there were to be, say, 2 OLDI sink bridges each + * connected to an OLDI TXes, they couldn't both be + * supported simultaneously. + * This driver still has some code pertaining to OLDI + * clone mode configuration in DSS hardware for future, + * when there is a better infrastructure in the DRM + * framework to support 2 encoder-bridge pipelines + * simultaneously. 
+ * Till that time, this driver shall error out if it + * detects a clone mode configuration. + */ + ret = dev_err_probe(tidss->dev, -EOPNOTSUPP, + "The OLDI driver does not support Clone Mode at present.\n"); + goto err_put_node; + } else if (link_type == OLDI_MODE_SECONDARY_DUAL_LINK) { + /* + * This is the secondary OLDI node, which serves as a + * companion to the primary OLDI, when it is configured + * for the dual-link mode. Since the primary OLDI will + * be a part of bridge chain, no need to put this one + * too. Continue onto the next OLDI node. + */ + continue; + } + + oldi = devm_drm_bridge_alloc(tidss->dev, struct tidss_oldi, bridge, + &tidss_oldi_bridge_funcs); + if (IS_ERR(oldi)) { + ret = PTR_ERR(oldi); + goto err_put_node; + } + + oldi->parent_vp = parent_vp; + oldi->oldi_instance = oldi_instance; + oldi->companion_instance = companion_instance; + oldi->link_type = link_type; + oldi->dev = tidss->dev; + oldi->next_bridge = bridge; + + /* + * Only the primary OLDI needs to reference the io-ctrl system + * registers, and the serial clock. + * We don't require a check for secondary OLDI in dual-link mode + * because the driver will not create a drm_bridge instance. + * But the driver will need to create a drm_bridge instance, + * for secondary OLDI in clone mode (once it is supported). + */ + if (link_type != OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK) { + oldi->io_ctrl = syscon_regmap_lookup_by_phandle(child, + "ti,oldi-io-ctrl"); + if (IS_ERR(oldi->io_ctrl)) { + ret = dev_err_probe(oldi->dev, PTR_ERR(oldi->io_ctrl), + "OLDI%u: syscon_regmap_lookup_by_phandle failed.\n", + oldi_instance); + goto err_put_node; + } + + oldi->serial = of_clk_get_by_name(child, "serial"); + if (IS_ERR(oldi->serial)) { + ret = dev_err_probe(oldi->dev, PTR_ERR(oldi->serial), + "OLDI%u: Failed to get serial clock.\n", + oldi_instance); + goto err_put_node; + } + } + + /* Register the bridge. 
*/ + oldi->bridge.of_node = child; + oldi->bridge.driver_private = oldi; + oldi->bridge.timings = &default_tidss_oldi_timings; + + tidss->oldis[tidss->num_oldis++] = oldi; + oldi->tidss = tidss; + + drm_bridge_add(&oldi->bridge); + } + + of_node_put(child); + of_node_put(oldi_parent); + + return 0; + +err_put_node: + of_node_put(child); + of_node_put(oldi_parent); + return ret; +} diff --git a/drivers/gpu/drm/tidss/tidss_oldi.h b/drivers/gpu/drm/tidss/tidss_oldi.h new file mode 100644 index 000000000000..8cd535c5ee65 --- /dev/null +++ b/drivers/gpu/drm/tidss/tidss_oldi.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2025 - Texas Instruments Incorporated + * + * Aradhya Bhatia <[email protected]> + */ + +#ifndef __TIDSS_OLDI_H__ +#define __TIDSS_OLDI_H__ + +#include "tidss_drv.h" + +struct tidss_oldi; + +/* OLDI PORTS */ +#define OLDI_INPUT_PORT 0 +#define OLDI_OUTPUT_PORT 1 + +/* Control MMR Registers */ + +/* Register offsets */ +#define OLDI_PD_CTRL 0x100 +#define OLDI_LB_CTRL 0x104 + +/* Power control bits */ +#define OLDI_PWRDOWN_TX(n) BIT(n) + +/* LVDS Bandgap reference Enable/Disable */ +#define OLDI_PWRDN_BG BIT(8) + +enum tidss_oldi_link_type { + OLDI_MODE_UNSUPPORTED, + OLDI_MODE_SINGLE_LINK, + OLDI_MODE_CLONE_SINGLE_LINK, + OLDI_MODE_SECONDARY_CLONE_SINGLE_LINK, + OLDI_MODE_DUAL_LINK, + OLDI_MODE_SECONDARY_DUAL_LINK, +}; + +int tidss_oldi_init(struct tidss_device *tidss); +void tidss_oldi_deinit(struct tidss_device *tidss); + +#endif /* __TIDSS_OLDI_H__ */ diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c index 3148f5d3dbd6..1bcc67977f48 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c @@ -542,14 +542,15 @@ static void ttm_bo_validate_no_placement_signaled(struct kunit *test) bo->ttm = old_tt; } - err = ttm_resource_alloc(bo, place, &bo->resource, NULL); - KUNIT_EXPECT_EQ(test, err, 0); - KUNIT_ASSERT_EQ(test, man->usage, size); - placement = kunit_kzalloc(test, sizeof(*placement), GFP_KERNEL); KUNIT_ASSERT_NOT_NULL(test, placement); ttm_bo_reserve(bo, false, false, NULL); + + err = ttm_resource_alloc(bo, place, &bo->resource, NULL); + KUNIT_EXPECT_EQ(test, err, 0); + KUNIT_ASSERT_EQ(test, man->usage, size); + err = ttm_bo_validate(bo, placement, &ctx); ttm_bo_unreserve(bo); @@ -757,56 +758,6 @@ static void ttm_bo_validate_move_fence_not_signaled(struct kunit *test) ttm_mock_manager_fini(priv->ttm_dev, snd_mem); } -static void ttm_bo_validate_swapout(struct kunit *test) -{ - unsigned long size_big, size = ALIGN(BO_SIZE, PAGE_SIZE); - enum ttm_bo_type bo_type = ttm_bo_type_device; - struct ttm_buffer_object *bo_small, *bo_big; - struct ttm_test_devices *priv = test->priv; - struct ttm_operation_ctx ctx = { }; - struct ttm_placement *placement; - u32 mem_type = TTM_PL_TT; - struct ttm_place *place; - struct sysinfo si; - int err; - - si_meminfo(&si); - size_big = ALIGN(((u64)si.totalram * si.mem_unit / 2), PAGE_SIZE); - - ttm_mock_manager_init(priv->ttm_dev, mem_type, size_big + size); - - place = ttm_place_kunit_init(test, mem_type, 0); - placement = ttm_placement_kunit_init(test, place, 1); - - bo_small = kunit_kzalloc(test, sizeof(*bo_small), GFP_KERNEL); - KUNIT_ASSERT_NOT_NULL(test, bo_small); - - drm_gem_private_object_init(priv->drm, &bo_small->base, size); - - err = ttm_bo_init_reserved(priv->ttm_dev, bo_small, bo_type, placement, - PAGE_SIZE, &ctx, NULL, NULL, - &dummy_ttm_bo_destroy); - 
KUNIT_EXPECT_EQ(test, err, 0); - dma_resv_unlock(bo_small->base.resv); - - bo_big = ttm_bo_kunit_init(test, priv, size_big, NULL); - - dma_resv_lock(bo_big->base.resv, NULL); - err = ttm_bo_validate(bo_big, placement, &ctx); - dma_resv_unlock(bo_big->base.resv); - - KUNIT_EXPECT_EQ(test, err, 0); - KUNIT_EXPECT_NOT_NULL(test, bo_big->resource); - KUNIT_EXPECT_EQ(test, bo_big->resource->mem_type, mem_type); - KUNIT_EXPECT_EQ(test, bo_small->resource->mem_type, TTM_PL_SYSTEM); - KUNIT_EXPECT_TRUE(test, bo_small->ttm->page_flags & TTM_TT_FLAG_SWAPPED); - - ttm_bo_put(bo_big); - ttm_bo_put(bo_small); - - ttm_mock_manager_fini(priv->ttm_dev, mem_type); -} - static void ttm_bo_validate_happy_evict(struct kunit *test) { u32 mem_type = TTM_PL_VRAM, mem_multihop = TTM_PL_TT, @@ -1201,7 +1152,6 @@ static struct kunit_case ttm_bo_validate_test_cases[] = { KUNIT_CASE(ttm_bo_validate_move_fence_signaled), KUNIT_CASE_PARAM(ttm_bo_validate_move_fence_not_signaled, ttm_bo_validate_wait_gen_params), - KUNIT_CASE(ttm_bo_validate_swapout), KUNIT_CASE(ttm_bo_validate_happy_evict), KUNIT_CASE(ttm_bo_validate_all_pinned_evict), KUNIT_CASE(ttm_bo_validate_allowed_only_evict), diff --git a/drivers/gpu/drm/ttm/ttm_backup.c b/drivers/gpu/drm/ttm/ttm_backup.c index cb1b8e5dadf5..32530c75f038 100644 --- a/drivers/gpu/drm/ttm/ttm_backup.c +++ b/drivers/gpu/drm/ttm/ttm_backup.c @@ -114,15 +114,8 @@ ttm_backup_backup_page(struct file *backup, struct page *page, if (writeback && !folio_mapped(to_folio) && folio_clear_dirty_for_io(to_folio)) { - struct writeback_control wbc = { - .sync_mode = WB_SYNC_NONE, - .nr_to_write = SWAP_CLUSTER_MAX, - .range_start = 0, - .range_end = LLONG_MAX, - .for_reclaim = 1, - }; folio_set_reclaim(to_folio); - ret = shmem_writeout(to_folio, &wbc); + ret = shmem_writeout(to_folio, NULL, NULL); if (!folio_test_writeback(to_folio)) folio_clear_reclaim(to_folio); /* diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index bb9c5c8e16b5..f4d9e68b21e7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -526,11 +526,11 @@ static s64 ttm_bo_evict_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object * return 0; if (bo->deleted) { - lret = ttm_bo_wait_ctx(bo, walk->ctx); + lret = ttm_bo_wait_ctx(bo, walk->arg.ctx); if (!lret) ttm_bo_cleanup_memtype_use(bo); } else { - lret = ttm_bo_evict(bo, walk->ctx); + lret = ttm_bo_evict(bo, walk->arg.ctx); } if (lret) @@ -566,8 +566,10 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev, struct ttm_bo_evict_walk evict_walk = { .walk = { .ops = &ttm_evict_walk_ops, - .ctx = ctx, - .ticket = ticket, + .arg = { + .ctx = ctx, + .ticket = ticket, + } }, .place = place, .evictor = evictor, @@ -576,7 +578,7 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev, }; s64 lret; - evict_walk.walk.trylock_only = true; + evict_walk.walk.arg.trylock_only = true; lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1); /* One more attempt if we hit low limit? */ @@ -590,12 +592,12 @@ static int ttm_bo_evict_alloc(struct ttm_device *bdev, /* Reset low limit */ evict_walk.try_low = evict_walk.hit_low = false; /* If ticket-locking, repeat while making progress. 
*/ - evict_walk.walk.trylock_only = false; + evict_walk.walk.arg.trylock_only = false; retry: do { /* The walk may clear the evict_walk.walk.ticket field */ - evict_walk.walk.ticket = ticket; + evict_walk.walk.arg.ticket = ticket; evict_walk.evicted = 0; lret = ttm_lru_walk_for_evict(&evict_walk.walk, bdev, man, 1); } while (!lret && evict_walk.evicted); @@ -1106,7 +1108,7 @@ ttm_bo_swapout_cb(struct ttm_lru_walk *walk, struct ttm_buffer_object *bo) struct ttm_place place = {.mem_type = bo->resource->mem_type}; struct ttm_bo_swapout_walk *swapout_walk = container_of(walk, typeof(*swapout_walk), walk); - struct ttm_operation_ctx *ctx = walk->ctx; + struct ttm_operation_ctx *ctx = walk->arg.ctx; s64 ret; /* @@ -1217,8 +1219,10 @@ s64 ttm_bo_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, struct ttm_bo_swapout_walk swapout_walk = { .walk = { .ops = &ttm_swap_ops, - .ctx = ctx, - .trylock_only = true, + .arg = { + .ctx = ctx, + .trylock_only = true, + }, }, .gfp_flags = gfp_flags, }; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index b9a772b26fa1..acbbca9d5c92 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -258,6 +258,13 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, ret = dma_resv_trylock(&fbo->base.base._resv); WARN_ON(!ret); + ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); + if (ret) { + dma_resv_unlock(&fbo->base.base._resv); + kfree(fbo); + return ret; + } + if (fbo->base.resource) { ttm_resource_set_bo(fbo->base.resource, &fbo->base); bo->resource = NULL; @@ -266,12 +273,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, fbo->base.bulk_move = NULL; } - ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); - if (ret) { - kfree(fbo); - return ret; - } - ttm_bo_get(bo); fbo->bo = bo; @@ -382,6 +383,32 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, } /** + * ttm_bo_kmap_try_from_panic + * + * @bo: The buffer object + * @page: The page to map + * + * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic(). + * This should only be called from the panic handler, if you make sure the bo + * is the one being displayed, so is properly allocated, and protected. + * + * Returns the vaddr, that you can use to write to the bo, and that you should + * pass to kunmap_local() when you're done with this page, or NULL if the bo + * is in iomem. + */ +void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page) +{ + if (page + 1 > PFN_UP(bo->resource->size)) + return NULL; + + if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page]) + return kmap_local_page_try_from_panic(bo->ttm->pages[page]); + + return NULL; +} +EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic); + +/** * ttm_bo_kmap * * @bo: The buffer object. 
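A hedged usage sketch for the ttm_bo_kmap_try_from_panic() helper added above: a driver's drm_panic drawing path could map the page holding a given byte offset and write one pixel through it, bailing out when the bo lives in iomem. Everything except ttm_bo_kmap_try_from_panic() and kunmap_local() is an assumed name for illustration, not part of this patch:

#include <linux/highmem.h>
#include <drm/ttm/ttm_bo.h>

/* Illustrative panic-path pixel write under the assumptions above. */
static void example_panic_set_pixel(struct ttm_buffer_object *bo,
				    unsigned long offset, u32 color)
{
	u32 *vaddr = ttm_bo_kmap_try_from_panic(bo, offset >> PAGE_SHIFT);

	if (!vaddr)
		return;	/* iomem or unpopulated page: needs a different path */

	vaddr[(offset & ~PAGE_MASK) / sizeof(u32)] = color;
	kunmap_local(vaddr);
}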
@@ -773,14 +800,15 @@ error_destroy_tt: return ret; } -static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx, - struct ttm_buffer_object *bo, - bool *needs_unlock) +static bool ttm_lru_walk_trylock(struct ttm_bo_lru_cursor *curs, + struct ttm_buffer_object *bo) { - *needs_unlock = false; + struct ttm_operation_ctx *ctx = curs->arg->ctx; + + curs->needs_unlock = false; if (dma_resv_trylock(bo->base.resv)) { - *needs_unlock = true; + curs->needs_unlock = true; return true; } @@ -792,27 +820,27 @@ static bool ttm_lru_walk_trylock(struct ttm_operation_ctx *ctx, return false; } -static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk, - struct ttm_buffer_object *bo, - bool *needs_unlock) +static int ttm_lru_walk_ticketlock(struct ttm_bo_lru_cursor *curs, + struct ttm_buffer_object *bo) { + struct ttm_lru_walk_arg *arg = curs->arg; struct dma_resv *resv = bo->base.resv; int ret; - if (walk->ctx->interruptible) - ret = dma_resv_lock_interruptible(resv, walk->ticket); + if (arg->ctx->interruptible) + ret = dma_resv_lock_interruptible(resv, arg->ticket); else - ret = dma_resv_lock(resv, walk->ticket); + ret = dma_resv_lock(resv, arg->ticket); if (!ret) { - *needs_unlock = true; + curs->needs_unlock = true; /* * Only a single ticketlock per loop. Ticketlocks are prone * to return -EDEADLK causing the eviction to fail, so * after waiting for the ticketlock, revert back to * trylocking for this walk. */ - walk->ticket = NULL; + arg->ticket = NULL; } else if (ret == -EDEADLK) { /* Caller needs to exit the ww transaction. */ ret = -ENOSPC; @@ -821,12 +849,6 @@ static int ttm_lru_walk_ticketlock(struct ttm_lru_walk *walk, return ret; } -static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked) -{ - if (locked) - dma_resv_unlock(bo->base.resv); -} - /** * ttm_lru_walk_for_evict() - Perform a LRU list walk, with actions taken on * valid items. @@ -861,64 +883,21 @@ static void ttm_lru_walk_unlock(struct ttm_buffer_object *bo, bool locked) s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev, struct ttm_resource_manager *man, s64 target) { - struct ttm_resource_cursor cursor; - struct ttm_resource *res; + struct ttm_bo_lru_cursor cursor; + struct ttm_buffer_object *bo; s64 progress = 0; s64 lret; - spin_lock(&bdev->lru_lock); - ttm_resource_cursor_init(&cursor, man); - ttm_resource_manager_for_each_res(&cursor, res) { - struct ttm_buffer_object *bo = res->bo; - bool bo_needs_unlock = false; - bool bo_locked = false; - int mem_type; - - /* - * Attempt a trylock before taking a reference on the bo, - * since if we do it the other way around, and the trylock fails, - * we need to drop the lru lock to put the bo. - */ - if (ttm_lru_walk_trylock(walk->ctx, bo, &bo_needs_unlock)) - bo_locked = true; - else if (!walk->ticket || walk->ctx->no_wait_gpu || - walk->trylock_only) - continue; - - if (!ttm_bo_get_unless_zero(bo)) { - ttm_lru_walk_unlock(bo, bo_needs_unlock); - continue; - } - - mem_type = res->mem_type; - spin_unlock(&bdev->lru_lock); - - lret = 0; - if (!bo_locked) - lret = ttm_lru_walk_ticketlock(walk, bo, &bo_needs_unlock); - - /* - * Note that in between the release of the lru lock and the - * ticketlock, the bo may have switched resource, - * and also memory type, since the resource may have been - * freed and allocated again with a different memory type. - * In that case, just skip it. 
- */ - if (!lret && bo->resource && bo->resource->mem_type == mem_type) - lret = walk->ops->process_bo(walk, bo); - - ttm_lru_walk_unlock(bo, bo_needs_unlock); - ttm_bo_put(bo); + ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &walk->arg, bo) { + lret = walk->ops->process_bo(walk, bo); if (lret == -EBUSY || lret == -EALREADY) lret = 0; progress = (lret < 0) ? lret : progress + lret; - - spin_lock(&bdev->lru_lock); if (progress < 0 || progress >= target) break; } - ttm_resource_cursor_fini(&cursor); - spin_unlock(&bdev->lru_lock); + if (IS_ERR(bo)) + return PTR_ERR(bo); return progress; } @@ -956,44 +935,87 @@ EXPORT_SYMBOL(ttm_bo_lru_cursor_fini); * ttm_bo_lru_cursor_init() - Initialize a struct ttm_bo_lru_cursor * @curs: The ttm_bo_lru_cursor to initialize. * @man: The ttm resource_manager whose LRU lists to iterate over. - * @ctx: The ttm_operation_ctx to govern the locking. + * @arg: The ttm_lru_walk_arg to govern the walk. * - * Initialize a struct ttm_bo_lru_cursor. Currently only trylocking - * or prelocked buffer objects are available as detailed by - * @ctx::resv and @ctx::allow_res_evict. Ticketlocking is not - * supported. + * Initialize a struct ttm_bo_lru_cursor. * * Return: Pointer to @curs. The function does not fail. */ struct ttm_bo_lru_cursor * ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man, - struct ttm_operation_ctx *ctx) + struct ttm_lru_walk_arg *arg) { memset(curs, 0, sizeof(*curs)); ttm_resource_cursor_init(&curs->res_curs, man); - curs->ctx = ctx; + curs->arg = arg; return curs; } EXPORT_SYMBOL(ttm_bo_lru_cursor_init); static struct ttm_buffer_object * -ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *curs) +__ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs) { - struct ttm_buffer_object *bo = res->bo; + spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock; + struct ttm_resource *res = NULL; + struct ttm_buffer_object *bo; + struct ttm_lru_walk_arg *arg = curs->arg; + bool first = !curs->bo; - if (!ttm_lru_walk_trylock(curs->ctx, bo, &curs->needs_unlock)) - return NULL; + ttm_bo_lru_cursor_cleanup_bo(curs); - if (!ttm_bo_get_unless_zero(bo)) { - if (curs->needs_unlock) - dma_resv_unlock(bo->base.resv); - return NULL; + spin_lock(lru_lock); + for (;;) { + int mem_type, ret = 0; + bool bo_locked = false; + + if (first) { + res = ttm_resource_manager_first(&curs->res_curs); + first = false; + } else { + res = ttm_resource_manager_next(&curs->res_curs); + } + if (!res) + break; + + bo = res->bo; + if (ttm_lru_walk_trylock(curs, bo)) + bo_locked = true; + else if (!arg->ticket || arg->ctx->no_wait_gpu || arg->trylock_only) + continue; + + if (!ttm_bo_get_unless_zero(bo)) { + if (curs->needs_unlock) + dma_resv_unlock(bo->base.resv); + continue; + } + + mem_type = res->mem_type; + spin_unlock(lru_lock); + if (!bo_locked) + ret = ttm_lru_walk_ticketlock(curs, bo); + + /* + * Note that in between the release of the lru lock and the + * ticketlock, the bo may have switched resource, + * and also memory type, since the resource may have been + * freed and allocated again with a different memory type. + * In that case, just skip it. + */ + curs->bo = bo; + if (!ret && bo->resource && bo->resource->mem_type == mem_type) + return bo; + + ttm_bo_lru_cursor_cleanup_bo(curs); + if (ret && ret != -EALREADY) + return ERR_PTR(ret); + + spin_lock(lru_lock); } - curs->bo = bo; - return bo; + spin_unlock(lru_lock); + return res ? 
bo : NULL; } /** @@ -1007,25 +1029,7 @@ ttm_bo_from_res_reserved(struct ttm_resource *res, struct ttm_bo_lru_cursor *cur */ struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs) { - spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock; - struct ttm_resource *res = NULL; - struct ttm_buffer_object *bo; - - ttm_bo_lru_cursor_cleanup_bo(curs); - - spin_lock(lru_lock); - for (;;) { - res = ttm_resource_manager_next(&curs->res_curs); - if (!res) - break; - - bo = ttm_bo_from_res_reserved(res, curs); - if (bo) - break; - } - - spin_unlock(lru_lock); - return res ? bo : NULL; + return __ttm_bo_lru_cursor_next(curs); } EXPORT_SYMBOL(ttm_bo_lru_cursor_next); @@ -1039,21 +1043,8 @@ EXPORT_SYMBOL(ttm_bo_lru_cursor_next); */ struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs) { - spinlock_t *lru_lock = &curs->res_curs.man->bdev->lru_lock; - struct ttm_buffer_object *bo; - struct ttm_resource *res; - - spin_lock(lru_lock); - res = ttm_resource_manager_first(&curs->res_curs); - if (!res) { - spin_unlock(lru_lock); - return NULL; - } - - bo = ttm_bo_from_res_reserved(res, curs); - spin_unlock(lru_lock); - - return bo ? bo : ttm_bo_lru_cursor_next(curs); + ttm_bo_lru_cursor_cleanup_bo(curs); + return __ttm_bo_lru_cursor_next(curs); } EXPORT_SYMBOL(ttm_bo_lru_cursor_first); diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index 816e2cba6016..c3e2fcbdd2cc 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -125,6 +125,28 @@ out: return ret; } +/** + * ttm_device_prepare_hibernation - move GTT BOs to shmem for hibernation. + * + * @bdev: A pointer to a struct ttm_device to prepare hibernation for. + * + * Return: 0 on success, negative number on failure. + */ +int ttm_device_prepare_hibernation(struct ttm_device *bdev) +{ + struct ttm_operation_ctx ctx = { + .interruptible = false, + .no_wait_gpu = false, + }; + int ret; + + do { + ret = ttm_device_swapout(bdev, &ctx, GFP_KERNEL); + } while (ret > 0); + return ret; +} +EXPORT_SYMBOL(ttm_device_prepare_hibernation); + /* * A buffer object shrink method that tries to swap out the first * buffer object on the global::swap_lru list. 
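For context on how a driver would consume this, a hedged sketch of a hibernation freeze callback; my_freeze() and my_drm_to_ttm() are hypothetical, and only ttm_device_prepare_hibernation() comes from the hunk above. The helper loops internally until a ttm_device_swapout() pass makes no further progress, returning 0 or a negative error:

static int my_freeze(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct ttm_device *bdev = my_drm_to_ttm(drm); /* hypothetical accessor */
	int ret;

	/* Move GTT-backed buffer contents to shmem so they are captured
	 * in the hibernation image rather than lost across suspend.
	 */
	ret = ttm_device_prepare_hibernation(bdev);
	if (ret)
		drm_err(drm, "failed to prepare hibernation: %d\n", ret);

	return ret;
}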
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index ce5ae7cacb90..1922988625eb 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -57,7 +57,7 @@ static const struct drm_driver driver = { /* GEM hooks */ .fops = &udl_driver_fops, - DRM_GEM_SHMEM_DRIVER_OPS_NO_MAP_SGT, + DRM_GEM_SHMEM_DRIVER_OPS, DRM_FBDEV_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index bb7815599435..c41476ddde68 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -16,7 +16,6 @@ */ #include <linux/dma-buf.h> -#include <linux/pfn_t.h> #include <linux/vmalloc.h> #include "v3d_drv.h" diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index b51f0b648a08..411e47702f8a 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -101,6 +101,12 @@ enum v3d_gen { V3D_GEN_71 = 71, }; +enum v3d_irq { + V3D_CORE_IRQ, + V3D_HUB_IRQ, + V3D_MAX_IRQS, +}; + struct v3d_dev { struct drm_device drm; @@ -112,6 +118,8 @@ struct v3d_dev { bool single_irq_line; + int irq[V3D_MAX_IRQS]; + struct v3d_perfmon_info perfmon_info; void __iomem *hub_regs; diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index d7d16da78db3..37bf5eecdd2c 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -134,6 +134,8 @@ v3d_reset(struct v3d_dev *v3d) if (false) v3d_idle_axi(v3d, 0); + v3d_irq_disable(v3d); + v3d_idle_gca(v3d); v3d_reset_sms(v3d); v3d_reset_v3d(v3d); diff --git a/drivers/gpu/drm/v3d/v3d_gemfs.c b/drivers/gpu/drm/v3d/v3d_gemfs.c index 4c5e18590a5c..8ec6ed82b3d9 100644 --- a/drivers/gpu/drm/v3d/v3d_gemfs.c +++ b/drivers/gpu/drm/v3d/v3d_gemfs.c @@ -3,14 +3,21 @@ #include <linux/fs.h> #include <linux/mount.h> +#include <linux/fs_context.h> #include "v3d_drv.h" +static int add_param(struct fs_context *fc, const char *key, const char *val) +{ + return vfs_parse_fs_string(fc, key, val, strlen(val)); +} + void v3d_gemfs_init(struct v3d_dev *v3d) { - char huge_opt[] = "huge=within_size"; struct file_system_type *type; + struct fs_context *fc; struct vfsmount *gemfs; + int ret; /* * By creating our own shmemfs mountpoint, we can pass in @@ -28,8 +35,16 @@ void v3d_gemfs_init(struct v3d_dev *v3d) if (!type) goto err; - gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, huge_opt); - if (IS_ERR(gemfs)) + fc = fs_context_for_mount(type, SB_KERNMOUNT); + if (IS_ERR(fc)) + goto err; + ret = add_param(fc, "source", "tmpfs"); + if (!ret) + ret = add_param(fc, "huge", "within_size"); + if (!ret) + gemfs = fc_mount_longterm(fc); + put_fs_context(fc); + if (ret) goto err; v3d->gemfs = gemfs; diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index 2cca5d3a26a2..a515a301e480 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -260,7 +260,7 @@ v3d_hub_irq(int irq, void *arg) int v3d_irq_init(struct v3d_dev *v3d) { - int irq1, ret, core; + int irq, ret, core; INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work); @@ -271,17 +271,24 @@ v3d_irq_init(struct v3d_dev *v3d) V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver)); V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver)); - irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1); - if (irq1 == -EPROBE_DEFER) - return irq1; - if (irq1 > 0) { - ret = devm_request_irq(v3d->drm.dev, irq1, + irq = platform_get_irq_optional(v3d_to_pdev(v3d), 1); + if (irq == -EPROBE_DEFER) + return irq; + if (irq > 0) { + 
v3d->irq[V3D_CORE_IRQ] = irq; + + ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ], v3d_irq, IRQF_SHARED, "v3d_core0", v3d); if (ret) goto fail; - ret = devm_request_irq(v3d->drm.dev, - platform_get_irq(v3d_to_pdev(v3d), 0), + + irq = platform_get_irq(v3d_to_pdev(v3d), 0); + if (irq < 0) + return irq; + v3d->irq[V3D_HUB_IRQ] = irq; + + ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_HUB_IRQ], v3d_hub_irq, IRQF_SHARED, "v3d_hub", v3d); if (ret) @@ -289,8 +296,12 @@ v3d_irq_init(struct v3d_dev *v3d) } else { v3d->single_irq_line = true; - ret = devm_request_irq(v3d->drm.dev, - platform_get_irq(v3d_to_pdev(v3d), 0), + irq = platform_get_irq(v3d_to_pdev(v3d), 0); + if (irq < 0) + return irq; + v3d->irq[V3D_CORE_IRQ] = irq; + + ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ], v3d_irq, IRQF_SHARED, "v3d", v3d); if (ret) @@ -331,6 +342,12 @@ v3d_irq_disable(struct v3d_dev *v3d) V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0); V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0); + /* Finish any interrupt handler still in flight. */ + for (int i = 0; i < V3D_MAX_IRQS; i++) { + if (v3d->irq[i]) + synchronize_irq(v3d->irq[i]); + } + /* Clear any pending interrupts we might have left. */ for (core = 0; core < v3d->cores; core++) V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver)); diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index 35f131a46d07..cb9df8822472 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -199,7 +199,6 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue) struct v3d_dev *v3d = job->v3d; struct v3d_file_priv *file = job->file->driver_priv; struct v3d_stats *global_stats = &v3d->queue[queue].stats; - struct v3d_stats *local_stats = &file->stats[queue]; u64 now = local_clock(); unsigned long flags; @@ -209,7 +208,12 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue) else preempt_disable(); - v3d_stats_update(local_stats, now); + /* Don't update the local stats if the file context has already closed */ + if (file) + v3d_stats_update(&file->stats[queue], now); + else + drm_dbg(&v3d->drm, "The file descriptor was closed before job completion\n"); + v3d_stats_update(global_stats, now); if (IS_ENABLED(CONFIG_LOCKDEP)) @@ -741,17 +745,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) mutex_unlock(&v3d->reset_lock); - return DRM_GPU_SCHED_STAT_NOMINAL; -} - -static void -v3d_sched_skip_reset(struct drm_sched_job *sched_job) -{ - struct drm_gpu_scheduler *sched = sched_job->sched; - - spin_lock(&sched->job_list_lock); - list_add(&sched_job->list, &sched->pending_list); - spin_unlock(&sched->job_list_lock); + return DRM_GPU_SCHED_STAT_RESET; } static enum drm_gpu_sched_stat @@ -772,8 +766,7 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, *timedout_ctca = ctca; *timedout_ctra = ctra; - v3d_sched_skip_reset(sched_job); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_NO_HANG; } return v3d_gpu_reset_for_timeout(v3d, sched_job); @@ -818,8 +811,7 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job) if (job->timedout_batches != batches) { job->timedout_batches = batches; - v3d_sched_skip_reset(sched_job); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_NO_HANG; } return v3d_gpu_reset_for_timeout(v3d, sched_job); diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index f5b167417428..8f983edb81ff 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ 
b/drivers/gpu/drm/vc4/vc4_kms.c @@ -530,6 +530,7 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state) static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct vc4_dev *vc4 = to_vc4_dev(dev); @@ -568,7 +569,7 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, mode_cmd = &mode_cmd_local; } - return drm_gem_fb_create(dev, file_priv, mode_cmd); + return drm_gem_fb_create(dev, file_priv, info, mode_cmd); } /* Our CTM has some peculiar limitations: we can only enable it for one CRTC diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 2752ab4f1c97..260c64733972 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -32,7 +32,7 @@ #include <linux/dma-buf.h> #include <linux/module.h> -#include <linux/platform_device.h> +#include <linux/device/faux.h> #include <linux/shmem_fs.h> #include <linux/vmalloc.h> @@ -52,7 +52,7 @@ static struct vgem_device { struct drm_device drm; - struct platform_device *platform; + struct faux_device *faux_dev; } *vgem_device; static int vgem_open(struct drm_device *dev, struct drm_file *file) @@ -127,27 +127,27 @@ static const struct drm_driver vgem_driver = { static int __init vgem_init(void) { int ret; - struct platform_device *pdev; + struct faux_device *fdev; - pdev = platform_device_register_simple("vgem", -1, NULL, 0); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + fdev = faux_device_create("vgem", NULL, NULL); + if (!fdev) + return -ENODEV; - if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + if (!devres_open_group(&fdev->dev, NULL, GFP_KERNEL)) { ret = -ENOMEM; goto out_unregister; } - dma_coerce_mask_and_coherent(&pdev->dev, + dma_coerce_mask_and_coherent(&fdev->dev, DMA_BIT_MASK(64)); - vgem_device = devm_drm_dev_alloc(&pdev->dev, &vgem_driver, + vgem_device = devm_drm_dev_alloc(&fdev->dev, &vgem_driver, struct vgem_device, drm); if (IS_ERR(vgem_device)) { ret = PTR_ERR(vgem_device); goto out_devres; } - vgem_device->platform = pdev; + vgem_device->faux_dev = fdev; /* Final step: expose the device/driver to userspace */ ret = drm_dev_register(&vgem_device->drm, 0); @@ -157,19 +157,19 @@ static int __init vgem_init(void) return 0; out_devres: - devres_release_group(&pdev->dev, NULL); + devres_release_group(&fdev->dev, NULL); out_unregister: - platform_device_unregister(pdev); + faux_device_destroy(fdev); return ret; } static void __exit vgem_exit(void) { - struct platform_device *pdev = vgem_device->platform; + struct faux_device *fdev = vgem_device->faux_dev; drm_dev_unregister(&vgem_device->drm); - devres_release_group(&pdev->dev, NULL); - platform_device_unregister(pdev); + devres_release_group(&fdev->dev, NULL); + faux_device_destroy(fdev); } module_init(vgem_init); diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c index 59a45e74a641..e5805ca646c7 100644 --- a/drivers/gpu/drm/virtio/virtgpu_display.c +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -66,6 +66,7 @@ static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = { static int virtio_gpu_framebuffer_init(struct drm_device *dev, struct virtio_gpu_framebuffer *vgfb, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object *obj) { @@ -73,7 +74,7 @@ virtio_gpu_framebuffer_init(struct drm_device *dev, vgfb->base.obj[0] = obj; - drm_helper_mode_fill_fb_struct(dev, &vgfb->base, 
mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &vgfb->base, info, mode_cmd); ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs); if (ret) { @@ -293,6 +294,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) static struct drm_framebuffer * virtio_gpu_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct drm_gem_object *obj = NULL; @@ -314,7 +316,7 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev, return ERR_PTR(-ENOMEM); } - ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj); + ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, info, mode_cmd, obj); if (ret) { kfree(virtio_gpu_fb); drm_gem_object_put(obj); diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index e32e680c7197..71c6ccad4b99 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -130,10 +130,10 @@ static void virtio_gpu_remove(struct virtio_device *vdev) static void virtio_gpu_shutdown(struct virtio_device *vdev) { - /* - * drm does its own synchronization on shutdown. - * Do nothing here, opt out of device reset. - */ + struct drm_device *dev = vdev->priv; + + /* stop talking to the device */ + drm_dev_unplug(dev); } static void virtio_gpu_config_changed(struct virtio_device *vdev) diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index f7def8b42068..f17660a71a3e 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -88,7 +88,6 @@ struct virtio_gpu_object_params { struct virtio_gpu_object { struct drm_gem_shmem_object base; - struct dma_buf *dma_buf; struct sg_table *sgt; uint32_t hw_res_handle; bool dumb; diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index 722cde5e2d86..ce49282198cb 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -204,15 +204,16 @@ static void virtgpu_dma_buf_free_obj(struct drm_gem_object *obj) { struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); struct virtio_gpu_device *vgdev = obj->dev->dev_private; + struct dma_buf_attachment *attach = obj->import_attach; if (drm_gem_is_imported(obj)) { - struct dma_buf *dmabuf = bo->dma_buf; + struct dma_buf *dmabuf = attach->dmabuf; dma_resv_lock(dmabuf->resv, NULL); virtgpu_dma_buf_unmap(bo); dma_resv_unlock(dmabuf->resv); - dma_buf_detach(dmabuf, obj->import_attach); + dma_buf_detach(dmabuf, attach); dma_buf_put(dmabuf); } @@ -332,7 +333,6 @@ struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev, obj->import_attach = attach; get_dma_buf(buf); - bo->dma_buf = buf; ret = virtgpu_dma_buf_init_obj(dev, bo, attach); if (ret < 0) diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 8c9898b9055d..e60573e0f3e9 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -302,8 +302,6 @@ struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *pri vkms_out->composer_workq = drmm_alloc_ordered_workqueue(dev, "vkms_composer", 0); if (IS_ERR(vkms_out->composer_workq)) return ERR_CAST(vkms_out->composer_workq); - if (!vkms_out->composer_workq) - return ERR_PTR(-ENOMEM); return vkms_out; } diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index a24d1655f7b8..e8472d9b6e3b 100644 --- 
a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -10,7 +10,7 @@ */ #include <linux/module.h> -#include <linux/platform_device.h> +#include <linux/device/faux.h> #include <linux/dma-mapping.h> #include <drm/clients/drm_client_setup.h> @@ -149,27 +149,27 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev) static int vkms_create(struct vkms_config *config) { int ret; - struct platform_device *pdev; + struct faux_device *fdev; struct vkms_device *vkms_device; const char *dev_name; dev_name = vkms_config_get_device_name(config); - pdev = platform_device_register_simple(dev_name, -1, NULL, 0); - if (IS_ERR(pdev)) - return PTR_ERR(pdev); + fdev = faux_device_create(dev_name, NULL, NULL); + if (!fdev) + return -ENODEV; - if (!devres_open_group(&pdev->dev, NULL, GFP_KERNEL)) { + if (!devres_open_group(&fdev->dev, NULL, GFP_KERNEL)) { ret = -ENOMEM; goto out_unregister; } - vkms_device = devm_drm_dev_alloc(&pdev->dev, &vkms_driver, + vkms_device = devm_drm_dev_alloc(&fdev->dev, &vkms_driver, struct vkms_device, drm); if (IS_ERR(vkms_device)) { ret = PTR_ERR(vkms_device); goto out_devres; } - vkms_device->platform = pdev; + vkms_device->faux_dev = fdev; vkms_device->config = config; config->dev = vkms_device; @@ -203,9 +203,9 @@ static int vkms_create(struct vkms_config *config) return 0; out_devres: - devres_release_group(&pdev->dev, NULL); + devres_release_group(&fdev->dev, NULL); out_unregister: - platform_device_unregister(pdev); + faux_device_destroy(fdev); return ret; } @@ -231,19 +231,19 @@ static int __init vkms_init(void) static void vkms_destroy(struct vkms_config *config) { - struct platform_device *pdev; + struct faux_device *fdev; if (!config->dev) { DRM_INFO("vkms_device is NULL.\n"); return; } - pdev = config->dev->platform; + fdev = config->dev->faux_dev; drm_dev_unregister(&config->dev->drm); drm_atomic_helper_shutdown(&config->dev->drm); - devres_release_group(&pdev->dev, NULL); - platform_device_unregister(pdev); + devres_release_group(&fdev->dev, NULL); + faux_device_destroy(fdev); config->dev = NULL; } diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 68b5e3fce455..8013c31efe3b 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -232,13 +232,13 @@ struct vkms_config; * struct vkms_device - Description of a VKMS device * * @drm - Base device in DRM - * @platform - Associated platform device + * @faux_dev - Associated faux device * @output - Configuration and sub-components of the VKMS device * @config: Configuration used in this VKMS device */ struct vkms_device { struct drm_device drm; - struct platform_device *platform; + struct faux_device *faux_dev; const struct vkms_config *config; }; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index bc0342c58b4b..8ff958d119be 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -750,7 +750,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, dev->fifo_mem = devm_memremap(dev->drm.dev, fifo_start, fifo_size, - MEMREMAP_WB); + MEMREMAP_WB | MEMREMAP_DEC); if (IS_ERR(dev->fifo_mem)) { drm_err(&dev->drm, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c index c55382167c1b..eedf1fe60be7 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c @@ -85,10 +85,10 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) int ret; if (drm_gem_is_imported(obj)) { - 
ret = dma_buf_vmap(obj->dma_buf, map); + ret = dma_buf_vmap(obj->import_attach->dmabuf, map); if (!ret) { if (drm_WARN_ON(obj->dev, map->is_iomem)) { - dma_buf_vunmap(obj->dma_buf, map); + dma_buf_vunmap(obj->import_attach->dmabuf, map); return -EIO; } } @@ -102,7 +102,7 @@ static int vmw_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) static void vmw_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { if (drm_gem_is_imported(obj)) - dma_buf_vunmap(obj->dma_buf, map); + dma_buf_vunmap(obj->import_attach->dmabuf, map); else drm_gem_ttm_vunmap(obj, map); } @@ -284,11 +284,10 @@ static void vmw_bo_print_info(int id, struct vmw_bo *bo, struct seq_file *m) seq_printf(m, "\t\t0x%08x: %12zu bytes %s, type = %s", id, bo->tbo.base.size, placement, type); - seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d, TTM refs = %d", + seq_printf(m, ", priority = %u, pin_count = %u, GEM refs = %d", bo->tbo.priority, bo->tbo.pin_count, - kref_read(&bo->tbo.base.refcount), - kref_read(&bo->tbo.kref)); + kref_read(&bo->tbo.base.refcount)); seq_puts(m, "\n"); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 05b1c54a070c..54ea1b513950 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -500,6 +500,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = { static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, struct vmw_user_object *uo, struct vmw_framebuffer **out, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) @@ -548,7 +549,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, goto out_err1; } - drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, info, mode_cmd); memcpy(&vfbs->uo, uo, sizeof(vfbs->uo)); vmw_user_object_ref(&vfbs->uo); @@ -602,6 +603,7 @@ static const struct drm_framebuffer_funcs vmw_framebuffer_bo_funcs = { static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, struct vmw_bo *bo, struct vmw_framebuffer **out, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) @@ -634,7 +636,7 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, } vfbd->base.base.obj[0] = &bo->tbo.base; - drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, info, mode_cmd); vfbd->base.bo = true; vfbd->buffer = vmw_bo_reference(bo); *out = &vfbd->base; @@ -679,11 +681,13 @@ vmw_kms_srf_ok(struct vmw_private *dev_priv, uint32_t width, uint32_t height) * @dev_priv: Pointer to device private struct. * @uo: Pointer to user object to wrap the kms framebuffer around. * Either the buffer or surface inside the user object must be NULL. + * @info: pixel format information. * @mode_cmd: Frame-buffer metadata. 
*/ struct vmw_framebuffer * vmw_kms_new_framebuffer(struct vmw_private *dev_priv, struct vmw_user_object *uo, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct vmw_framebuffer *vfb = NULL; @@ -692,10 +696,10 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv, /* Create the new framebuffer depending one what we have */ if (vmw_user_object_surface(uo)) { ret = vmw_kms_new_framebuffer_surface(dev_priv, uo, &vfb, - mode_cmd); + info, mode_cmd); } else if (uo->buffer) { ret = vmw_kms_new_framebuffer_bo(dev_priv, uo->buffer, &vfb, - mode_cmd); + info, mode_cmd); } else { BUG(); } @@ -712,6 +716,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv, static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd) { struct vmw_private *dev_priv = vmw_priv(dev); @@ -741,7 +746,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, } - vfb = vmw_kms_new_framebuffer(dev_priv, &uo, mode_cmd); + vfb = vmw_kms_new_framebuffer(dev_priv, &uo, info, mode_cmd); if (IS_ERR(vfb)) { ret = PTR_ERR(vfb); goto err_out; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 511e29cdb987..445471fe9be6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -399,6 +399,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv, struct vmw_framebuffer * vmw_kms_new_framebuffer(struct vmw_private *dev_priv, struct vmw_user_object *uo, + const struct drm_format_info *info, const struct drm_mode_fb_cmd2 *mode_cmd); void vmw_guess_mode_timing(struct drm_display_mode *mode); void vmw_kms_update_implicit_fb(struct vmw_private *dev_priv); diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 30ed74ad29ab..714d5702dfd7 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -1,9 +1,11 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_XE tristate "Intel Xe2 Graphics" - depends on DRM && PCI && (m || (y && KUNIT=y)) + depends on DRM && PCI + depends on KUNIT || !KUNIT depends on INTEL_VSEC || !INTEL_VSEC depends on X86_PLATFORM_DEVICES || !(X86 && ACPI) + depends on PAGE_SIZE_4KB || COMPILE_TEST || BROKEN select INTERVAL_TREE # we need shmfs for the swappable backing store, and in particular # the shmem_readpage() which depends upon tmpfs @@ -44,6 +46,7 @@ config DRM_XE select WANT_DEV_COREDUMP select AUXILIARY_BUS select HMM_MIRROR + select REGMAP if I2C help Driver for Intel Xe2 series GPUs and later. Experimental support for Xe series is also available. @@ -85,16 +88,18 @@ config DRM_XE_GPUSVM Enable this option if you want support for CPU to GPU address mirroring. - If in doubut say "Y". + If in doubt say "Y". -config DRM_XE_DEVMEM_MIRROR - bool "Enable device memory mirror" +config DRM_XE_PAGEMAP + bool "Enable device memory pool for SVM" depends on DRM_XE_GPUSVM select GET_FREE_REGION default y help - Disable this option only if you want to compile out without device - memory mirror. Will reduce KMD memory footprint when disabled. + Disable this option only if you don't want to expose local device + memory for SVM. Will reduce KMD memory footprint when disabled. + + If in doubt say "Y". 
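A side note on the dependency rework above: "depends on KUNIT || !KUNIT" is the usual Kconfig idiom for a soft dependency on a tristate, replacing the old "(m || (y && KUNIT=y))" expression. It is a no-op when KUNIT is y or n, but caps DRM_XE at m when KUNIT=m, so built-in xe code can never reference a modular KUnit. A minimal illustration of the pattern, with hypothetical symbols:

config FOO
	tristate "Example consumer of an optional subsystem"
	# No-op when BAR=y or BAR=n; when BAR=m, FOO is limited to m or n.
	depends on BAR || !BAR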
config DRM_XE_FORCE_PROBE string "Force probe xe for selected Intel hardware IDs" diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile index f5f5775acdc0..07c71a29963d 100644 --- a/drivers/gpu/drm/xe/Makefile +++ b/drivers/gpu/drm/xe/Makefile @@ -21,6 +21,13 @@ $(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \ $(src)/xe_wa_oob.rules $(call cmd,wa_oob) +generated_device_oob := $(obj)/generated/xe_device_wa_oob.c $(obj)/generated/xe_device_wa_oob.h +quiet_cmd_device_wa_oob = GEN $(notdir $(generated_device_oob)) + cmd_device_wa_oob = mkdir -p $(@D); $^ $(generated_device_oob) +$(obj)/generated/%_device_wa_oob.c $(obj)/generated/%_device_wa_oob.h: $(obj)/xe_gen_wa_oob \ + $(src)/xe_device_wa_oob.rules + $(call cmd,device_wa_oob) + # Please keep these build lists sorted! # core driver code @@ -80,6 +87,7 @@ xe-y += xe_bb.o \ xe_mmio.o \ xe_mocs.o \ xe_module.o \ + xe_nvm.o \ xe_oa.o \ xe_observation.o \ xe_pat.o \ @@ -124,6 +132,7 @@ xe-y += xe_bb.o \ xe_wait_user_fence.o \ xe_wopcm.o +xe-$(CONFIG_I2C) += xe_i2c.o xe-$(CONFIG_HMM_MIRROR) += xe_hmm.o xe-$(CONFIG_DRM_XE_GPUSVM) += xe_svm.o @@ -154,7 +163,8 @@ xe-$(CONFIG_PCI_IOV) += \ xe_lmtt_2l.o \ xe_lmtt_ml.o \ xe_pci_sriov.o \ - xe_sriov_pf.o + xe_sriov_pf.o \ + xe_sriov_pf_service.o # include helpers for tests even when XE is built-in ifdef CONFIG_DRM_XE_KUNIT_TEST @@ -205,7 +215,6 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/icl_dsi.o \ i915-display/intel_alpm.o \ i915-display/intel_atomic.o \ - i915-display/intel_atomic_plane.o \ i915-display/intel_audio.o \ i915-display/intel_backlight.o \ i915-display/intel_bios.o \ @@ -255,6 +264,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_fbc.o \ i915-display/intel_fdi.o \ i915-display/intel_fifo_underrun.o \ + i915-display/intel_flipq.o \ i915-display/intel_frontbuffer.o \ i915-display/intel_global_state.o \ i915-display/intel_gmbus.o \ @@ -271,6 +281,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \ i915-display/intel_modeset_verify.o \ i915-display/intel_panel.o \ i915-display/intel_pfit.o \ + i915-display/intel_plane.o \ i915-display/intel_pmdemand.o \ i915-display/intel_pch.o \ i915-display/intel_pps.o \ @@ -338,4 +349,4 @@ $(obj)/%.hdrtest: $(src)/%.h FORCE $(call if_changed_dep,hdrtest) uses_generated_oob := $(addprefix $(obj)/, $(xe-y)) -$(uses_generated_oob): $(obj)/generated/xe_wa_oob.h +$(uses_generated_oob): $(obj)/generated/xe_wa_oob.h $(obj)/generated/xe_device_wa_oob.h diff --git a/drivers/gpu/drm/xe/abi/guc_actions_abi.h b/drivers/gpu/drm/xe/abi/guc_actions_abi.h index ff4f412c28d8..81eb046aeebf 100644 --- a/drivers/gpu/drm/xe/abi/guc_actions_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_actions_abi.h @@ -142,6 +142,7 @@ enum xe_guc_action { XE_GUC_ACTION_SET_ENG_UTIL_BUFF = 0x550A, XE_GUC_ACTION_SET_DEVICE_ENGINE_ACTIVITY_BUFFER = 0x550C, XE_GUC_ACTION_SET_FUNCTION_ENGINE_ACTIVITY_BUFFER = 0x550D, + XE_GUC_ACTION_OPT_IN_FEATURE_KLV = 0x550E, XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR = 0x6000, XE_GUC_ACTION_REPORT_PAGE_FAULT_REQ_DESC = 0x6002, XE_GUC_ACTION_PAGE_FAULT_RES_DESC = 0x6003, @@ -271,4 +272,7 @@ enum xe_guc_g2g_type { #define XE_G2G_DEREGISTER_TILE REG_GENMASK(15, 12) #define XE_G2G_DEREGISTER_TYPE REG_GENMASK(11, 8) +/* invalid type for XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR */ +#define XE_GUC_CAT_ERR_TYPE_INVALID 0xdeadbeef + #endif diff --git a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h index 7de8f827281f..0366a9da5977 100644 --- a/drivers/gpu/drm/xe/abi/guc_klvs_abi.h +++ 
b/drivers/gpu/drm/xe/abi/guc_klvs_abi.h @@ -16,6 +16,7 @@ * +===+=======+==============================================================+ * | 0 | 31:16 | **KEY** - KLV key identifier | * | | | - `GuC Self Config KLVs`_ | + * | | | - `GuC Opt In Feature KLVs`_ | * | | | - `GuC VGT Policy KLVs`_ | * | | | - `GuC VF Configuration KLVs`_ | * | | | | @@ -125,6 +126,33 @@ enum { }; /** + * DOC: GuC Opt In Feature KLVs + * + * `GuC KLV`_ keys available for use with OPT_IN_FEATURE_KLV + * + * _`GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE` : 0x4001 + * Adds an extra dword to the XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR G2H + * containing the type of the CAT error. On HW that does not support + * reporting the CAT error type, the extra dword is set to 0xdeadbeef. + * + * _`GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH` : 0x4003 + * This KLV enables the Dynamic Inhibit Context Switch optimization, which + * consists in the GuC setting the CTX_CTRL_INHIBIT_SYN_CTX_SWITCH bit to + * zero in the CTX_CONTEXT_CONTROL register of LRCs that are submitted + * to an oversubscribed engine. This will cause those contexts to be + * switched out immediately if they hit an unsatisfied semaphore wait + * (instead of waiting the full timeslice duration). The bit is instead set + * to one if a single context is queued on the engine, to avoid it being + * switched out if there isn't another context that can run in its place. + */ + +#define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_KEY 0x4001 +#define GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_LEN 0u + +#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_KEY 0x4003 +#define GUC_KLV_OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH_LEN 0u + +/** * DOC: GuC VGT Policy KLVs * * `GuC KLV`_ keys available for use with PF2GUC_UPDATE_VGT_POLICY. 
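A sketch of how the new opt-in action and KLV keys fit together, assuming the KLV header layout documented at the top of this file (key in bits 31:16, length in bits 15:0) and xe's existing xe_guc_ct_send() H2G path; the function below is hypothetical, and the driver's real enabling helper may look different:

static int my_guc_opt_in_ext_cat_err_type(struct xe_guc *guc)
{
	u32 action[] = {
		XE_GUC_ACTION_OPT_IN_FEATURE_KLV,
		/* KLV header dword: key in 31:16, length in 15:0 (0 extra dwords) */
		FIELD_PREP(GENMASK(31, 16),
			   GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_KEY) |
		FIELD_PREP(GENMASK(15, 0),
			   GUC_KLV_OPT_IN_FEATURE_EXT_CAT_ERR_TYPE_LEN),
	};

	return xe_guc_ct_send(&guc->ct, action, ARRAY_SIZE(action), 0, 0);
}

Once the feature is enabled, the XE_GUC_ACTION_NOTIFY_MEMORY_CAT_ERROR G2H carries one extra dword with the CAT error type, set to XE_GUC_CAT_ERR_TYPE_INVALID (0xdeadbeef) on hardware that cannot report it.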
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h index a473aa6697d0..4fcd3bf6b76f 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h @@ -6,37 +6,6 @@ #ifndef __INTEL_PCODE_H__ #define __INTEL_PCODE_H__ -#include "intel_uncore.h" #include "xe_pcode.h" -static inline int -snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val, - int fast_timeout_us, int slow_timeout_ms) -{ - return xe_pcode_write_timeout(__compat_uncore_to_tile(uncore), mbox, val, - slow_timeout_ms ?: 1); -} - -static inline int -snb_pcode_write(struct intel_uncore *uncore, u32 mbox, u32 val) -{ - - return xe_pcode_write(__compat_uncore_to_tile(uncore), mbox, val); -} - -static inline int -snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1) -{ - return xe_pcode_read(__compat_uncore_to_tile(uncore), mbox, val, val1); -} - -static inline int -skl_pcode_request(struct intel_uncore *uncore, u32 mbox, - u32 request, u32 reply_mask, u32 reply, - int timeout_base_ms) -{ - return xe_pcode_request(__compat_uncore_to_tile(uncore), mbox, request, reply_mask, reply, - timeout_base_ms); -} - #endif /* __INTEL_PCODE_H__ */ diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h index 797091cf1c99..d012f02bc84f 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h @@ -24,13 +24,6 @@ static inline struct xe_mmio *__compat_uncore_to_mmio(struct intel_uncore *uncor return xe_root_tile_mmio(xe); } -static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore) -{ - struct xe_device *xe = container_of(uncore, struct xe_device, uncore); - - return xe_device_get_root_tile(xe); -} - static inline u32 intel_uncore_read(struct intel_uncore *uncore, i915_reg_t i915_reg) { diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c index 27437c22bd70..910632f57c3d 100644 --- a/drivers/gpu/drm/xe/display/intel_bo.c +++ b/drivers/gpu/drm/xe/display/intel_bo.c @@ -1,7 +1,12 @@ // SPDX-License-Identifier: MIT /* Copyright © 2024 Intel Corporation */ +#include <drm/drm_cache.h> #include <drm/drm_gem.h> +#include <drm/drm_panic.h> + +#include "intel_fb.h" +#include "intel_display_types.h" #include "xe_bo.h" #include "intel_bo.h" @@ -59,3 +64,89 @@ void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj) { /* FIXME */ } + +struct xe_panic_data { + struct page **pages; + int page; + void *vaddr; +}; + +struct xe_framebuffer { + struct intel_framebuffer base; + struct xe_panic_data panic; +}; + +static inline struct xe_panic_data *to_xe_panic_data(struct intel_framebuffer *fb) +{ + return &container_of_const(fb, struct xe_framebuffer, base)->panic; +} + +static void xe_panic_kunmap(struct xe_panic_data *panic) +{ + if (panic->vaddr) { + drm_clflush_virt_range(panic->vaddr, PAGE_SIZE); + kunmap_local(panic->vaddr); + panic->vaddr = NULL; + } +} + +/* + * The scanout buffer pages are not mapped, so for each pixel, + * use kmap_local_page_try_from_panic() to map the page, and write the pixel. + * Try to keep the map from the previous pixel, to avoid too much map/unmap. 
+ */ +static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x, + unsigned int y, u32 color) +{ + struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private; + struct xe_panic_data *panic = to_xe_panic_data(fb); + struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base)); + unsigned int new_page; + unsigned int offset; + + if (fb->panic_tiling) + offset = fb->panic_tiling(sb->width, x, y); + else + offset = y * sb->pitch[0] + x * sb->format->cpp[0]; + + new_page = offset >> PAGE_SHIFT; + offset = offset % PAGE_SIZE; + if (new_page != panic->page) { + xe_panic_kunmap(panic); + panic->page = new_page; + panic->vaddr = ttm_bo_kmap_try_from_panic(&bo->ttm, + panic->page); + } + if (panic->vaddr) { + u32 *pix = panic->vaddr + offset; + *pix = color; + } +} + +struct intel_framebuffer *intel_bo_alloc_framebuffer(void) +{ + struct xe_framebuffer *xe_fb; + + xe_fb = kzalloc(sizeof(*xe_fb), GFP_KERNEL); + if (xe_fb) + return &xe_fb->base; + return NULL; +} + +int intel_bo_panic_setup(struct drm_scanout_buffer *sb) +{ + struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private; + struct xe_panic_data *panic = to_xe_panic_data(fb); + + panic->page = -1; + sb->set_pixel = xe_panic_page_set_pixel; + return 0; +} + +void intel_bo_panic_finish(struct intel_framebuffer *fb) +{ + struct xe_panic_data *panic = to_xe_panic_data(fb); + + xe_panic_kunmap(panic); + panic->page = -1; +} diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c index b28a94df824f..fba9617a75a5 100644 --- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c +++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c @@ -66,7 +66,11 @@ struct intel_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, goto err; } - fb = intel_framebuffer_create(&obj->ttm.base, &mode_cmd); + fb = intel_framebuffer_create(&obj->ttm.base, + drm_get_format_info(dev, + mode_cmd.pixel_format, + mode_cmd.modifier[0]), + &mode_cmd); if (IS_ERR(fb)) { xe_bo_unpin_map_no_vm(obj); goto err; diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c index f95375451e2f..9f941fc2e36b 100644 --- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c +++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c @@ -17,10 +17,7 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf) void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val) { - struct xe_device *xe = dsb_buf->vma->bo->tile->xe; - iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val); - xe_device_l2_flush(xe); } u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx) @@ -30,12 +27,9 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx) void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size) { - struct xe_device *xe = dsb_buf->vma->bo->tile->xe; - WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf)); iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size); - xe_device_l2_flush(xe); } bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size) @@ -74,9 +68,12 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf) void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf) { + struct xe_device *xe = dsb_buf->vma->bo->tile->xe; + /* * The memory barrier here is to ensure coherency of DSB vs MMIO, * both for weak ordering archs and discrete cards. 
*/ - xe_device_wmb(dsb_buf->vma->bo->tile->xe); + xe_device_wmb(xe); + xe_device_l2_flush(xe); } diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c index 6b362695d6b6..c38fba18effe 100644 --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c @@ -163,6 +163,9 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, vma->dpt = dpt; vma->node = dpt->ggtt_node[tile0->id]; + + /* Ensure DPT writes are flushed */ + xe_device_l2_flush(xe); return 0; } @@ -224,7 +227,7 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb, goto out_unlock; } - ret = xe_ggtt_node_insert_locked(vma->node, bo->size, align, 0); + ret = xe_ggtt_node_insert_locked(vma->node, xe_bo_size(bo), align, 0); if (ret) { xe_ggtt_node_fini(vma->node); goto out_unlock; @@ -326,8 +329,6 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, if (ret) goto err_unpin; - /* Ensure DPT writes are flushed */ - xe_device_l2_flush(xe); return vma; err_unpin: @@ -457,3 +458,8 @@ u64 intel_dpt_offset(struct i915_vma *dpt_vma) { return 0; } + +void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map) +{ + *map = vma->bo->vmap; +} diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c index b35a6f201d4a..30f1073141fc 100644 --- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c +++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c @@ -85,7 +85,7 @@ static int intel_hdcp_gsc_initialize_message(struct xe_device *xe, cmd_in = xe_bo_ggtt_addr(bo); cmd_out = cmd_in + PAGE_SIZE; - xe_map_memset(xe, &bo->vmap, 0, 0, bo->size); + xe_map_memset(xe, &bo->vmap, 0, 0, xe_bo_size(bo)); gsc_context->hdcp_bo = bo; gsc_context->hdcp_cmd_in = cmd_in; diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c index af47ce34102c..dcbc4b2d3fd9 100644 --- a/drivers/gpu/drm/xe/display/xe_plane_initial.c +++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c @@ -10,7 +10,6 @@ #include "xe_ggtt.h" #include "xe_mmio.h" -#include "intel_atomic_plane.h" #include "intel_crtc.h" #include "intel_display.h" #include "intel_display_core.h" @@ -19,6 +18,7 @@ #include "intel_fb.h" #include "intel_fb_pin.h" #include "intel_frontbuffer.h" +#include "intel_plane.h" #include "intel_plane_initial.h" #include "xe_bo.h" #include "xe_wa.h" @@ -184,7 +184,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, return false; if (intel_framebuffer_init(to_intel_framebuffer(fb), - &bo->ttm.base, &mode_cmd)) { + &bo->ttm.base, fb->format, &mode_cmd)) { drm_dbg_kms(&xe->drm, "intel fb init failed\n"); goto err_bo; } diff --git a/drivers/gpu/drm/xe/regs/xe_bars.h b/drivers/gpu/drm/xe/regs/xe_bars.h index ce05b6ae832f..880140d6ccdc 100644 --- a/drivers/gpu/drm/xe/regs/xe_bars.h +++ b/drivers/gpu/drm/xe/regs/xe_bars.h @@ -7,5 +7,6 @@ #define GTTMMADR_BAR 0 /* MMIO + GTT */ #define LMEM_BAR 2 /* VRAM */ +#define VF_LMEM_BAR 9 /* VF VRAM */ #endif diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h index 7702364b65f1..9b66cc972a63 100644 --- a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h @@ -16,6 +16,10 @@ #define MTL_GSC_HECI1_BASE 0x00116000 #define MTL_GSC_HECI2_BASE 0x00117000 +#define DG1_GSC_HECI2_BASE 0x00259000 +#define PVC_GSC_HECI2_BASE 0x00285000 +#define DG2_GSC_HECI2_BASE 0x00374000 + #define HECI_H_CSR(base) XE_REG((base) + 0x4) #define HECI_H_CSR_IE REG_BIT(0) #define HECI_H_CSR_IS REG_BIT(1) 
diff --git a/drivers/gpu/drm/xe/regs/xe_i2c_regs.h b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h new file mode 100644 index 000000000000..af781c8e4a80 --- /dev/null +++ b/drivers/gpu/drm/xe/regs/xe_i2c_regs.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef _XE_I2C_REGS_H_ +#define _XE_I2C_REGS_H_ + +#include <linux/pci_regs.h> + +#include "xe_reg_defs.h" +#include "xe_regs.h" + +#define I2C_BRIDGE_OFFSET (SOC_BASE + 0xd9000) +#define I2C_CONFIG_SPACE_OFFSET (SOC_BASE + 0xf6000) +#define I2C_MEM_SPACE_OFFSET (SOC_BASE + 0xf7400) + +#define REG_SG_REMAP_ADDR_PREFIX XE_REG(SOC_BASE + 0x0164) +#define REG_SG_REMAP_ADDR_POSTFIX XE_REG(SOC_BASE + 0x0168) + +#define I2C_CONFIG_CMD XE_REG(I2C_CONFIG_SPACE_OFFSET + PCI_COMMAND) +#define I2C_CONFIG_PMCSR XE_REG(I2C_CONFIG_SPACE_OFFSET + 0x84) + +#endif /* _XE_I2C_REGS_H_ */ diff --git a/drivers/gpu/drm/xe/regs/xe_irq_regs.h b/drivers/gpu/drm/xe/regs/xe_irq_regs.h index f0ecfcac4003..13635e4331d4 100644 --- a/drivers/gpu/drm/xe/regs/xe_irq_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_irq_regs.h @@ -19,6 +19,7 @@ #define MASTER_IRQ REG_BIT(31) #define GU_MISC_IRQ REG_BIT(29) #define DISPLAY_IRQ REG_BIT(16) +#define I2C_IRQ REG_BIT(12) #define GT_DW_IRQ(x) REG_BIT(x) /* diff --git a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h index 994af591a2e8..1b101edb838b 100644 --- a/drivers/gpu/drm/xe/regs/xe_lrc_layout.h +++ b/drivers/gpu/drm/xe/regs/xe_lrc_layout.h @@ -12,9 +12,13 @@ #define CTX_RING_START (0x08 + 1) #define CTX_RING_CTL (0x0a + 1) #define CTX_BB_PER_CTX_PTR (0x12 + 1) +#define CTX_CS_INDIRECT_CTX (0x14 + 1) +#define CTX_CS_INDIRECT_CTX_OFFSET (0x16 + 1) #define CTX_TIMESTAMP (0x22 + 1) #define CTX_TIMESTAMP_UDW (0x24 + 1) #define CTX_INDIRECT_RING_STATE (0x26 + 1) +#define CTX_ACC_CTR_THOLD (0x2a + 1) +#define CTX_ASID (0x2e + 1) #define CTX_PDP0_UDW (0x30 + 1) #define CTX_PDP0_LDW (0x32 + 1) @@ -36,4 +40,7 @@ #define INDIRECT_CTX_RING_START_UDW (0x08 + 1) #define INDIRECT_CTX_RING_CTL (0x0a + 1) +#define CTX_INDIRECT_CTX_OFFSET_MASK REG_GENMASK(15, 6) +#define CTX_INDIRECT_CTX_OFFSET_DEFAULT REG_FIELD_PREP(CTX_INDIRECT_CTX_OFFSET_MASK, 0xd) + #endif diff --git a/drivers/gpu/drm/xe/regs/xe_pmt.h b/drivers/gpu/drm/xe/regs/xe_pmt.h index b0efd9b48d1e..2995d72c3f78 100644 --- a/drivers/gpu/drm/xe/regs/xe_pmt.h +++ b/drivers/gpu/drm/xe/regs/xe_pmt.h @@ -5,7 +5,7 @@ #ifndef _XE_PMT_H_ #define _XE_PMT_H_ -#define SOC_BASE 0x280000 +#include "xe_regs.h" #define BMG_PMT_BASE_OFFSET 0xDB000 #define BMG_DISCOVERY_OFFSET (SOC_BASE + BMG_PMT_BASE_OFFSET) diff --git a/drivers/gpu/drm/xe/regs/xe_regs.h b/drivers/gpu/drm/xe/regs/xe_regs.h index 3abb17d2ca33..1926b4044314 100644 --- a/drivers/gpu/drm/xe/regs/xe_regs.h +++ b/drivers/gpu/drm/xe/regs/xe_regs.h @@ -7,6 +7,8 @@ #include "regs/xe_reg_defs.h" +#define SOC_BASE 0x280000 + #define GU_CNTL_PROTECTED XE_REG(0x10100C) #define DRIVERINT_FLR_DIS REG_BIT(31) diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c index 77ca1ab527ec..bb469096d072 100644 --- a/drivers/gpu/drm/xe/tests/xe_bo.c +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -106,7 +106,7 @@ static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo, } /* Check last CCS value, or at least last value in page. 
*/ - offset = xe_device_ccs_bytes(tile_to_xe(tile), bo->size); + offset = xe_device_ccs_bytes(tile_to_xe(tile), xe_bo_size(bo)); offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1; if (cpu_map[offset] != get_val) { KUNIT_FAIL(test, diff --git a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c b/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c deleted file mode 100644 index b683585db852..000000000000 --- a/drivers/gpu/drm/xe/tests/xe_gt_sriov_pf_service_test.c +++ /dev/null @@ -1,232 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 AND MIT -/* - * Copyright © 2024 Intel Corporation - */ - -#include <kunit/test.h> - -#include "xe_device.h" -#include "xe_kunit_helpers.h" -#include "xe_pci_test.h" - -static int pf_service_test_init(struct kunit *test) -{ - struct xe_pci_fake_data fake = { - .sriov_mode = XE_SRIOV_MODE_PF, - .platform = XE_TIGERLAKE, /* some random platform */ - .subplatform = XE_SUBPLATFORM_NONE, - }; - struct xe_device *xe; - struct xe_gt *gt; - - test->priv = &fake; - xe_kunit_helper_xe_device_test_init(test); - - xe = test->priv; - KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0); - - gt = xe_device_get_gt(xe, 0); - pf_init_versions(gt); - - /* - * sanity check: - * - all supported platforms VF/PF ABI versions must be defined - * - base version can't be newer than latest - */ - KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.base.major); - KUNIT_ASSERT_NE(test, 0, gt->sriov.pf.service.version.latest.major); - KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.major, - gt->sriov.pf.service.version.latest.major); - if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major) - KUNIT_ASSERT_LE(test, gt->sriov.pf.service.version.base.minor, - gt->sriov.pf.service.version.latest.minor); - - test->priv = gt; - return 0; -} - -static void pf_negotiate_any(struct kunit *test) -{ - struct xe_gt *gt = test->priv; - u32 major, minor; - - KUNIT_ASSERT_EQ(test, 0, - pf_negotiate_version(gt, VF2PF_HANDSHAKE_MAJOR_ANY, - VF2PF_HANDSHAKE_MINOR_ANY, - &major, &minor)); - KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major); - KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor); -} - -static void pf_negotiate_base_match(struct kunit *test) -{ - struct xe_gt *gt = test->priv; - u32 major, minor; - - KUNIT_ASSERT_EQ(test, 0, - pf_negotiate_version(gt, - gt->sriov.pf.service.version.base.major, - gt->sriov.pf.service.version.base.minor, - &major, &minor)); - KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.base.major); - KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.base.minor); -} - -static void pf_negotiate_base_newer(struct kunit *test) -{ - struct xe_gt *gt = test->priv; - u32 major, minor; - - KUNIT_ASSERT_EQ(test, 0, - pf_negotiate_version(gt, - gt->sriov.pf.service.version.base.major, - gt->sriov.pf.service.version.base.minor + 1, - &major, &minor)); - KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.base.major); - KUNIT_ASSERT_GE(test, minor, gt->sriov.pf.service.version.base.minor); - if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major) - KUNIT_ASSERT_LE(test, minor, gt->sriov.pf.service.version.latest.minor); - else - KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n"); -} - -static void pf_negotiate_base_next(struct kunit *test) -{ - struct xe_gt *gt = test->priv; - u32 major, minor; - - KUNIT_ASSERT_EQ(test, 0, - pf_negotiate_version(gt, - gt->sriov.pf.service.version.base.major + 1, 0, - &major, &minor)); - 
KUNIT_ASSERT_GE(test, major, gt->sriov.pf.service.version.base.major); - KUNIT_ASSERT_LE(test, major, gt->sriov.pf.service.version.latest.major); - if (major == gt->sriov.pf.service.version.latest.major) - KUNIT_ASSERT_LE(test, minor, gt->sriov.pf.service.version.latest.minor); - else - KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n"); -} - -static void pf_negotiate_base_older(struct kunit *test) -{ - struct xe_gt *gt = test->priv; - u32 major, minor; - - if (!gt->sriov.pf.service.version.base.minor) - kunit_skip(test, "no older minor\n"); - - KUNIT_ASSERT_NE(test, 0, - pf_negotiate_version(gt, - gt->sriov.pf.service.version.base.major, - gt->sriov.pf.service.version.base.minor - 1, - &major, &minor)); -} - -static void pf_negotiate_base_prev(struct kunit *test) -{ - struct xe_gt *gt = test->priv; - u32 major, minor; - - KUNIT_ASSERT_NE(test, 0, - pf_negotiate_version(gt, - gt->sriov.pf.service.version.base.major - 1, 1, - &major, &minor)); -} - -static void pf_negotiate_latest_match(struct kunit *test) -{ - struct xe_gt *gt = test->priv; - u32 major, minor; - - KUNIT_ASSERT_EQ(test, 0, - pf_negotiate_version(gt, - gt->sriov.pf.service.version.latest.major, - gt->sriov.pf.service.version.latest.minor, - &major, &minor)); - KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major); - KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor); -} - -static void pf_negotiate_latest_newer(struct kunit *test) -{ - struct xe_gt *gt = test->priv; - u32 major, minor; - - KUNIT_ASSERT_EQ(test, 0, - pf_negotiate_version(gt, - gt->sriov.pf.service.version.latest.major, - gt->sriov.pf.service.version.latest.minor + 1, - &major, &minor)); - KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major); - KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor); -} - -static void pf_negotiate_latest_next(struct kunit *test) -{ - struct xe_gt *gt = test->priv; - u32 major, minor; - - KUNIT_ASSERT_EQ(test, 0, - pf_negotiate_version(gt, - gt->sriov.pf.service.version.latest.major + 1, 0, - &major, &minor)); - KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major); - KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor); -} - -static void pf_negotiate_latest_older(struct kunit *test) -{ - struct xe_gt *gt = test->priv; - u32 major, minor; - - if (!gt->sriov.pf.service.version.latest.minor) - kunit_skip(test, "no older minor\n"); - - KUNIT_ASSERT_EQ(test, 0, - pf_negotiate_version(gt, - gt->sriov.pf.service.version.latest.major, - gt->sriov.pf.service.version.latest.minor - 1, - &major, &minor)); - KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major); - KUNIT_ASSERT_EQ(test, minor, gt->sriov.pf.service.version.latest.minor - 1); -} - -static void pf_negotiate_latest_prev(struct kunit *test) -{ - struct xe_gt *gt = test->priv; - u32 major, minor; - - if (gt->sriov.pf.service.version.base.major == gt->sriov.pf.service.version.latest.major) - kunit_skip(test, "no prev major"); - - KUNIT_ASSERT_EQ(test, 0, - pf_negotiate_version(gt, - gt->sriov.pf.service.version.latest.major - 1, - gt->sriov.pf.service.version.base.minor + 1, - &major, &minor)); - KUNIT_ASSERT_EQ(test, major, gt->sriov.pf.service.version.latest.major - 1); - KUNIT_ASSERT_GE(test, major, gt->sriov.pf.service.version.base.major); -} - -static struct kunit_case pf_service_test_cases[] = { - KUNIT_CASE(pf_negotiate_any), - KUNIT_CASE(pf_negotiate_base_match), - KUNIT_CASE(pf_negotiate_base_newer), - 
KUNIT_CASE(pf_negotiate_base_next), - KUNIT_CASE(pf_negotiate_base_older), - KUNIT_CASE(pf_negotiate_base_prev), - KUNIT_CASE(pf_negotiate_latest_match), - KUNIT_CASE(pf_negotiate_latest_newer), - KUNIT_CASE(pf_negotiate_latest_next), - KUNIT_CASE(pf_negotiate_latest_older), - KUNIT_CASE(pf_negotiate_latest_prev), - {} -}; - -static struct kunit_suite pf_service_suite = { - .name = "pf_service", - .test_cases = pf_service_test_cases, - .init = pf_service_test_init, -}; - -kunit_test_suite(pf_service_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c index 537766cdd882..d266882adc0e 100644 --- a/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c +++ b/drivers/gpu/drm/xe/tests/xe_guc_buf_kunit.c @@ -32,7 +32,7 @@ static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device * bo->tile = tile; bo->ttm.bdev = &xe->ttm; - bo->size = size; + bo->ttm.base.size = size; iosys_map_set_vaddr(&bo->vmap, buf); if (flags & XE_BO_FLAG_GGTT) { @@ -43,7 +43,7 @@ static struct xe_bo *replacement_xe_managed_bo_create_pin_map(struct xe_device * KUNIT_ASSERT_EQ(test, 0, xe_ggtt_node_insert(bo->ggtt_node[tile->id], - bo->size, SZ_4K)); + xe_bo_size(bo), SZ_4K)); } return bo; diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c index 4a65e3103f77..edd1e701aa1c 100644 --- a/drivers/gpu/drm/xe/tests/xe_migrate.c +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -74,13 +74,13 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo, { struct xe_device *xe = tile_to_xe(m->tile); u64 retval, expected = 0; - bool big = bo->size >= SZ_2M; + bool big = xe_bo_size(bo) >= SZ_2M; struct dma_fence *fence; const char *str = big ? "Copying big bo" : "Copying small bo"; int err; struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL, - bo->size, + xe_bo_size(bo), ttm_bo_type_kernel, region | XE_BO_FLAG_NEEDS_CPU_ACCESS | @@ -105,7 +105,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo, goto out_unlock; } - xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size); + xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote)); fence = xe_migrate_clear(m, remote, remote->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL); if (!sanity_fence_failed(xe, fence, big ? 
"Clearing remote big bo" : @@ -113,15 +113,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo, retval = xe_map_rd(xe, &remote->vmap, 0, u64); check(retval, expected, "remote first offset should be cleared", test); - retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64); + retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(remote) - 8, u64); check(retval, expected, "remote last offset should be cleared", test); } dma_fence_put(fence); /* Try to copy 0xc0 from remote to vram with 2MB or 64KiB/4KiB pages */ - xe_map_memset(xe, &remote->vmap, 0, 0xc0, remote->size); - xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size); + xe_map_memset(xe, &remote->vmap, 0, 0xc0, xe_bo_size(remote)); + xe_map_memset(xe, &bo->vmap, 0, 0xd0, xe_bo_size(bo)); expected = 0xc0c0c0c0c0c0c0c0; fence = xe_migrate_copy(m, remote, bo, remote->ttm.resource, @@ -131,15 +131,15 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo, retval = xe_map_rd(xe, &bo->vmap, 0, u64); check(retval, expected, "remote -> vram bo first offset should be copied", test); - retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64); + retval = xe_map_rd(xe, &bo->vmap, xe_bo_size(bo) - 8, u64); check(retval, expected, "remote -> vram bo offset should be copied", test); } dma_fence_put(fence); /* And other way around.. slightly hacky.. */ - xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size); - xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size); + xe_map_memset(xe, &remote->vmap, 0, 0xd0, xe_bo_size(remote)); + xe_map_memset(xe, &bo->vmap, 0, 0xc0, xe_bo_size(bo)); fence = xe_migrate_copy(m, bo, remote, bo->ttm.resource, remote->ttm.resource, false); @@ -148,7 +148,7 @@ static void test_copy(struct xe_migrate *m, struct xe_bo *bo, retval = xe_map_rd(xe, &remote->vmap, 0, u64); check(retval, expected, "vram -> remote bo first offset should be copied", test); - retval = xe_map_rd(xe, &remote->vmap, bo->size - 8, u64); + retval = xe_map_rd(xe, &remote->vmap, xe_bo_size(bo) - 8, u64); check(retval, expected, "vram -> remote bo last offset should be copied", test); } @@ -245,9 +245,9 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) if (m->q->vm->flags & XE_VM_FLAG_64K) expected |= XE_PTE_PS64; if (xe_bo_is_vram(pt)) - xe_res_first(pt->ttm.resource, 0, pt->size, &src_it); + xe_res_first(pt->ttm.resource, 0, xe_bo_size(pt), &src_it); else - xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it); + xe_res_first_sg(xe_bo_sg(pt), 0, xe_bo_size(pt), &src_it); emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false, &src_it, XE_PAGE_SIZE, pt->ttm.resource); @@ -276,7 +276,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) /* Clear a small bo */ kunit_info(test, "Clearing small buffer object\n"); - xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size); + xe_map_memset(xe, &tiny->vmap, 0, 0x22, xe_bo_size(tiny)); expected = 0; fence = xe_migrate_clear(m, tiny, tiny->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL); @@ -286,7 +286,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) dma_fence_put(fence); retval = xe_map_rd(xe, &tiny->vmap, 0, u32); check(retval, expected, "Command clear small first value", test); - retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32); + retval = xe_map_rd(xe, &tiny->vmap, xe_bo_size(tiny) - 4, u32); check(retval, expected, "Command clear small last value", test); kunit_info(test, "Copying small buffer object to system\n"); @@ -298,7 +298,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit 
*test) /* Clear a big bo */ kunit_info(test, "Clearing big buffer object\n"); - xe_map_memset(xe, &big->vmap, 0, 0x11, big->size); + xe_map_memset(xe, &big->vmap, 0, 0x11, xe_bo_size(big)); expected = 0; fence = xe_migrate_clear(m, big, big->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL); @@ -308,7 +308,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) dma_fence_put(fence); retval = xe_map_rd(xe, &big->vmap, 0, u32); check(retval, expected, "Command clear big first value", test); - retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32); + retval = xe_map_rd(xe, &big->vmap, xe_bo_size(big) - 4, u32); check(retval, expected, "Command clear big last value", test); kunit_info(test, "Copying big buffer object to system\n"); @@ -370,7 +370,7 @@ static struct dma_fence *blt_copy(struct xe_tile *tile, struct xe_migrate *m = tile->migrate; struct xe_device *xe = gt_to_xe(gt); struct dma_fence *fence = NULL; - u64 size = src_bo->size; + u64 size = xe_bo_size(src_bo); struct xe_res_cursor src_it, dst_it; struct ttm_resource *src = src_bo->ttm.resource, *dst = dst_bo->ttm.resource; u64 src_L0_ofs, dst_L0_ofs; @@ -498,7 +498,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile, long ret; expected = 0xd0d0d0d0d0d0d0d0; - xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, sys_bo->size); + xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, xe_bo_size(sys_bo)); fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test); if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) { @@ -523,7 +523,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile, retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64); check(retval, expected, "Clear evicted vram data first value", test); - retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64); + retval = xe_map_rd(xe, &vram_bo->vmap, xe_bo_size(vram_bo) - 8, u64); check(retval, expected, "Clear evicted vram data last value", test); fence = blt_copy(tile, vram_bo, ccs_bo, @@ -532,7 +532,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile, retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64); check(retval, 0, "Clear ccs data first value", test); - retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64); + retval = xe_map_rd(xe, &ccs_bo->vmap, xe_bo_size(ccs_bo) - 8, u64); check(retval, 0, "Clear ccs data last value", test); } dma_fence_put(fence); @@ -562,7 +562,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile, retval = xe_map_rd(xe, &vram_bo->vmap, 0, u64); check(retval, expected, "Restored value must be equal to initial value", test); - retval = xe_map_rd(xe, &vram_bo->vmap, vram_bo->size - 8, u64); + retval = xe_map_rd(xe, &vram_bo->vmap, xe_bo_size(vram_bo) - 8, u64); check(retval, expected, "Restored value must be equal to initial value", test); fence = blt_copy(tile, vram_bo, ccs_bo, @@ -570,7 +570,7 @@ static void test_migrate(struct xe_device *xe, struct xe_tile *tile, if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) { retval = xe_map_rd(xe, &ccs_bo->vmap, 0, u64); check(retval, 0, "Clear ccs data first value", test); - retval = xe_map_rd(xe, &ccs_bo->vmap, ccs_bo->size - 8, u64); + retval = xe_map_rd(xe, &ccs_bo->vmap, xe_bo_size(ccs_bo) - 8, u64); check(retval, 0, "Clear ccs data last value", test); } dma_fence_put(fence); @@ -583,7 +583,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile, u64 expected, retval; expected = 0xd0d0d0d0d0d0d0d0; - xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, 
sys_bo->size); + xe_map_memset(xe, &sys_bo->vmap, 0, 0xd0, xe_bo_size(sys_bo)); fence = blt_copy(tile, sys_bo, vram_bo, false, "Blit copy from sysmem to vram", test); if (!sanity_fence_failed(xe, fence, "Blit copy from sysmem to vram", test)) { @@ -597,7 +597,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile, if (!sanity_fence_failed(xe, fence, "Blit copy from vram to sysmem", test)) { retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64); check(retval, expected, "Decompressed value must be equal to initial value", test); - retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64); + retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64); check(retval, expected, "Decompressed value must be equal to initial value", test); } dma_fence_put(fence); @@ -615,7 +615,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile, if (!sanity_fence_failed(xe, fence, "Clear main buffer data", test)) { retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64); check(retval, expected, "Clear main buffer first value", test); - retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64); + retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64); check(retval, expected, "Clear main buffer last value", test); } dma_fence_put(fence); @@ -625,7 +625,7 @@ static void test_clear(struct xe_device *xe, struct xe_tile *tile, if (!sanity_fence_failed(xe, fence, "Clear ccs buffer data", test)) { retval = xe_map_rd(xe, &sys_bo->vmap, 0, u64); check(retval, expected, "Clear ccs data first value", test); - retval = xe_map_rd(xe, &sys_bo->vmap, sys_bo->size - 8, u64); + retval = xe_map_rd(xe, &sys_bo->vmap, xe_bo_size(sys_bo) - 8, u64); check(retval, expected, "Clear ccs data last value", test); } dma_fence_put(fence); diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c index baccb657bd05..9c715e59f030 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci.c +++ b/drivers/gpu/drm/xe/tests/xe_pci.c @@ -21,6 +21,18 @@ static void xe_ip_kunit_desc(const struct xe_ip *param, char *desc) KUNIT_ARRAY_PARAM(graphics_ip, graphics_ips, xe_ip_kunit_desc); KUNIT_ARRAY_PARAM(media_ip, media_ips, xe_ip_kunit_desc); +static void xe_pci_id_kunit_desc(const struct pci_device_id *param, char *desc) +{ + const struct xe_device_desc *dev_desc = + (const struct xe_device_desc *)param->driver_data; + + if (dev_desc) + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "0x%X (%s)", + param->device, dev_desc->platform_name); +} + +KUNIT_ARRAY_PARAM(pci_id, pciidlist, xe_pci_id_kunit_desc); + /** * xe_pci_graphics_ip_gen_param - Generate graphics struct xe_ip parameters * @prev: the pointer to the previous parameter to iterate from or NULL @@ -55,6 +67,25 @@ const void *xe_pci_media_ip_gen_param(const void *prev, char *desc) } EXPORT_SYMBOL_IF_KUNIT(xe_pci_media_ip_gen_param); +/** + * xe_pci_id_gen_param - Generate struct pci_device_id parameters + * @prev: the pointer to the previous parameter to iterate from or NULL + * @desc: output buffer with minimum size of KUNIT_PARAM_DESC_SIZE + * + * This function prepares struct pci_device_id parameter. + * + * To be used only as a parameter generator function in &KUNIT_CASE_PARAM. + * + * Return: pointer to the next parameter or NULL if no more parameters + */ +const void *xe_pci_id_gen_param(const void *prev, char *desc) +{ + const struct pci_device_id *pci = pci_id_gen_params(prev, desc); + + return pci->driver_data ? 
pci : NULL; +} +EXPORT_SYMBOL_IF_KUNIT(xe_pci_id_gen_param); + static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, u32 *ver, u32 *revid) { diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c index 95fed41f7ff2..37b344df2dc3 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci_test.c +++ b/drivers/gpu/drm/xe/tests/xe_pci_test.c @@ -44,9 +44,21 @@ static void check_media_ip(struct kunit *test) KUNIT_ASSERT_EQ(test, mask, 0); } +static void check_platform_gt_count(struct kunit *test) +{ + const struct pci_device_id *pci = test->param_value; + const struct xe_device_desc *desc = + (const struct xe_device_desc *)pci->driver_data; + int max_gt = desc->max_gt_per_tile; + + KUNIT_ASSERT_GT(test, max_gt, 0); + KUNIT_ASSERT_LE(test, max_gt, XE_MAX_GT_PER_TILE); +} + static struct kunit_case xe_pci_tests[] = { KUNIT_CASE_PARAM(check_graphics_ip, xe_pci_graphics_ip_gen_param), KUNIT_CASE_PARAM(check_media_ip, xe_pci_media_ip_gen_param), + KUNIT_CASE_PARAM(check_platform_gt_count, xe_pci_id_gen_param), {} }; diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h index 3a1df7a5e291..ce4d2b86b778 100644 --- a/drivers/gpu/drm/xe/tests/xe_pci_test.h +++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h @@ -27,6 +27,7 @@ int xe_pci_fake_device_init(struct xe_device *xe); const void *xe_pci_graphics_ip_gen_param(const void *prev, char *desc); const void *xe_pci_media_ip_gen_param(const void *prev, char *desc); +const void *xe_pci_id_gen_param(const void *prev, char *desc); const void *xe_pci_live_device_gen_param(const void *prev, char *desc); #endif diff --git a/drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c b/drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c new file mode 100644 index 000000000000..ba95e29b597d --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_sriov_pf_service_kunit.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0 AND MIT +/* + * Copyright © 2024-2025 Intel Corporation + */ + +#include <kunit/test.h> + +#include "xe_device.h" +#include "xe_kunit_helpers.h" +#include "xe_pci_test.h" + +static int pf_service_test_init(struct kunit *test) +{ + struct xe_pci_fake_data fake = { + .sriov_mode = XE_SRIOV_MODE_PF, + .platform = XE_TIGERLAKE, /* some random platform */ + .subplatform = XE_SUBPLATFORM_NONE, + }; + struct xe_device *xe; + + test->priv = &fake; + xe_kunit_helper_xe_device_test_init(test); + + xe = test->priv; + KUNIT_ASSERT_EQ(test, xe_sriov_init(xe), 0); + + xe_sriov_pf_service_init(xe); + /* + * sanity check: + * - all supported platforms VF/PF ABI versions must be defined + * - base version can't be newer than latest + */ + KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.base.major); + KUNIT_ASSERT_NE(test, 0, xe->sriov.pf.service.version.latest.major); + KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.major, + xe->sriov.pf.service.version.latest.major); + if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major) + KUNIT_ASSERT_LE(test, xe->sriov.pf.service.version.base.minor, + xe->sriov.pf.service.version.latest.minor); + return 0; +} + +static void pf_negotiate_any(struct kunit *test) +{ + struct xe_device *xe = test->priv; + u32 major, minor; + + KUNIT_ASSERT_EQ(test, 0, + pf_negotiate_version(xe, VF2PF_HANDSHAKE_MAJOR_ANY, + VF2PF_HANDSHAKE_MINOR_ANY, + &major, &minor)); + KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major); + KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor); +} + +static 
void pf_negotiate_base_match(struct kunit *test) +{ + struct xe_device *xe = test->priv; + u32 major, minor; + + KUNIT_ASSERT_EQ(test, 0, + pf_negotiate_version(xe, + xe->sriov.pf.service.version.base.major, + xe->sriov.pf.service.version.base.minor, + &major, &minor)); + KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.base.major); + KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.base.minor); +} + +static void pf_negotiate_base_newer(struct kunit *test) +{ + struct xe_device *xe = test->priv; + u32 major, minor; + + KUNIT_ASSERT_EQ(test, 0, + pf_negotiate_version(xe, + xe->sriov.pf.service.version.base.major, + xe->sriov.pf.service.version.base.minor + 1, + &major, &minor)); + KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.base.major); + KUNIT_ASSERT_GE(test, minor, xe->sriov.pf.service.version.base.minor); + if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major) + KUNIT_ASSERT_LE(test, minor, xe->sriov.pf.service.version.latest.minor); + else + KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n"); +} + +static void pf_negotiate_base_next(struct kunit *test) +{ + struct xe_device *xe = test->priv; + u32 major, minor; + + KUNIT_ASSERT_EQ(test, 0, + pf_negotiate_version(xe, + xe->sriov.pf.service.version.base.major + 1, 0, + &major, &minor)); + KUNIT_ASSERT_GE(test, major, xe->sriov.pf.service.version.base.major); + KUNIT_ASSERT_LE(test, major, xe->sriov.pf.service.version.latest.major); + if (major == xe->sriov.pf.service.version.latest.major) + KUNIT_ASSERT_LE(test, minor, xe->sriov.pf.service.version.latest.minor); + else + KUNIT_FAIL(test, "FIXME: don't know how to test multi-version yet!\n"); +} + +static void pf_negotiate_base_older(struct kunit *test) +{ + struct xe_device *xe = test->priv; + u32 major, minor; + + if (!xe->sriov.pf.service.version.base.minor) + kunit_skip(test, "no older minor\n"); + + KUNIT_ASSERT_NE(test, 0, + pf_negotiate_version(xe, + xe->sriov.pf.service.version.base.major, + xe->sriov.pf.service.version.base.minor - 1, + &major, &minor)); +} + +static void pf_negotiate_base_prev(struct kunit *test) +{ + struct xe_device *xe = test->priv; + u32 major, minor; + + KUNIT_ASSERT_NE(test, 0, + pf_negotiate_version(xe, + xe->sriov.pf.service.version.base.major - 1, 1, + &major, &minor)); +} + +static void pf_negotiate_latest_match(struct kunit *test) +{ + struct xe_device *xe = test->priv; + u32 major, minor; + + KUNIT_ASSERT_EQ(test, 0, + pf_negotiate_version(xe, + xe->sriov.pf.service.version.latest.major, + xe->sriov.pf.service.version.latest.minor, + &major, &minor)); + KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major); + KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor); +} + +static void pf_negotiate_latest_newer(struct kunit *test) +{ + struct xe_device *xe = test->priv; + u32 major, minor; + + KUNIT_ASSERT_EQ(test, 0, + pf_negotiate_version(xe, + xe->sriov.pf.service.version.latest.major, + xe->sriov.pf.service.version.latest.minor + 1, + &major, &minor)); + KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major); + KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor); +} + +static void pf_negotiate_latest_next(struct kunit *test) +{ + struct xe_device *xe = test->priv; + u32 major, minor; + + KUNIT_ASSERT_EQ(test, 0, + pf_negotiate_version(xe, + xe->sriov.pf.service.version.latest.major + 1, 0, + &major, &minor)); + KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major); + 
KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor); +} + +static void pf_negotiate_latest_older(struct kunit *test) +{ + struct xe_device *xe = test->priv; + u32 major, minor; + + if (!xe->sriov.pf.service.version.latest.minor) + kunit_skip(test, "no older minor\n"); + + KUNIT_ASSERT_EQ(test, 0, + pf_negotiate_version(xe, + xe->sriov.pf.service.version.latest.major, + xe->sriov.pf.service.version.latest.minor - 1, + &major, &minor)); + KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major); + KUNIT_ASSERT_EQ(test, minor, xe->sriov.pf.service.version.latest.minor - 1); +} + +static void pf_negotiate_latest_prev(struct kunit *test) +{ + struct xe_device *xe = test->priv; + u32 major, minor; + + if (xe->sriov.pf.service.version.base.major == xe->sriov.pf.service.version.latest.major) + kunit_skip(test, "no prev major"); + + KUNIT_ASSERT_EQ(test, 0, + pf_negotiate_version(xe, + xe->sriov.pf.service.version.latest.major - 1, + xe->sriov.pf.service.version.base.minor + 1, + &major, &minor)); + KUNIT_ASSERT_EQ(test, major, xe->sriov.pf.service.version.latest.major - 1); + KUNIT_ASSERT_GE(test, major, xe->sriov.pf.service.version.base.major); +} + +static struct kunit_case pf_service_test_cases[] = { + KUNIT_CASE(pf_negotiate_any), + KUNIT_CASE(pf_negotiate_base_match), + KUNIT_CASE(pf_negotiate_base_newer), + KUNIT_CASE(pf_negotiate_base_next), + KUNIT_CASE(pf_negotiate_base_older), + KUNIT_CASE(pf_negotiate_base_prev), + KUNIT_CASE(pf_negotiate_latest_match), + KUNIT_CASE(pf_negotiate_latest_newer), + KUNIT_CASE(pf_negotiate_latest_next), + KUNIT_CASE(pf_negotiate_latest_older), + KUNIT_CASE(pf_negotiate_latest_prev), + {} +}; + +static struct kunit_suite pf_service_suite = { + .name = "pf_service", + .test_cases = pf_service_test_cases, + .init = pf_service_test_init, +}; + +kunit_test_suite(pf_service_suite); diff --git a/drivers/gpu/drm/xe/xe_bb.c b/drivers/gpu/drm/xe/xe_bb.c index 9570672fce33..5ce0e26822f2 100644 --- a/drivers/gpu/drm/xe/xe_bb.c +++ b/drivers/gpu/drm/xe/xe_bb.c @@ -19,7 +19,7 @@ static int bb_prefetch(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); - if (GRAPHICS_VERx100(xe) >= 1250 && !xe_gt_is_media_type(gt)) + if (GRAPHICS_VERx100(xe) >= 1250 && xe_gt_is_main_type(gt)) /* * RCS and CCS require 1K, although other engines would be * okay with 512. 
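[Editor's note] The pf_service suite introduced above pins down the PF's ABI version negotiation at the device level: requests below the base version must fail, requests above the latest version are clamped down to it, and anything in between is granted as asked. Below is a minimal standalone sketch of that contract, for illustration only; the real pf_negotiate_version() is driver-internal and not shown in this diff, negotiate_sketch/struct ver are hypothetical names, and VF2PF_HANDSHAKE_MAJOR_ANY is assumed here to be encoded as 0.

```c
#include <errno.h>
#include <stdint.h>

typedef uint32_t u32;

struct ver { u32 major, minor; };

/* Hypothetical model of the negotiation rules the KUnit cases assert. */
static int negotiate_sketch(struct ver base, struct ver latest,
                            u32 major, u32 minor, struct ver *out)
{
	if (!major) {                   /* "any": hand out the newest ABI */
		*out = latest;
		return 0;
	}

	if (major < base.major ||       /* older than base: reject */
	    (major == base.major && minor < base.minor))
		return -EINVAL;         /* any non-zero error satisfies the tests */

	if (major > latest.major ||     /* newer than latest: clamp down */
	    (major == latest.major && minor > latest.minor)) {
		*out = latest;
		return 0;
	}

	out->major = major;             /* in range: grant as requested */
	out->minor = minor;
	return 0;
}
```

This mirrors, for example, pf_negotiate_latest_next (latest.major + 1 clamps to latest) and pf_negotiate_base_older (base.minor - 1 is rejected).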
diff --git a/drivers/gpu/drm/xe/xe_bb.h b/drivers/gpu/drm/xe/xe_bb.h index fafacd73dcc3..b5cc65506696 100644 --- a/drivers/gpu/drm/xe/xe_bb.h +++ b/drivers/gpu/drm/xe/xe_bb.h @@ -14,7 +14,7 @@ struct xe_gt; struct xe_exec_queue; struct xe_sched_job; -struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 size, bool usm); +struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm); struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb); struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q, diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 4e39188a021a..1be2415966df 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -19,6 +19,8 @@ #include <kunit/static_stub.h> +#include <trace/events/gpu_mem.h> + #include "xe_device.h" #include "xe_dma_buf.h" #include "xe_drm_client.h" @@ -418,6 +420,19 @@ static void xe_ttm_tt_account_subtract(struct xe_device *xe, struct ttm_tt *tt) xe_shrinker_mod_pages(xe->mem.shrinker, -(long)tt->num_pages, 0); } +static void update_global_total_pages(struct ttm_device *ttm_dev, + long num_pages) +{ +#if IS_ENABLED(CONFIG_TRACE_GPU_MEM) + struct xe_device *xe = ttm_to_xe_device(ttm_dev); + u64 global_total_pages = + atomic64_add_return(num_pages, &xe->global_total_pages); + + trace_gpu_mem_total(xe->drm.primary->index, 0, + global_total_pages << PAGE_SHIFT); +#endif +} + static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, u32 page_flags) { @@ -437,7 +452,7 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo, extra_pages = 0; if (xe_bo_needs_ccs_pages(bo)) - extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, bo->size), + extra_pages = DIV_ROUND_UP(xe_device_ccs_bytes(xe, xe_bo_size(bo)), PAGE_SIZE); /* @@ -525,6 +540,7 @@ static int xe_ttm_tt_populate(struct ttm_device *ttm_dev, struct ttm_tt *tt, xe_tt->purgeable = false; xe_ttm_tt_account_add(ttm_to_xe_device(ttm_dev), tt); + update_global_total_pages(ttm_dev, tt->num_pages); return 0; } @@ -541,6 +557,7 @@ static void xe_ttm_tt_unpopulate(struct ttm_device *ttm_dev, struct ttm_tt *tt) ttm_pool_free(&ttm_dev->pool, tt); xe_ttm_tt_account_subtract(xe, tt); + update_global_total_pages(ttm_dev, -(long)tt->num_pages); } static void xe_ttm_tt_destroy(struct ttm_device *ttm_dev, struct ttm_tt *tt) @@ -795,7 +812,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict, } if (ttm_bo->type == ttm_bo_type_sg) { - ret = xe_bo_move_notify(bo, ctx); + if (new_mem->mem_type == XE_PL_SYSTEM) + ret = xe_bo_move_notify(bo, ctx); if (!ret) ret = xe_bo_move_dmabuf(ttm_bo, new_mem); return ret; @@ -1122,7 +1140,7 @@ int xe_bo_notifier_prepare_pinned(struct xe_bo *bo) if (bo->flags & XE_BO_FLAG_PINNED_NORESTORE) goto out_unlock_bo; - backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size, + backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, xe_bo_size(bo), DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED); @@ -1200,7 +1218,8 @@ int xe_bo_evict_pinned(struct xe_bo *bo) goto out_unlock_bo; if (!backup) { - backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, NULL, bo->size, + backup = ___xe_bo_create_locked(xe, NULL, NULL, bo->ttm.base.resv, + NULL, xe_bo_size(bo), DRM_XE_GEM_CPU_CACHING_WB, ttm_bo_type_kernel, XE_BO_FLAG_SYSTEM | XE_BO_FLAG_NEEDS_CPU_ACCESS | XE_BO_FLAG_PINNED); @@ -1254,7 +1273,7 @@ int xe_bo_evict_pinned(struct xe_bo *bo) } xe_map_memcpy_from(xe, 
backup->vmap.vaddr, &bo->vmap, 0, - bo->size); + xe_bo_size(bo)); } if (!bo->backup_obj) @@ -1347,7 +1366,7 @@ int xe_bo_restore_pinned(struct xe_bo *bo) } xe_map_memcpy_to(xe, &bo->vmap, 0, backup->vmap.vaddr, - bo->size); + xe_bo_size(bo)); } bo->backup_obj = NULL; @@ -1558,7 +1577,7 @@ static int xe_ttm_access_memory(struct ttm_buffer_object *ttm_bo, vram = res_to_mem_region(ttm_bo->resource); xe_res_first(ttm_bo->resource, offset & PAGE_MASK, - bo->size - (offset & PAGE_MASK), &cursor); + xe_bo_size(bo) - (offset & PAGE_MASK), &cursor); do { unsigned long page_offset = (offset & ~PAGE_MASK); @@ -1858,7 +1877,6 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, bo->ccs_cleared = false; bo->tile = tile; - bo->size = size; bo->flags = flags; bo->cpu_caching = cpu_caching; bo->ttm.base.funcs = &xe_gem_object_funcs; @@ -2036,7 +2054,7 @@ __xe_bo_create_locked(struct xe_device *xe, if (flags & XE_BO_FLAG_FIXED_PLACEMENT) { err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo, - start + bo->size, U64_MAX); + start + xe_bo_size(bo), U64_MAX); } else { err = xe_ggtt_insert_bo(t->mem.ggtt, bo); } @@ -2157,21 +2175,6 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, return xe_bo_create_pin_map_at(xe, tile, vm, size, ~0ull, type, flags); } -struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, - const void *data, size_t size, - enum ttm_bo_type type, u32 flags) -{ - struct xe_bo *bo = xe_bo_create_pin_map(xe, tile, NULL, - ALIGN(size, PAGE_SIZE), - type, flags); - if (IS_ERR(bo)) - return bo; - - xe_map_memcpy_to(xe, &bo->vmap, 0, data, size); - - return bo; -} - static void __xe_bo_unpin_map_no_vm(void *arg) { xe_bo_unpin_map_no_vm(arg); @@ -2234,7 +2237,7 @@ int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, str xe_assert(xe, !(*src)->vmap.is_iomem); bo = xe_managed_bo_create_from_data(xe, tile, (*src)->vmap.vaddr, - (*src)->size, dst_flags); + xe_bo_size(*src), dst_flags); if (IS_ERR(bo)) return PTR_ERR(bo); @@ -2436,7 +2439,6 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict) .no_wait_gpu = false, .gfp_retry_mayfail = true, }; - struct pin_cookie cookie; int ret; if (vm) { @@ -2447,10 +2449,10 @@ int xe_bo_validate(struct xe_bo *bo, struct xe_vm *vm, bool allow_res_evict) ctx.resv = xe_vm_resv(vm); } - cookie = xe_vm_set_validating(vm, allow_res_evict); + xe_vm_set_validating(vm, allow_res_evict); trace_xe_bo_validate(bo); ret = ttm_bo_validate(&bo->ttm, &bo->placement, &ctx); - xe_vm_clear_validating(vm, allow_res_evict, cookie); + xe_vm_clear_validating(vm, allow_res_evict); return ret; } @@ -2524,7 +2526,7 @@ int xe_bo_vmap(struct xe_bo *bo) * TODO: Fix up ttm_bo_vmap to do that, or fix up ttm_bo_kmap * to use struct iosys_map. 
*/ - ret = ttm_bo_kmap(&bo->ttm, 0, bo->size >> PAGE_SHIFT, &bo->kmap); + ret = ttm_bo_kmap(&bo->ttm, 0, xe_bo_size(bo) >> PAGE_SHIFT, &bo->kmap); if (ret) return ret; diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index 02ada1fb8a23..02e8cde4c6b2 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -118,9 +118,6 @@ struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, size_t size, u64 offset, enum ttm_bo_type type, u32 flags, u64 alignment); -struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, - const void *data, size_t size, - enum ttm_bo_type type, u32 flags); struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, size_t size, u32 flags); struct xe_bo *xe_managed_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, @@ -238,6 +235,19 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size) return xe_bo_addr(bo, 0, page_size); } +/** + * xe_bo_size() - Xe BO size + * @bo: The bo object. + * + * Simple helper to return Xe BO's size. + * + * Return: Xe BO's size + */ +static inline size_t xe_bo_size(struct xe_bo *bo) +{ + return bo->ttm.base.size; +} + static inline u32 __xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id) { @@ -246,7 +256,7 @@ __xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id) if (XE_WARN_ON(!ggtt_node)) return 0; - XE_WARN_ON(ggtt_node->base.size > bo->size); + XE_WARN_ON(ggtt_node->base.size > xe_bo_size(bo)); XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32)); return ggtt_node->base.start; } @@ -300,7 +310,7 @@ bool xe_bo_needs_ccs_pages(struct xe_bo *bo); static inline size_t xe_bo_ccs_pages_start(struct xe_bo *bo) { - return PAGE_ALIGN(bo->ttm.base.size); + return PAGE_ALIGN(xe_bo_size(bo)); } static inline bool xe_bo_has_pages(struct xe_bo *bo) diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h index eb5e83c5f233..ff560d82496f 100644 --- a/drivers/gpu/drm/xe/xe_bo_types.h +++ b/drivers/gpu/drm/xe/xe_bo_types.h @@ -32,8 +32,6 @@ struct xe_bo { struct xe_bo *backup_obj; /** @parent_obj: Ref to parent bo if this a backup_obj */ struct xe_bo *parent_obj; - /** @size: Size of this buffer object */ - size_t size; /** @flags: flags for this buffer object */ u32 flags; /** @vm: VM this BO is attached to, for extobj this will be NULL */ @@ -86,7 +84,7 @@ struct xe_bo { u16 cpu_caching; /** @devmem_allocation: SVM device memory allocation */ - struct drm_gpusvm_devmem devmem_allocation; + struct drm_pagemap_devmem devmem_allocation; /** @vram_userfault_link: Link into @mem_access.vram_userfault.list */ struct list_head vram_userfault_link; diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c index 8ec1ff1e4e80..e9b46a2d0019 100644 --- a/drivers/gpu/drm/xe/xe_configfs.c +++ b/drivers/gpu/drm/xe/xe_configfs.c @@ -267,7 +267,8 @@ static struct config_group *xe_config_make_device_group(struct config_group *gro pdev = pci_get_domain_bus_and_slot(domain, bus, PCI_DEVFN(slot, function)); if (!pdev) - return ERR_PTR(-EINVAL); + return ERR_PTR(-ENODEV); + pci_dev_put(pdev); dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) diff --git a/drivers/gpu/drm/xe/xe_debugfs.c b/drivers/gpu/drm/xe/xe_debugfs.c index d83cd6ed3fa8..26e9d146ccbf 100644 --- a/drivers/gpu/drm/xe/xe_debugfs.c +++ b/drivers/gpu/drm/xe/xe_debugfs.c @@ -20,7 +20,9 @@ #include "xe_pm.h" #include "xe_pxp_debugfs.h" #include "xe_sriov.h" +#include "xe_sriov_pf.h" #include "xe_step.h" +#include "xe_wa.h" #ifdef 
CONFIG_DRM_XE_DEBUG #include "xe_bo_evict.h" @@ -82,9 +84,28 @@ static int sriov_info(struct seq_file *m, void *data) return 0; } +static int workarounds(struct xe_device *xe, struct drm_printer *p) +{ + xe_pm_runtime_get(xe); + xe_wa_device_dump(xe, p); + xe_pm_runtime_put(xe); + + return 0; +} + +static int workaround_info(struct seq_file *m, void *data) +{ + struct xe_device *xe = node_to_xe(m->private); + struct drm_printer p = drm_seq_file_printer(m); + + workarounds(xe, &p); + return 0; +} + static const struct drm_info_list debugfs_list[] = { {"info", info, 0}, { .name = "sriov_info", .show = sriov_info, }, + { .name = "workarounds", .show = workaround_info, }, }; static int forcewake_open(struct inode *inode, struct file *file) @@ -273,4 +294,7 @@ void xe_debugfs_register(struct xe_device *xe) xe_pxp_debugfs_register(xe->pxp); fault_create_debugfs_attr("fail_gt_reset", root, &gt_reset_failure); + + if (IS_SRIOV_PF(xe)) + xe_sriov_pf_debugfs_register(xe, root); } diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 7a8af2311318..203e3038cc81 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -171,14 +171,32 @@ static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss) #define XE_DEVCOREDUMP_CHUNK_MAX (SZ_512M + SZ_1G) +/** + * xe_devcoredump_read() - Read data from the Xe device coredump snapshot + * @buffer: Destination buffer to copy the coredump data into + * @offset: Offset in the coredump data to start reading from + * @count: Number of bytes to read + * @data: Pointer to the xe_devcoredump structure + * @datalen: Length of the data (unused) + * + * Reads a chunk of the coredump snapshot data into the provided buffer. + * If the devcoredump is smaller than 1.5 GB (XE_DEVCOREDUMP_CHUNK_MAX), + * it is read directly from a pre-written buffer. For larger devcoredumps, + * the pre-written buffer must be periodically repopulated from the snapshot + * state due to kmalloc size limitations. + * + * Return: Number of bytes copied on success, or a negative error code on failure. + */ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) { struct xe_devcoredump *coredump = data; struct xe_devcoredump_snapshot *ss; - ssize_t byte_copied; + ssize_t byte_copied = 0; u32 chunk_offset; ssize_t new_chunk_position; + bool pm_needed = false; + int ret = 0; if (!coredump) return -ENODEV; @@ -188,20 +206,19 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, /* Ensure delayed work is captured before continuing */ flush_work(&ss->work); - if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) + pm_needed = ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX; + if (pm_needed) xe_pm_runtime_get(gt_to_xe(ss->gt)); mutex_lock(&coredump->lock); if (!ss->read.buffer) { - mutex_unlock(&coredump->lock); - return -ENODEV; + ret = -ENODEV; + goto unlock; } - if (offset >= ss->read.size) { - mutex_unlock(&coredump->lock); - return 0; - } + if (offset >= ss->read.size) + goto unlock; new_chunk_position = div_u64_rem(offset, XE_DEVCOREDUMP_CHUNK_MAX, @@ -221,12 +238,13 @@ static ssize_t xe_devcoredump_read(char *buffer, loff_t offset, ss->read.size - offset; memcpy(buffer, ss->read.buffer + chunk_offset, byte_copied); +unlock: mutex_unlock(&coredump->lock); - if (ss->read.size > XE_DEVCOREDUMP_CHUNK_MAX) + if (pm_needed) xe_pm_runtime_put(gt_to_xe(ss->gt)); - return byte_copied ?
byte_copied : ret; } static void xe_devcoredump_free(void *data) { @@ -313,13 +331,9 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, { struct xe_devcoredump_snapshot *ss = &coredump->snapshot; struct xe_guc *guc = exec_queue_to_guc(q); - u32 adj_logical_mask = q->logical_mask; - u32 width_mask = (0x1 << q->width) - 1; const char *process_name = "no process"; - unsigned int fw_ref; bool cookie; - int i; ss->snapshot_time = ktime_get_real(); ss->boot_time = ktime_get_boottime(); @@ -335,14 +349,6 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work); cookie = dma_fence_begin_signalling(); - for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) { - if (adj_logical_mask & BIT(i)) { - adj_logical_mask |= width_mask << i; - i += q->width; - } else { - ++i; - } - } /* keep going if fw fails as we still want to save the memory and SW data */ fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL); diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 8cfcfff250ca..6ece4defa9df 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -40,12 +40,14 @@ #include "xe_gt_printk.h" #include "xe_gt_sriov_vf.h" #include "xe_guc.h" +#include "xe_guc_pc.h" #include "xe_hw_engine_group.h" #include "xe_hwmon.h" +#include "xe_i2c.h" #include "xe_irq.h" -#include "xe_memirq.h" #include "xe_mmio.h" #include "xe_module.h" +#include "xe_nvm.h" #include "xe_oa.h" #include "xe_observation.h" #include "xe_pat.h" @@ -66,6 +68,7 @@ #include "xe_wait_user_fence.h" #include "xe_wa.h" +#include <generated/xe_device_wa_oob.h> #include <generated/xe_wa_oob.h> static int xe_file_open(struct drm_device *dev, struct drm_file *file) @@ -678,6 +681,7 @@ static void sriov_update_device_info(struct xe_device *xe) /* disable features that are not available/applicable to VFs */ if (IS_SRIOV_VF(xe)) { xe->info.probe_display = 0; + xe->info.has_heci_cscfi = 0; xe->info.has_heci_gscfi = 0; xe->info.skip_guc_pc = 1; xe->info.skip_pcode = 1; @@ -698,6 +702,9 @@ int xe_device_probe_early(struct xe_device *xe) { int err; + xe_wa_device_init(xe); + xe_wa_process_device_oob(xe); + err = xe_mmio_probe_early(xe); if (err) return err; @@ -783,52 +790,18 @@ int xe_device_probe(struct xe_device *xe) if (err) return err; - err = xe_ttm_sys_mgr_init(xe); - if (err) - return err; - for_each_gt(gt, xe, id) { err = xe_gt_init_early(gt); if (err) return err; - - /* - * Only after this point can GT-specific MMIO operations - * (including things like communication with the GuC) - * be performed. - */ - xe_gt_mmio_init(gt); - - if (IS_SRIOV_VF(xe)) { - xe_guc_comm_init_early(&gt->uc.guc); - err = xe_gt_sriov_vf_bootstrap(gt); - if (err) - return err; - err = xe_gt_sriov_vf_query_config(gt); - if (err) - return err; - } } for_each_tile(tile, xe, id) { err = xe_ggtt_init_early(tile->mem.ggtt); if (err) return err; - err = xe_memirq_init(&tile->memirq); - if (err) - return err; } - for_each_gt(gt, xe, id) { - err = xe_gt_init_hwconfig(gt); - if (err) - return err; - } - - err = xe_devcoredump_init(xe); - if (err) - return err; - /* * From here on, if a step fails, make sure a Driver-FLR is triggered */ @@ -850,6 +823,14 @@ int xe_device_probe(struct xe_device *xe) return err; } + /* + * Allow allocations only now to ensure xe_display_init_early() + * is the first to allocate, always.
+ */ + err = xe_ttm_sys_mgr_init(xe); + if (err) + return err; + /* Allocate and map stolen after potential VRAM resize */ err = xe_ttm_stolen_mgr_init(xe); if (err) @@ -881,6 +862,16 @@ int xe_device_probe(struct xe_device *xe) return err; } + if (xe->tiles->media_gt && + XE_WA(xe->tiles->media_gt, 15015404425_disable)) + XE_DEVICE_WA_DISABLE(xe, 15015404425); + + err = xe_devcoredump_init(xe); + if (err) + return err; + + xe_nvm_init(xe); + err = xe_heci_gsc_init(xe); if (err) return err; @@ -921,6 +912,10 @@ int xe_device_probe(struct xe_device *xe) if (err) goto err_unregister_display; + err = xe_i2c_probe(xe); + if (err) + goto err_unregister_display; + for_each_gt(gt, xe, id) xe_gt_sanitize_freq(gt); @@ -938,6 +933,8 @@ void xe_device_remove(struct xe_device *xe) { xe_display_unregister(xe); + xe_nvm_fini(xe); + drm_dev_unplug(&xe->drm); xe_bo_pci_dev_remove_all(xe); @@ -981,38 +978,15 @@ void xe_device_wmb(struct xe_device *xe) xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0); } -/** - * xe_device_td_flush() - Flush transient L3 cache entries - * @xe: The device - * - * Display engine has direct access to memory and is never coherent with L3/L4 - * caches (or CPU caches), however KMD is responsible for specifically flushing - * transient L3 GPU cache entries prior to the flip sequence to ensure scanout - * can happen from such a surface without seeing corruption. - * - * Display surfaces can be tagged as transient by mapping it using one of the - * various L3:XD PAT index modes on Xe2. - * - * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed - * at the end of each submission via PIPE_CONTROL for compute/render, since SA - * Media is not coherent with L3 and we want to support render-vs-media - * usescases. For other engines like copy/blt the HW internally forces uncached - * behaviour, hence why we can skip the TDF on such platforms. +/* + * Issue a TRANSIENT_FLUSH_REQUEST and wait for completion on each gt. */ -void xe_device_td_flush(struct xe_device *xe) +static void tdf_request_sync(struct xe_device *xe) { - struct xe_gt *gt; unsigned int fw_ref; + struct xe_gt *gt; u8 id; - if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20) - return; - - if (XE_WA(xe_root_mmio_gt(xe), 16023588340)) { - xe_device_l2_flush(xe); - return; - } - for_each_gt(gt, xe, id) { if (xe_gt_is_media_type(gt)) continue; @@ -1022,6 +996,7 @@ void xe_device_td_flush(struct xe_device *xe) return; xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST); + /* * FIXME: We can likely do better here with our choice of * timeout. Currently we just assume the worst case, i.e. 150us, @@ -1052,15 +1027,52 @@ void xe_device_l2_flush(struct xe_device *xe) return; spin_lock(&gt->global_invl_lock); - xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1); + xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1); if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 500, NULL, true)) xe_gt_err_once(gt, "Global invalidation timeout\n"); + spin_unlock(&gt->global_invl_lock); xe_force_wake_put(gt_to_fw(gt), fw_ref); }
+ * + * Display surfaces can be tagged as transient by mapping them using one of the + * various L3:XD PAT index modes on Xe2. + * + * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed + * at the end of each submission via PIPE_CONTROL for compute/render, since SA + * Media is not coherent with L3 and we want to support render-vs-media + * use cases. For other engines like copy/blt the HW internally forces uncached + * behaviour, hence we can skip the TDF on such platforms. + */ +void xe_device_td_flush(struct xe_device *xe) +{ + struct xe_gt *root_gt; + + if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20) + return; + + root_gt = xe_root_mmio_gt(xe); + if (XE_WA(root_gt, 16023588340)) { + /* A transient flush is not sufficient: flush the L2 */ + xe_device_l2_flush(xe); + } else { + xe_guc_pc_apply_flush_freq_limit(&root_gt->uc.guc.pc); + tdf_request_sync(xe); + xe_guc_pc_remove_flush_freq_limit(&root_gt->uc.guc.pc); + } +} + u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size) { return xe_device_has_flat_ccs(xe) ? diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h index e4da797a984b..bc802e066a7d 100644 --- a/drivers/gpu/drm/xe/xe_device.h +++ b/drivers/gpu/drm/xe/xe_device.h @@ -60,35 +60,32 @@ static inline struct xe_tile *xe_device_get_root_tile(struct xe_device *xe) return &xe->tiles[0]; } +/* + * Highest GT/tile count for any platform. Used only for memory allocation + * sizing. Any logic looping over GTs or mapping userspace GT IDs into GT + * structures should use the per-platform xe->info.max_gt_per_tile instead. + */ #define XE_MAX_GT_PER_TILE 2 -static inline struct xe_gt *xe_tile_get_gt(struct xe_tile *tile, u8 gt_id) -{ - if (drm_WARN_ON(&tile_to_xe(tile)->drm, gt_id >= XE_MAX_GT_PER_TILE)) - gt_id = 0; - - return gt_id ? tile->media_gt : tile->primary_gt; -} - static inline struct xe_gt *xe_device_get_gt(struct xe_device *xe, u8 gt_id) { - struct xe_tile *root_tile = xe_device_get_root_tile(xe); + struct xe_tile *tile; struct xe_gt *gt; - /* - * FIXME: This only works for now because multi-tile and standalone - * media are mutually exclusive on the platforms we have today. - * - * id => GT mapping may change once we settle on how we want to handle - * our UAPI. - */ - if (MEDIA_VER(xe) >= 13) { - gt = xe_tile_get_gt(root_tile, gt_id); - } else { - if (drm_WARN_ON(&xe->drm, gt_id >= XE_MAX_TILES_PER_DEVICE)) - gt_id = 0; + if (gt_id >= xe->info.tile_count * xe->info.max_gt_per_tile) + return NULL; - gt = xe->tiles[gt_id].primary_gt; + tile = &xe->tiles[gt_id / xe->info.max_gt_per_tile]; + switch (gt_id % xe->info.max_gt_per_tile) { + default: + xe_assert(xe, false); + fallthrough; + case 0: + gt = tile->primary_gt; + break; + case 1: + gt = tile->media_gt; + break; } if (!gt) @@ -130,14 +127,14 @@ static inline bool xe_device_uc_enabled(struct xe_device *xe) for ((id__) = 1; (id__) < (xe__)->info.tile_count; (id__)++) \ for_each_if((tile__) = &(xe__)->tiles[(id__)]) -/* - * FIXME: This only works for now since multi-tile and standalone media - * happen to be mutually exclusive. Future platforms may change this...
- */ #define for_each_gt(gt__, xe__, id__) \ - for ((id__) = 0; (id__) < (xe__)->info.gt_count; (id__)++) \ + for ((id__) = 0; (id__) < (xe__)->info.tile_count * (xe__)->info.max_gt_per_tile; (id__)++) \ for_each_if((gt__) = xe_device_get_gt((xe__), (id__))) +#define for_each_gt_on_tile(gt__, tile__, id__) \ + for_each_gt((gt__), (tile__)->xe, (id__)) \ + for_each_if((gt__)->tile == (tile__)) + static inline struct xe_force_wake *gt_to_fw(struct xe_gt *gt) { return &gt->pm.fw; } diff --git a/drivers/gpu/drm/xe/xe_device_sysfs.c b/drivers/gpu/drm/xe/xe_device_sysfs.c index b9440f8c781e..bd9015761aa0 100644 --- a/drivers/gpu/drm/xe/xe_device_sysfs.c +++ b/drivers/gpu/drm/xe/xe_device_sysfs.c @@ -24,6 +24,12 @@ * * vram_d3cold_threshold - Report/change vram used threshold(in MB) below * which vram save/restore is permissible during runtime D3cold entry/exit. + * + * lb_fan_control_version - Fan control version provisioned by late binding. + * Exposed only if supported by the device. + * + * lb_voltage_regulator_version - Voltage regulator version provisioned by late + * binding. Exposed only if supported by the device. */ static ssize_t @@ -65,6 +71,140 @@ vram_d3cold_threshold_store(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR_RW(vram_d3cold_threshold); +static ssize_t +lb_fan_control_version_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev)); + struct xe_tile *root = xe_device_get_root_tile(xe); + u32 cap, ver_low = FAN_TABLE, ver_high = FAN_TABLE; + u16 major = 0, minor = 0, hotfix = 0, build = 0; + int ret; + + xe_pm_runtime_get(xe); + + ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0), + &cap, NULL); + if (ret) + goto out; + + if (REG_FIELD_GET(V1_FAN_PROVISIONED, cap)) { + ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0), + &ver_low, NULL); + if (ret) + goto out; + + ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0), + &ver_high, NULL); + if (ret) + goto out; + + major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low); + minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low); + hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high); + build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high); + } +out: + xe_pm_runtime_put(xe); + + return ret ?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build); +} +static DEVICE_ATTR_ADMIN_RO(lb_fan_control_version); + +static ssize_t +lb_voltage_regulator_version_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev)); + struct xe_tile *root = xe_device_get_root_tile(xe); + u32 cap, ver_low = VR_CONFIG, ver_high = VR_CONFIG; + u16 major = 0, minor = 0, hotfix = 0, build = 0; + int ret; + + xe_pm_runtime_get(xe); + + ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0), + &cap, NULL); + if (ret) + goto out; + + if (REG_FIELD_GET(VR_PARAMS_PROVISIONED, cap)) { + ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_LOW, 0), + &ver_low, NULL); + if (ret) + goto out; + + ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_VERSION_HIGH, 0), + &ver_high, NULL); + if (ret) + goto out; + + major = REG_FIELD_GET(MAJOR_VERSION_MASK, ver_low); + minor = REG_FIELD_GET(MINOR_VERSION_MASK, ver_low); + hotfix = REG_FIELD_GET(HOTFIX_VERSION_MASK, ver_high); + build = REG_FIELD_GET(BUILD_VERSION_MASK, ver_high); + } +out: + xe_pm_runtime_put(xe); + + return ret
?: sysfs_emit(buf, "%u.%u.%u.%u\n", major, minor, hotfix, build); +} +static DEVICE_ATTR_ADMIN_RO(lb_voltage_regulator_version); + +static int late_bind_create_files(struct device *dev) +{ + struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev)); + struct xe_tile *root = xe_device_get_root_tile(xe); + u32 cap; + int ret; + + xe_pm_runtime_get(xe); + + ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0), + &cap, NULL); + if (ret) { + if (ret == -ENXIO) { + drm_dbg(&xe->drm, "Late binding not supported by firmware\n"); + ret = 0; + } + goto out; + } + + if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) { + ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr); + if (ret) + goto out; + } + + if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap)) + ret = sysfs_create_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr); +out: + xe_pm_runtime_put(xe); + + return ret; +} + +static void late_bind_remove_files(struct device *dev) +{ + struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev)); + struct xe_tile *root = xe_device_get_root_tile(xe); + u32 cap; + int ret; + + xe_pm_runtime_get(xe); + + ret = xe_pcode_read(root, PCODE_MBOX(PCODE_LATE_BINDING, GET_CAPABILITY_STATUS, 0), + &cap, NULL); + if (ret) + goto out; + + if (REG_FIELD_GET(V1_FAN_SUPPORTED, cap)) + sysfs_remove_file(&dev->kobj, &dev_attr_lb_fan_control_version.attr); + + if (REG_FIELD_GET(VR_PARAMS_SUPPORTED, cap)) + sysfs_remove_file(&dev->kobj, &dev_attr_lb_voltage_regulator_version.attr); +out: + xe_pm_runtime_put(xe); +} + /** * DOC: PCIe Gen5 Limitations * @@ -151,8 +291,10 @@ static void xe_device_sysfs_fini(void *arg) if (xe->d3cold.capable) sysfs_remove_file(&xe->drm.dev->kobj, &dev_attr_vram_d3cold_threshold.attr); - if (xe->info.platform == XE_BATTLEMAGE) + if (xe->info.platform == XE_BATTLEMAGE) { sysfs_remove_files(&xe->drm.dev->kobj, auto_link_downgrade_attrs); + late_bind_remove_files(xe->drm.dev); + } } int xe_device_sysfs_init(struct xe_device *xe) @@ -170,6 +312,10 @@ int xe_device_sysfs_init(struct xe_device *xe) ret = sysfs_create_files(&dev->kobj, auto_link_downgrade_attrs); if (ret) return ret; + + ret = late_bind_create_files(dev); + if (ret) + return ret; } return devm_add_action_or_reset(dev, xe_device_sysfs_fini, xe); diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 003afb279a5e..d4d2c6854790 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -21,7 +21,9 @@ #include "xe_platform_types.h" #include "xe_pmu_types.h" #include "xe_pt_types.h" +#include "xe_sriov_pf_types.h" #include "xe_sriov_types.h" +#include "xe_sriov_vf_types.h" #include "xe_step_types.h" #include "xe_survivability_mode_types.h" #include "xe_ttm_vram_mgr_types.h" @@ -32,7 +34,9 @@ struct dram_info; struct intel_display; +struct intel_dg_nvm_dev; struct xe_ggtt; +struct xe_i2c; struct xe_pat_ops; struct xe_pxp; @@ -105,7 +109,7 @@ struct xe_vram_region { void __iomem *mapping; /** @ttm: VRAM TTM manager */ struct xe_ttm_vram_mgr ttm; -#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) +#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) /** @pagemap: Used to remap device memory as ZONE_DEVICE */ struct dev_pagemap pagemap; /** @@ -293,6 +297,8 @@ struct xe_device { u8 vram_flags; /** @info.tile_count: Number of tiles */ u8 tile_count; + /** @info.max_gt_per_tile: Number of GT IDs allocated to each tile */ + u8 max_gt_per_tile; /** @info.gt_count: Total number of GTs for entire device */ u8 gt_count; /** @info.vm_max_level: Max 
VM level */ @@ -316,6 +322,8 @@ struct xe_device { u8 has_fan_control:1; /** @info.has_flat_ccs: Whether flat CCS metadata is used */ u8 has_flat_ccs:1; + /** @info.has_gsc_nvm: Device has gsc non-volatile memory */ + u8 has_gsc_nvm:1; /** @info.has_heci_cscfi: device has heci cscfi */ u8 has_heci_cscfi:1; /** @info.has_heci_gscfi: device has heci gscfi */ @@ -357,6 +365,19 @@ struct xe_device { u8 skip_pcode:1; } info; + /** @wa_active: keep track of active workarounds */ + struct { + /** @wa_active.oob: bitmap with active OOB workarounds */ + unsigned long *oob; + + /** + * @wa_active.oob_initialized: Mark oob as initialized to help detecting misuse + * of XE_DEVICE_WA() - it can only be called on initialization after + * Device OOB WAs have been processed. + */ + bool oob_initialized; + } wa_active; + /** @survivability: survivability information for device */ struct xe_survivability survivability; @@ -403,10 +424,12 @@ struct xe_device { /** @sriov.__mode: SR-IOV mode (Don't access directly!) */ enum xe_sriov_mode __mode; - /** @sriov.pf: PF specific data */ - struct xe_device_pf pf; - /** @sriov.vf: VF specific data */ - struct xe_device_vf vf; + union { + /** @sriov.pf: PF specific data */ + struct xe_device_pf pf; + /** @sriov.vf: VF specific data */ + struct xe_device_vf vf; + }; /** @sriov.wq: workqueue used by the virtualization workers */ struct workqueue_struct *wq; @@ -549,6 +572,9 @@ struct xe_device { /** @heci_gsc: graphics security controller */ struct xe_heci_gsc heci_gsc; + /** @nvm: discrete graphics non-volatile memory */ + struct intel_dg_nvm_dev *nvm; + /** @oa: oa observation subsystem */ struct xe_oa oa; @@ -577,6 +603,9 @@ struct xe_device { /** @pmu: performance monitoring unit */ struct xe_pmu pmu; + /** @i2c: I2C host controller */ + struct xe_i2c *i2c; + /** @atomic_svm_timeslice_ms: Atomic SVM fault timeslice MS */ u32 atomic_svm_timeslice_ms; @@ -588,6 +617,14 @@ struct xe_device { u8 vm_inject_error_position; #endif +#if IS_ENABLED(CONFIG_TRACE_GPU_MEM) + /** + * @global_total_pages: global GPU page usage tracked for gpu_mem + * tracepoints + */ + atomic64_t global_total_pages; +#endif + /* private: */ #if IS_ENABLED(CONFIG_DRM_XE_DISPLAY) diff --git a/drivers/gpu/drm/xe/xe_device_wa_oob.rules b/drivers/gpu/drm/xe/xe_device_wa_oob.rules new file mode 100644 index 000000000000..3a0c4ccc4224 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_device_wa_oob.rules @@ -0,0 +1,2 @@ +15015404425 PLATFORM(LUNARLAKE) + PLATFORM(PANTHERLAKE) diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c index 31f688e953d7..f931ff9b1ec0 100644 --- a/drivers/gpu/drm/xe/xe_drm_client.c +++ b/drivers/gpu/drm/xe/xe_drm_client.c @@ -167,7 +167,7 @@ void xe_drm_client_remove_bo(struct xe_bo *bo) static void bo_meminfo(struct xe_bo *bo, struct drm_memory_stats stats[TTM_NUM_MEM_TYPES]) { - u64 sz = bo->size; + u64 sz = xe_bo_size(bo); u32 mem_type = bo->ttm.resource->mem_type; xe_bo_assert_held(bo); diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c index 96732613b4b7..af7916315ac6 100644 --- a/drivers/gpu/drm/xe/xe_eu_stall.c +++ b/drivers/gpu/drm/xe/xe_eu_stall.c @@ -258,11 +258,13 @@ static int set_prop_eu_stall_wait_num_reports(struct xe_device *xe, u64 value, static int set_prop_eu_stall_gt_id(struct xe_device *xe, u64 value, struct eu_stall_open_properties *props) { - if (value >= xe->info.gt_count) { + struct xe_gt *gt = xe_device_get_gt(xe, value); + + if (!gt) { drm_dbg(&xe->drm, "Invalid GT ID %llu for EU stall sampling\n", 
value); return -EINVAL; } - props->gt = xe_device_get_gt(xe, value); + props->gt = gt; return 0; } diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c index fee22358cc09..8991b4aed440 100644 --- a/drivers/gpu/drm/xe/xe_exec_queue.c +++ b/drivers/gpu/drm/xe/xe_exec_queue.c @@ -610,7 +610,7 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data, if (XE_IOCTL_DBG(xe, err)) return -EFAULT; - if (XE_IOCTL_DBG(xe, eci[0].gt_id >= xe->info.gt_count)) + if (XE_IOCTL_DBG(xe, !xe_device_get_gt(xe, eci[0].gt_id))) return -EINVAL; if (args->flags & DRM_XE_EXEC_QUEUE_LOW_LATENCY_HINT) diff --git a/drivers/gpu/drm/xe/xe_force_wake.c b/drivers/gpu/drm/xe/xe_force_wake.c index 8a5cba22b586..c59a9b330697 100644 --- a/drivers/gpu/drm/xe/xe_force_wake.c +++ b/drivers/gpu/drm/xe/xe_force_wake.c @@ -64,7 +64,7 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw) { int i, j; - if (!xe_gt_is_media_type(gt)) + if (xe_gt_is_main_type(gt)) init_domain(fw, XE_FW_DOMAIN_ID_RENDER, FORCEWAKE_RENDER, FORCEWAKE_ACK_RENDER); diff --git a/drivers/gpu/drm/xe/xe_gen_wa_oob.c b/drivers/gpu/drm/xe/xe_gen_wa_oob.c index ed9183599e31..247e41c1c48d 100644 --- a/drivers/gpu/drm/xe/xe_gen_wa_oob.c +++ b/drivers/gpu/drm/xe/xe_gen_wa_oob.c @@ -18,8 +18,8 @@ " *\n" \ " * This file was generated from rules: %s\n" \ " */\n" \ - "#ifndef _GENERATED_XE_WA_OOB_\n" \ - "#define _GENERATED_XE_WA_OOB_\n" \ + "#ifndef _GENERATED_%s_\n" \ + "#define _GENERATED_%s_\n" \ "\n" \ "enum {\n" @@ -52,7 +52,7 @@ static char *strip(char *line, size_t linelen) } #define MAX_LINE_LEN 4096 -static int parse(FILE *input, FILE *csource, FILE *cheader) +static int parse(FILE *input, FILE *csource, FILE *cheader, char *prefix) { char line[MAX_LINE_LEN + 1]; char *name, *prev_name = NULL, *rules; @@ -96,7 +96,7 @@ static int parse(FILE *input, FILE *csource, FILE *cheader) } if (name) { - fprintf(cheader, "\tXE_WA_OOB_%s = %u,\n", name, idx); + fprintf(cheader, "\t%s_%s = %u,\n", prefix, name, idx); /* Close previous entry before starting a new one */ if (idx) @@ -118,7 +118,41 @@ static int parse(FILE *input, FILE *csource, FILE *cheader) if (idx) fprintf(csource, ") },\n"); - fprintf(cheader, "\t_XE_WA_OOB_COUNT = %u\n", idx); + fprintf(cheader, "\t_%s_COUNT = %u\n", prefix, idx); + + return 0; +} + +/* Avoid GNU vs POSIX basename() discrepancy, just use our own */ +static const char *xbasename(const char *s) +{ + const char *p = strrchr(s, '/'); + + return p ? 
p + 1 : s; +} + +static int fn_to_prefix(const char *fn, char *prefix, size_t size) +{ + size_t len; + + fn = xbasename(fn); + len = strlen(fn); + + if (len > size - 1) + return -ENAMETOOLONG; + + memcpy(prefix, fn, len + 1); + + for (char *p = prefix; *p; p++) { + switch (*p) { + case '.': + *p = '\0'; + return 0; + default: + *p = toupper(*p); + break; + } + } return 0; } @@ -141,6 +175,7 @@ int main(int argc, const char *argv[]) [ARGS_CHEADER] = { .fn = argv[3], .mode = "w" }, }; int ret = 1; + char prefix[128]; if (argc < 3) { fprintf(stderr, "ERROR: wrong arguments\n"); @@ -148,6 +183,9 @@ int main(int argc, const char *argv[]) return 1; } + if (fn_to_prefix(args[ARGS_CHEADER].fn, prefix, sizeof(prefix)) < 0) + return 1; + for (int i = 0; i < _ARGS_COUNT; i++) { args[i].f = fopen(args[i].fn, args[i].mode); if (!args[i].f) { @@ -157,9 +195,10 @@ int main(int argc, const char *argv[]) } } - fprintf(args[ARGS_CHEADER].f, HEADER, args[ARGS_INPUT].fn); + fprintf(args[ARGS_CHEADER].f, HEADER, args[ARGS_INPUT].fn, prefix, prefix); + ret = parse(args[ARGS_INPUT].f, args[ARGS_CSOURCE].f, - args[ARGS_CHEADER].f); + args[ARGS_CHEADER].f, prefix); if (!ret) fprintf(args[ARGS_CHEADER].f, FOOTER); diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index 7b11fa1356f0..29d4d3f51da1 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -238,6 +238,13 @@ int xe_ggtt_init_kunit(struct xe_ggtt *ggtt, u32 reserved, u32 size) } EXPORT_SYMBOL_IF_KUNIT(xe_ggtt_init_kunit); +static void dev_fini_ggtt(void *arg) +{ + struct xe_ggtt *ggtt = arg; + + drain_workqueue(ggtt->wq); +} + /** * xe_ggtt_init_early - Early GGTT initialization * @ggtt: the &xe_ggtt to be initialized @@ -290,6 +297,10 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt) if (err) return err; + err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt); + if (err) + return err; + if (IS_SRIOV_VF(xe)) { err = xe_tile_sriov_vf_prepare_ggtt(ggtt->tile); if (err) @@ -410,7 +421,7 @@ int xe_ggtt_init(struct xe_ggtt *ggtt) goto err; } - xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, ggtt->scratch->size); + xe_map_memset(xe, &ggtt->scratch->vmap, 0, 0, xe_bo_size(ggtt->scratch)); xe_ggtt_initial_clear(ggtt); @@ -682,13 +693,13 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node, return; start = node->base.start; - end = start + bo->size; + end = start + xe_bo_size(bo); pte = ggtt->pt_ops->pte_encode_flags(bo, pat_index); if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) { xe_assert(xe_bo_device(bo), bo->ttm.ttm); - for (xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &cur); + for (xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &cur); cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE)) ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining, pte | xe_res_dma(&cur)); @@ -696,7 +707,7 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_ggtt_node *node, /* Prepend GPU offset */ pte |= vram_region_gpu_offset(bo->ttm.resource); - for (xe_res_first(bo->ttm.resource, 0, bo->size, &cur); + for (xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur); cur.remaining; xe_res_next(&cur, XE_PAGE_SIZE)) ggtt->pt_ops->ggtt_set_pte(ggtt, end - cur.remaining, pte + cur.start); @@ -732,7 +743,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, if (XE_WARN_ON(bo->ggtt_node[tile_id])) { /* Someone's already inserted this BO in the GGTT */ - xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size); + xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == 
xe_bo_size(bo)); return 0; } @@ -751,7 +762,7 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, mutex_lock(&ggtt->lock); err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base, - bo->size, alignment, 0, start, end, 0); + xe_bo_size(bo), alignment, 0, start, end, 0); if (err) { xe_ggtt_node_fini(bo->ggtt_node[tile_id]); bo->ggtt_node[tile_id] = NULL; @@ -812,7 +823,7 @@ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) return; /* This BO is not currently in the GGTT */ - xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size); + xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == xe_bo_size(bo)); xe_ggtt_node_remove(bo->ggtt_node[tile_id], bo->flags & XE_BO_FLAG_GGTT_INVALIDATE); diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c index 0bcf97063ff6..1d84bf2f2cef 100644 --- a/drivers/gpu/drm/xe/xe_gsc.c +++ b/drivers/gpu/drm/xe/xe_gsc.c @@ -59,7 +59,8 @@ static int memcpy_fw(struct xe_gsc *gsc) xe_map_memcpy_from(xe, storage, &gsc->fw.bo->vmap, 0, fw_size); xe_map_memcpy_to(xe, &gsc->private->vmap, 0, storage, fw_size); - xe_map_memset(xe, &gsc->private->vmap, fw_size, 0, gsc->private->size - fw_size); + xe_map_memset(xe, &gsc->private->vmap, fw_size, 0, + xe_bo_size(gsc->private) - fw_size); kfree(storage); @@ -82,7 +83,8 @@ static int emit_gsc_upload(struct xe_gsc *gsc) bb->cs[bb->len++] = GSC_FW_LOAD; bb->cs[bb->len++] = lower_32_bits(offset); bb->cs[bb->len++] = upper_32_bits(offset); - bb->cs[bb->len++] = (gsc->private->size / SZ_4K) | GSC_FW_LOAD_LIMIT_VALID; + bb->cs[bb->len++] = (xe_bo_size(gsc->private) / SZ_4K) | + GSC_FW_LOAD_LIMIT_VALID; job = xe_bb_create_job(gsc->q, bb); if (IS_ERR(job)) { diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c index d0519cd6704a..464282a89eef 100644 --- a/drivers/gpu/drm/xe/xe_gsc_proxy.c +++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c @@ -23,6 +23,7 @@ #include "xe_map.h" #include "xe_mmio.h" #include "xe_pm.h" +#include "xe_tile.h" /* * GSC proxy: @@ -483,7 +484,7 @@ int xe_gsc_proxy_init(struct xe_gsc *gsc) } /* no multi-tile devices with this feature yet */ - if (tile->id > 0) { + if (!xe_tile_is_root(tile)) { xe_gt_err(gt, "unexpected GSC proxy init on tile %u\n", tile->id); return -EINVAL; } diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 9752a38c0162..c8eda36546d3 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -112,7 +112,7 @@ static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) if (!fw_ref) return; - if (!xe_gt_is_media_type(gt)) { + if (xe_gt_is_main_type(gt)) { reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL); reg |= CG_DIS_CNTLBUS; xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); @@ -146,30 +146,23 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) static void gt_reset_worker(struct work_struct *w); -static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q) +static int emit_job_sync(struct xe_exec_queue *q, struct xe_bb *bb, + long timeout_jiffies) { struct xe_sched_job *job; - struct xe_bb *bb; struct dma_fence *fence; long timeout; - bb = xe_bb_new(gt, 4, false); - if (IS_ERR(bb)) - return PTR_ERR(bb); - job = xe_bb_create_job(q, bb); - if (IS_ERR(job)) { - xe_bb_free(bb, NULL); + if (IS_ERR(job)) return PTR_ERR(job); - } xe_sched_job_arm(job); fence = dma_fence_get(&job->drm.s_fence->finished); xe_sched_job_push(job); - timeout = dma_fence_wait_timeout(fence, false, HZ); + timeout = 
dma_fence_wait_timeout(fence, false, timeout_jiffies); dma_fence_put(fence); - xe_bb_free(bb, NULL); if (timeout < 0) return timeout; else if (!timeout) @@ -178,27 +171,30 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q) return 0; } +static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q) +{ + struct xe_bb *bb; + int ret; + + bb = xe_bb_new(gt, 4, false); + if (IS_ERR(bb)) + return PTR_ERR(bb); + + ret = emit_job_sync(q, bb, HZ); + xe_bb_free(bb, NULL); + + return ret; +} + static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) { struct xe_reg_sr *sr = &q->hwe->reg_lrc; struct xe_reg_sr_entry *entry; + int count_rmw = 0, count = 0, ret; unsigned long idx; - struct xe_sched_job *job; struct xe_bb *bb; - struct dma_fence *fence; - long timeout; - int count_rmw = 0; - int count = 0; - - if (q->hwe->class == XE_ENGINE_CLASS_RENDER) - /* Big enough to emit all of the context's 3DSTATE */ - bb = xe_bb_new(gt, xe_gt_lrc_size(gt, q->hwe->class), false); - else - /* Just pick a large BB size */ - bb = xe_bb_new(gt, SZ_4K, false); - - if (IS_ERR(bb)) - return PTR_ERR(bb); + size_t bb_len = 0; + u32 *cs; /* count RMW registers as those will be handled separately */ xa_for_each(&sr->xa, idx, entry) { @@ -208,13 +204,34 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) ++count_rmw; } - if (count || count_rmw) - xe_gt_dbg(gt, "LRC WA %s save-restore batch\n", sr->name); + if (count) + bb_len += count * 2 + 1; + + if (count_rmw) + bb_len += count_rmw * 20 + 7; + + if (q->hwe->class == XE_ENGINE_CLASS_RENDER) + /* + * Big enough to emit all of the context's 3DSTATE via + * xe_lrc_emit_hwe_state_instructions() + */ + bb_len += xe_gt_lrc_size(gt, q->hwe->class) / sizeof(u32); + + xe_gt_dbg(gt, "LRC %s WA job: %zu dwords\n", q->hwe->name, bb_len); + + bb = xe_bb_new(gt, bb_len, false); + if (IS_ERR(bb)) + return PTR_ERR(bb); + + cs = bb->cs; if (count) { - /* emit single LRI with all non RMW regs */ + /* + * Emit single LRI with all non RMW regs: 1 leading dw + 2dw per + * reg + 1 + */ - bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count); + *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(count); xa_for_each(&sr->xa, idx, entry) { struct xe_reg reg = entry->reg; @@ -229,79 +246,68 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q) val |= entry->set_bits; - bb->cs[bb->len++] = reg.addr; - bb->cs[bb->len++] = val; + *cs++ = reg.addr; + *cs++ = val; xe_gt_dbg(gt, "REG[0x%x] = 0x%08x", reg.addr, val); } } if (count_rmw) { - /* emit MI_MATH for each RMW reg */ + /* Emit MI_MATH for each RMW reg: 20dw per reg + 7 trailing dw */ xa_for_each(&sr->xa, idx, entry) { if (entry->reg.masked || entry->clr_bits == ~0) continue; - bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO; - bb->cs[bb->len++] = entry->reg.addr; - bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr; + *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_DST_CS_MMIO; + *cs++ = entry->reg.addr; + *cs++ = CS_GPR_REG(0, 0).addr; - bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) | - MI_LRI_LRM_CS_MMIO; - bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr; - bb->cs[bb->len++] = entry->clr_bits; - bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr; - bb->cs[bb->len++] = entry->set_bits; + *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(2) | + MI_LRI_LRM_CS_MMIO; + *cs++ = CS_GPR_REG(0, 1).addr; + *cs++ = entry->clr_bits; + *cs++ = CS_GPR_REG(0, 2).addr; + *cs++ = entry->set_bits; - bb->cs[bb->len++] = MI_MATH(8); - bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0); - 
bb->cs[bb->len++] = CS_ALU_INSTR_LOADINV(SRCB, REG1); - bb->cs[bb->len++] = CS_ALU_INSTR_AND; - bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU); - bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCA, REG0); - bb->cs[bb->len++] = CS_ALU_INSTR_LOAD(SRCB, REG2); - bb->cs[bb->len++] = CS_ALU_INSTR_OR; - bb->cs[bb->len++] = CS_ALU_INSTR_STORE(REG0, ACCU); + *cs++ = MI_MATH(8); + *cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0); + *cs++ = CS_ALU_INSTR_LOADINV(SRCB, REG1); + *cs++ = CS_ALU_INSTR_AND; + *cs++ = CS_ALU_INSTR_STORE(REG0, ACCU); + *cs++ = CS_ALU_INSTR_LOAD(SRCA, REG0); + *cs++ = CS_ALU_INSTR_LOAD(SRCB, REG2); + *cs++ = CS_ALU_INSTR_OR; + *cs++ = CS_ALU_INSTR_STORE(REG0, ACCU); - bb->cs[bb->len++] = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO; - bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr; - bb->cs[bb->len++] = entry->reg.addr; + *cs++ = MI_LOAD_REGISTER_REG | MI_LRR_SRC_CS_MMIO; + *cs++ = CS_GPR_REG(0, 0).addr; + *cs++ = entry->reg.addr; xe_gt_dbg(gt, "REG[%#x] = ~%#x|%#x\n", entry->reg.addr, entry->clr_bits, entry->set_bits); } /* reset used GPR */ - bb->cs[bb->len++] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) | MI_LRI_LRM_CS_MMIO; - bb->cs[bb->len++] = CS_GPR_REG(0, 0).addr; - bb->cs[bb->len++] = 0; - bb->cs[bb->len++] = CS_GPR_REG(0, 1).addr; - bb->cs[bb->len++] = 0; - bb->cs[bb->len++] = CS_GPR_REG(0, 2).addr; - bb->cs[bb->len++] = 0; + *cs++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(3) | + MI_LRI_LRM_CS_MMIO; + *cs++ = CS_GPR_REG(0, 0).addr; + *cs++ = 0; + *cs++ = CS_GPR_REG(0, 1).addr; + *cs++ = 0; + *cs++ = CS_GPR_REG(0, 2).addr; + *cs++ = 0; } - xe_lrc_emit_hwe_state_instructions(q, bb); + cs = xe_lrc_emit_hwe_state_instructions(q, cs); - job = xe_bb_create_job(q, bb); - if (IS_ERR(job)) { - xe_bb_free(bb, NULL); - return PTR_ERR(job); - } + bb->len = cs - bb->cs; - xe_sched_job_arm(job); - fence = dma_fence_get(&job->drm.s_fence->finished); - xe_sched_job_push(job); + ret = emit_job_sync(q, bb, HZ); - timeout = dma_fence_wait_timeout(fence, false, HZ); - dma_fence_put(fence); xe_bb_free(bb, NULL); - if (timeout < 0) - return timeout; - else if (!timeout) - return -ETIME; - return 0; + return ret; } int xe_gt_record_default_lrcs(struct xe_gt *gt) @@ -363,14 +369,6 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt) goto put_nop_q; } - /* Reload golden LRC to record the effect of any indirect W/A */ - err = emit_nop_job(gt, q); - if (err) { - xe_gt_err(gt, "hwe %s: emit_nop_job failed (%pe) guc_id=%u\n", - hwe->name, ERR_PTR(err), q->guc->id); - goto put_nop_q; - } - xe_map_memcpy_from(xe, default_lrc, &q->lrc[0]->bo->vmap, xe_lrc_pphwsp_offset(q->lrc[0]), @@ -390,6 +388,7 @@ put_exec_queue: int xe_gt_init_early(struct xe_gt *gt) { + unsigned int fw_ref; int err; if (IS_SRIOV_PF(gt_to_xe(gt))) { @@ -419,6 +418,25 @@ int xe_gt_init_early(struct xe_gt *gt) xe_mocs_init_early(gt); + /* + * Only after this point can GT-specific MMIO operations + * (including things like communication with the GuC) + * be performed. 
+ */ + xe_gt_mmio_init(gt); + + err = xe_uc_init_noalloc(&gt->uc); + if (err) + return err; + + fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (!fw_ref) + return -ETIMEDOUT; + + xe_gt_mcr_init_early(gt); + xe_pat_init(gt); + xe_force_wake_put(gt_to_fw(gt), fw_ref); + return 0; } @@ -433,7 +451,7 @@ static void dump_pat_on_error(struct xe_gt *gt) xe_pat_dump(gt, &p); } -static int gt_fw_domain_init(struct xe_gt *gt) +static int gt_init_with_gt_forcewake(struct xe_gt *gt) { unsigned int fw_ref; int err; @@ -442,7 +460,15 @@ static int gt_fw_domain_init(struct xe_gt *gt) if (!fw_ref) return -ETIMEDOUT; - if (!xe_gt_is_media_type(gt)) { + err = xe_uc_init(&gt->uc); + if (err) + goto err_force_wake; + + xe_gt_topology_init(gt); + xe_gt_mcr_init(gt); + xe_gt_enable_host_l2_vram(gt); + + if (xe_gt_is_main_type(gt)) { err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt); if (err) goto err_force_wake; @@ -457,8 +483,10 @@ static int gt_fw_domain_init(struct xe_gt *gt) xe_gt_mcr_init(gt); err = xe_hw_engines_init_early(gt); - if (err) + if (err) { + dump_pat_on_error(gt); goto err_force_wake; + } err = xe_hw_engine_class_sysfs_init(gt); if (err) @@ -479,13 +507,12 @@ static int gt_fw_domain_init(struct xe_gt *gt) return 0; err_force_wake: - dump_pat_on_error(gt); xe_force_wake_put(gt_to_fw(gt), fw_ref); return err; } -static int all_fw_domain_init(struct xe_gt *gt) +static int gt_init_with_all_forcewake(struct xe_gt *gt) { unsigned int fw_ref; int err; @@ -518,7 +545,7 @@ static int all_fw_domain_init(struct xe_gt *gt) if (err) goto err_force_wake; - if (!xe_gt_is_media_type(gt)) { + if (xe_gt_is_main_type(gt)) { /* * USM has its only SA pool to non-block behind user operations */ @@ -534,7 +561,7 @@ static int all_fw_domain_init(struct xe_gt *gt) } } - if (!xe_gt_is_media_type(gt)) { + if (xe_gt_is_main_type(gt)) { struct xe_tile *tile = gt_to_tile(gt); tile->migrate = xe_migrate_init(tile); @@ -544,7 +571,7 @@ static int all_fw_domain_init(struct xe_gt *gt) } } - err = xe_uc_init_hw(&gt->uc); + err = xe_uc_load_hw(&gt->uc); if (err) goto err_force_wake; @@ -554,7 +581,7 @@ static int all_fw_domain_init(struct xe_gt *gt) xe_gt_apply_ccs_mode(gt); } - if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt)) + if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt)) xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt); if (IS_SRIOV_PF(gt_to_xe(gt))) { @@ -572,39 +599,6 @@ err_force_wake: return err; } -/* - * Initialize enough GT to be able to load GuC in order to obtain hwconfig and - * enable CTB communication. - */ -int xe_gt_init_hwconfig(struct xe_gt *gt) -{ - unsigned int fw_ref; - int err; - - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (!fw_ref) - return -ETIMEDOUT; - - xe_gt_mcr_init_early(gt); - xe_pat_init(gt); - - err = xe_uc_init(&gt->uc); - if (err) - goto out_fw; - - err = xe_uc_init_hwconfig(&gt->uc); - if (err) - goto out_fw; - - xe_gt_topology_init(gt); - xe_gt_mcr_init(gt); - xe_gt_enable_host_l2_vram(gt); - -out_fw: - xe_force_wake_put(gt_to_fw(gt), fw_ref); - return err; -} -
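The hunks above fold the old hwconfig-time setup into xe_gt_init_early() and rename gt_fw_domain_init()/all_fw_domain_init() after the forcewake domain they run under. The recurring idiom is the forcewake bracket: take a reference, treat a zero cookie as a power-up timeout, and hand the reference back on every exit path. A minimal sketch of that idiom, with a hypothetical function name:

    /* Hedged sketch of the forcewake bracketing used throughout xe_gt.c. */
    static int example_with_gt_forcewake(struct xe_gt *gt)
    {
            unsigned int fw_ref;

            fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
            if (!fw_ref)
                    return -ETIMEDOUT;      /* domain never powered up */

            /* ... MMIO or GuC work that needs the GT domain awake ... */

            xe_force_wake_put(gt_to_fw(gt), fw_ref);
            return 0;
    }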
static void xe_gt_fini(void *arg) { struct xe_gt *gt = arg; @@ -632,15 +626,15 @@ int xe_gt_init(struct xe_gt *gt) if (err) return err; - err = xe_gt_pagefault_init(gt); + err = xe_gt_sysfs_init(gt); if (err) return err; - err = xe_gt_sysfs_init(gt); + err = gt_init_with_gt_forcewake(gt); if (err) return err; - err = gt_fw_domain_init(gt); + err = xe_gt_pagefault_init(gt); if (err) return err; @@ -654,7 +648,7 @@ int xe_gt_init(struct xe_gt *gt) xe_force_wake_init_engines(gt, gt_to_fw(gt)); - err = all_fw_domain_init(gt); + err = gt_init_with_all_forcewake(gt); if (err) return err; @@ -742,7 +736,7 @@ static int vf_gt_restart(struct xe_gt *gt) if (err) return err; - err = xe_uc_init_hw(&gt->uc); + err = xe_uc_load_hw(&gt->uc); if (err) return err; @@ -780,11 +774,11 @@ static int do_gt_restart(struct xe_gt *gt) if (err) return err; - err = xe_uc_init_hw(&gt->uc); + err = xe_uc_load_hw(&gt->uc); if (err) return err; - if (IS_SRIOV_PF(gt_to_xe(gt)) && !xe_gt_is_media_type(gt)) + if (IS_SRIOV_PF(gt_to_xe(gt)) && xe_gt_is_main_type(gt)) xe_lmtt_init_hw(&gt_to_tile(gt)->sriov.pf.lmtt); if (IS_SRIOV_PF(gt_to_xe(gt))) @@ -839,6 +833,9 @@ static int gt_reset(struct xe_gt *gt) goto err_out; } + if (IS_SRIOV_PF(gt_to_xe(gt))) + xe_gt_sriov_pf_stop_prepare(gt); + xe_uc_gucrc_disable(&gt->uc); xe_uc_stop_prepare(&gt->uc); xe_gt_pagefault_reset(gt); diff --git a/drivers/gpu/drm/xe/xe_gt.h b/drivers/gpu/drm/xe/xe_gt.h index 187fa6490eaf..41880979f4de 100644 --- a/drivers/gpu/drm/xe/xe_gt.h +++ b/drivers/gpu/drm/xe/xe_gt.h @@ -24,11 +24,10 @@ extern struct fault_attr gt_reset_failure; static inline bool xe_fault_inject_gt_reset(void) { - return should_fail(&gt_reset_failure, 1); + return IS_ENABLED(CONFIG_DEBUG_FS) && should_fail(&gt_reset_failure, 1); } struct xe_gt *xe_gt_alloc(struct xe_tile *tile); -int xe_gt_init_hwconfig(struct xe_gt *gt); int xe_gt_init_early(struct xe_gt *gt); int xe_gt_init(struct xe_gt *gt); void xe_gt_mmio_init(struct xe_gt *gt); @@ -107,6 +106,11 @@ static inline bool xe_gt_has_indirect_ring_state(struct xe_gt *gt) xe_device_uc_enabled(gt_to_xe(gt)); } +static inline bool xe_gt_is_main_type(struct xe_gt *gt) +{ + return gt->info.type == XE_GT_TYPE_MAIN; +} + static inline bool xe_gt_is_media_type(struct xe_gt *gt) { return gt->info.type == XE_GT_TYPE_MEDIA; }
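The xe_gt.h hunk above also hardens the fault-injection hook: the knobs of a struct fault_attr are only reachable through debugfs, so gating on IS_ENABLED(CONFIG_DEBUG_FS), which folds to a compile-time constant, lets the compiler drop the should_fail() call entirely on kernels without debugfs. A hedged sketch of the same pattern with a hypothetical attribute:

    #include <linux/fault-inject.h>

    DECLARE_FAULT_ATTR(example_failure);    /* hypothetical fault_attr */

    static inline bool example_should_fail(void)
    {
            /* constant-folds to false when CONFIG_DEBUG_FS=n */
            return IS_ENABLED(CONFIG_DEBUG_FS) &&
                   should_fail(&example_failure, 1);
    }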
diff --git a/drivers/gpu/drm/xe/xe_gt_idle.c b/drivers/gpu/drm/xe/xe_gt_idle.c index c11206410a4d..ffb210216aa9 100644 --- a/drivers/gpu/drm/xe/xe_gt_idle.c +++ b/drivers/gpu/drm/xe/xe_gt_idle.c @@ -121,7 +121,7 @@ void xe_gt_idle_enable_pg(struct xe_gt *gt) if (vcs_mask || vecs_mask) gtidle->powergate_enable = MEDIA_POWERGATE_ENABLE; - if (!xe_gt_is_media_type(gt)) + if (xe_gt_is_main_type(gt)) gtidle->powergate_enable |= RENDER_POWERGATE_ENABLE; if (xe->info.platform != XE_DG1) { diff --git a/drivers/gpu/drm/xe/xe_gt_mcr.c b/drivers/gpu/drm/xe/xe_gt_mcr.c index d4d9730f0d2c..64a2f0d6aaf9 100644 --- a/drivers/gpu/drm/xe/xe_gt_mcr.c +++ b/drivers/gpu/drm/xe/xe_gt_mcr.c @@ -420,12 +420,6 @@ static void init_steering_sqidi_psmi(struct xe_gt *gt) gt->steering[SQIDI_PSMI].instance_target = select & 0x1; } -static void init_steering_inst0(struct xe_gt *gt) -{ - gt->steering[INSTANCE0].group_target = 0; /* unused */ - gt->steering[INSTANCE0].instance_target = 0; /* unused */ -} - static const struct { const char *name; void (*init)(struct xe_gt *gt); } @@ -436,7 +430,7 @@ static const struct { [DSS] = { "DSS", init_steering_dss }, [OADDRM] = { "OADDRM / GPMXMT", init_steering_oaddrm }, [SQIDI_PSMI] = { "SQIDI_PSMI", init_steering_sqidi_psmi }, - [INSTANCE0] = { "INSTANCE 0", init_steering_inst0 }, + [INSTANCE0] = { "INSTANCE 0", NULL }, [IMPLICIT_STEERING] = { "IMPLICIT", NULL }, }; @@ -446,25 +440,17 @@ static const struct { * * Perform early software only initialization of the MCR lock to allow * the synchronization on accessing the STEER_SEMAPHORE register and - * use the xe_gt_mcr_multicast_write() function. + * use the xe_gt_mcr_multicast_write() function, plus the minimum + * safe MCR registers required for VRAM/CCS probing. */ void xe_gt_mcr_init_early(struct xe_gt *gt) { + struct xe_device *xe = gt_to_xe(gt); + BUILD_BUG_ON(IMPLICIT_STEERING + 1 != NUM_STEERING_TYPES); BUILD_BUG_ON(ARRAY_SIZE(xe_steering_types) != NUM_STEERING_TYPES); spin_lock_init(&gt->mcr_lock); -} - -/** - * xe_gt_mcr_init - Normal initialization of the MCR support - * @gt: GT structure - * - * Perform normal initialization of the MCR for all usages. - */ -void xe_gt_mcr_init(struct xe_gt *gt) -{ - struct xe_device *xe = gt_to_xe(gt); if (IS_SRIOV_VF(xe)) return; @@ -505,10 +491,27 @@ void xe_gt_mcr_init(struct xe_gt *gt) } } + /* Mark instance 0 as initialized, we need this early for VRAM and CCS probe. */ + gt->steering[INSTANCE0].initialized = true; +} + +/** + * xe_gt_mcr_init - Normal initialization of the MCR support + * @gt: GT structure + * + * Perform normal initialization of the MCR for all usages. + */ +void xe_gt_mcr_init(struct xe_gt *gt) +{ + if (IS_SRIOV_VF(gt_to_xe(gt))) + return; + /* Select non-terminated steering target for each type */ - for (int i = 0; i < NUM_STEERING_TYPES; i++) + for (int i = 0; i < NUM_STEERING_TYPES; i++) { + gt->steering[i].initialized = true; if (gt->steering[i].ranges && xe_steering_types[i].init) xe_steering_types[i].init(gt); + } }
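xe_gt_mcr_init_early() is now allowed to run before full MCR setup: it initializes only the MCR lock and the INSTANCE0 steering entry needed for VRAM/CCS probing, and records readiness in the new per-type initialized flag that xe_gt_mcr_init() later sets for everything else. A reduced sketch of the guard idea, with hypothetical names (the real check is the drm_WARN() added in the next hunk):

    #include <drm/drm_print.h>

    struct example_steering {
            bool initialized;       /* set by early or normal init */
            u16 group_target;
            u16 instance_target;
    };

    static void example_steer(struct drm_device *drm, struct example_steering *s)
    {
            /* catch MCR lookups that run before their init phase */
            drm_WARN(drm, !s->initialized, "Uninitialized steering entry\n");
            /* ... steer the access via s->group_target / s->instance_target ... */
    }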
 /** @@ -570,6 +573,10 @@ bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt, for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) { if (xe_mmio_in_range(&gt->mmio, &gt->steering[type].ranges[i], reg)) { + drm_WARN(&gt_to_xe(gt)->drm, !gt->steering[type].initialized, + "Uninitialized usage of MCR register %s/%#x\n", + xe_steering_types[type].name, reg.addr); + + *group = gt->steering[type].group_target; *instance = gt->steering[type].instance_target; return true; diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index 3522865c67c9..5a75d56d8558 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -419,6 +419,7 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue) #define PF_MULTIPLIER 8 pf_queue->num_dw = (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER; + pf_queue->num_dw = roundup_pow_of_two(pf_queue->num_dw); #undef PF_MULTIPLIER pf_queue->gt = gt; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c index c08efca6420e..bdbd15f3afe3 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c @@ -16,6 +16,7 @@ #include "xe_gt_sriov_pf_migration.h" #include "xe_gt_sriov_pf_service.h" #include "xe_gt_sriov_printk.h" +#include "xe_guc_submit.h" #include "xe_mmio.h" #include "xe_pm.h" @@ -47,9 +48,16 @@ static int pf_alloc_metadata(struct xe_gt *gt) static void pf_init_workers(struct xe_gt *gt) { + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func); } +static void pf_fini_workers(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + disable_work_sync(&gt->sriov.pf.workers.restart); +} + /** * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF. * @gt: the &xe_gt to initialize @@ -79,6 +87,21 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt) return 0; } +static void pf_fini_action(void *arg) +{ + struct xe_gt *gt = arg; + + pf_fini_workers(gt); +} + +static int pf_init_late(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + + xe_gt_assert(gt, IS_SRIOV_PF(xe)); + return devm_add_action_or_reset(xe->drm.dev, pf_fini_action, gt); +} + /** * xe_gt_sriov_pf_init - Prepare SR-IOV PF data structures on PF. * @gt: the &xe_gt to initialize @@ -95,7 +118,15 @@ int xe_gt_sriov_pf_init(struct xe_gt *gt) if (err) return err; - return xe_gt_sriov_pf_migration_init(gt); + err = xe_gt_sriov_pf_migration_init(gt); + if (err) + return err; + + err = pf_init_late(gt); + if (err) + return err; + + return 0; } static bool pf_needs_enable_ggtt_guest_update(struct xe_device *xe) @@ -172,6 +203,25 @@ void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid) pf_clear_vf_scratch_regs(gt, vfid); } +static void pf_cancel_restart(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + + if (cancel_work_sync(&gt->sriov.pf.workers.restart)) + xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n"); +} + +/** + * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support. + * @gt: the &xe_gt + * + * This function can only be called on the PF. + */ +void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt) +{ + pf_cancel_restart(gt); +} +
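The PF restart worker now has two distinct teardown paths: cancel_work_sync() in pf_cancel_restart() merely drops a pending execution (wired into gt_reset() via xe_gt_sriov_pf_stop_prepare() above), while disable_work_sync() in pf_fini_workers() also rejects future queueing and runs once at driver teardown through the devm action installed by pf_init_late(). A hedged sketch of that split, using hypothetical names:

    #include <linux/device.h>
    #include <linux/workqueue.h>

    static void example_fini_action(void *arg)
    {
            struct work_struct *w = arg;

            disable_work_sync(w);   /* flush and forbid re-queueing */
    }

    static int example_init_late(struct device *dev, struct work_struct *w)
    {
            /* runs example_fini_action() automatically on driver unbind */
            return devm_add_action_or_reset(dev, example_fini_action, w);
    }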
 static void pf_restart(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); @@ -211,3 +261,27 @@ void xe_gt_sriov_pf_restart(struct xe_gt *gt) { pf_queue_restart(gt); } + +static void pf_flush_restart(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + flush_work(&gt->sriov.pf.workers.restart); +} + +/** + * xe_gt_sriov_pf_wait_ready() - Wait until per-GT PF SR-IOV support is ready. + * @gt: the &xe_gt + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt) +{ + /* don't wait if there is another ongoing reset */ + if (xe_guc_read_stopped(&gt->uc.guc)) + return -EBUSY; + + pf_flush_restart(gt); + return 0; +} diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h index f474509411c0..e7fde3f9937a 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h @@ -11,8 +11,10 @@ struct xe_gt; #ifdef CONFIG_PCI_IOV int xe_gt_sriov_pf_init_early(struct xe_gt *gt); int xe_gt_sriov_pf_init(struct xe_gt *gt); +int xe_gt_sriov_pf_wait_ready(struct xe_gt *gt); void xe_gt_sriov_pf_init_hw(struct xe_gt *gt); void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid); +void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt); void xe_gt_sriov_pf_restart(struct xe_gt *gt); #else static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt) @@ -29,6 +31,10 @@ static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt) { } +static inline void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt) +{ +} + static inline void xe_gt_sriov_pf_restart(struct xe_gt *gt) { } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c index 3556c41c041b..494909f74eb2 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c @@ -104,13 +104,13 @@ static int pf_push_vf_buf_klvs(struct xe_gt *gt, unsigned int vfid, u32 num_klvs } if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { - struct drm_printer p = xe_gt_info_printer(gt); + struct drm_printer p = xe_gt_dbg_printer(gt); void *klvs = xe_guc_buf_cpu_ptr(buf); char name[8]; - xe_gt_sriov_info(gt, "pushed %s config with %u KLV%s:\n", - xe_sriov_function_name(vfid, name, sizeof(name)), - num_klvs, str_plural(num_klvs)); + xe_gt_sriov_dbg(gt, "pushed %s config with %u KLV%s:\n", - xe_sriov_function_name(vfid, name, sizeof(name)), - num_klvs, str_plural(num_klvs)); + xe_sriov_function_name(vfid, name, sizeof(name)), + num_klvs, str_plural(num_klvs)); xe_guc_klv_print(klvs, num_dwords, &p); } @@ -238,26 +238,35 @@ static struct xe_gt_sriov_config *pf_pick_vf_config(struct xe_gt *gt, unsigned i } /* Return: number of configuration dwords written */ -static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details) +static u32 encode_ggtt(u32 *cfg, u64 start, u64 size, bool details) { u32 n = 0; - if (xe_ggtt_node_allocated(config->ggtt_region)) { - if (details) { - cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START); - cfg[n++] = lower_32_bits(config->ggtt_region->base.start); - cfg[n++] = upper_32_bits(config->ggtt_region->base.start); - } - - cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE); - cfg[n++] = lower_32_bits(config->ggtt_region->base.size); - cfg[n++] = upper_32_bits(config->ggtt_region->base.size); + if (details) { + cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_GGTT_START); + cfg[n++] = lower_32_bits(start); + cfg[n++] = upper_32_bits(start); } + cfg[n++] = 
PREP_GUC_KLV_TAG(VF_CFG_GGTT_SIZE); + cfg[n++] = lower_32_bits(size); + cfg[n++] = upper_32_bits(size); + return n; } /* Return: number of configuration dwords written */ +static u32 encode_config_ggtt(u32 *cfg, const struct xe_gt_sriov_config *config, bool details) +{ + struct xe_ggtt_node *node = config->ggtt_region; + + if (!xe_ggtt_node_allocated(node)) + return 0; + + return encode_ggtt(cfg, node->base.start, node->base.size, details); +} + +/* Return: number of configuration dwords written */ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool details) { u32 n = 0; @@ -282,8 +291,8 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config, bool if (config->lmem_obj) { cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_LMEM_SIZE); - cfg[n++] = lower_32_bits(config->lmem_obj->size); - cfg[n++] = upper_32_bits(config->lmem_obj->size); + cfg[n++] = lower_32_bits(xe_bo_size(config->lmem_obj)); + cfg[n++] = upper_32_bits(xe_bo_size(config->lmem_obj)); } cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_EXEC_QUANTUM); @@ -332,6 +341,17 @@ static int pf_push_full_vf_config(struct xe_gt *gt, unsigned int vfid) } xe_gt_assert(gt, num_dwords <= max_cfg_dwords); + if (vfid == PFID) { + u64 ggtt_start = xe_wopcm_size(gt_to_xe(gt)); + u64 ggtt_size = gt_to_tile(gt)->mem.ggtt->size - ggtt_start; + + /* plain PF config data will never include a real GGTT region */ + xe_gt_assert(gt, !encode_config_ggtt(cfg + num_dwords, config, true)); + + /* fake PF GGTT config covers full GGTT range except reserved WOPCM */ + num_dwords += encode_ggtt(cfg + num_dwords, ggtt_start, ggtt_size, true); + } + num_klvs = xe_guc_klv_count(cfg, num_dwords); err = pf_push_vf_buf_klvs(gt, vfid, num_klvs, buf, num_dwords); @@ -376,7 +396,7 @@ static u64 pf_get_spare_ggtt(struct xe_gt *gt) { u64 spare; - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); + xe_gt_assert(gt, xe_gt_is_main_type(gt)); xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); @@ -388,7 +408,7 @@ static u64 pf_get_spare_ggtt(struct xe_gt *gt) static int pf_set_spare_ggtt(struct xe_gt *gt, u64 size) { - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); + xe_gt_assert(gt, xe_gt_is_main_type(gt)); xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); @@ -443,7 +463,7 @@ static int pf_provision_vf_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size) int err; xe_gt_assert(gt, vfid); - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); + xe_gt_assert(gt, xe_gt_is_main_type(gt)); xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); size = round_up(size, alignment); @@ -492,7 +512,7 @@ static u64 pf_get_vf_config_ggtt(struct xe_gt *gt, unsigned int vfid) struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); struct xe_ggtt_node *node = config->ggtt_region; - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); + xe_gt_assert(gt, xe_gt_is_main_type(gt)); return xe_ggtt_node_allocated(node) ? 
node->base.size : 0; } @@ -560,7 +580,7 @@ int xe_gt_sriov_pf_config_set_ggtt(struct xe_gt *gt, unsigned int vfid, u64 size { int err; - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); + xe_gt_assert(gt, xe_gt_is_main_type(gt)); mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); if (vfid) @@ -622,7 +642,7 @@ int xe_gt_sriov_pf_config_bulk_set_ggtt(struct xe_gt *gt, unsigned int vfid, int err = 0; xe_gt_assert(gt, vfid); - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); + xe_gt_assert(gt, xe_gt_is_main_type(gt)); if (!num_vfs) return 0; @@ -693,7 +713,7 @@ int xe_gt_sriov_pf_config_set_fair_ggtt(struct xe_gt *gt, unsigned int vfid, xe_gt_assert(gt, vfid); xe_gt_assert(gt, num_vfs); - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); + xe_gt_assert(gt, xe_gt_is_main_type(gt)); mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); fair = pf_estimate_fair_ggtt(gt, num_vfs); @@ -1299,7 +1319,7 @@ static u64 pf_get_vf_config_lmem(struct xe_gt *gt, unsigned int vfid) struct xe_bo *bo; bo = config->lmem_obj; - return bo ? bo->size : 0; + return bo ? xe_bo_size(bo) : 0; } static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) @@ -1327,7 +1347,17 @@ static int pf_distribute_config_lmem(struct xe_gt *gt, unsigned int vfid, u64 si static void pf_force_lmtt_invalidate(struct xe_device *xe) { - /* TODO */ + struct xe_lmtt *lmtt; + struct xe_tile *tile; + unsigned int tid; + + xe_assert(xe, xe_device_has_lmtt(xe)); + xe_assert(xe, IS_SRIOV_PF(xe)); + + for_each_tile(tile, xe, tid) { + lmtt = &tile->sriov.pf.lmtt; + xe_lmtt_invalidate_hw(lmtt); + } } static void pf_reset_vf_lmtt(struct xe_device *xe, unsigned int vfid) @@ -1388,7 +1418,7 @@ static int pf_update_vf_lmtt(struct xe_device *xe, unsigned int vfid) err = xe_lmtt_populate_pages(lmtt, vfid, bo, offset); if (err) goto fail; - offset += bo->size; + offset += xe_bo_size(bo); } } @@ -1406,7 +1436,7 @@ fail: static void pf_release_vf_config_lmem(struct xe_gt *gt, struct xe_gt_sriov_config *config) { xe_gt_assert(gt, IS_DGFX(gt_to_xe(gt))); - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); + xe_gt_assert(gt, xe_gt_is_main_type(gt)); lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt)); if (config->lmem_obj) { @@ -1425,7 +1455,7 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) xe_gt_assert(gt, vfid); xe_gt_assert(gt, IS_DGFX(xe)); - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); + xe_gt_assert(gt, xe_gt_is_main_type(gt)); size = round_up(size, pf_get_lmem_alignment(gt)); @@ -1469,12 +1499,12 @@ static int pf_provision_vf_lmem(struct xe_gt *gt, unsigned int vfid, u64 size) goto release; } - err = pf_push_vf_cfg_lmem(gt, vfid, bo->size); + err = pf_push_vf_cfg_lmem(gt, vfid, xe_bo_size(bo)); if (unlikely(err)) goto reset_lmtt; xe_gt_sriov_dbg_verbose(gt, "VF%u LMEM %zu (%zuM)\n", - vfid, bo->size, bo->size / SZ_1M); + vfid, xe_bo_size(bo), xe_bo_size(bo) / SZ_1M); return 0; reset_lmtt: @@ -1552,7 +1582,7 @@ int xe_gt_sriov_pf_config_bulk_set_lmem(struct xe_gt *gt, unsigned int vfid, int err = 0; xe_gt_assert(gt, vfid); - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); + xe_gt_assert(gt, xe_gt_is_main_type(gt)); if (!num_vfs) return 0; @@ -1629,7 +1659,7 @@ int xe_gt_sriov_pf_config_set_fair_lmem(struct xe_gt *gt, unsigned int vfid, xe_gt_assert(gt, vfid); xe_gt_assert(gt, num_vfs); - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); + xe_gt_assert(gt, xe_gt_is_main_type(gt)); if (!xe_device_has_lmtt(gt_to_xe(gt))) return 0; @@ -1663,7 +1693,7 @@ int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid, xe_gt_assert(gt, vfid); 
xe_gt_assert(gt, num_vfs); - if (!xe_gt_is_media_type(gt)) { + if (xe_gt_is_main_type(gt)) { err = xe_gt_sriov_pf_config_set_fair_ggtt(gt, vfid, num_vfs); result = result ?: err; err = xe_gt_sriov_pf_config_set_fair_lmem(gt, vfid, num_vfs); @@ -1991,7 +2021,7 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid) struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid); struct xe_device *xe = gt_to_xe(gt); - if (!xe_gt_is_media_type(gt)) { + if (xe_gt_is_main_type(gt)) { pf_release_vf_config_ggtt(gt, config); if (IS_DGFX(xe)) { pf_release_vf_config_lmem(gt, config); @@ -2082,7 +2112,7 @@ static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout) * Only GGTT and LMEM requires to be cleared by the PF. * GuC doorbell IDs and context IDs do not need any clearing. */ - if (!xe_gt_is_media_type(gt)) { + if (xe_gt_is_main_type(gt)) { pf_sanitize_ggtt(config->ggtt_region, vfid); if (IS_DGFX(xe)) err = pf_sanitize_lmem(tile, config->lmem_obj, timeout); @@ -2149,7 +2179,7 @@ static int pf_validate_vf_config(struct xe_gt *gt, unsigned int vfid) { struct xe_gt *primary_gt = gt_to_tile(gt)->primary_gt; struct xe_device *xe = gt_to_xe(gt); - bool is_primary = !xe_gt_is_media_type(gt); + bool is_primary = xe_gt_is_main_type(gt); bool valid_ggtt, valid_ctxs, valid_dbs; bool valid_any, valid_all; @@ -2349,7 +2379,7 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid, return -EINVAL; if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { - struct drm_printer p = xe_gt_info_printer(gt); + struct drm_printer p = xe_gt_dbg_printer(gt); drm_printf(&p, "restoring VF%u config:\n", vfid); xe_guc_klv_print(buf, size / sizeof(u32), &p); @@ -2366,6 +2396,35 @@ int xe_gt_sriov_pf_config_restore(struct xe_gt *gt, unsigned int vfid, return err; } +static void pf_prepare_self_config(struct xe_gt *gt) +{ + struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, PFID); + + /* + * We want PF to be allowed to use all of context ID, doorbells IDs + * and whole usable GGTT area. While we can store ctxs/dbs numbers + * directly in the config structure, can't do the same with the GGTT + * configuration, so let it be prepared on demand while pushing KLVs. 
+ */ + config->num_ctxs = GUC_ID_MAX; + config->num_dbs = GUC_NUM_DOORBELLS; +} + +static int pf_push_self_config(struct xe_gt *gt) +{ + int err; + + err = pf_push_full_vf_config(gt, PFID); + if (err) { + xe_gt_sriov_err(gt, "Failed to push self configuration (%pe)\n", + ERR_PTR(err)); + return err; + } + + xe_gt_sriov_dbg_verbose(gt, "self configuration completed\n"); + return 0; +} + static void fini_config(void *arg) { struct xe_gt *gt = arg; @@ -2389,9 +2448,18 @@ static void fini_config(void *arg) int xe_gt_sriov_pf_config_init(struct xe_gt *gt) { struct xe_device *xe = gt_to_xe(gt); + int err; xe_gt_assert(gt, IS_SRIOV_PF(xe)); + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + pf_prepare_self_config(gt); + err = pf_push_self_config(gt); + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + + if (err) + return err; + return devm_add_action_or_reset(xe->drm.dev, fini_config, gt); } @@ -2409,6 +2477,10 @@ void xe_gt_sriov_pf_config_restart(struct xe_gt *gt) unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(gt_to_xe(gt)); unsigned int fail = 0, skip = 0; + mutex_lock(xe_gt_sriov_pf_master_mutex(gt)); + pf_push_self_config(gt); + mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); + for (n = 1; n <= total_vfs; n++) { if (xe_gt_sriov_pf_config_is_empty(gt, n)) skip++; @@ -2552,10 +2624,10 @@ int xe_gt_sriov_pf_config_print_lmem(struct xe_gt *gt, struct drm_printer *p) if (!config->lmem_obj) continue; - string_get_size(config->lmem_obj->size, 1, STRING_UNITS_2, + string_get_size(xe_bo_size(config->lmem_obj), 1, STRING_UNITS_2, buf, sizeof(buf)); drm_printf(p, "VF%u:\t%zu\t(%s)\n", - n, config->lmem_obj->size, buf); + n, xe_bo_size(config->lmem_obj), buf); } mutex_unlock(xe_gt_sriov_pf_master_mutex(gt)); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index 1f50aec3a059..4f7fff892bc0 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -15,10 +15,11 @@ #include "xe_gt_sriov_pf_helpers.h" #include "xe_gt_sriov_pf_migration.h" #include "xe_gt_sriov_pf_monitor.h" -#include "xe_gt_sriov_pf_service.h" #include "xe_gt_sriov_printk.h" #include "xe_guc_ct.h" #include "xe_sriov.h" +#include "xe_sriov_pf_service.h" +#include "xe_tile.h" static const char *control_cmd_to_string(u32 cmd) { @@ -1064,7 +1065,9 @@ static bool pf_exit_vf_flr_reset_data(struct xe_gt *gt, unsigned int vfid) if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA)) return false; - xe_gt_sriov_pf_service_reset(gt, vfid); + if (xe_tile_is_root(gt->tile) && xe_gt_is_main_type(gt)) + xe_sriov_pf_service_reset_vf(gt_to_xe(gt), vfid); + xe_gt_sriov_pf_monitor_flr(gt, vfid); pf_enter_vf_flr_reset_mmio(gt, vfid); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c index 13970d5a2867..3ed245e04d0c 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_debugfs.c @@ -22,6 +22,7 @@ #include "xe_gt_sriov_pf_policy.h" #include "xe_gt_sriov_pf_service.h" #include "xe_pm.h" +#include "xe_sriov_pf.h" /* * /sys/kernel/debug/dri/0/ @@ -78,11 +79,6 @@ static const struct drm_info_list pf_info[] = { .data = xe_gt_sriov_pf_service_print_runtime, }, { - "negotiated_versions", - .show = xe_gt_debugfs_simple_show, - .data = xe_gt_sriov_pf_service_print_version, - }, - { "adverse_events", .show = xe_gt_debugfs_simple_show, .data = xe_gt_sriov_pf_monitor_print_events, @@ -210,7 +206,8 @@ static int CONFIG##_set(void *data, u64 val) \ return 
-EOVERFLOW; \ \ xe_pm_runtime_get(xe); \ - err = xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \ + err = xe_sriov_pf_wait_ready(xe) ?: \ + xe_gt_sriov_pf_config_set_##CONFIG(gt, vfid, val); \ xe_pm_runtime_put(xe); \ \ return err; \ @@ -305,7 +302,7 @@ static void pf_add_config_attrs(struct xe_gt *gt, struct dentry *parent, unsigne xe_gt_assert(gt, gt == extract_gt(parent)); xe_gt_assert(gt, vfid == extract_vfid(parent)); - if (!xe_gt_is_media_type(gt)) { + if (xe_gt_is_main_type(gt)) { debugfs_create_file_unsafe(vfid ? "ggtt_quota" : "ggtt_spare", 0644, parent, parent, &ggtt_fops); if (xe_device_has_lmtt(gt_to_xe(gt))) @@ -554,7 +551,7 @@ void xe_gt_sriov_pf_debugfs_register(struct xe_gt *gt, struct dentry *root) pfdentry->d_inode->i_private = gt; drm_debugfs_create_files(pf_info, ARRAY_SIZE(pf_info), pfdentry, minor); - if (!xe_gt_is_media_type(gt)) { + if (xe_gt_is_main_type(gt)) { drm_debugfs_create_files(pf_ggtt_info, ARRAY_SIZE(pf_ggtt_info), pfdentry, minor); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c index 821cfcc34e6b..76dd9233ef9f 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.c @@ -19,91 +19,7 @@ #include "xe_gt_sriov_pf_service_types.h" #include "xe_guc_ct.h" #include "xe_guc_hxg_helpers.h" - -static void pf_init_versions(struct xe_gt *gt) -{ - BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR); - BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR); - - /* base versions may differ between platforms */ - gt->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR; - gt->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR; - - /* latest version is same for all platforms */ - gt->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR; - gt->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR; -} - -/* Return: 0 on success or a negative error code on failure. 
*/ -static int pf_negotiate_version(struct xe_gt *gt, - u32 wanted_major, u32 wanted_minor, - u32 *major, u32 *minor) -{ - struct xe_gt_sriov_pf_service_version base = gt->sriov.pf.service.version.base; - struct xe_gt_sriov_pf_service_version latest = gt->sriov.pf.service.version.latest; - - xe_gt_assert(gt, base.major); - xe_gt_assert(gt, base.major <= latest.major); - xe_gt_assert(gt, (base.major < latest.major) || (base.minor <= latest.minor)); - - /* VF doesn't care - return our latest */ - if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY && - wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) { - *major = latest.major; - *minor = latest.minor; - return 0; - } - - /* VF wants newer than our - return our latest */ - if (wanted_major > latest.major) { - *major = latest.major; - *minor = latest.minor; - return 0; - } - - /* VF wants older than min required - reject */ - if (wanted_major < base.major || - (wanted_major == base.major && wanted_minor < base.minor)) { - return -EPERM; - } - - /* previous major - return wanted, as we should still support it */ - if (wanted_major < latest.major) { - /* XXX: we are not prepared for multi-versions yet */ - xe_gt_assert(gt, base.major == latest.major); - return -ENOPKG; - } - - /* same major - return common minor */ - *major = wanted_major; - *minor = min_t(u32, latest.minor, wanted_minor); - return 0; -} - -static void pf_connect(struct xe_gt *gt, u32 vfid, u32 major, u32 minor) -{ - xe_gt_sriov_pf_assert_vfid(gt, vfid); - xe_gt_assert(gt, major || minor); - - gt->sriov.pf.vfs[vfid].version.major = major; - gt->sriov.pf.vfs[vfid].version.minor = minor; -} - -static void pf_disconnect(struct xe_gt *gt, u32 vfid) -{ - xe_gt_sriov_pf_assert_vfid(gt, vfid); - - gt->sriov.pf.vfs[vfid].version.major = 0; - gt->sriov.pf.vfs[vfid].version.minor = 0; -} - -static bool pf_is_negotiated(struct xe_gt *gt, u32 vfid, u32 major, u32 minor) -{ - xe_gt_sriov_pf_assert_vfid(gt, vfid); - - return major == gt->sriov.pf.vfs[vfid].version.major && - minor <= gt->sriov.pf.vfs[vfid].version.minor; -} +#include "xe_sriov_pf_service.h" static const struct xe_reg tgl_runtime_regs[] = { RPM_CONFIG0, /* _MMIO(0x0d00) */ @@ -266,7 +182,7 @@ static void pf_prepare_runtime_info(struct xe_gt *gt) read_many(gt, size, regs, values); if (IS_ENABLED(CONFIG_DRM_XE_DEBUG_SRIOV)) { - struct drm_printer p = xe_gt_info_printer(gt); + struct drm_printer p = xe_gt_dbg_printer(gt); xe_gt_sriov_pf_service_print_runtime(gt, &p); } @@ -285,8 +201,6 @@ int xe_gt_sriov_pf_service_init(struct xe_gt *gt) { int err; - pf_init_versions(gt); - err = pf_alloc_runtime_info(gt); if (unlikely(err)) goto failed; @@ -311,47 +225,6 @@ void xe_gt_sriov_pf_service_update(struct xe_gt *gt) pf_prepare_runtime_info(gt); } -/** - * xe_gt_sriov_pf_service_reset - Reset a connection with the VF. - * @gt: the &xe_gt - * @vfid: the VF identifier - * - * Reset a VF driver negotiated VF/PF ABI version. - * After that point, the VF driver will have to perform new version handshake - * to continue use of the PF services again. - * - * This function can only be called on PF. - */ -void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid) -{ - pf_disconnect(gt, vfid); -} - -/* Return: 0 on success or a negative error code on failure. 
*/ -static int pf_process_handshake(struct xe_gt *gt, u32 vfid, - u32 wanted_major, u32 wanted_minor, - u32 *major, u32 *minor) -{ - int err; - - xe_gt_sriov_dbg_verbose(gt, "VF%u wants ABI version %u.%u\n", - vfid, wanted_major, wanted_minor); - - err = pf_negotiate_version(gt, wanted_major, wanted_minor, major, minor); - - if (err < 0) { - xe_gt_sriov_notice(gt, "VF%u failed to negotiate ABI %u.%u (%pe)\n", - vfid, wanted_major, wanted_minor, ERR_PTR(err)); - pf_disconnect(gt, vfid); - } else { - xe_gt_sriov_dbg(gt, "VF%u negotiated ABI version %u.%u\n", - vfid, *major, *minor); - pf_connect(gt, vfid, *major, *minor); - } - - return 0; -} - /* Return: length of the response message or a negative error code on failure. */ static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin, const u32 *request, u32 len, u32 *response, u32 size) @@ -371,7 +244,8 @@ static int pf_process_handshake_msg(struct xe_gt *gt, u32 origin, wanted_major = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MAJOR, request[1]); wanted_minor = FIELD_GET(VF2PF_HANDSHAKE_REQUEST_MSG_1_MINOR, request[1]); - err = pf_process_handshake(gt, origin, wanted_major, wanted_minor, &major, &minor); + err = xe_sriov_pf_service_handshake_vf(gt_to_xe(gt), origin, wanted_major, wanted_minor, + &major, &minor); if (err < 0) return err; @@ -430,8 +304,10 @@ static int pf_process_runtime_query_msg(struct xe_gt *gt, u32 origin, u32 remaining = 0; int ret; - if (!pf_is_negotiated(gt, origin, 1, 0)) + /* this action is available from ABI 1.0 */ + if (!xe_sriov_pf_service_is_negotiated(gt_to_xe(gt), origin, 1, 0)) return -EACCES; + if (unlikely(msg_len > VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN)) return -EMSGSIZE; if (unlikely(msg_len < VF2PF_QUERY_RUNTIME_REQUEST_MSG_LEN)) @@ -528,33 +404,3 @@ int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p return 0; } - -/** - * xe_gt_sriov_pf_service_print_version - Print ABI versions negotiated with VFs. - * @gt: the &xe_gt - * @p: the &drm_printer - * - * This function is for PF use only. 
- */ -int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p) -{ - struct xe_device *xe = gt_to_xe(gt); - unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe); - struct xe_gt_sriov_pf_service_version *version; - - xe_gt_assert(gt, IS_SRIOV_PF(xe)); - - for (n = 1; n <= total_vfs; n++) { - version = &gt->sriov.pf.vfs[n].version; - if (!version->major && !version->minor) - continue; - - drm_printf(p, "VF%u:\t%u.%u\n", n, version->major, version->minor); - } - - return 0; -} - -#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST) -#include "tests/xe_gt_sriov_pf_service_test.c" -#endif diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h index 56aaadf0360d..10b02c9b651c 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_service.h @@ -14,9 +14,7 @@ struct xe_gt; int xe_gt_sriov_pf_service_init(struct xe_gt *gt); void xe_gt_sriov_pf_service_update(struct xe_gt *gt); -void xe_gt_sriov_pf_service_reset(struct xe_gt *gt, unsigned int vfid); -int xe_gt_sriov_pf_service_print_version(struct xe_gt *gt, struct drm_printer *p); int xe_gt_sriov_pf_service_print_runtime(struct xe_gt *gt, struct drm_printer *p); #ifdef CONFIG_PCI_IOV diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c index 9b2fc9db55b8..b282838d59e6 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf.c @@ -552,7 +552,7 @@ int xe_gt_sriov_vf_query_config(struct xe_gt *gt) if (unlikely(err)) return err; - if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) { + if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) { err = vf_get_lmem_info(gt); if (unlikely(err)) return err; @@ -649,7 +649,7 @@ s64 xe_gt_sriov_vf_ggtt_shift(struct xe_gt *gt) struct xe_gt_sriov_vf_selfconfig *config = &gt->sriov.vf.self_config; xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); - xe_gt_assert(gt, !xe_gt_is_media_type(gt)); + xe_gt_assert(gt, xe_gt_is_main_type(gt)); return config->ggtt_shift; } @@ -686,21 +686,22 @@ static int relay_action_handshake(struct xe_gt *gt, u32 *major, u32 *minor) return 0; } -static void vf_connect_pf(struct xe_gt *gt, u16 major, u16 minor) +static void vf_connect_pf(struct xe_device *xe, u16 major, u16 minor) { - xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + xe_assert(xe, IS_SRIOV_VF(xe)); - gt->sriov.vf.pf_version.major = major; - gt->sriov.vf.pf_version.minor = minor; + xe->sriov.vf.pf_version.major = major; + xe->sriov.vf.pf_version.minor = minor; } -static void vf_disconnect_pf(struct xe_gt *gt) +static void vf_disconnect_pf(struct xe_device *xe) { - vf_connect_pf(gt, 0, 0); + vf_connect_pf(xe, 0, 0); } static int vf_handshake_with_pf(struct xe_gt *gt) { + struct xe_device *xe = gt_to_xe(gt); u32 major_wanted = GUC_RELAY_VERSION_LATEST_MAJOR; u32 minor_wanted = GUC_RELAY_VERSION_LATEST_MINOR; u32 major = major_wanted, minor = minor_wanted; @@ -716,13 +717,13 @@ static int vf_handshake_with_pf(struct xe_gt *gt) } xe_gt_sriov_dbg(gt, "using VF/PF ABI %u.%u\n", major, minor); - vf_connect_pf(gt, major, minor); + vf_connect_pf(xe, major, minor); return 0; failed: xe_gt_sriov_err(gt, "Unable to confirm VF/PF ABI version %u.%u (%pe)\n", major, minor, ERR_PTR(err)); - vf_disconnect_pf(gt); + vf_disconnect_pf(xe); return err; } @@ -775,10 +776,12 @@ void xe_gt_sriov_vf_migrated_event_handler(struct xe_gt *gt) static bool vf_is_negotiated(struct xe_gt *gt, u16 major, u16 minor) { - xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); + struct xe_device *xe = gt_to_xe(gt); - return major == gt->sriov.vf.pf_version.major && - minor <= gt->sriov.vf.pf_version.minor; + xe_gt_assert(gt, IS_SRIOV_VF(xe)); + + return major == xe->sriov.vf.pf_version.major && + minor <= xe->sriov.vf.pf_version.minor; }
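Moving pf_version from the GT to the device keeps a single negotiated VF/PF ABI per PCI device, but the compatibility rule in vf_is_negotiated() is unchanged: the major version must match exactly and the negotiated minor must be at least the one required. Distilled into a hypothetical helper:

    /* Hedged sketch of the ABI check: same major, newer minor stays compatible. */
    static bool example_is_negotiated(u16 got_major, u16 got_minor,
                                      u16 want_major, u16 want_minor)
    {
            return want_major == got_major && want_minor <= got_minor;
    }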
 static int vf_prepare_runtime_info(struct xe_gt *gt, unsigned int num_regs) @@ -966,7 +969,6 @@ u32 xe_gt_sriov_vf_read32(struct xe_gt *gt, struct xe_reg reg) struct vf_runtime_reg *rr; xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); - xe_gt_assert(gt, gt->sriov.vf.pf_version.major); xe_gt_assert(gt, !reg.vf); if (reg.addr == GMD_ID.addr) { @@ -1037,7 +1039,7 @@ void xe_gt_sriov_vf_print_config(struct xe_gt *gt, struct drm_printer *p) drm_printf(p, "GGTT shift on last restore:\t%lld\n", config->ggtt_shift); - if (IS_DGFX(xe) && !xe_gt_is_media_type(gt)) { + if (IS_DGFX(xe) && xe_gt_is_main_type(gt)) { string_get_size(config->lmem_size, 1, STRING_UNITS_2, buf, sizeof(buf)); drm_printf(p, "LMEM size:\t%llu (%s)\n", config->lmem_size, buf); } @@ -1073,9 +1075,10 @@ void xe_gt_sriov_vf_print_runtime(struct xe_gt *gt, struct drm_printer *p) */ void xe_gt_sriov_vf_print_version(struct xe_gt *gt, struct drm_printer *p) { + struct xe_device *xe = gt_to_xe(gt); struct xe_uc_fw_version *guc_version = &gt->sriov.vf.guc_version; struct xe_uc_fw_version *wanted = &gt->sriov.vf.wanted_guc_version; - struct xe_gt_sriov_vf_relay_version *pf_version = &gt->sriov.vf.pf_version; + struct xe_sriov_vf_relay_version *pf_version = &xe->sriov.vf.pf_version; struct xe_uc_fw_version ver; xe_gt_assert(gt, IS_SRIOV_VF(gt_to_xe(gt))); diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h index ef041679e9d4..298dedf4b009 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_vf_types.h @@ -10,16 +10,6 @@ #include "xe_uc_fw_types.h" /** - * struct xe_gt_sriov_vf_relay_version - PF ABI version details. - */ -struct xe_gt_sriov_vf_relay_version { - /** @major: major version. */ - u16 major; - /** @minor: minor version. */ - u16 minor; -}; - -/** * struct xe_gt_sriov_vf_selfconfig - VF configuration data. */ struct xe_gt_sriov_vf_selfconfig { @@ -66,8 +56,6 @@ struct xe_gt_sriov_vf { struct xe_uc_fw_version guc_version; /** @self_config: resource configurations. */ struct xe_gt_sriov_vf_selfconfig self_config; - /** @pf_version: negotiated VF/PF ABI version. */ - struct xe_gt_sriov_vf_relay_version pf_version; /** @runtime: runtime data retrieved from the PF. */ struct xe_gt_sriov_vf_runtime runtime; }; diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c index 6088df8e159c..086c12ee3d9d 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.c @@ -330,6 +330,40 @@ int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt) return 0; } +static int send_tlb_invalidation_all(struct xe_gt *gt, + struct xe_gt_tlb_invalidation_fence *fence) +{ + u32 action[] = { + XE_GUC_ACTION_TLB_INVALIDATION_ALL, + 0, /* seqno, replaced in send_tlb_invalidation */ + MAKE_INVAL_OP(XE_GUC_TLB_INVAL_FULL), + }; + + return send_tlb_invalidation(&gt->uc.guc, fence, action, ARRAY_SIZE(action)); +} + +/** + * xe_gt_tlb_invalidation_all - Invalidate all TLBs across PF and all VFs. + * @gt: the &xe_gt structure + * @fence: the &xe_gt_tlb_invalidation_fence to be signaled on completion + * + * Send a request to invalidate all TLBs across PF and all VFs. 
+ * + * Return: 0 on success, negative error code on error + */ +int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence) +{ + int err; + + xe_gt_assert(gt, gt == fence->gt); + + err = send_tlb_invalidation_all(gt, fence); + if (err) + xe_gt_err(gt, "TLB invalidation request failed (%pe)", ERR_PTR(err)); + + return err; +} + /* * Ensure that roundup_pow_of_two(length) doesn't overflow. * Note that roundup_pow_of_two() operates on unsigned long, diff --git a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h index 31072dbcad8e..f7f0f2eaf4b5 100644 --- a/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h +++ b/drivers/gpu/drm/xe/xe_gt_tlb_invalidation.h @@ -20,6 +20,7 @@ int xe_gt_tlb_invalidation_init_early(struct xe_gt *gt); void xe_gt_tlb_invalidation_reset(struct xe_gt *gt); int xe_gt_tlb_invalidation_ggtt(struct xe_gt *gt); void xe_gt_tlb_invalidation_vm(struct xe_gt *gt, struct xe_vm *vm); +int xe_gt_tlb_invalidation_all(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence); int xe_gt_tlb_invalidation_range(struct xe_gt *gt, struct xe_gt_tlb_invalidation_fence *fence, u64 start, u64 end, u32 asid); diff --git a/drivers/gpu/drm/xe/xe_gt_topology.c b/drivers/gpu/drm/xe/xe_gt_topology.c index 305939c69747..8c63e3263643 100644 --- a/drivers/gpu/drm/xe/xe_gt_topology.c +++ b/drivers/gpu/drm/xe/xe_gt_topology.c @@ -290,11 +290,6 @@ xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum) return find_next_bit(mask, XE_MAX_DSS_FUSE_BITS, groupnum * groupsize); } -bool xe_dss_mask_empty(const xe_dss_mask_t mask) -{ - return bitmap_empty(mask, XE_MAX_DSS_FUSE_BITS); -} - /** * xe_gt_topology_has_dss_in_quadrant - check fusing of DSS in GT quadrant * @gt: GT to check diff --git a/drivers/gpu/drm/xe/xe_gt_topology.h b/drivers/gpu/drm/xe/xe_gt_topology.h index a72d26ba0653..c8140704ad4c 100644 --- a/drivers/gpu/drm/xe/xe_gt_topology.h +++ b/drivers/gpu/drm/xe/xe_gt_topology.h @@ -41,8 +41,6 @@ xe_gt_topology_mask_last_dss(const xe_dss_mask_t mask) unsigned int xe_dss_mask_group_ffs(const xe_dss_mask_t mask, int groupsize, int groupnum); -bool xe_dss_mask_empty(const xe_dss_mask_t mask); - bool xe_gt_topology_has_dss_in_quadrant(struct xe_gt *gt, int quad); diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h index 7def0959da35..96344c604726 100644 --- a/drivers/gpu/drm/xe/xe_gt_types.h +++ b/drivers/gpu/drm/xe/xe_gt_types.h @@ -377,6 +377,8 @@ struct xe_gt { u16 group_target; /** @steering.instance_target: instance to steer accesses to */ u16 instance_target; + /** @steering.initialized: Whether this steering range is initialized */ + bool initialized; } steering[NUM_STEERING_TYPES]; /** diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 209e5d53c290..b1d1d6da3758 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -29,6 +29,7 @@ #include "xe_guc_db_mgr.h" #include "xe_guc_engine_activity.h" #include "xe_guc_hwconfig.h" +#include "xe_guc_klv_helpers.h" #include "xe_guc_log.h" #include "xe_guc_pc.h" #include "xe_guc_relay.h" @@ -59,7 +60,7 @@ static u32 guc_bo_ggtt_addr(struct xe_guc *guc, /* GuC addresses above GUC_GGTT_TOP don't map through the GTT */ xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc))); xe_assert(xe, addr < GUC_GGTT_TOP); - xe_assert(xe, bo->size <= GUC_GGTT_TOP - addr); + xe_assert(xe, xe_bo_size(bo) <= GUC_GGTT_TOP - addr); return addr; } @@ -420,7 +421,7 @@ static int guc_g2g_register(struct xe_guc 
*near_guc, struct xe_gt *far_gt, u32 t buf = base + G2G_DESC_AREA_SIZE + slot * G2G_BUFFER_SIZE; xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE); - xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= g2g_bo->size); + xe_assert(xe, (buf - base + G2G_BUFFER_SIZE) <= xe_bo_size(g2g_bo)); return guc_action_register_g2g_buffer(near_guc, type, far_tile, far_dev, desc, buf, G2G_BUFFER_SIZE); @@ -570,6 +571,86 @@ err_deregister: return err; } +static int __guc_opt_in_features_enable(struct xe_guc *guc, u64 addr, u32 num_dwords) +{ + u32 action[] = { + XE_GUC_ACTION_OPT_IN_FEATURE_KLV, + lower_32_bits(addr), + upper_32_bits(addr), + num_dwords + }; + + return xe_guc_ct_send_block(&guc->ct, action, ARRAY_SIZE(action)); +} + +static bool supports_dynamic_ics(struct xe_guc *guc) +{ + struct xe_device *xe = guc_to_xe(guc); + struct xe_gt *gt = guc_to_gt(guc); + + /* Dynamic ICS is available for PVC and Xe2 and newer platforms. */ + if (xe->info.platform != XE_PVC && GRAPHICS_VER(xe) < 20) + return false; + + /* + * The feature is currently not compatible with multi-lrc, so the GuC + * does not support it at all on the media engines (which are the main + * users of mlrc). On the primary GT side, to avoid it being used in + * conjunction with mlrc, we only enable it if we are in single CCS + * mode. + */ + if (xe_gt_is_media_type(gt) || gt->ccs_mode > 1) + return false; + + /* + * Dynamic ICS requires GuC v70.40.1, which maps to compatibility + * version v1.18.4. + */ + return GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 18, 4); +} + +#define OPT_IN_MAX_DWORDS 16 +int xe_guc_opt_in_features_enable(struct xe_guc *guc) +{ + struct xe_device *xe = guc_to_xe(guc); + CLASS(xe_guc_buf, buf)(&guc->buf, OPT_IN_MAX_DWORDS); + u32 count = 0; + u32 *klvs; + int ret; + + if (!xe_guc_buf_is_valid(buf)) + return -ENOBUFS; + + klvs = xe_guc_buf_cpu_ptr(buf); + + /* + * The extra CAT error type opt-in was added in GuC v70.17.0, which maps + * to compatibility version v1.7.0. + * Note that the GuC allows enabling this KLV even on platforms that do + * not support the extra type; in such case the returned type variable + * will be set to a known invalid value which we can check against. 
+ */ + if (GUC_SUBMIT_VER(guc) >= MAKE_GUC_VER(1, 7, 0)) + klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_EXT_CAT_ERR_TYPE); + + if (supports_dynamic_ics(guc)) + klvs[count++] = PREP_GUC_KLV_TAG(OPT_IN_FEATURE_DYNAMIC_INHIBIT_CONTEXT_SWITCH); + + if (count) { + xe_assert(xe, count <= OPT_IN_MAX_DWORDS); + + ret = __guc_opt_in_features_enable(guc, xe_guc_buf_flush(buf), count); + if (ret < 0) { + xe_gt_err(guc_to_gt(guc), + "failed to enable GuC opt-in features: %pe\n", + ERR_PTR(ret)); + return ret; + } + } + + return 0; +} + static void guc_fini_hw(void *arg) { struct xe_guc *guc = arg; @@ -577,7 +658,7 @@ static void guc_fini_hw(void *arg) unsigned int fw_ref; fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - xe_uc_fini_hw(&guc_to_gt(guc)->uc); + xe_uc_sanitize_reset(&guc_to_gt(guc)->uc); xe_force_wake_put(gt_to_fw(gt), fw_ref); guc_g2g_fini(guc); @@ -627,23 +708,51 @@ static int xe_guc_realloc_post_hwconfig(struct xe_guc *guc) return 0; } -static int vf_guc_init(struct xe_guc *guc) +static int vf_guc_init_noalloc(struct xe_guc *guc) { + struct xe_gt *gt = guc_to_gt(guc); int err; - xe_guc_comm_init_early(guc); - - err = xe_guc_ct_init(&guc->ct); + err = xe_gt_sriov_vf_bootstrap(gt); if (err) return err; - err = xe_guc_relay_init(&guc->relay); + err = xe_gt_sriov_vf_query_config(gt); if (err) return err; return 0; } +int xe_guc_init_noalloc(struct xe_guc *guc) +{ + struct xe_device *xe = guc_to_xe(guc); + struct xe_gt *gt = guc_to_gt(guc); + int ret; + + xe_guc_comm_init_early(guc); + + ret = xe_guc_ct_init_noalloc(&guc->ct); + if (ret) + goto out; + + ret = xe_guc_relay_init(&guc->relay); + if (ret) + goto out; + + if (IS_SRIOV_VF(xe)) { + ret = vf_guc_init_noalloc(guc); + if (ret) + goto out; + } + + return 0; + +out: + xe_gt_err(gt, "GuC init failed with %pe\n", ERR_PTR(ret)); + return ret; +} + int xe_guc_init(struct xe_guc *guc) { struct xe_device *xe = guc_to_xe(guc); @@ -653,13 +762,13 @@ int xe_guc_init(struct xe_guc *guc) guc->fw.type = XE_UC_FW_TYPE_GUC; ret = xe_uc_fw_init(&guc->fw); if (ret) - goto out; + return ret; if (!xe_uc_fw_is_enabled(&guc->fw)) return 0; if (IS_SRIOV_VF(xe)) { - ret = vf_guc_init(guc); + ret = xe_guc_ct_init(&guc->ct); if (ret) goto out; return 0; @@ -681,10 +790,6 @@ int xe_guc_init(struct xe_guc *guc) if (ret) goto out; - ret = xe_guc_relay_init(&guc->relay); - if (ret) - goto out; - xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE); ret = devm_add_action_or_reset(xe->drm.dev, guc_fini_hw, guc); @@ -693,8 +798,6 @@ int xe_guc_init(struct xe_guc *guc) guc_init_params(guc); - xe_guc_comm_init_early(guc); - return 0; out: @@ -767,6 +870,10 @@ int xe_guc_post_load_init(struct xe_guc *guc) xe_guc_ads_populate_post_load(&guc->ads); + ret = xe_guc_opt_in_features_enable(guc); + if (ret) + return ret; + if (xe_guc_g2g_wanted(guc_to_xe(guc))) { ret = guc_g2g_start(guc); if (ret) @@ -1112,13 +1219,17 @@ static int vf_guc_min_load_for_hwconfig(struct xe_guc *guc) ret = xe_gt_sriov_vf_connect(gt); if (ret) - return ret; + goto err_out; ret = xe_gt_sriov_vf_query_runtime(gt); if (ret) - return ret; + goto err_out; return 0; + +err_out: + xe_guc_sanitize(guc); + return ret; } /** diff --git a/drivers/gpu/drm/xe/xe_guc.h b/drivers/gpu/drm/xe/xe_guc.h index 58338be44558..22cf019a11bf 100644 --- a/drivers/gpu/drm/xe/xe_guc.h +++ b/drivers/gpu/drm/xe/xe_guc.h @@ -26,6 +26,7 @@ struct drm_printer; void xe_guc_comm_init_early(struct xe_guc *guc); +int xe_guc_init_noalloc(struct xe_guc *guc); int xe_guc_init(struct xe_guc *guc); int 
xe_guc_init_post_hwconfig(struct xe_guc *guc); int xe_guc_post_load_init(struct xe_guc *guc); @@ -33,6 +34,7 @@ int xe_guc_reset(struct xe_guc *guc); int xe_guc_upload(struct xe_guc *guc); int xe_guc_min_load_for_hwconfig(struct xe_guc *guc); int xe_guc_enable_communication(struct xe_guc *guc); +int xe_guc_opt_in_features_enable(struct xe_guc *guc); int xe_guc_suspend(struct xe_guc *guc); void xe_guc_notify(struct xe_guc *guc); int xe_guc_auth_huc(struct xe_guc *guc, u32 rsa_addr); diff --git a/drivers/gpu/drm/xe/xe_guc_ads.c b/drivers/gpu/drm/xe/xe_guc_ads.c index 07a027755627..131cfc56be00 100644 --- a/drivers/gpu/drm/xe/xe_guc_ads.c +++ b/drivers/gpu/drm/xe/xe_guc_ads.c @@ -890,7 +890,7 @@ void xe_guc_ads_populate_minimal(struct xe_guc_ads *ads) xe_gt_assert(gt, ads->bo); - xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size); + xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo)); guc_policies_init(ads); guc_golden_lrc_init(ads); guc_mapping_table_init_invalid(gt, &info_map); @@ -914,7 +914,7 @@ void xe_guc_ads_populate(struct xe_guc_ads *ads) xe_gt_assert(gt, ads->bo); - xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, ads->bo->size); + xe_map_memset(ads_to_xe(ads), ads_to_map(ads), 0, 0, xe_bo_size(ads->bo)); guc_policies_init(ads); fill_engine_enable_masks(gt, &info_map); guc_mmio_reg_state_init(ads); @@ -995,16 +995,6 @@ static int guc_ads_action_update_policies(struct xe_guc_ads *ads, u32 policy_off return xe_guc_ct_send(ct, action, ARRAY_SIZE(action), 0, 0); } -static int guc_ads_update_policies(struct xe_guc_ads *ads, const struct guc_policies *policies) -{ - CLASS(xe_guc_buf_from_data, buf)(&ads_to_guc(ads)->buf, policies, sizeof(*policies)); - - if (!xe_guc_buf_is_valid(buf)) - return -ENOBUFS; - - return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf)); -} - /** * xe_guc_ads_scheduler_policy_toggle_reset - Toggle reset policy * @ads: Additional data structures object @@ -1015,13 +1005,16 @@ static int guc_ads_update_policies(struct xe_guc_ads *ads, const struct guc_poli */ int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads) { - struct xe_device *xe = ads_to_xe(ads); struct guc_policies *policies; - int ret; + struct xe_guc *guc = ads_to_guc(ads); + struct xe_device *xe = ads_to_xe(ads); + CLASS(xe_guc_buf, buf)(&guc->buf, sizeof(*policies)); - policies = kmalloc(sizeof(*policies), GFP_KERNEL); - if (!policies) - return -ENOMEM; + if (!xe_guc_buf_is_valid(buf)) + return -ENOBUFS; + + policies = xe_guc_buf_cpu_ptr(buf); + memset(policies, 0, sizeof(*policies)); policies->dpc_promote_time = ads_blob_read(ads, policies.dpc_promote_time); policies->max_num_work_items = ads_blob_read(ads, policies.max_num_work_items); @@ -1031,7 +1024,5 @@ int xe_guc_ads_scheduler_policy_toggle_reset(struct xe_guc_ads *ads) else policies->global_flags &= ~GLOBAL_POLICY_DISABLE_ENGINE_RESET; - ret = guc_ads_update_policies(ads, policies); - kfree(policies); - return ret; + return guc_ads_action_update_policies(ads, xe_guc_buf_flush(buf)); } diff --git a/drivers/gpu/drm/xe/xe_guc_capture.c b/drivers/gpu/drm/xe/xe_guc_capture.c index 859a3ba91be5..243dad3e2418 100644 --- a/drivers/gpu/drm/xe/xe_guc_capture.c +++ b/drivers/gpu/drm/xe/xe_guc_capture.c @@ -1817,6 +1817,12 @@ void xe_engine_snapshot_print(struct xe_hw_engine_snapshot *snapshot, struct drm str_yes_no(snapshot->kernel_reserved)); for (type = GUC_STATE_CAPTURE_TYPE_GLOBAL; type < GUC_STATE_CAPTURE_TYPE_MAX; type++) { + /* + * FIXME: During devcoredump print we should avoid 
accessing the + * driver pointers for gt or engine. Printing should be done only + * using the snapshot captured. Here we are accessing the gt + * pointer. It should be fixed. + */ list = xe_guc_capture_get_reg_desc_list(gt, GUC_CAPTURE_LIST_INDEX_PF, type, capture_class, false); snapshot_print_by_list_order(snapshot, p, type, list); diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 37509f619503..3f4e6a46ff16 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -35,6 +35,11 @@ #include "xe_pm.h" #include "xe_trace_guc.h" +static void receive_g2h(struct xe_guc_ct *ct); +static void g2h_worker_func(struct work_struct *w); +static void safe_mode_worker_func(struct work_struct *w); +static void ct_exit_safe_mode(struct xe_guc_ct *ct); + #if IS_ENABLED(CONFIG_DRM_XE_DEBUG) enum { /* Internal states, not error conditions */ @@ -80,6 +85,7 @@ struct g2h_fence { u16 error; u16 hint; u16 reason; + bool cancel; bool retry; bool fail; bool done; @@ -89,15 +95,18 @@ struct g2h_fence { static void g2h_fence_init(struct g2h_fence *g2h_fence, u32 *response_buffer) { + memset(g2h_fence, 0, sizeof(*g2h_fence)); g2h_fence->response_buffer = response_buffer; - g2h_fence->response_data = 0; - g2h_fence->response_len = 0; - g2h_fence->fail = false; - g2h_fence->retry = false; - g2h_fence->done = false; g2h_fence->seqno = ~0x0; } +static void g2h_fence_cancel(struct g2h_fence *g2h_fence) +{ + g2h_fence->cancel = true; + g2h_fence->fail = true; + g2h_fence->done = true; +} + static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence) { return g2h_fence->seqno == ~0x0; @@ -189,14 +198,11 @@ static void guc_ct_fini(struct drm_device *drm, void *arg) { struct xe_guc_ct *ct = arg; + ct_exit_safe_mode(ct); destroy_workqueue(ct->g2h_wq); xa_destroy(&ct->fence_lookup); } -static void receive_g2h(struct xe_guc_ct *ct); -static void g2h_worker_func(struct work_struct *w); -static void safe_mode_worker_func(struct work_struct *w); - static void primelockdep(struct xe_guc_ct *ct) { if (!IS_ENABLED(CONFIG_LOCKDEP)) @@ -207,12 +213,10 @@ static void primelockdep(struct xe_guc_ct *ct) fs_reclaim_release(GFP_KERNEL); } -int xe_guc_ct_init(struct xe_guc_ct *ct) +int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct) { struct xe_device *xe = ct_to_xe(ct); struct xe_gt *gt = ct_to_gt(ct); - struct xe_tile *tile = gt_to_tile(gt); - struct xe_bo *bo; int err; xe_gt_assert(gt, !(guc_ct_size() % PAGE_SIZE)); @@ -238,6 +242,23 @@ int xe_guc_ct_init(struct xe_guc_ct *ct) primelockdep(ct); + err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct); + if (err) + return err; + + xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED); + ct->state = XE_GUC_CT_STATE_DISABLED; + return 0; +} +ALLOW_ERROR_INJECTION(xe_guc_ct_init_noalloc, ERRNO); /* See xe_pci_probe() */ + +int xe_guc_ct_init(struct xe_guc_ct *ct) +{ + struct xe_device *xe = ct_to_xe(ct); + struct xe_gt *gt = ct_to_gt(ct); + struct xe_tile *tile = gt_to_tile(gt); + struct xe_bo *bo; + bo = xe_managed_bo_create_pin_map(xe, tile, guc_ct_size(), XE_BO_FLAG_SYSTEM | XE_BO_FLAG_GGTT | @@ -247,13 +268,6 @@ int xe_guc_ct_init(struct xe_guc_ct *ct) return PTR_ERR(bo); ct->bo = bo; - - err = drmm_add_action_or_reset(&xe->drm, guc_ct_fini, ct); - if (err) - return err; - - xe_gt_assert(gt, ct->state == XE_GUC_CT_STATE_NOT_INITIALIZED); - ct->state = XE_GUC_CT_STATE_DISABLED; return 0; } ALLOW_ERROR_INJECTION(xe_guc_ct_init, ERRNO); /* See xe_pci_probe() */ @@ -374,9 +388,13 @@ static int guc_ct_control_toggle(struct 
xe_guc_ct *ct, bool enable) return ret > 0 ? -EPROTO : ret; } -static void xe_guc_ct_set_state(struct xe_guc_ct *ct, +static void guc_ct_change_state(struct xe_guc_ct *ct, enum xe_guc_ct_state state) { + struct xe_gt *gt = ct_to_gt(ct); + struct g2h_fence *g2h_fence; + unsigned long idx; + mutex_lock(&ct->lock); /* Serialise dequeue_one_g2h() */ spin_lock_irq(&ct->fast_lock); /* Serialise CT fast-path */ @@ -388,8 +406,20 @@ static void xe_guc_ct_set_state(struct xe_guc_ct *ct, ct->g2h_outstanding = 0; ct->state = state; + xe_gt_dbg(gt, "GuC CT communication channel %s\n", + state == XE_GUC_CT_STATE_STOPPED ? "stopped" : + str_enabled_disabled(state == XE_GUC_CT_STATE_ENABLED)); + spin_unlock_irq(&ct->fast_lock); + /* cancel all in-flight send-recv requests */ + xa_for_each(&ct->fence_lookup, idx, g2h_fence) + g2h_fence_cancel(g2h_fence); + + /* make sure guc_ct_send_recv() will see g2h_fence changes */ + smp_mb(); + wake_up_all(&ct->g2h_fence_wq); + /* * Lockdep doesn't like this under the fast lock and the destroy only * needs to be serialized with the send path which ct lock provides. */ @@ -443,7 +473,7 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct) xe_gt_assert(gt, !xe_guc_ct_enabled(ct)); - xe_map_memset(xe, &ct->bo->vmap, 0, 0, ct->bo->size); + xe_map_memset(xe, &ct->bo->vmap, 0, 0, xe_bo_size(ct->bo)); guc_ct_ctb_h2g_init(xe, &ct->ctbs.h2g, &ct->bo->vmap); guc_ct_ctb_g2h_init(xe, &ct->ctbs.g2h, &ct->bo->vmap); @@ -459,11 +489,10 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct) if (err) goto err_out; - xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_ENABLED); + guc_ct_change_state(ct, XE_GUC_CT_STATE_ENABLED); smp_mb(); wake_up_all(&ct->wq); - xe_gt_dbg(gt, "GuC CT communication channel enabled\n"); if (ct_needs_safe_mode(ct)) ct_enter_safe_mode(ct); @@ -504,7 +533,7 @@ static void stop_g2h_handler(struct xe_guc_ct *ct) */ void xe_guc_ct_disable(struct xe_guc_ct *ct) { - xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_DISABLED); + guc_ct_change_state(ct, XE_GUC_CT_STATE_DISABLED); ct_exit_safe_mode(ct); stop_g2h_handler(ct); } @@ -520,7 +549,7 @@ void xe_guc_ct_stop(struct xe_guc_ct *ct) if (!xe_guc_ct_initialized(ct)) return; - xe_guc_ct_set_state(ct, XE_GUC_CT_STATE_STOPPED); + guc_ct_change_state(ct, XE_GUC_CT_STATE_STOPPED); stop_g2h_handler(ct); } @@ -1083,6 +1112,11 @@ retry_same_fence: goto retry; } if (g2h_fence.fail) { + if (g2h_fence.cancel) { + xe_gt_dbg(gt, "H2G request %#x canceled!\n", action[0]); + ret = -ECANCELED; + goto unlock; + } xe_gt_err(gt, "H2G request %#x failed: error %#x hint %#x\n", action[0], g2h_fence.error, g2h_fence.hint); ret = -EIO; @@ -1091,6 +1125,7 @@ retry_same_fence: if (ret > 0) ret = response_buffer ? g2h_fence.response_len : g2h_fence.response_data; +unlock: mutex_unlock(&ct->lock); return ret; @@ -1897,7 +1932,7 @@ static struct xe_guc_ct_snapshot *guc_ct_snapshot_alloc(struct xe_guc_ct *ct, bo return NULL; if (ct->bo && want_ctb) { - snapshot->ctb_size = ct->bo->size; + snapshot->ctb_size = xe_bo_size(ct->bo); snapshot->ctb = kmalloc(snapshot->ctb_size, atomic ?
GFP_ATOMIC : GFP_KERNEL); } diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h index 99c5dec446f2..18d4225e6502 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.h +++ b/drivers/gpu/drm/xe/xe_guc_ct.h @@ -11,6 +11,7 @@ struct drm_printer; struct xe_device; +int xe_guc_ct_init_noalloc(struct xe_guc_ct *ct); int xe_guc_ct_init(struct xe_guc_ct *ct); int xe_guc_ct_enable(struct xe_guc_ct *ct); void xe_guc_ct_disable(struct xe_guc_ct *ct); diff --git a/drivers/gpu/drm/xe/xe_guc_log.c b/drivers/gpu/drm/xe/xe_guc_log.c index 38039c411387..c01ccb35dc75 100644 --- a/drivers/gpu/drm/xe/xe_guc_log.c +++ b/drivers/gpu/drm/xe/xe_guc_log.c @@ -79,7 +79,7 @@ static struct xe_guc_log_snapshot *xe_guc_log_snapshot_alloc(struct xe_guc_log * * Also, can't use vmalloc as might be called from atomic context. So need * to break the buffer up into smaller chunks that can be allocated. */ - snapshot->size = log->bo->size; + snapshot->size = xe_bo_size(log->bo); snapshot->num_chunks = DIV_ROUND_UP(snapshot->size, GUC_LOG_CHUNK_SIZE); snapshot->copy = kcalloc(snapshot->num_chunks, sizeof(*snapshot->copy), diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index 9fab5f5b10fa..68b192fe3b32 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -5,8 +5,11 @@ #include "xe_guc_pc.h" +#include <linux/cleanup.h> #include <linux/delay.h> +#include <linux/jiffies.h> #include <linux/ktime.h> +#include <linux/wait_bit.h> #include <drm/drm_managed.h> #include <drm/drm_print.h> @@ -52,9 +55,11 @@ #define LNL_MERT_FREQ_CAP 800 #define BMG_MERT_FREQ_CAP 2133 #define BMG_MIN_FREQ 1200 +#define BMG_MERT_FLUSH_FREQ_CAP 2600 #define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */ #define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */ +#define SLPC_ACT_FREQ_TIMEOUT_MS 100 /** * DOC: GuC Power Conservation (PC) @@ -142,6 +147,36 @@ static int wait_for_pc_state(struct xe_guc_pc *pc, return -ETIMEDOUT; } +static int wait_for_flush_complete(struct xe_guc_pc *pc) +{ + const unsigned long timeout = msecs_to_jiffies(30); + + if (!wait_var_event_timeout(&pc->flush_freq_limit, + !atomic_read(&pc->flush_freq_limit), + timeout)) + return -ETIMEDOUT; + + return 0; +} + +static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq) +{ + int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC; + int slept, wait = 10; + + for (slept = 0; slept < timeout_us;) { + if (xe_guc_pc_get_act_freq(pc) <= freq) + return 0; + + usleep_range(wait, wait << 1); + slept += wait; + wait <<= 1; + if (slept + wait > timeout_us) + wait = timeout_us - slept; + } + + return -ETIMEDOUT; +} static int pc_action_reset(struct xe_guc_pc *pc) { struct xe_guc_ct *ct = pc_to_ct(pc); @@ -554,6 +589,25 @@ u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc) return pc->rpn_freq; } +static int xe_guc_pc_get_min_freq_locked(struct xe_guc_pc *pc, u32 *freq) +{ + int ret; + + lockdep_assert_held(&pc->freq_lock); + + /* Might be in the middle of a gt reset */ + if (!pc->freq_ready) + return -EAGAIN; + + ret = pc_action_query_task_state(pc); + if (ret) + return ret; + + *freq = pc_get_min_freq(pc); + + return 0; +} + /** * xe_guc_pc_get_min_freq - Get the min operational frequency * @pc: The GuC PC @@ -564,26 +618,28 @@ u32 xe_guc_pc_get_rpn_freq(struct xe_guc_pc *pc) */ int xe_guc_pc_get_min_freq(struct xe_guc_pc *pc, u32 *freq) { + guard(mutex)(&pc->freq_lock); + + return xe_guc_pc_get_min_freq_locked(pc, freq); +} + +static int xe_guc_pc_set_min_freq_locked(struct 
xe_guc_pc *pc, u32 freq) +{ int ret; - xe_device_assert_mem_access(pc_to_xe(pc)); + lockdep_assert_held(&pc->freq_lock); - mutex_lock(&pc->freq_lock); - if (!pc->freq_ready) { - /* Might be in the middle of a gt reset */ - ret = -EAGAIN; - goto out; - } + /* Might be in the middle of a gt reset */ + if (!pc->freq_ready) + return -EAGAIN; - ret = pc_action_query_task_state(pc); + ret = pc_set_min_freq(pc, freq); if (ret) - goto out; + return ret; - *freq = pc_get_min_freq(pc); + pc->user_requested_min = freq; -out: - mutex_unlock(&pc->freq_lock); - return ret; + return 0; } /** @@ -597,24 +653,28 @@ out: */ int xe_guc_pc_set_min_freq(struct xe_guc_pc *pc, u32 freq) { + guard(mutex)(&pc->freq_lock); + + return xe_guc_pc_set_min_freq_locked(pc, freq); +} + +static int xe_guc_pc_get_max_freq_locked(struct xe_guc_pc *pc, u32 *freq) +{ int ret; - mutex_lock(&pc->freq_lock); - if (!pc->freq_ready) { - /* Might be in the middle of a gt reset */ - ret = -EAGAIN; - goto out; - } + lockdep_assert_held(&pc->freq_lock); - ret = pc_set_min_freq(pc, freq); + /* Might be in the middle of a gt reset */ + if (!pc->freq_ready) + return -EAGAIN; + + ret = pc_action_query_task_state(pc); if (ret) - goto out; + return ret; - pc->user_requested_min = freq; + *freq = pc_get_max_freq(pc); -out: - mutex_unlock(&pc->freq_lock); - return ret; + return 0; } /** @@ -627,24 +687,28 @@ out: */ int xe_guc_pc_get_max_freq(struct xe_guc_pc *pc, u32 *freq) { + guard(mutex)(&pc->freq_lock); + + return xe_guc_pc_get_max_freq_locked(pc, freq); +} + +static int xe_guc_pc_set_max_freq_locked(struct xe_guc_pc *pc, u32 freq) +{ int ret; - mutex_lock(&pc->freq_lock); - if (!pc->freq_ready) { - /* Might be in the middle of a gt reset */ - ret = -EAGAIN; - goto out; - } + lockdep_assert_held(&pc->freq_lock); - ret = pc_action_query_task_state(pc); + /* Might be in the middle of a gt reset */ + if (!pc->freq_ready) + return -EAGAIN; + + ret = pc_set_max_freq(pc, freq); if (ret) - goto out; + return ret; - *freq = pc_get_max_freq(pc); + pc->user_requested_max = freq; -out: - mutex_unlock(&pc->freq_lock); - return ret; + return 0; } /** @@ -658,24 +722,14 @@ out: */ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq) { - int ret; - - mutex_lock(&pc->freq_lock); - if (!pc->freq_ready) { - /* Might be in the middle of a gt reset */ - ret = -EAGAIN; - goto out; + if (XE_WA(pc_to_gt(pc), 22019338487)) { + if (wait_for_flush_complete(pc) != 0) + return -EAGAIN; } - ret = pc_set_max_freq(pc, freq); - if (ret) - goto out; + guard(mutex)(&pc->freq_lock); - pc->user_requested_max = freq; - -out: - mutex_unlock(&pc->freq_lock); - return ret; + return xe_guc_pc_set_max_freq_locked(pc, freq); } /** @@ -873,30 +927,117 @@ static int pc_adjust_requested_freq(struct xe_guc_pc *pc) return ret; } -static int pc_set_mert_freq_cap(struct xe_guc_pc *pc) +static bool needs_flush_freq_limit(struct xe_guc_pc *pc) { - int ret = 0; + struct xe_gt *gt = pc_to_gt(pc); - if (XE_WA(pc_to_gt(pc), 22019338487)) { - /* - * Get updated min/max and stash them. 
- */ - ret = xe_guc_pc_get_min_freq(pc, &pc->stashed_min_freq); - if (!ret) - ret = xe_guc_pc_get_max_freq(pc, &pc->stashed_max_freq); - if (ret) - return ret; + return XE_WA(gt, 22019338487) && + pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP; +} + +/** + * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush + * @pc: the xe_guc_pc object + * + * As per the WA, reduce max GT frequency during L2 cache flush + */ +void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc) +{ + struct xe_gt *gt = pc_to_gt(pc); + u32 max_freq; + int ret; + + if (!needs_flush_freq_limit(pc)) + return; + + guard(mutex)(&pc->freq_lock); + + ret = xe_guc_pc_get_max_freq_locked(pc, &max_freq); + if (!ret && max_freq > BMG_MERT_FLUSH_FREQ_CAP) { + ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP); + if (ret) { + xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n", + BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret)); + return; + } + + atomic_set(&pc->flush_freq_limit, 1); /* - * Ensure min and max are bound by MERT_FREQ_CAP until driver loads. + * If user has previously changed max freq, stash that value to + * restore later, otherwise use the current max. New user + * requests wait on flush. */ - mutex_lock(&pc->freq_lock); - ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc))); - if (!ret) - ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc))); - mutex_unlock(&pc->freq_lock); + if (pc->user_requested_max != 0) + pc->stashed_max_freq = pc->user_requested_max; + else + pc->stashed_max_freq = max_freq; } + /* + * Wait for actual freq to go below the flush cap: even if the previous + * max was below cap, the current one might still be above it + */ + ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP); + if (ret) + xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n", + BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret)); +} + +/** + * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes. + * @pc: the xe_guc_pc object + * + * Retrieve the previous GT max frequency value. + */ +void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc) +{ + struct xe_gt *gt = pc_to_gt(pc); + int ret = 0; + + if (!needs_flush_freq_limit(pc)) + return; + + if (!atomic_read(&pc->flush_freq_limit)) + return; + + mutex_lock(&pc->freq_lock); + + ret = pc_set_max_freq(&gt->uc.guc.pc, pc->stashed_max_freq); + if (ret) + xe_gt_err_once(gt, "Failed to restore max freq %u:%d", + pc->stashed_max_freq, ret); + + atomic_set(&pc->flush_freq_limit, 0); + mutex_unlock(&pc->freq_lock); + wake_up_var(&pc->flush_freq_limit); +} + +static int pc_set_mert_freq_cap(struct xe_guc_pc *pc) +{ + int ret; + + if (!XE_WA(pc_to_gt(pc), 22019338487)) + return 0; + + guard(mutex)(&pc->freq_lock); + + /* + * Get updated min/max and stash them. + */ + ret = xe_guc_pc_get_min_freq_locked(pc, &pc->stashed_min_freq); + if (!ret) + ret = xe_guc_pc_get_max_freq_locked(pc, &pc->stashed_max_freq); + if (ret) + return ret; + + /* + * Ensure min and max are bound by MERT_FREQ_CAP until driver loads.
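The conversion running through this file follows one shape: the old lock/unlock-and-goto bodies become *_locked() helpers that assert lock ownership, while the public entry points take the mutex through cleanup.h's guard(), which drops it on every return path. A minimal sketch of that shape, with hypothetical names:

#include <linux/cleanup.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct thing {
	struct mutex lock;
	bool ready;
	u32 val;
};

static int thing_set_locked(struct thing *t, u32 val)
{
	lockdep_assert_held(&t->lock);	/* internal helper: lock must be held */

	if (!t->ready)			/* e.g. mid-reset, as in the GuC PC code */
		return -EAGAIN;

	t->val = val;
	return 0;
}

static int thing_set(struct thing *t, u32 val)
{
	guard(mutex)(&t->lock);		/* released on every return path */

	return thing_set_locked(t, val);
}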
+ */ + ret = pc_set_min_freq(pc, min(pc->rpe_freq, pc_max_freq_cap(pc))); + if (!ret) + ret = pc_set_max_freq(pc, min(pc->rp0_freq, pc_max_freq_cap(pc))); + return ret; } diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h index 0a2664d5c811..52ecdd5ddbff 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.h +++ b/drivers/gpu/drm/xe/xe_guc_pc.h @@ -38,5 +38,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc); void xe_guc_pc_init_early(struct xe_guc_pc *pc); int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc); void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc); +void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc); +void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc); #endif /* _XE_GUC_PC_H_ */ diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h index 2978ac9a249b..c02053948a57 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h @@ -15,6 +15,8 @@ struct xe_guc_pc { /** @bo: GGTT buffer object that is shared with GuC PC */ struct xe_bo *bo; + /** @flush_freq_limit: 1 when max freq changes are limited by driver */ + atomic_t flush_freq_limit; /** @rp0_freq: HW RP0 frequency - The Maximum one */ u32 rp0_freq; /** @rpa_freq: HW RPa frequency - The Achievable one */ diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c index df7a5a4eec74..cafb47711e9b 100644 --- a/drivers/gpu/drm/xe/xe_guc_submit.c +++ b/drivers/gpu/drm/xe/xe_guc_submit.c @@ -908,12 +908,13 @@ static void xe_guc_exec_queue_lr_cleanup(struct work_struct *w) struct xe_exec_queue *q = ge->q; struct xe_guc *guc = exec_queue_to_guc(q); struct xe_gpu_scheduler *sched = &ge->sched; - bool wedged; + bool wedged = false; xe_gt_assert(guc_to_gt(guc), xe_exec_queue_is_lr(q)); trace_xe_exec_queue_lr_cleanup(q); - wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); + if (!exec_queue_killed(q)) + wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); /* Kill the run_job / process_msg entry points */ xe_sched_submission_stop(sched); @@ -1084,7 +1085,7 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) int err = -ETIME; pid_t pid = -1; int i = 0; - bool wedged, skip_timeout_check; + bool wedged = false, skip_timeout_check; /* * TDR has fired before free job worker. Common if exec queue @@ -1092,12 +1093,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) * list so job can be freed and kick scheduler ensuring free job is not * lost. */ - if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) { - xe_sched_add_pending_job(sched, job); - xe_sched_submission_start(sched); - - return DRM_GPU_SCHED_STAT_NOMINAL; - } + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &job->fence->flags)) + return DRM_GPU_SCHED_STAT_NO_HANG; /* Kill the run_job entry point */ xe_sched_submission_stop(sched); @@ -1130,7 +1127,8 @@ guc_exec_queue_timedout_job(struct drm_sched_job *drm_job) * doesn't work for SRIOV. For now assuming timeouts in wedged mode are * genuine timeouts. 
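The flush_freq_limit field added above is one half of a small handshake: the apply side sets the atomic once the cap is in place, the remove side clears it and calls wake_up_var(), and xe_guc_pc_set_max_freq() parks in wait_var_event_timeout() until the flag drops. A stand-alone sketch of the handshake, with hypothetical names:

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/wait_bit.h>

static atomic_t busy_flag;

static int wait_until_not_busy(void)
{
	/* Sleeps until busy_flag reads 0, or gives up after 30 ms. */
	if (!wait_var_event_timeout(&busy_flag, !atomic_read(&busy_flag),
				    msecs_to_jiffies(30)))
		return -ETIMEDOUT;

	return 0;
}

static void clear_busy(void)
{
	atomic_set(&busy_flag, 0);
	wake_up_var(&busy_flag);	/* releases wait_until_not_busy() callers */
}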
- wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); + if (!exec_queue_killed(q)) + wedged = guc_submit_hint_wedged(exec_queue_to_guc(q)); /* Engine state now stable, disable scheduling to check timestamp */ if (!wedged && exec_queue_registered(q)) { @@ -1265,7 +1263,7 @@ trigger_reset: /* Start fence signaling */ xe_hw_fence_irq_start(q->fence_irq); - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_RESET; sched_enable: enable_scheduling(q); @@ -1275,10 +1273,8 @@ rearm: * but there is not currently an easy way to do in DRM scheduler. With * some thought, do this in a follow up. */ - xe_sched_add_pending_job(sched, job); xe_sched_submission_start(sched); - - return DRM_GPU_SCHED_STAT_NOMINAL; + return DRM_GPU_SCHED_STAT_NO_HANG; } static void __guc_exec_queue_fini_async(struct work_struct *w) @@ -2090,12 +2086,16 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg, struct xe_gt *gt = guc_to_gt(guc); struct xe_exec_queue *q; u32 guc_id; + u32 type = XE_GUC_CAT_ERR_TYPE_INVALID; - if (unlikely(len < 1)) + if (unlikely(!len || len > 2)) return -EPROTO; guc_id = msg[0]; + if (len == 2) + type = msg[1]; + if (guc_id == GUC_ID_UNKNOWN) { /* * GuC uses GUC_ID_UNKNOWN if it can not map the CAT fault to any PF/VF @@ -2109,8 +2109,19 @@ int xe_guc_exec_queue_memory_cat_error_handler(struct xe_guc *guc, u32 *msg, if (unlikely(!q)) return -EPROTO; - xe_gt_dbg(gt, "Engine memory cat error: engine_class=%s, logical_mask: 0x%x, guc_id=%d", - xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); + /* + * The type is HW-defined and changes based on platform, so we don't + * decode it in the kernel and only check if it is valid. + * See bspec 54047 and 72187 for details. + */ + if (type != XE_GUC_CAT_ERR_TYPE_INVALID) + xe_gt_dbg(gt, + "Engine memory CAT error [%u]: class=%s, logical_mask: 0x%x, guc_id=%d", + type, xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); + else + xe_gt_dbg(gt, + "Engine memory CAT error: class=%s, logical_mask: 0x%x, guc_id=%d", + xe_hw_engine_class_to_str(q->class), q->logical_mask, guc_id); trace_xe_exec_queue_memory_cat_error(q); diff --git a/drivers/gpu/drm/xe/xe_heci_gsc.c b/drivers/gpu/drm/xe/xe_heci_gsc.c index 27d11e06a82b..6d7b62724126 100644 --- a/drivers/gpu/drm/xe/xe_heci_gsc.c +++ b/drivers/gpu/drm/xe/xe_heci_gsc.c @@ -11,15 +11,12 @@ #include "xe_device_types.h" #include "xe_drv.h" #include "xe_heci_gsc.h" +#include "regs/xe_gsc_regs.h" #include "xe_platform_types.h" #include "xe_survivability_mode.h" #define GSC_BAR_LENGTH 0x00000FFC -#define DG1_GSC_HECI2_BASE 0x259000 -#define PVC_GSC_HECI2_BASE 0x285000 -#define DG2_GSC_HECI2_BASE 0x374000 - static void heci_gsc_irq_mask(struct irq_data *d) { /* generic irq handling */ diff --git a/drivers/gpu/drm/xe/xe_huc.c b/drivers/gpu/drm/xe/xe_huc.c index 6a846e4cb221..7e43b2dd6a32 100644 --- a/drivers/gpu/drm/xe/xe_huc.c +++ b/drivers/gpu/drm/xe/xe_huc.c @@ -171,7 +171,7 @@ static int huc_auth_via_gsccs(struct xe_huc *huc) sizeof(struct pxp43_new_huc_auth_in)); wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset, xe_bo_ggtt_addr(huc->fw.bo), - huc->fw.bo->size); + xe_bo_size(huc->fw.bo)); do { err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset, ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE, diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c index 3439c8522d01..796ba8c34a16 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine.c +++ b/drivers/gpu/drm/xe/xe_hw_engine.c @@ -1059,12 +1059,13 @@ struct xe_hw_engine *
xe_hw_engine_lookup(struct xe_device *xe, struct drm_xe_engine_class_instance eci) { + struct xe_gt *gt = xe_device_get_gt(xe, eci.gt_id); unsigned int idx; if (eci.engine_class >= ARRAY_SIZE(user_to_xe_engine_class)) return NULL; - if (eci.gt_id >= xe->info.gt_count) + if (!gt) return NULL; idx = array_index_nospec(eci.engine_class, diff --git a/drivers/gpu/drm/xe/xe_hw_engine_group.c b/drivers/gpu/drm/xe/xe_hw_engine_group.c index 2d68c5b5262a..c926f840c87b 100644 --- a/drivers/gpu/drm/xe/xe_hw_engine_group.c +++ b/drivers/gpu/drm/xe/xe_hw_engine_group.c @@ -13,15 +13,6 @@ #include "xe_vm.h" static void -hw_engine_group_free(struct drm_device *drm, void *arg) -{ - struct xe_hw_engine_group *group = arg; - - destroy_workqueue(group->resume_wq); - kfree(group); -} - -static void hw_engine_group_resume_lr_jobs_func(struct work_struct *w) { struct xe_exec_queue *q; @@ -53,7 +44,7 @@ hw_engine_group_alloc(struct xe_device *xe) struct xe_hw_engine_group *group; int err; - group = kzalloc(sizeof(*group), GFP_KERNEL); + group = drmm_kzalloc(&xe->drm, sizeof(*group), GFP_KERNEL); if (!group) return ERR_PTR(-ENOMEM); @@ -61,14 +52,14 @@ hw_engine_group_alloc(struct xe_device *xe) if (!group->resume_wq) return ERR_PTR(-ENOMEM); + err = drmm_add_action_or_reset(&xe->drm, __drmm_workqueue_release, group->resume_wq); + if (err) + return ERR_PTR(err); + init_rwsem(&group->mode_sem); INIT_WORK(&group->resume_work, hw_engine_group_resume_lr_jobs_func); INIT_LIST_HEAD(&group->exec_queue_list); - err = drmm_add_action_or_reset(&xe->drm, hw_engine_group_free, group); - if (err) - return ERR_PTR(err); - return group; } @@ -84,25 +75,18 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt) enum xe_hw_engine_id id; struct xe_hw_engine_group *group_rcs_ccs, *group_bcs, *group_vcs_vecs; struct xe_device *xe = gt_to_xe(gt); - int err; group_rcs_ccs = hw_engine_group_alloc(xe); - if (IS_ERR(group_rcs_ccs)) { - err = PTR_ERR(group_rcs_ccs); - goto err_group_rcs_ccs; - } + if (IS_ERR(group_rcs_ccs)) + return PTR_ERR(group_rcs_ccs); group_bcs = hw_engine_group_alloc(xe); - if (IS_ERR(group_bcs)) { - err = PTR_ERR(group_bcs); - goto err_group_bcs; - } + if (IS_ERR(group_bcs)) + return PTR_ERR(group_bcs); group_vcs_vecs = hw_engine_group_alloc(xe); - if (IS_ERR(group_vcs_vecs)) { - err = PTR_ERR(group_vcs_vecs); - goto err_group_vcs_vecs; - } + if (IS_ERR(group_vcs_vecs)) + return PTR_ERR(group_vcs_vecs); for_each_hw_engine(hwe, gt, id) { switch (hwe->class) { @@ -125,15 +109,6 @@ int xe_hw_engine_setup_groups(struct xe_gt *gt) } return 0; - -err_group_vcs_vecs: - kfree(group_vcs_vecs); -err_group_bcs: - kfree(group_bcs); -err_group_rcs_ccs: - kfree(group_rcs_ccs); - - return err; } /** diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c index f08fc4377d25..c17ed1ae8649 100644 --- a/drivers/gpu/drm/xe/xe_hwmon.c +++ b/drivers/gpu/drm/xe/xe_hwmon.c @@ -332,6 +332,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe int ret = 0; u32 reg_val, max; struct xe_reg rapl_limit; + u64 max_supp_power_limit = 0; mutex_lock(&hwmon->hwmon_lock); @@ -356,6 +357,20 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channe goto unlock; } + /* + * If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to + * the supported maximum (U12.3 format). + * This is to avoid truncation during reg_val calculation below and ensure the valid + * power limit is sent for pcode which would clamp it to card-supported value. 
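For reference, the arithmetic behind the clamp below: with an assumed U12.3-style register field (12 integer bits, 3 fractional), the largest encodable limit is (field_mask >> shift) * scale_factor, and anything larger would be truncated by the shift-and-divide conversion that follows. A sketch with made-up field values:

#include <linux/bits.h>
#include <linux/math.h>
#include <linux/minmax.h>
#include <linux/types.h>

#define LIM_MASK	GENMASK(14, 0)	/* hypothetical 15-bit U12.3 field */
#define LIM_SHIFT	3
#define SCALE		1000000ULL	/* hypothetical microwatt scale */

static u32 encode_limit(u64 value)
{
	u64 max = (LIM_MASK >> LIM_SHIFT) * SCALE;	/* 4095 * SCALE */

	value = min(value, max);	/* clamp instead of silently truncating */

	/* Same shift-and-divide conversion the driver performs. */
	return DIV_ROUND_CLOSEST_ULL(value << LIM_SHIFT, SCALE);
}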
+ */ + max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER; + if (value > max_supp_power_limit) { + value = max_supp_power_limit; + drm_info(&hwmon->xe->drm, + "Power limit clamped as selected %s exceeds channel %d limit\n", + PWR_ATTR_TO_STR(attr), channel); + } + /* Computation in 64-bits to avoid overflow. Round to nearest. */ reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER); @@ -739,9 +754,23 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel, { int ret; u32 uval; + u64 max_crit_power_curr = 0; mutex_lock(&hwmon->hwmon_lock); + /* + * If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1 + * max supported value, clamp it to the command's max (U10.6 format). + * This is to avoid truncation during uval calculation below and ensure the valid power + * limit is sent for pcode which would clamp it to card-supported value. + */ + max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor; + if (value > max_crit_power_curr) { + value = max_crit_power_curr; + drm_info(&hwmon->xe->drm, + "Power limit clamped as selected exceeds channel %d limit\n", + channel); + } uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor); ret = xe_hwmon_pcode_write_i1(hwmon, uval); diff --git a/drivers/gpu/drm/xe/xe_i2c.c b/drivers/gpu/drm/xe/xe_i2c.c new file mode 100644 index 000000000000..bc7dc2099470 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_i2c.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* + * Intel Xe I2C attached Microcontroller Units (MCU) + * + * Copyright (C) 2025 Intel Corporation. + */ + +#include <linux/array_size.h> +#include <linux/container_of.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/ioport.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/notifier.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regmap.h> +#include <linux/sprintf.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +#include "regs/xe_i2c_regs.h" +#include "regs/xe_irq_regs.h" + +#include "xe_device.h" +#include "xe_device_types.h" +#include "xe_i2c.h" +#include "xe_mmio.h" +#include "xe_platform_types.h" + +/** + * DOC: Xe I2C devices + * + * Register a platform device for the I2C host controller (Synopsys DesignWare + * I2C) if the registers of that controller are mapped to the MMIO, and also the + * I2C client device for the Add-In Management Controller (the MCU) attached to + * the host controller. + * + * See drivers/i2c/busses/i2c-designware-* for more information on the I2C host + * controller.
+ */ + +static const char adapter_name[] = "i2c_designware"; + +static const struct property_entry xe_i2c_adapter_properties[] = { + PROPERTY_ENTRY_STRING("compatible", "intel,xe-i2c"), + PROPERTY_ENTRY_U32("clock-frequency", I2C_MAX_FAST_MODE_PLUS_FREQ), + { } +}; + +static inline void xe_i2c_read_endpoint(struct xe_mmio *mmio, void *ep) +{ + u32 *val = ep; + + val[0] = xe_mmio_read32(mmio, REG_SG_REMAP_ADDR_PREFIX); + val[1] = xe_mmio_read32(mmio, REG_SG_REMAP_ADDR_POSTFIX); +} + +static void xe_i2c_client_work(struct work_struct *work) +{ + struct xe_i2c *i2c = container_of(work, struct xe_i2c, work); + struct i2c_board_info info = { + .type = "amc", + .flags = I2C_CLIENT_HOST_NOTIFY, + .addr = i2c->ep.addr[1], + }; + + i2c->client[0] = i2c_new_client_device(i2c->adapter, &info); +} + +static int xe_i2c_notifier(struct notifier_block *nb, unsigned long action, void *data) +{ + struct xe_i2c *i2c = container_of(nb, struct xe_i2c, bus_notifier); + struct i2c_adapter *adapter = i2c_verify_adapter(data); + struct device *dev = data; + + if (action == BUS_NOTIFY_ADD_DEVICE && + adapter && dev->parent == &i2c->pdev->dev) { + i2c->adapter = adapter; + schedule_work(&i2c->work); + return NOTIFY_OK; + } + + return NOTIFY_DONE; +} + +static int xe_i2c_register_adapter(struct xe_i2c *i2c) +{ + struct pci_dev *pci = to_pci_dev(i2c->drm_dev); + struct platform_device *pdev; + struct fwnode_handle *fwnode; + int ret; + + fwnode = fwnode_create_software_node(xe_i2c_adapter_properties, NULL); + if (IS_ERR(fwnode)) + return PTR_ERR(fwnode); + + /* + * Not using platform_device_register_full() here because we don't have + * a handle to the platform_device before it returns. xe_i2c_notifier() + * uses that handle, but it may be called before + * platform_device_register_full() is done. + */ + pdev = platform_device_alloc(adapter_name, pci_dev_id(pci)); + if (!pdev) { + ret = -ENOMEM; + goto err_fwnode_remove; + } + + if (i2c->adapter_irq) { + struct resource res; + + res = DEFINE_RES_IRQ_NAMED(i2c->adapter_irq, "xe_i2c"); + + ret = platform_device_add_resources(pdev, &res, 1); + if (ret) + goto err_pdev_put; + } + + pdev->dev.parent = i2c->drm_dev; + pdev->dev.fwnode = fwnode; + i2c->adapter_node = fwnode; + i2c->pdev = pdev; + + ret = platform_device_add(pdev); + if (ret) + goto err_pdev_put; + + return 0; + +err_pdev_put: + platform_device_put(pdev); +err_fwnode_remove: + fwnode_remove_software_node(fwnode); + + return ret; +} + +static void xe_i2c_unregister_adapter(struct xe_i2c *i2c) +{ + platform_device_unregister(i2c->pdev); + fwnode_remove_software_node(i2c->adapter_node); +} + +/** + * xe_i2c_irq_handler: Handler for I2C interrupts + * @xe: xe device instance + * @master_ctl: interrupt register + * + * Forward interrupts generated by the I2C host adapter to the I2C host adapter + * driver. 
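The handler below only demultiplexes: it tests one bit of the master interrupt register and hands the pre-mapped virq to the IRQ core, which in turn runs the i2c-designware driver's handler. Distilled to its essentials (hypothetical names):

#include <linux/irq.h>

/* Sketch: forward one master-control bit to a pre-mapped child virq. */
static void forward_child_irq(u32 master_ctl, u32 bit, unsigned int virq)
{
	if (!virq)	/* no irq_domain mapping was created */
		return;

	if (master_ctl & bit)
		generic_handle_irq_safe(virq);	/* callable from any context */
}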
+ */ +void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl) +{ + if (!xe->i2c || !xe->i2c->adapter_irq) + return; + + if (master_ctl & I2C_IRQ) + generic_handle_irq_safe(xe->i2c->adapter_irq); +} + +static int xe_i2c_irq_map(struct irq_domain *h, unsigned int virq, + irq_hw_number_t hw_irq_num) +{ + irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq); + return 0; +} + +static const struct irq_domain_ops xe_i2c_irq_ops = { + .map = xe_i2c_irq_map, +}; + +static int xe_i2c_create_irq(struct xe_i2c *i2c) +{ + struct irq_domain *domain; + + if (!(i2c->ep.capabilities & XE_I2C_EP_CAP_IRQ)) + return 0; + + domain = irq_domain_create_linear(dev_fwnode(i2c->drm_dev), 1, &xe_i2c_irq_ops, NULL); + if (!domain) + return -ENOMEM; + + i2c->adapter_irq = irq_create_mapping(domain, 0); + i2c->irqdomain = domain; + + return 0; +} + +static void xe_i2c_remove_irq(struct xe_i2c *i2c) +{ + if (!i2c->irqdomain) + return; + + irq_dispose_mapping(i2c->adapter_irq); + irq_domain_remove(i2c->irqdomain); +} + +static int xe_i2c_read(void *context, unsigned int reg, unsigned int *val) +{ + struct xe_i2c *i2c = context; + + *val = xe_mmio_read32(i2c->mmio, XE_REG(reg + I2C_MEM_SPACE_OFFSET)); + + return 0; +} + +static int xe_i2c_write(void *context, unsigned int reg, unsigned int val) +{ + struct xe_i2c *i2c = context; + + xe_mmio_write32(i2c->mmio, XE_REG(reg + I2C_MEM_SPACE_OFFSET), val); + + return 0; +} + +static const struct regmap_config i2c_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_read = xe_i2c_read, + .reg_write = xe_i2c_write, + .fast_io = true, +}; + +void xe_i2c_pm_suspend(struct xe_device *xe) +{ + struct xe_mmio *mmio = xe_root_tile_mmio(xe); + + if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE) + return; + + xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D3hot); + drm_dbg(&xe->drm, "pmcsr: 0x%08x\n", xe_mmio_read32(mmio, I2C_CONFIG_PMCSR)); +} + +void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold) +{ + struct xe_mmio *mmio = xe_root_tile_mmio(xe); + + if (!xe->i2c || xe->i2c->ep.cookie != XE_I2C_EP_COOKIE_DEVICE) + return; + + if (d3cold) + xe_mmio_rmw32(mmio, I2C_CONFIG_CMD, 0, PCI_COMMAND_MEMORY); + + xe_mmio_rmw32(mmio, I2C_CONFIG_PMCSR, PCI_PM_CTRL_STATE_MASK, (__force u32)PCI_D0); + drm_dbg(&xe->drm, "pmcsr: 0x%08x\n", xe_mmio_read32(mmio, I2C_CONFIG_PMCSR)); +} + +static void xe_i2c_remove(void *data) +{ + struct xe_i2c *i2c = data; + unsigned int i; + + for (i = 0; i < XE_I2C_MAX_CLIENTS; i++) + i2c_unregister_device(i2c->client[i]); + + bus_unregister_notifier(&i2c_bus_type, &i2c->bus_notifier); + xe_i2c_unregister_adapter(i2c); + xe_i2c_remove_irq(i2c); +} + +/** + * xe_i2c_probe: Probe the I2C host adapter and the I2C clients attached to it + * @xe: xe device instance + * + * Register all the I2C devices described in the I2C Endpoint data structure. 
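The endpoint descriptor parsed by the probe path below is just two MMIO dwords overlaid on struct xe_i2c_endpoint (u8 cookie, u8 capabilities, u16 addr[3]). A sketch of a validity check along those lines; the non-zero address test is illustrative, only the cookie comparison is what the patch actually gates on:

static bool endpoint_usable(const struct xe_i2c_endpoint *ep)
{
	/* The cookie separates a provisioned descriptor from scratch data. */
	if (ep->cookie != XE_I2C_EP_COOKIE_DEVICE)
		return false;

	/* addr[1] is the AMC client address used once the adapter appears. */
	return ep->addr[1] != 0;
}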
+ * + * Return: 0 on success, error code on failure + */ +int xe_i2c_probe(struct xe_device *xe) +{ + struct device *drm_dev = xe->drm.dev; + struct xe_i2c_endpoint ep; + struct regmap *regmap; + struct xe_i2c *i2c; + int ret; + + if (xe->info.platform != XE_BATTLEMAGE) + return 0; + + if (IS_SRIOV_VF(xe)) + return 0; + + xe_i2c_read_endpoint(xe_root_tile_mmio(xe), &ep); + if (ep.cookie != XE_I2C_EP_COOKIE_DEVICE) + return 0; + + i2c = devm_kzalloc(drm_dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return -ENOMEM; + + INIT_WORK(&i2c->work, xe_i2c_client_work); + i2c->mmio = xe_root_tile_mmio(xe); + i2c->drm_dev = drm_dev; + i2c->ep = ep; + xe->i2c = i2c; + + /* PCI PM isn't aware of this device, bring it up and match it with SGUnit state. */ + xe_i2c_pm_resume(xe, true); + + regmap = devm_regmap_init(drm_dev, NULL, i2c, &i2c_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + i2c->bus_notifier.notifier_call = xe_i2c_notifier; + ret = bus_register_notifier(&i2c_bus_type, &i2c->bus_notifier); + if (ret) + return ret; + + ret = xe_i2c_create_irq(i2c); + if (ret) + goto err_unregister_notifier; + + ret = xe_i2c_register_adapter(i2c); + if (ret) + goto err_remove_irq; + + return devm_add_action_or_reset(drm_dev, xe_i2c_remove, i2c); + +err_remove_irq: + xe_i2c_remove_irq(i2c); + +err_unregister_notifier: + bus_unregister_notifier(&i2c_bus_type, &i2c->bus_notifier); + + return ret; +} diff --git a/drivers/gpu/drm/xe/xe_i2c.h b/drivers/gpu/drm/xe/xe_i2c.h new file mode 100644 index 000000000000..b767ed8ce52b --- /dev/null +++ b/drivers/gpu/drm/xe/xe_i2c.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: MIT */ +#ifndef _XE_I2C_H_ +#define _XE_I2C_H_ + +#include <linux/bits.h> +#include <linux/notifier.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +struct device; +struct fwnode_handle; +struct i2c_adapter; +struct i2c_client; +struct irq_domain; +struct platform_device; +struct xe_device; +struct xe_mmio; + +#define XE_I2C_MAX_CLIENTS 3 + +#define XE_I2C_EP_COOKIE_DEVICE 0xde + +/* Endpoint Capabilities */ +#define XE_I2C_EP_CAP_IRQ BIT(0) + +struct xe_i2c_endpoint { + u8 cookie; + u8 capabilities; + u16 addr[XE_I2C_MAX_CLIENTS]; +}; + +struct xe_i2c { + struct fwnode_handle *adapter_node; + struct platform_device *pdev; + struct i2c_adapter *adapter; + struct i2c_client *client[XE_I2C_MAX_CLIENTS]; + + struct notifier_block bus_notifier; + struct work_struct work; + + struct irq_domain *irqdomain; + int adapter_irq; + + struct xe_i2c_endpoint ep; + struct device *drm_dev; + + struct xe_mmio *mmio; +}; + +#if IS_ENABLED(CONFIG_I2C) +int xe_i2c_probe(struct xe_device *xe); +void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl); +void xe_i2c_pm_suspend(struct xe_device *xe); +void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold); +#else +static inline int xe_i2c_probe(struct xe_device *xe) { return 0; } +static inline void xe_i2c_irq_handler(struct xe_device *xe, u32 master_ctl) { } +static inline void xe_i2c_pm_suspend(struct xe_device *xe) { } +static inline void xe_i2c_pm_resume(struct xe_device *xe, bool d3cold) { } +#endif + +#endif diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c index 5362d3174b06..5df5b8c2a3e4 100644 --- a/drivers/gpu/drm/xe/xe_irq.c +++ b/drivers/gpu/drm/xe/xe_irq.c @@ -18,10 +18,12 @@ #include "xe_gt.h" #include "xe_guc.h" #include "xe_hw_engine.h" +#include "xe_i2c.h" #include "xe_memirq.h" #include "xe_mmio.h" #include "xe_pxp.h" #include "xe_sriov.h" +#include "xe_tile.h" /* * Interrupt registers for a unit are 
always consecutive and ordered @@ -160,7 +162,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt) dmask = irqs << 16 | irqs; smask = irqs << 16; - if (!xe_gt_is_media_type(gt)) { + if (xe_gt_is_main_type(gt)) { /* Enable interrupts for each engine class */ xe_mmio_write32(mmio, RENDER_COPY_INTR_ENABLE, dmask); if (ccs_mask) @@ -260,7 +262,7 @@ gt_engine_identity(struct xe_device *xe, static void gt_other_irq_handler(struct xe_gt *gt, const u8 instance, const u16 iir) { - if (instance == OTHER_GUC_INSTANCE && !xe_gt_is_media_type(gt)) + if (instance == OTHER_GUC_INSTANCE && xe_gt_is_main_type(gt)) return xe_guc_irq_handler(&gt->uc.guc, iir); if (instance == OTHER_MEDIA_GUC_INSTANCE && xe_gt_is_media_type(gt)) return xe_guc_irq_handler(&gt->uc.guc, iir); @@ -476,6 +478,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) if (xe->info.has_heci_cscfi) xe_heci_csc_irq_handler(xe, master_ctl); xe_display_irq_handler(xe, master_ctl); + xe_i2c_irq_handler(xe, master_ctl); gu_misc_iir = gu_misc_irq_ack(xe, master_ctl); } } @@ -550,7 +553,7 @@ static void xelp_irq_reset(struct xe_tile *tile) static void dg1_irq_reset(struct xe_tile *tile) { - if (tile->id == 0) + if (xe_tile_is_root(tile)) dg1_intr_disable(tile_to_xe(tile)); gt_irq_reset(tile); diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index 63db66df064b..a2000307d5bf 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -11,6 +11,7 @@ #include "xe_assert.h" #include "xe_bo.h" +#include "xe_gt_tlb_invalidation.h" #include "xe_lmtt.h" #include "xe_map.h" #include "xe_mmio.h" @@ -78,6 +79,9 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level } lmtt_assert(lmtt, xe_bo_is_vram(bo)); + lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE)); + + xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, xe_bo_size(bo)); pt->level = level; pt->bo = bo; @@ -91,6 +95,9 @@ out: static void lmtt_pt_free(struct xe_lmtt_pt *pt) { + lmtt_debug(&pt->bo->tile->sriov.pf.lmtt, "level=%u addr=%llx\n", + pt->level, (u64)xe_bo_main_addr(pt->bo, XE_PAGE_SIZE)); + xe_bo_unpin_map_no_vm(pt->bo); kfree(pt); } @@ -216,6 +223,58 @@ void xe_lmtt_init_hw(struct xe_lmtt *lmtt) lmtt_setup_dir_ptr(lmtt); } +static int lmtt_invalidate_hw(struct xe_lmtt *lmtt) +{ + struct xe_gt_tlb_invalidation_fence fences[XE_MAX_GT_PER_TILE]; + struct xe_gt_tlb_invalidation_fence *fence = fences; + struct xe_tile *tile = lmtt_to_tile(lmtt); + struct xe_gt *gt; + int result = 0; + int err; + u8 id; + + for_each_gt_on_tile(gt, tile, id) { + xe_gt_tlb_invalidation_fence_init(gt, fence, true); + err = xe_gt_tlb_invalidation_all(gt, fence); + result = result ?: err; + fence++; + } + + lmtt_debug(lmtt, "num_fences=%d err=%d\n", (int)(fence - fences), result); + + /* + * It is fine to wait for all fences, even for those which cover the + * invalidation request that failed, as such fence should be already + * marked as signaled. + */ + fence = fences; + for_each_gt_on_tile(gt, tile, id) + xe_gt_tlb_invalidation_fence_wait(fence++); + + return result; +} + +/** + * xe_lmtt_invalidate_hw - Invalidate LMTT hardware. + * @lmtt: the &xe_lmtt to invalidate + * + * Send requests to all GuCs on this tile to invalidate all TLBs. + * + * This function should be called only when running as a PF driver.
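lmtt_invalidate_hw() above is an instance of a broader issue-then-wait-all pattern: fire the request on every GT, remember only the first error, then wait on every fence, relying on failed submissions leaving their fence already signaled. A generic sketch of that shape (hypothetical types):

struct req {
	/* imagine: an invalidation fence, pre-signaled on submit failure */
	int err;
};

static int issue_then_wait_all(struct req *reqs, int n,
			       int (*issue)(struct req *),
			       void (*wait)(struct req *))
{
	int result = 0, i;

	/* Pass 1: submit everywhere, remembering only the first error. */
	for (i = 0; i < n; i++)
		result = result ?: issue(&reqs[i]);

	/* Pass 2: wait on every request; failed ones are already signaled. */
	for (i = 0; i < n; i++)
		wait(&reqs[i]);

	return result;
}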
+ */ +void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt) +{ + struct xe_device *xe = lmtt_to_xe(lmtt); + int err; + + lmtt_assert(lmtt, IS_SRIOV_PF(xe)); + + err = lmtt_invalidate_hw(lmtt); + if (err) + xe_sriov_warn(xe, "LMTT%u invalidation failed (%pe)", + lmtt_to_tile(lmtt)->id, ERR_PTR(err)); +} + static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt, u64 pte, unsigned int idx) { @@ -226,9 +285,14 @@ static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt, switch (lmtt->ops->lmtt_pte_size(level)) { case sizeof(u32): + lmtt_assert(lmtt, !overflows_type(pte, u32)); + lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32)); + xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte); break; case sizeof(u64): + lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64)); + xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte); break; default: @@ -265,6 +329,7 @@ static void lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid) return; lmtt_write_pte(lmtt, pd, LMTT_PTE_INVALID, vfid); + lmtt_invalidate_hw(lmtt); lmtt_assert(lmtt, pd->level > 0); lmtt_assert(lmtt, pt->level == pd->level - 1); @@ -386,11 +451,11 @@ static void lmtt_insert_bo(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo u64 addr, vram_offset; lmtt_assert(lmtt, IS_ALIGNED(start, page_size)); - lmtt_assert(lmtt, IS_ALIGNED(bo->size, page_size)); + lmtt_assert(lmtt, IS_ALIGNED(xe_bo_size(bo), page_size)); lmtt_assert(lmtt, xe_bo_is_vram(bo)); vram_offset = vram_region_gpu_offset(bo->ttm.resource); - xe_res_first(bo->ttm.resource, 0, bo->size, &cur); + xe_res_first(bo->ttm.resource, 0, xe_bo_size(bo), &cur); while (cur.remaining) { addr = xe_res_dma(&cur); addr += vram_offset; /* XXX */ diff --git a/drivers/gpu/drm/xe/xe_lmtt.h b/drivers/gpu/drm/xe/xe_lmtt.h index cb10ef994db6..75a234fbf367 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.h +++ b/drivers/gpu/drm/xe/xe_lmtt.h @@ -15,6 +15,7 @@ struct xe_lmtt_ops; #ifdef CONFIG_PCI_IOV int xe_lmtt_init(struct xe_lmtt *lmtt); void xe_lmtt_init_hw(struct xe_lmtt *lmtt); +void xe_lmtt_invalidate_hw(struct xe_lmtt *lmtt); int xe_lmtt_prepare_pages(struct xe_lmtt *lmtt, unsigned int vfid, u64 range); int xe_lmtt_populate_pages(struct xe_lmtt *lmtt, unsigned int vfid, struct xe_bo *bo, u64 offset); void xe_lmtt_drop_pages(struct xe_lmtt *lmtt, unsigned int vfid); diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index 37598588a54f..6d38411bdeba 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -39,15 +39,46 @@ #define LRC_ENGINE_INSTANCE GENMASK_ULL(53, 48) #define LRC_PPHWSP_SIZE SZ_4K +#define LRC_INDIRECT_CTX_BO_SIZE SZ_4K #define LRC_INDIRECT_RING_STATE_SIZE SZ_4K #define LRC_WA_BB_SIZE SZ_4K +/* + * Layout of the LRC and associated data allocated as + * lrc->bo: + * + * Region Size + * +============================+=================================+ <- __xe_lrc_ring_offset() + * | Ring | ring_size, see | + * | | xe_lrc_init() | + * +============================+=================================+ <- __xe_lrc_pphwsp_offset() + * | PPHWSP (includes SW state) | 4K | + * +----------------------------+---------------------------------+ <- __xe_lrc_regs_offset() + * | Engine Context Image | n * 4K, see | + * | | xe_gt_lrc_size() | + * +----------------------------+---------------------------------+ <- __xe_lrc_indirect_ring_offset() + * | Indirect Ring State Page | 0 or 4k, see | + * | | XE_LRC_FLAG_INDIRECT_RING_STATE | + * 
+============================+=================================+ <- __xe_lrc_indirect_ctx_offset() + * | Indirect Context Page | 0 or 4k, see | + * | | XE_LRC_FLAG_INDIRECT_CTX | + * +============================+=================================+ <- __xe_lrc_wa_bb_offset() + * | WA BB Per Ctx | 4k | + * +============================+=================================+ <- xe_bo_size(lrc->bo) + */ + static struct xe_device * lrc_to_xe(struct xe_lrc *lrc) { return gt_to_xe(lrc->fence_ctx.gt); } +static bool +gt_engine_needs_indirect_ctx(struct xe_gt *gt, enum xe_engine_class class) +{ + return false; +} + size_t xe_gt_lrc_size(struct xe_gt *gt, enum xe_engine_class class) { struct xe_device *xe = gt_to_xe(gt); @@ -582,8 +613,6 @@ static void set_context_control(u32 *regs, struct xe_hw_engine *hwe) if (xe_gt_has_indirect_ring_state(hwe->gt)) regs[CTX_CONTEXT_CONTROL] |= _MASKED_BIT_ENABLE(CTX_CTRL_INDIRECT_RING_STATE_ENABLE); - - /* TODO: Timestamp */ } static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe) @@ -717,8 +746,23 @@ static u32 __xe_lrc_ctx_timestamp_udw_offset(struct xe_lrc *lrc) static inline u32 __xe_lrc_indirect_ring_offset(struct xe_lrc *lrc) { - /* Indirect ring state page is at the very end of LRC */ - return lrc->size - LRC_INDIRECT_RING_STATE_SIZE; + u32 offset = xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE - + LRC_INDIRECT_RING_STATE_SIZE; + + if (lrc->flags & XE_LRC_FLAG_INDIRECT_CTX) + offset -= LRC_INDIRECT_CTX_BO_SIZE; + + return offset; +} + +static inline u32 __xe_lrc_indirect_ctx_offset(struct xe_lrc *lrc) +{ + return xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE - LRC_INDIRECT_CTX_BO_SIZE; +} + +static inline u32 __xe_lrc_wa_bb_offset(struct xe_lrc *lrc) +{ + return xe_bo_size(lrc->bo) - LRC_WA_BB_SIZE; } #define DECL_MAP_ADDR_HELPERS(elem) \ @@ -940,8 +984,10 @@ static void xe_lrc_finish(struct xe_lrc *lrc) * store it in the PPHWSP.
*/ #define CONTEXT_ACTIVE 1ULL -static ssize_t wa_bb_setup_utilization(struct xe_lrc *lrc, struct xe_hw_engine *hwe, - u32 *batch, size_t max_len) +static ssize_t setup_utilization_wa(struct xe_lrc *lrc, + struct xe_hw_engine *hwe, + u32 *batch, + size_t max_len) { u32 *cmd = batch; @@ -968,91 +1014,187 @@ static ssize_t wa_bb_setup_utilization(struct xe_lrc *lrc, struct xe_hw_engine * return cmd - batch; } -struct wa_bb_setup { +struct bo_setup { ssize_t (*setup)(struct xe_lrc *lrc, struct xe_hw_engine *hwe, u32 *batch, size_t max_size); }; -static size_t wa_bb_offset(struct xe_lrc *lrc) -{ - return lrc->bo->size - LRC_WA_BB_SIZE; -} +struct bo_setup_state { + /* Input: */ + struct xe_lrc *lrc; + struct xe_hw_engine *hwe; + size_t max_size; + size_t reserve_dw; + unsigned int offset; + const struct bo_setup *funcs; + unsigned int num_funcs; -static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe) + /* State: */ + u32 *buffer; + u32 *ptr; + unsigned int written; +}; + +static int setup_bo(struct bo_setup_state *state) { - const size_t max_size = LRC_WA_BB_SIZE; - static const struct wa_bb_setup funcs[] = { - { .setup = wa_bb_setup_utilization }, - }; ssize_t remain; - u32 *cmd, *buf = NULL; - if (lrc->bo->vmap.is_iomem) { - buf = kmalloc(max_size, GFP_KERNEL); - if (!buf) + if (state->lrc->bo->vmap.is_iomem) { + state->buffer = kmalloc(state->max_size, GFP_KERNEL); + if (!state->buffer) return -ENOMEM; - cmd = buf; + state->ptr = state->buffer; } else { - cmd = lrc->bo->vmap.vaddr + wa_bb_offset(lrc); + state->ptr = state->lrc->bo->vmap.vaddr + state->offset; + state->buffer = NULL; } - remain = max_size / sizeof(*cmd); + remain = state->max_size / sizeof(u32); - for (size_t i = 0; i < ARRAY_SIZE(funcs); i++) { - ssize_t len = funcs[i].setup(lrc, hwe, cmd, remain); + for (size_t i = 0; i < state->num_funcs; i++) { + ssize_t len = state->funcs[i].setup(state->lrc, state->hwe, + state->ptr, remain); remain -= len; /* - * There should always be at least 1 additional dword for - * the end marker + * Caller has asked for at least reserve_dw to remain unused. 
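An emitter plugged into bo_setup is expected to honor the contract described above: write at most max_len dwords, return the number written (negative on error), and leave reserve_dw dwords free for the caller's trailer (e.g. MI_BATCH_BUFFER_END). A minimal conforming emitter, as a sketch:

/* Sketch: a bo_setup callback that emits a fixed two-dword payload. */
static ssize_t emit_two_noops(struct xe_lrc *lrc, struct xe_hw_engine *hwe,
			      u32 *batch, size_t max_len)
{
	if (max_len < 2)	/* never write past the buffer */
		return -ENOSPC;

	batch[0] = MI_NOOP;
	batch[1] = MI_NOOP;

	return 2;		/* dwords consumed */
}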
*/ - if (len < 0 || xe_gt_WARN_ON(lrc->gt, remain < 1)) + if (len < 0 || + xe_gt_WARN_ON(state->lrc->gt, remain < state->reserve_dw)) goto fail; - cmd += len; - } - - *cmd++ = MI_BATCH_BUFFER_END; - - if (buf) { - xe_map_memcpy_to(gt_to_xe(lrc->gt), &lrc->bo->vmap, - wa_bb_offset(lrc), buf, - (cmd - buf) * sizeof(*cmd)); - kfree(buf); + state->ptr += len; + state->written += len; } - xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR, xe_bo_ggtt_addr(lrc->bo) + - wa_bb_offset(lrc) + 1); - return 0; fail: - kfree(buf); + kfree(state->buffer); return -ENOSPC; } -#define PVC_CTX_ASID (0x2e + 1) -#define PVC_CTX_ACC_CTR_THOLD (0x2a + 1) +static void finish_bo(struct bo_setup_state *state) +{ + if (!state->buffer) + return; + + xe_map_memcpy_to(gt_to_xe(state->lrc->gt), &state->lrc->bo->vmap, + state->offset, state->buffer, + state->written * sizeof(u32)); + kfree(state->buffer); +} + +static int setup_wa_bb(struct xe_lrc *lrc, struct xe_hw_engine *hwe) +{ + static const struct bo_setup funcs[] = { + { .setup = setup_utilization_wa }, + }; + struct bo_setup_state state = { + .lrc = lrc, + .hwe = hwe, + .max_size = LRC_WA_BB_SIZE, + .reserve_dw = 1, + .offset = __xe_lrc_wa_bb_offset(lrc), + .funcs = funcs, + .num_funcs = ARRAY_SIZE(funcs), + }; + int ret; + + ret = setup_bo(&state); + if (ret) + return ret; + + *state.ptr++ = MI_BATCH_BUFFER_END; + state.written++; + + finish_bo(&state); + + xe_lrc_write_ctx_reg(lrc, CTX_BB_PER_CTX_PTR, + xe_bo_ggtt_addr(lrc->bo) + state.offset + 1); + + return 0; +} + +static int +setup_indirect_ctx(struct xe_lrc *lrc, struct xe_hw_engine *hwe) +{ + static struct bo_setup rcs_funcs[] = { + }; + struct bo_setup_state state = { + .lrc = lrc, + .hwe = hwe, + .max_size = (63 * 64) /* max 63 cachelines */, + .offset = __xe_lrc_indirect_ctx_offset(lrc), + }; + int ret; + + if (!(lrc->flags & XE_LRC_FLAG_INDIRECT_CTX)) + return 0; + + if (hwe->class == XE_ENGINE_CLASS_RENDER || + hwe->class == XE_ENGINE_CLASS_COMPUTE) { + state.funcs = rcs_funcs; + state.num_funcs = ARRAY_SIZE(rcs_funcs); + } + + if (xe_gt_WARN_ON(lrc->gt, !state.funcs)) + return 0; + + ret = setup_bo(&state); + if (ret) + return ret; + + /* + * Align to 64B cacheline so there's no garbage at the end for CS to + * execute: size for indirect ctx must be a multiple of 64. + */ + while (state.written & 0xf) { + *state.ptr++ = MI_NOOP; + state.written++; + } + + finish_bo(&state); + + xe_lrc_write_ctx_reg(lrc, + CTX_CS_INDIRECT_CTX, + (xe_bo_ggtt_addr(lrc->bo) + state.offset) | + /* Size in CLs. 
*/ + (state.written * sizeof(u32) / 64)); + xe_lrc_write_ctx_reg(lrc, + CTX_CS_INDIRECT_CTX_OFFSET, + CTX_INDIRECT_CTX_OFFSET_DEFAULT); + + return 0; +} static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, struct xe_vm *vm, u32 ring_size, u16 msix_vec, u32 init_flags) { struct xe_gt *gt = hwe->gt; + const u32 lrc_size = xe_gt_lrc_size(gt, hwe->class); + u32 bo_size = ring_size + lrc_size + LRC_WA_BB_SIZE; struct xe_tile *tile = gt_to_tile(gt); struct xe_device *xe = gt_to_xe(gt); struct iosys_map map; - void *init_data = NULL; u32 arb_enable; - u32 lrc_size; u32 bo_flags; int err; kref_init(&lrc->refcount); lrc->gt = gt; + lrc->size = lrc_size; lrc->flags = 0; - lrc_size = ring_size + xe_gt_lrc_size(gt, hwe->class); + lrc->ring.size = ring_size; + lrc->ring.tail = 0; + + if (gt_engine_needs_indirect_ctx(gt, hwe->class)) { + lrc->flags |= XE_LRC_FLAG_INDIRECT_CTX; + bo_size += LRC_INDIRECT_CTX_BO_SIZE; + } + if (xe_gt_has_indirect_ring_state(gt)) lrc->flags |= XE_LRC_FLAG_INDIRECT_RING_STATE; @@ -1061,45 +1203,36 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, if (vm && vm->xef) /* userspace */ bo_flags |= XE_BO_FLAG_PINNED_LATE_RESTORE; - /* - * FIXME: Perma-pinning LRC as we don't yet support moving GGTT address - * via VM bind calls. - */ - lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, - lrc_size + LRC_WA_BB_SIZE, + lrc->bo = xe_bo_create_pin_map(xe, tile, NULL, bo_size, ttm_bo_type_kernel, bo_flags); if (IS_ERR(lrc->bo)) return PTR_ERR(lrc->bo); - lrc->size = lrc_size; - lrc->ring.size = ring_size; - lrc->ring.tail = 0; - xe_hw_fence_ctx_init(&lrc->fence_ctx, hwe->gt, hwe->fence_irq, hwe->name); - if (!gt->default_lrc[hwe->class]) { - init_data = empty_lrc_data(hwe); - if (!init_data) { - err = -ENOMEM; - goto err_lrc_finish; - } - } - /* * Init Per-Process of HW status Page, LRC / context state to known - * values + * values. If there's already a primed default_lrc, just copy it, otherwise + * it's the early submission to record the lrc: build a new empty one from + * scratch. 
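/*
 * The indirect-context setup above encodes the buffer size in 64-byte
 * cachelines into the low bits of the GGTT address written to
 * CTX_CS_INDIRECT_CTX, which is why the writer pads with MI_NOOP to a
 * 16-dword boundary first. A minimal sketch of the arithmetic (assumed
 * 4-byte dwords, 64-byte cachelines):
 */
#include <assert.h>
#include <stdint.h>

#define DW_PER_CL 16u	/* 64-byte cacheline / sizeof(u32) */

static uint32_t pad_to_cacheline(uint32_t written)
{
	while (written & (DW_PER_CL - 1))
		written++;	/* one MI_NOOP per padding dword */
	return written;
}

static uint32_t size_in_cachelines(uint32_t written)
{
	assert(!(written & (DW_PER_CL - 1)));
	return written * 4 / 64;
}

int main(void)
{
	assert(pad_to_cacheline(10) == 16);
	assert(size_in_cachelines(16) == 1);	/* value ORed into the register */
	return 0;
}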
*/ map = __xe_lrc_pphwsp_map(lrc); - if (!init_data) { + if (gt->default_lrc[hwe->class]) { xe_map_memset(xe, &map, 0, 0, LRC_PPHWSP_SIZE); /* PPHWSP */ xe_map_memcpy_to(xe, &map, LRC_PPHWSP_SIZE, gt->default_lrc[hwe->class] + LRC_PPHWSP_SIZE, - xe_gt_lrc_size(gt, hwe->class) - LRC_PPHWSP_SIZE); + lrc_size - LRC_PPHWSP_SIZE); } else { - xe_map_memcpy_to(xe, &map, 0, init_data, - xe_gt_lrc_size(gt, hwe->class)); + void *init_data = empty_lrc_data(hwe); + + if (!init_data) { + err = -ENOMEM; + goto err_lrc_finish; + } + + xe_map_memcpy_to(xe, &map, 0, init_data, lrc_size); kfree(init_data); } @@ -1153,7 +1286,7 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP_UDW, 0); if (xe->info.has_asid && vm) - xe_lrc_write_ctx_reg(lrc, PVC_CTX_ASID, vm->usm.asid); + xe_lrc_write_ctx_reg(lrc, CTX_ASID, vm->usm.asid); lrc->desc = LRC_VALID; lrc->desc |= FIELD_PREP(LRC_ADDRESSING_MODE, LRC_LEGACY_64B_CONTEXT); @@ -1183,6 +1316,10 @@ static int xe_lrc_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, if (err) goto err_lrc_finish; + err = setup_indirect_ctx(lrc, hwe); + if (err) + goto err_lrc_finish; + return 0; err_lrc_finish: @@ -1775,7 +1912,7 @@ static const struct instr_state xe_hpg_svg_state[] = { { .instr = CMD_3DSTATE_DRAWING_RECTANGLE, .num_dw = 4 }, }; -void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb) +u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs) { struct xe_gt *gt = q->hwe->gt; struct xe_device *xe = gt_to_xe(gt); @@ -1810,7 +1947,7 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b if (!state_table) { xe_gt_dbg(gt, "No non-register state to emit on graphics ver %d.%02d\n", GRAPHICS_VER(xe), GRAPHICS_VERx100(xe) % 100); - return; + return cs; } for (int i = 0; i < state_table_size; i++) { @@ -1833,12 +1970,14 @@ void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *b instr == CMD_3DSTATE_DRAWING_RECTANGLE) instr = CMD_3DSTATE_DRAWING_RECTANGLE_FAST; - bb->cs[bb->len] = instr; + *cs = instr; if (!is_single_dw) - bb->cs[bb->len] |= (num_dw - 2); + *cs |= (num_dw - 2); - bb->len += num_dw; + cs += num_dw; } + + return cs; } struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc) @@ -1859,8 +1998,7 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc) snapshot->seqno = xe_lrc_seqno(lrc); snapshot->lrc_bo = xe_bo_get(lrc->bo); snapshot->lrc_offset = xe_lrc_pphwsp_offset(lrc); - snapshot->lrc_size = lrc->bo->size - snapshot->lrc_offset - - LRC_WA_BB_SIZE; + snapshot->lrc_size = lrc->size; snapshot->lrc_snapshot = NULL; snapshot->ctx_timestamp = lower_32_bits(xe_lrc_ctx_timestamp(lrc)); snapshot->ctx_job_timestamp = xe_lrc_ctx_job_timestamp(lrc); diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h index eb6e8de8c939..b6c8053c581b 100644 --- a/drivers/gpu/drm/xe/xe_lrc.h +++ b/drivers/gpu/drm/xe/xe_lrc.h @@ -112,7 +112,7 @@ void xe_lrc_dump_default(struct drm_printer *p, struct xe_gt *gt, enum xe_engine_class); -void xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, struct xe_bb *bb); +u32 *xe_lrc_emit_hwe_state_instructions(struct xe_exec_queue *q, u32 *cs); struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc); void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot); diff --git a/drivers/gpu/drm/xe/xe_lrc_types.h b/drivers/gpu/drm/xe/xe_lrc_types.h index 883e550a9423..e9883706e004 100644 --- a/drivers/gpu/drm/xe/xe_lrc_types.h +++ 
b/drivers/gpu/drm/xe/xe_lrc_types.h @@ -22,14 +22,15 @@ struct xe_lrc { */ struct xe_bo *bo; - /** @size: size of lrc including any indirect ring state page */ + /** @size: size of the lrc and optional indirect ring state */ u32 size; /** @gt: gt which this LRC belongs to */ struct xe_gt *gt; /** @flags: LRC flags */ -#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x1 +#define XE_LRC_FLAG_INDIRECT_CTX 0x1 +#define XE_LRC_FLAG_INDIRECT_RING_STATE 0x2 u32 flags; /** @refcount: ref count of this lrc */ diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 8f8e9fdfb2a8..84f412fd3c5d 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -82,7 +82,7 @@ struct xe_migrate { * of the instruction. Subtracting the instruction header (1 dword) and * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values. */ -#define MAX_PTE_PER_SDI 0x1FE +#define MAX_PTE_PER_SDI 0x1FEU /** * xe_tile_migrate_exec_queue() - Get this tile's migrate exec queue. @@ -203,7 +203,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1)); /* Need to be sure everything fits in the first PT, or create more */ - xe_tile_assert(tile, m->batch_base_ofs + batch->size < SZ_2M); + xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M); bo = xe_bo_create_pin_map(vm->xe, tile, vm, num_entries * XE_PAGE_SIZE, @@ -214,7 +214,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, return PTR_ERR(bo); /* PT30 & PT31 reserved for 2M identity map */ - pt29_ofs = bo->size - 3 * XE_PAGE_SIZE; + pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE; entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index); xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry); @@ -236,7 +236,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, if (!IS_DGFX(xe)) { /* Write out batch too */ m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE; - for (i = 0; i < batch->size; + for (i = 0; i < xe_bo_size(batch); i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE : XE_PAGE_SIZE) { entry = vm->pt_ops->pte_encode_bo(batch, i, @@ -247,13 +247,13 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, level++; } if (xe->info.has_usm) { - xe_tile_assert(tile, batch->size == SZ_1M); + xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M); batch = tile->primary_gt->usm.bb_pool->bo; m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M; - xe_tile_assert(tile, batch->size == SZ_512K); + xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K); - for (i = 0; i < batch->size; + for (i = 0; i < xe_bo_size(batch); i += vm->flags & XE_VM_FLAG_64K ? 
XE_64K_PAGE_SIZE : XE_PAGE_SIZE) { entry = vm->pt_ops->pte_encode_bo(batch, i, @@ -306,7 +306,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, /* Identity map the entire vram at 256GiB offset */ if (IS_DGFX(xe)) { - u64 pt30_ofs = bo->size - 2 * XE_PAGE_SIZE; + u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE; xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET, pat_index, pt30_ofs); @@ -321,7 +321,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION]; u64 vram_offset = IDENTITY_OFFSET + DIV_ROUND_UP_ULL(xe->mem.vram.actual_physical_size, SZ_1G); - u64 pt31_ofs = bo->size - XE_PAGE_SIZE; + u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE; xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE - IDENTITY_OFFSET - IDENTITY_OFFSET / 2) * SZ_1G); @@ -408,7 +408,7 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile) /* Special layout, prepared below.. */ vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION | - XE_VM_FLAG_SET_TILE_ID(tile)); + XE_VM_FLAG_SET_TILE_ID(tile), NULL); if (IS_ERR(vm)) return ERR_CAST(vm); @@ -768,7 +768,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, struct xe_gt *gt = m->tile->primary_gt; struct xe_device *xe = gt_to_xe(gt); struct dma_fence *fence = NULL; - u64 size = src_bo->size; + u64 size = xe_bo_size(src_bo); struct xe_res_cursor src_it, dst_it, ccs_it; u64 src_L0_ofs, dst_L0_ofs; u32 src_L0_pt, dst_L0_pt; @@ -791,7 +791,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, if (XE_WARN_ON(copy_ccs && src_bo != dst_bo)) return ERR_PTR(-EINVAL); - if (src_bo != dst_bo && XE_WARN_ON(src_bo->size != dst_bo->size)) + if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo))) return ERR_PTR(-EINVAL); if (!src_is_vram) @@ -863,7 +863,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it)) xe_res_next(&src_it, src_L0); else - emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs, + emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat, &src_it, src_L0, src); if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it)) @@ -1064,7 +1064,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, struct xe_device *xe = gt_to_xe(gt); bool clear_only_system_ccs = false; struct dma_fence *fence = NULL; - u64 size = bo->size; + u64 size = xe_bo_size(bo); struct xe_res_cursor src_it; struct ttm_resource *src = dst; int err; @@ -1076,9 +1076,9 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m, clear_only_system_ccs = true; if (!clear_vram) - xe_res_first_sg(xe_bo_sg(bo), 0, bo->size, &src_it); + xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it); else - xe_res_first(src, 0, bo->size, &src_it); + xe_res_first(src, 0, xe_bo_size(bo), &src_it); while (size) { u64 clear_L0_ofs; @@ -1407,7 +1407,7 @@ __xe_migrate_update_pgtables(struct xe_migrate *m, if (idx == chunk) goto next_cmd; - xe_tile_assert(tile, pt_bo->size == SZ_4K); + xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K); /* Map a PT at most once */ if (pt_bo->update_index < 0) @@ -1553,15 +1553,17 @@ static u32 pte_update_cmd_size(u64 size) u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE); XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER); + /* * MI_STORE_DATA_IMM command is used to update page table. Each - * instruction can update maximumly 0x1ff pte entries. 
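/*
 * Worked example of the MI_STORE_DATA_IMM budget described in this comment,
 * using the MAX_PTE_PER_SDI (0x1FE) limit defined above. A 2 MiB transfer
 * with 4 KiB pages needs 512 PTEs, i.e. two chunks of 3 header dwords each
 * plus 2 dwords per PTE (a sketch, not part of the patch):
 */
#include <stdint.h>
#include <stdio.h>

#define XE_PAGE_SIZE	4096u
#define MAX_PTE_PER_SDI	0x1FEu

static uint64_t pte_update_dwords(uint64_t size)
{
	uint64_t entries = (size + XE_PAGE_SIZE - 1) / XE_PAGE_SIZE;
	uint64_t chunks = (entries + MAX_PTE_PER_SDI - 1) / MAX_PTE_PER_SDI;

	return (1 + 2) * chunks + entries * 2;
}

int main(void)
{
	/* 512 PTEs -> 2 chunks -> 6 + 1024 = 1030 dwords */
	printf("%llu\n", (unsigned long long)pte_update_dwords(2u << 20));
	return 0;
}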
To update - * n (n <= 0x1ff) pte entries, we need: - * 1 dword for the MI_STORE_DATA_IMM command header (opcode etc) - * 2 dword for the page table's physical location - * 2*n dword for value of pte to fill (each pte entry is 2 dwords) + * instruction can update at most MAX_PTE_PER_SDI pte entries. To + * update n (n <= MAX_PTE_PER_SDI) pte entries, we need: + * + * - 1 dword for the MI_STORE_DATA_IMM command header (opcode etc) + * - 2 dword for the page table's physical location + * - 2*n dword for value of pte to fill (each pte entry is 2 dwords) */ - num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, 0x1ff); + num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, MAX_PTE_PER_SDI); num_dword += entries * 2; return num_dword; @@ -1577,7 +1579,7 @@ static void build_pt_update_batch_sram(struct xe_migrate *m, ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE); while (ptes) { - u32 chunk = min(0x1ffU, ptes); + u32 chunk = min(MAX_PTE_PER_SDI, ptes); bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); bb->cs[bb->len++] = pt_offset; @@ -1815,18 +1817,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, xe_bo_assert_held(bo); /* Use bounce buffer for small access and unaligned access */ - if (len & XE_CACHELINE_MASK || - ((uintptr_t)buf | offset) & XE_CACHELINE_MASK) { + if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) || + !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) { int buf_offset = 0; + void *bounce; + int err; + + BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES)); + bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL); + if (!bounce) + return -ENOMEM; /* * Less than ideal for large unaligned access but this should be * fairly rare, can fixup if this becomes common. */ do { - u8 bounce[XE_CACHELINE_BYTES]; - void *ptr = (void *)bounce; - int err; int copy_bytes = min_t(int, bytes_left, XE_CACHELINE_BYTES - (offset & XE_CACHELINE_MASK)); @@ -1835,22 +1841,22 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, err = xe_migrate_access_memory(m, bo, offset & ~XE_CACHELINE_MASK, - (void *)ptr, - sizeof(bounce), 0); + bounce, + XE_CACHELINE_BYTES, 0); if (err) - return err; + break; if (write) { - memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes); + memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes); err = xe_migrate_access_memory(m, bo, offset & ~XE_CACHELINE_MASK, - (void *)ptr, - sizeof(bounce), 0); + bounce, + XE_CACHELINE_BYTES, write); if (err) - return err; + break; } else { - memcpy(buf + buf_offset, ptr + ptr_offset, + memcpy(buf + buf_offset, bounce + ptr_offset, copy_bytes); } @@ -1859,14 +1865,15 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, offset += copy_bytes; } while (bytes_left); - return 0; + kfree(bounce); + return err; } dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write); if (IS_ERR(dma_addr)) return PTR_ERR(dma_addr); - xe_res_first(bo->ttm.resource, offset, bo->size - offset, &cursor); + xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor); do { struct dma_fence *__fence; @@ -1880,8 +1887,11 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, else current_bytes = min_t(int, bytes_left, cursor.size); - if (fence) - dma_fence_put(fence); + if (current_bytes & ~PAGE_MASK) { + int pitch = 4; + + current_bytes = min_t(int, current_bytes, S16_MAX * pitch); + } __fence = xe_migrate_vram(m, current_bytes, (unsigned long)buf & ~PAGE_MASK, @@ -1890,11 +1900,15 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, XE_MIGRATE_COPY_TO_VRAM
: XE_MIGRATE_COPY_TO_SRAM); if (IS_ERR(__fence)) { - if (fence) + if (fence) { dma_fence_wait(fence, false); + dma_fence_put(fence); + } fence = __fence; goto out_err; } + + dma_fence_put(fence); fence = __fence; buf += current_bytes; diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c index 7357458bc0d2..e4db8d58ea2d 100644 --- a/drivers/gpu/drm/xe/xe_mmio.c +++ b/drivers/gpu/drm/xe/xe_mmio.c @@ -22,6 +22,9 @@ #include "xe_macros.h" #include "xe_sriov.h" #include "xe_trace.h" +#include "xe_wa.h" + +#include "generated/xe_device_wa_oob.h" static void tiles_fini(void *arg) { @@ -55,6 +58,7 @@ static void tiles_fini(void *arg) static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size) { struct xe_tile *tile; + struct xe_gt *gt; u8 id; /* @@ -67,7 +71,7 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size) /* Possibly override number of tile based on configuration register */ if (!xe->info.skip_mtcfg) { struct xe_mmio *mmio = xe_root_tile_mmio(xe); - u8 tile_count; + u8 tile_count, gt_count; u32 mtcfg; /* @@ -84,12 +88,15 @@ static void mmio_multi_tile_setup(struct xe_device *xe, size_t tile_mmio_size) xe->info.tile_count = tile_count; /* - * FIXME: Needs some work for standalone media, but - * should be impossible with multi-tile for now: - * multi-tile platform with standalone media doesn't - * exist + * We've already setup gt_count according to the full + * tile count. Re-calculate it to only include the GTs + * that belong to the remaining tile(s). */ - xe->info.gt_count = xe->info.tile_count; + gt_count = 0; + for_each_gt(gt, xe, id) + if (gt->info.id < tile_count * xe->info.max_gt_per_tile) + gt_count++; + xe->info.gt_count = gt_count; } } @@ -163,7 +170,7 @@ static void mmio_flush_pending_writes(struct xe_mmio *mmio) #define DUMMY_REG_OFFSET 0x130030 int i; - if (mmio->tile->xe->info.platform != XE_LUNARLAKE) + if (!XE_DEVICE_WA(mmio->tile->xe, 15015404425)) return; /* 4 dummy writes */ @@ -176,7 +183,6 @@ u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg) u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); u8 val; - /* Wa_15015404425 */ mmio_flush_pending_writes(mmio); val = readb(mmio->regs + addr); @@ -190,7 +196,6 @@ u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg) u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); u16 val; - /* Wa_15015404425 */ mmio_flush_pending_writes(mmio); val = readw(mmio->regs + addr); @@ -217,7 +222,6 @@ u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg) u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr); u32 val; - /* Wa_15015404425 */ mmio_flush_pending_writes(mmio); if (!reg.vf && IS_SRIOV_VF(mmio->tile->xe)) diff --git a/drivers/gpu/drm/xe/xe_module.c b/drivers/gpu/drm/xe/xe_module.c index e332f3142435..d9391bd08194 100644 --- a/drivers/gpu/drm/xe/xe_module.c +++ b/drivers/gpu/drm/xe/xe_module.c @@ -19,31 +19,45 @@ #include "xe_sched_job.h" #if IS_ENABLED(CONFIG_DRM_XE_DEBUG) -#define DEFAULT_GUC_LOG_LEVEL 3 +#define DEFAULT_GUC_LOG_LEVEL 3 #else -#define DEFAULT_GUC_LOG_LEVEL 1 +#define DEFAULT_GUC_LOG_LEVEL 1 #endif +#define DEFAULT_PROBE_DISPLAY true +#define DEFAULT_VRAM_BAR_SIZE 0 +#define DEFAULT_FORCE_PROBE CONFIG_DRM_XE_FORCE_PROBE +#define DEFAULT_MAX_VFS ~0 +#define DEFAULT_MAX_VFS_STR "unlimited" +#define DEFAULT_WEDGED_MODE 1 +#define DEFAULT_SVM_NOTIFIER_SIZE 512 + struct xe_modparam xe_modparam = { - .probe_display = true, - .guc_log_level = DEFAULT_GUC_LOG_LEVEL, - .force_probe = CONFIG_DRM_XE_FORCE_PROBE, - .wedged_mode = 1, - .svm_notifier_size = 
512, + .probe_display = DEFAULT_PROBE_DISPLAY, + .guc_log_level = DEFAULT_GUC_LOG_LEVEL, + .force_probe = DEFAULT_FORCE_PROBE, +#ifdef CONFIG_PCI_IOV + .max_vfs = DEFAULT_MAX_VFS, +#endif + .wedged_mode = DEFAULT_WEDGED_MODE, + .svm_notifier_size = DEFAULT_SVM_NOTIFIER_SIZE, /* the rest are 0 by default */ }; module_param_named(svm_notifier_size, xe_modparam.svm_notifier_size, uint, 0600); -MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size(in MiB), must be power of 2"); +MODULE_PARM_DESC(svm_notifier_size, "Set the svm notifier size in MiB, must be power of 2 " + "[default=" __stringify(DEFAULT_SVM_NOTIFIER_SIZE) "]"); module_param_named_unsafe(force_execlist, xe_modparam.force_execlist, bool, 0444); MODULE_PARM_DESC(force_execlist, "Force Execlist submission"); module_param_named(probe_display, xe_modparam.probe_display, bool, 0444); -MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched (default: true)"); +MODULE_PARM_DESC(probe_display, "Probe display HW, otherwise it's left untouched " + "[default=" __stringify(DEFAULT_PROBE_DISPLAY) "])"); module_param_named(vram_bar_size, xe_modparam.force_vram_bar_size, int, 0600); -MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size (in MiB) - <0=disable-resize, 0=max-needed-size[default], >0=force-size"); +MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size in MiB (<0=disable-resize, 0=max-needed-size, >0=force-size " + "[default=" __stringify(DEFAULT_VRAM_BAR_SIZE) "])"); module_param_named(guc_log_level, xe_modparam.guc_log_level, int, 0600); MODULE_PARM_DESC(guc_log_level, "GuC firmware logging level (0=disable, 1=normal, 2..5=verbose-levels " @@ -63,18 +77,21 @@ MODULE_PARM_DESC(gsc_firmware_path, module_param_named_unsafe(force_probe, xe_modparam.force_probe, charp, 0400); MODULE_PARM_DESC(force_probe, - "Force probe options for specified devices. See CONFIG_DRM_XE_FORCE_PROBE for details."); + "Force probe options for specified devices. See CONFIG_DRM_XE_FORCE_PROBE for details " + "[default=" DEFAULT_FORCE_PROBE "])"); #ifdef CONFIG_PCI_IOV module_param_named(max_vfs, xe_modparam.max_vfs, uint, 0400); MODULE_PARM_DESC(max_vfs, "Limit number of Virtual Functions (VFs) that could be managed. " - "(0 = no VFs [default]; N = allow up to N VFs)"); + "(0=no VFs; N=allow up to N VFs " + "[default=" DEFAULT_MAX_VFS_STR "])"); #endif module_param_named_unsafe(wedged_mode, xe_modparam.wedged_mode, int, 0600); MODULE_PARM_DESC(wedged_mode, - "Module's default policy for the wedged mode - 0=never, 1=upon-critical-errors[default], 2=upon-any-hang"); + "Module's default policy for the wedged mode (0=never, 1=upon-critical-errors, 2=upon-any-hang " + "[default=" __stringify(DEFAULT_WEDGED_MODE) "])"); static int xe_check_nomodeset(void) { diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c new file mode 100644 index 000000000000..61b0a1531a53 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_nvm.c @@ -0,0 +1,167 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright(c) 2019-2025, Intel Corporation. All rights reserved. 
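/*
 * The MODULE_PARM_DESC strings above splice each default into the help text
 * at compile time via __stringify(), mirroring the kernel's
 * include/linux/stringify.h two-level expansion. Stand-alone illustration:
 */
#include <stdio.h>

#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define DEFAULT_WEDGED_MODE	1

int main(void)
{
	/* The extra level stringifies the macro's value, not its name. */
	puts("wedged mode [default=" __stringify(DEFAULT_WEDGED_MODE) "]");
	return 0;
}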
+ */ + +#include <linux/intel_dg_nvm_aux.h> +#include <linux/pci.h> + +#include "xe_device.h" +#include "xe_device_types.h" +#include "xe_mmio.h" +#include "xe_nvm.h" +#include "regs/xe_gsc_regs.h" +#include "xe_sriov.h" + +#define GEN12_GUNIT_NVM_BASE 0x00102040 +#define GEN12_DEBUG_NVM_BASE 0x00101018 + +#define GEN12_CNTL_PROTECTED_NVM_REG 0x0010100C + +#define GEN12_GUNIT_NVM_SIZE 0x80 +#define GEN12_DEBUG_NVM_SIZE 0x4 + +#define NVM_NON_POSTED_ERASE_CHICKEN_BIT BIT(13) + +#define HECI_FW_STATUS_2_NVM_ACCESS_MODE BIT(3) + +static const struct intel_dg_nvm_region regions[INTEL_DG_NVM_REGIONS] = { + [0] = { .name = "DESCRIPTOR", }, + [2] = { .name = "GSC", }, + [9] = { .name = "PADDING", }, + [11] = { .name = "OptionROM", }, + [12] = { .name = "DAM", }, +}; + +static void xe_nvm_release_dev(struct device *dev) +{ +} + +static bool xe_nvm_non_posted_erase(struct xe_device *xe) +{ + struct xe_gt *gt = xe_root_mmio_gt(xe); + + if (xe->info.platform != XE_BATTLEMAGE) + return false; + return !(xe_mmio_read32(&gt->mmio, XE_REG(GEN12_CNTL_PROTECTED_NVM_REG)) & + NVM_NON_POSTED_ERASE_CHICKEN_BIT); +} + +static bool xe_nvm_writable_override(struct xe_device *xe) +{ + struct xe_gt *gt = xe_root_mmio_gt(xe); + bool writable_override; + resource_size_t base; + + switch (xe->info.platform) { + case XE_BATTLEMAGE: + base = DG2_GSC_HECI2_BASE; + break; + case XE_PVC: + base = PVC_GSC_HECI2_BASE; + break; + case XE_DG2: + base = DG2_GSC_HECI2_BASE; + break; + case XE_DG1: + base = DG1_GSC_HECI2_BASE; + break; + default: + drm_err(&xe->drm, "Unknown platform\n"); + return true; + } + + writable_override = + !(xe_mmio_read32(&gt->mmio, HECI_FWSTS2(base)) & + HECI_FW_STATUS_2_NVM_ACCESS_MODE); + if (writable_override) + drm_info(&xe->drm, "NVM access overridden by jumper\n"); + return writable_override; +} + +int xe_nvm_init(struct xe_device *xe) +{ + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + struct auxiliary_device *aux_dev; + struct intel_dg_nvm_dev *nvm; + int ret; + + if (!xe->info.has_gsc_nvm) + return 0; + + /* No access to internal NVM from VFs */ + if (IS_SRIOV_VF(xe)) + return 0; + + /* Nvm pointer should be NULL here */ + if (WARN_ON(xe->nvm)) + return -EFAULT; + + xe->nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); + if (!xe->nvm) + return -ENOMEM; + + nvm = xe->nvm; + + nvm->writable_override = xe_nvm_writable_override(xe); + nvm->non_posted_erase = xe_nvm_non_posted_erase(xe); + nvm->bar.parent = &pdev->resource[0]; + nvm->bar.start = GEN12_GUNIT_NVM_BASE + pdev->resource[0].start; + nvm->bar.end = nvm->bar.start + GEN12_GUNIT_NVM_SIZE - 1; + nvm->bar.flags = IORESOURCE_MEM; + nvm->bar.desc = IORES_DESC_NONE; + nvm->regions = regions; + + nvm->bar2.parent = &pdev->resource[0]; + nvm->bar2.start = GEN12_DEBUG_NVM_BASE + pdev->resource[0].start; + nvm->bar2.end = nvm->bar2.start + GEN12_DEBUG_NVM_SIZE - 1; + nvm->bar2.flags = IORESOURCE_MEM; + nvm->bar2.desc = IORES_DESC_NONE; + + aux_dev = &nvm->aux_dev; + + aux_dev->name = "nvm"; + aux_dev->id = (pci_domain_nr(pdev->bus) << 16) | pci_dev_id(pdev); + aux_dev->dev.parent = &pdev->dev; + aux_dev->dev.release = xe_nvm_release_dev; + + ret = auxiliary_device_init(aux_dev); + if (ret) { + drm_err(&xe->drm, "xe-nvm aux init failed %d\n", ret); + goto err; + } + + ret = auxiliary_device_add(aux_dev); + if (ret) { + drm_err(&xe->drm, "xe-nvm aux add failed %d\n", ret); + auxiliary_device_uninit(aux_dev); + goto err; + } + return 0; + +err: + kfree(nvm); + xe->nvm = NULL; + return ret; +} + +void xe_nvm_fini(struct xe_device *xe) +{ + struct
intel_dg_nvm_dev *nvm = xe->nvm; + + if (!xe->info.has_gsc_nvm) + return; + + /* No access to internal NVM from VFs */ + if (IS_SRIOV_VF(xe)) + return; + + /* Nvm pointer should not be NULL here */ + if (WARN_ON(!nvm)) + return; + + auxiliary_device_delete(&nvm->aux_dev); + auxiliary_device_uninit(&nvm->aux_dev); + kfree(nvm); + xe->nvm = NULL; +} diff --git a/drivers/gpu/drm/xe/xe_nvm.h b/drivers/gpu/drm/xe/xe_nvm.h new file mode 100644 index 000000000000..7f3d5f57bed0 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_nvm.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright(c) 2019-2025 Intel Corporation. All rights reserved. + */ + +#ifndef __XE_NVM_H__ +#define __XE_NVM_H__ + +struct xe_device; + +int xe_nvm_init(struct xe_device *xe); + +void xe_nvm_fini(struct xe_device *xe); + +#endif diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c index 4829ed46a8b4..5729e7d3e335 100644 --- a/drivers/gpu/drm/xe/xe_oa.c +++ b/drivers/gpu/drm/xe/xe_oa.c @@ -403,7 +403,7 @@ static int xe_oa_append_reports(struct xe_oa_stream *stream, char __user *buf, static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream) { u32 gtt_offset = xe_bo_ggtt_addr(stream->oa_buffer.bo); - int size_exponent = __ffs(stream->oa_buffer.bo->size); + int size_exponent = __ffs(xe_bo_size(stream->oa_buffer.bo)); u32 oa_buf = gtt_offset | OAG_OABUFFER_MEMORY_SELECT; struct xe_mmio *mmio = &stream->gt->mmio; unsigned long flags; @@ -435,7 +435,7 @@ static void xe_oa_init_oa_buffer(struct xe_oa_stream *stream) spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); /* Zero out the OA buffer since we rely on zero report id and timestamp fields */ - memset(stream->oa_buffer.vaddr, 0, stream->oa_buffer.bo->size); + memset(stream->oa_buffer.vaddr, 0, xe_bo_size(stream->oa_buffer.bo)); } static u32 __format_to_oactrl(const struct xe_oa_format *format, int counter_sel_mask) @@ -1065,7 +1065,7 @@ static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream) static u32 oag_buf_size_select(const struct xe_oa_stream *stream) { return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT, - stream->oa_buffer.bo->size > SZ_16M ? + xe_bo_size(stream->oa_buffer.bo) > SZ_16M ? 
OAG_OA_DEBUG_BUF_SIZE_SELECT : 0); } @@ -1582,7 +1582,7 @@ static long xe_oa_status_locked(struct xe_oa_stream *stream, unsigned long arg) static long xe_oa_info_locked(struct xe_oa_stream *stream, unsigned long arg) { - struct drm_xe_oa_stream_info info = { .oa_buf_size = stream->oa_buffer.bo->size, }; + struct drm_xe_oa_stream_info info = { .oa_buf_size = xe_bo_size(stream->oa_buffer.bo), }; void __user *uaddr = (void __user *)arg; if (copy_to_user(uaddr, &info, sizeof(info))) @@ -1668,7 +1668,7 @@ static int xe_oa_mmap(struct file *file, struct vm_area_struct *vma) } /* Can mmap the entire OA buffer or nothing (no partial OA buffer mmaps) */ - if (vma->vm_end - vma->vm_start != stream->oa_buffer.bo->size) { + if (vma->vm_end - vma->vm_start != xe_bo_size(stream->oa_buffer.bo)) { drm_dbg(&stream->oa->xe->drm, "Wrong mmap size, must be OA buffer size\n"); return -EINVAL; } @@ -1941,7 +1941,7 @@ static int xe_oa_assign_hwe(struct xe_oa *oa, struct xe_oa_open_param *param) /* If not provided, OA unit defaults to OA unit 0 as per uapi */ if (!param->oa_unit) - param->oa_unit = &xe_device_get_gt(oa->xe, 0)->oa.oa_unit[0]; + param->oa_unit = &xe_root_mmio_gt(oa->xe)->oa.oa_unit[0]; /* When we have an exec_q, get hwe from the exec_q */ if (param->exec_q) { @@ -2493,7 +2493,7 @@ int xe_oa_register(struct xe_device *xe) static u32 num_oa_units_per_gt(struct xe_gt *gt) { - if (!xe_gt_is_media_type(gt) || GRAPHICS_VER(gt_to_xe(gt)) < 20) + if (xe_gt_is_main_type(gt) || GRAPHICS_VER(gt_to_xe(gt)) < 20) return 1; else if (!IS_DGFX(gt_to_xe(gt))) return XE_OAM_UNIT_SCMI_0 + 1; /* SAG + SCMI_0 */ @@ -2506,7 +2506,7 @@ static u32 __hwe_oam_unit(struct xe_hw_engine *hwe) if (GRAPHICS_VERx100(gt_to_xe(hwe->gt)) < 1270) return XE_OA_UNIT_INVALID; - xe_gt_WARN_ON(hwe->gt, !xe_gt_is_media_type(hwe->gt)); + xe_gt_WARN_ON(hwe->gt, xe_gt_is_main_type(hwe->gt)); if (GRAPHICS_VER(gt_to_xe(hwe->gt)) < 20) return 0; @@ -2589,7 +2589,7 @@ static void __xe_oa_init_oa_units(struct xe_gt *gt) for (i = 0; i < num_units; i++) { struct xe_oa_unit *u = &gt->oa.oa_unit[i]; - if (!xe_gt_is_media_type(gt)) { + if (xe_gt_is_main_type(gt)) { u->regs = __oag_regs(); u->type = DRM_XE_OA_UNIT_TYPE_OAG; } else { diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 89814b32e585..3c40ef426f0c 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -38,43 +38,6 @@ enum toggle_d3cold { D3COLD_ENABLE, }; -struct xe_subplatform_desc { - enum xe_subplatform subplatform; - const char *name; - const u16 *pciidlist; -}; - -struct xe_device_desc { - /* Should only ever be set for platforms without GMD_ID */ - const struct xe_ip *pre_gmdid_graphics_ip; - /* Should only ever be set for platforms without GMD_ID */ - const struct xe_ip *pre_gmdid_media_ip; - - const char *platform_name; - const struct xe_subplatform_desc *subplatforms; - - enum xe_platform platform; - - u8 dma_mask_size; - u8 max_remote_tiles:2; - - u8 require_force_probe:1; - u8 is_dgfx:1; - - u8 has_display:1; - u8 has_fan_control:1; - u8 has_heci_gscfi:1; - u8 has_heci_cscfi:1; - u8 has_llc:1; - u8 has_mbx_power_limits:1; - u8 has_pxp:1; - u8 has_sriov:1; - u8 needs_scratch:1; - u8 skip_guc_pc:1; - u8 skip_mtcfg:1; - u8 skip_pcode:1; -}; - __diag_push(); __diag_ignore_all("-Woverride-init", "Allow field overrides in table"); @@ -140,7 +103,6 @@ static const struct xe_graphics_desc graphics_xelpg = { .has_asid = 1, \ .has_atomic_enable_pte_bit = 1, \ .has_flat_ccs = 1, \ - .has_indirect_ring_state = 1, \ .has_range_tlb_invalidation =
1, \ .has_usm = 1, \ .has_64bit_timestamp = 1, \ @@ -184,6 +146,7 @@ static const struct xe_ip graphics_ips[] = { { 2004, "Xe2_LPG", &graphics_xe2 }, { 3000, "Xe3_LPG", &graphics_xe2 }, { 3001, "Xe3_LPG", &graphics_xe2 }, + { 3003, "Xe3_LPG", &graphics_xe2 }, }; /* Pre-GMDID Media IPs */ @@ -196,6 +159,7 @@ static const struct xe_ip media_ips[] = { { 1301, "Xe2_HPM", &media_xelpmp }, { 2000, "Xe2_LPM", &media_xelpmp }, { 3000, "Xe3_LPM", &media_xelpmp }, + { 3002, "Xe3_LPM", &media_xelpmp }, }; static const struct xe_device_desc tgl_desc = { @@ -205,6 +169,7 @@ static const struct xe_device_desc tgl_desc = { .dma_mask_size = 39, .has_display = true, .has_llc = true, + .max_gt_per_tile = 1, .require_force_probe = true, }; @@ -215,6 +180,7 @@ static const struct xe_device_desc rkl_desc = { .dma_mask_size = 39, .has_display = true, .has_llc = true, + .max_gt_per_tile = 1, .require_force_probe = true, }; @@ -227,6 +193,7 @@ static const struct xe_device_desc adl_s_desc = { .dma_mask_size = 39, .has_display = true, .has_llc = true, + .max_gt_per_tile = 1, .require_force_probe = true, .subplatforms = (const struct xe_subplatform_desc[]) { { XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids }, @@ -243,6 +210,7 @@ static const struct xe_device_desc adl_p_desc = { .dma_mask_size = 39, .has_display = true, .has_llc = true, + .max_gt_per_tile = 1, .require_force_probe = true, .subplatforms = (const struct xe_subplatform_desc[]) { { XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids }, @@ -257,6 +225,7 @@ static const struct xe_device_desc adl_n_desc = { .dma_mask_size = 39, .has_display = true, .has_llc = true, + .max_gt_per_tile = 1, .require_force_probe = true, }; @@ -270,7 +239,9 @@ static const struct xe_device_desc dg1_desc = { PLATFORM(DG1), .dma_mask_size = 39, .has_display = true, + .has_gsc_nvm = 1, .has_heci_gscfi = 1, + .max_gt_per_tile = 1, .require_force_probe = true, }; @@ -281,6 +252,7 @@ static const u16 dg2_g12_ids[] = { INTEL_DG2_G12_IDS(NOP), 0 }; #define DG2_FEATURES \ DGFX_FEATURES, \ PLATFORM(DG2), \ + .has_gsc_nvm = 1, \ .has_heci_gscfi = 1, \ .subplatforms = (const struct xe_subplatform_desc[]) { \ { XE_SUBPLATFORM_DG2_G10, "G10", dg2_g10_ids }, \ @@ -293,6 +265,7 @@ static const struct xe_device_desc ats_m_desc = { .pre_gmdid_graphics_ip = &graphics_ip_xehpg, .pre_gmdid_media_ip = &media_ip_xehpm, .dma_mask_size = 46, + .max_gt_per_tile = 1, .require_force_probe = true, DG2_FEATURES, @@ -303,6 +276,7 @@ static const struct xe_device_desc dg2_desc = { .pre_gmdid_graphics_ip = &graphics_ip_xehpg, .pre_gmdid_media_ip = &media_ip_xehpm, .dma_mask_size = 46, + .max_gt_per_tile = 1, .require_force_probe = true, DG2_FEATURES, @@ -317,7 +291,9 @@ static const __maybe_unused struct xe_device_desc pvc_desc = { PLATFORM(PVC), .dma_mask_size = 52, .has_display = false, + .has_gsc_nvm = 1, .has_heci_gscfi = 1, + .max_gt_per_tile = 1, .max_remote_tiles = 1, .require_force_probe = true, .has_mbx_power_limits = false, @@ -330,6 +306,7 @@ static const struct xe_device_desc mtl_desc = { .dma_mask_size = 46, .has_display = true, .has_pxp = true, + .max_gt_per_tile = 2, }; static const struct xe_device_desc lnl_desc = { @@ -337,6 +314,7 @@ static const struct xe_device_desc lnl_desc = { .dma_mask_size = 46, .has_display = true, .has_pxp = true, + .max_gt_per_tile = 2, .needs_scratch = true, }; @@ -347,7 +325,10 @@ static const struct xe_device_desc bmg_desc = { .has_display = true, .has_fan_control = true, .has_mbx_power_limits = true, + .has_gsc_nvm = 1, .has_heci_cscfi = 1, + 
.has_sriov = true, + .max_gt_per_tile = 2, .needs_scratch = true, }; @@ -356,7 +337,7 @@ static const struct xe_device_desc ptl_desc = { .dma_mask_size = 46, .has_display = true, .has_sriov = true, - .require_force_probe = true, + .max_gt_per_tile = 2, .needs_scratch = true, }; @@ -590,6 +571,7 @@ static int xe_info_init_early(struct xe_device *xe, xe->info.is_dgfx = desc->is_dgfx; xe->info.has_fan_control = desc->has_fan_control; xe->info.has_mbx_power_limits = desc->has_mbx_power_limits; + xe->info.has_gsc_nvm = desc->has_gsc_nvm; xe->info.has_heci_gscfi = desc->has_heci_gscfi; xe->info.has_heci_cscfi = desc->has_heci_cscfi; xe->info.has_llc = desc->has_llc; @@ -603,6 +585,10 @@ static int xe_info_init_early(struct xe_device *xe, xe->info.probe_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) && xe_modparam.probe_display && desc->has_display; + + xe_assert(xe, desc->max_gt_per_tile > 0); + xe_assert(xe, desc->max_gt_per_tile <= XE_MAX_GT_PER_TILE); + xe->info.max_gt_per_tile = desc->max_gt_per_tile; xe->info.tile_count = 1 + desc->max_remote_tiles; err = xe_tile_init_early(xe_device_get_root_tile(xe), xe, 0); @@ -702,10 +688,11 @@ static int xe_info_init(struct xe_device *xe, */ for_each_tile(tile, xe, id) { gt = tile->primary_gt; - gt->info.id = xe->info.gt_count++; gt->info.type = XE_GT_TYPE_MAIN; + gt->info.id = tile->id * xe->info.max_gt_per_tile; gt->info.has_indirect_ring_state = graphics_desc->has_indirect_ring_state; gt->info.engine_mask = graphics_desc->hw_engine_mask; + xe->info.gt_count++; if (MEDIA_VER(xe) < 13 && media_desc) gt->info.engine_mask |= media_desc->hw_engine_mask; @@ -723,17 +710,10 @@ static int xe_info_init(struct xe_device *xe, gt = tile->media_gt; gt->info.type = XE_GT_TYPE_MEDIA; + gt->info.id = tile->id * xe->info.max_gt_per_tile + 1; gt->info.has_indirect_ring_state = media_desc->has_indirect_ring_state; gt->info.engine_mask = media_desc->hw_engine_mask; - - /* - * FIXME: At the moment multi-tile and standalone media are - * mutually exclusive on current platforms. We'll need to - * come up with a better way to number GTs if we ever wind - * up with platforms that support both together. 
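/*
 * With max_gt_per_tile fixed per platform, a GT id above is derived from
 * its tile instead of a running counter, so ids stay stable even when some
 * GTs are absent. Sketch of the mapping (illustrative helper, not part of
 * the patch):
 */
#include <assert.h>

/* gt_id = tile_id * max_gt_per_tile + (0 for primary, 1 for media) */
static unsigned int gt_id(unsigned int tile_id, unsigned int max_gt_per_tile,
			  int is_media)
{
	return tile_id * max_gt_per_tile + (is_media ? 1 : 0);
}

int main(void)
{
	/* e.g. a two-tile part with max_gt_per_tile == 2 */
	assert(gt_id(0, 2, 0) == 0);	/* tile 0, primary */
	assert(gt_id(0, 2, 1) == 1);	/* tile 0, media */
	assert(gt_id(1, 2, 0) == 2);	/* tile 1, primary */
	assert(gt_id(1, 2, 1) == 3);	/* tile 1, media */
	return 0;
}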
- */ - drm_WARN_ON(&xe->drm, id != 0); - gt->info.id = xe->info.gt_count++; + xe->info.gt_count++; } return 0; diff --git a/drivers/gpu/drm/xe/xe_pci_sriov.c b/drivers/gpu/drm/xe/xe_pci_sriov.c index 8813efdcafbb..af05db07162e 100644 --- a/drivers/gpu/drm/xe/xe_pci_sriov.c +++ b/drivers/gpu/drm/xe/xe_pci_sriov.c @@ -3,6 +3,10 @@ * Copyright © 2023-2024 Intel Corporation */ +#include <linux/bitops.h> +#include <linux/pci.h> + +#include "regs/xe_bars.h" #include "xe_assert.h" #include "xe_device.h" #include "xe_gt_sriov_pf_config.h" @@ -12,6 +16,7 @@ #include "xe_pci_sriov.h" #include "xe_pm.h" #include "xe_sriov.h" +#include "xe_sriov_pf.h" #include "xe_sriov_pf_helpers.h" #include "xe_sriov_printk.h" @@ -127,6 +132,18 @@ static void pf_engine_activity_stats(struct xe_device *xe, unsigned int num_vfs, } } +static int resize_vf_vram_bar(struct xe_device *xe, int num_vfs) +{ + struct pci_dev *pdev = to_pci_dev(xe->drm.dev); + u32 sizes; + + sizes = pci_iov_vf_bar_get_sizes(pdev, VF_LMEM_BAR, num_vfs); + if (!sizes) + return 0; + + return pci_iov_vf_bar_set_size(pdev, VF_LMEM_BAR, __fls(sizes)); +} + static int pf_enable_vfs(struct xe_device *xe, int num_vfs) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); @@ -138,6 +155,10 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs) xe_assert(xe, num_vfs <= total_vfs); xe_sriov_dbg(xe, "enabling %u VF%s\n", num_vfs, str_plural(num_vfs)); + err = xe_sriov_pf_wait_ready(xe); + if (err) + goto out; + /* * We must hold additional reference to the runtime PM to keep PF in D0 * during VFs lifetime, as our VFs do not implement the PM capability. @@ -153,6 +174,12 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs) if (err < 0) goto failed; + if (IS_DGFX(xe)) { + err = resize_vf_vram_bar(xe, num_vfs); + if (err) + xe_sriov_info(xe, "Failed to set VF LMEM BAR size: %d\n", err); + } + err = pci_enable_sriov(pdev, num_vfs); if (err < 0) goto failed; @@ -169,7 +196,7 @@ static int pf_enable_vfs(struct xe_device *xe, int num_vfs) failed: pf_unprovision_vfs(xe, num_vfs); xe_pm_runtime_put(xe); - +out: xe_sriov_notice(xe, "Failed to enable %u VF%s (%pe)\n", num_vfs, str_plural(num_vfs), ERR_PTR(err)); return err; diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h index ca6b10d35573..4de6f69ed975 100644 --- a/drivers/gpu/drm/xe/xe_pci_types.h +++ b/drivers/gpu/drm/xe/xe_pci_types.h @@ -8,6 +8,47 @@ #include <linux/types.h> +#include "xe_platform_types.h" + +struct xe_subplatform_desc { + enum xe_subplatform subplatform; + const char *name; + const u16 *pciidlist; +}; + +struct xe_device_desc { + /* Should only ever be set for platforms without GMD_ID */ + const struct xe_ip *pre_gmdid_graphics_ip; + /* Should only ever be set for platforms without GMD_ID */ + const struct xe_ip *pre_gmdid_media_ip; + + const char *platform_name; + const struct xe_subplatform_desc *subplatforms; + + enum xe_platform platform; + + u8 dma_mask_size; + u8 max_remote_tiles:2; + u8 max_gt_per_tile:2; + + u8 require_force_probe:1; + u8 is_dgfx:1; + + u8 has_display:1; + u8 has_fan_control:1; + u8 has_gsc_nvm:1; + u8 has_heci_gscfi:1; + u8 has_heci_cscfi:1; + u8 has_llc:1; + u8 has_mbx_power_limits:1; + u8 has_pxp:1; + u8 has_sriov:1; + u8 needs_scratch:1; + u8 skip_guc_pc:1; + u8 skip_mtcfg:1; + u8 skip_pcode:1; +}; + struct xe_graphics_desc { u8 va_bits; u8 vm_max_level; diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c index 9189117fe825..6a7ddb9005f9 100644 --- a/drivers/gpu/drm/xe/xe_pcode.c +++ 
b/drivers/gpu/drm/xe/xe_pcode.c @@ -336,3 +336,33 @@ int xe_pcode_probe_early(struct xe_device *xe) return xe_pcode_ready(xe, false); } ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */ + +/* Helpers with drm device. These should only be called by the display side */ +#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY) + +int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1) +{ + struct xe_device *xe = to_xe_device(drm); + struct xe_tile *tile = xe_device_get_root_tile(xe); + + return xe_pcode_read(tile, mbox, val, val1); +} + +int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms) +{ + struct xe_device *xe = to_xe_device(drm); + struct xe_tile *tile = xe_device_get_root_tile(xe); + + return xe_pcode_write_timeout(tile, mbox, val, timeout_ms); +} + +int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request, + u32 reply_mask, u32 reply, int timeout_base_ms) +{ + struct xe_device *xe = to_xe_device(drm); + struct xe_tile *tile = xe_device_get_root_tile(xe); + + return xe_pcode_request(tile, mbox, request, reply_mask, reply, timeout_base_ms); +} + +#endif diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h index de38f44f3201..a5584c1c75f9 100644 --- a/drivers/gpu/drm/xe/xe_pcode.h +++ b/drivers/gpu/drm/xe/xe_pcode.h @@ -7,8 +7,10 @@ #define _XE_PCODE_H_ #include <linux/types.h> -struct xe_tile; + +struct drm_device; struct xe_device; +struct xe_tile; void xe_pcode_init(struct xe_tile *tile); int xe_pcode_probe_early(struct xe_device *xe); @@ -32,4 +34,12 @@ int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request, | FIELD_PREP(PCODE_MB_PARAM1, param1)\ | FIELD_PREP(PCODE_MB_PARAM2, param2)) +/* Helpers with drm device */ +int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1); +int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms); +#define intel_pcode_write(drm, mbox, val) \ + intel_pcode_write_timeout((drm), (mbox), (val), 1) +int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request, + u32 reply_mask, u32 reply, int timeout_base_ms); + #endif diff --git a/drivers/gpu/drm/xe/xe_pcode_api.h b/drivers/gpu/drm/xe/xe_pcode_api.h index 0befdea77db1..92bfcba51e19 100644 --- a/drivers/gpu/drm/xe/xe_pcode_api.h +++ b/drivers/gpu/drm/xe/xe_pcode_api.h @@ -50,6 +50,21 @@ #define READ_PL_FROM_FW 0x1 #define READ_PL_FROM_PCODE 0x0 +#define PCODE_LATE_BINDING 0x5C +#define GET_CAPABILITY_STATUS 0x0 +#define V1_FAN_SUPPORTED REG_BIT(0) +#define VR_PARAMS_SUPPORTED REG_BIT(3) +#define V1_FAN_PROVISIONED REG_BIT(16) +#define VR_PARAMS_PROVISIONED REG_BIT(19) +#define GET_VERSION_LOW 0x1 +#define GET_VERSION_HIGH 0x2 +#define MAJOR_VERSION_MASK REG_GENMASK(31, 16) +#define MINOR_VERSION_MASK REG_GENMASK(15, 0) +#define HOTFIX_VERSION_MASK REG_GENMASK(31, 16) +#define BUILD_VERSION_MASK REG_GENMASK(15, 0) +#define FAN_TABLE 1 +#define VR_CONFIG 2 + #define PCODE_FREQUENCY_CONFIG 0x6e /* Frequency Config Sub Commands (param1) */ #define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0 diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index ff749edc005b..e279b47ba03b 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -19,6 +19,7 @@ #include "xe_ggtt.h" #include "xe_gt.h" #include "xe_guc.h" +#include "xe_i2c.h" #include "xe_irq.h" #include "xe_pcode.h" #include "xe_pxp.h" @@ -134,7 +135,7 @@ int xe_pm_suspend(struct xe_device *xe) /* FIXME: Super racey... 
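/*
 * GET_VERSION_LOW/HIGH above return packed bitfields: MAJOR/MINOR in the
 * low dword, HOTFIX/BUILD in the high one. Illustrative decode with the
 * masks spelled out as plain constants (a sketch of the FIELD_GET-style
 * shift-and-mask, not part of the patch):
 */
#include <stdint.h>
#include <stdio.h>

#define MAJOR_VERSION_MASK 0xFFFF0000u	/* REG_GENMASK(31, 16) */
#define MINOR_VERSION_MASK 0x0000FFFFu	/* REG_GENMASK(15, 0) */

int main(void)
{
	uint32_t version_low = 0x00020005;	/* hypothetical pcode reply */

	printf("v%u.%u\n", (version_low & MAJOR_VERSION_MASK) >> 16,
	       version_low & MINOR_VERSION_MASK);	/* -> v2.5 */
	return 0;
}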
*/ err = xe_bo_evict_all(xe); if (err) - goto err_pxp; + goto err_display; for_each_gt(gt, xe, id) { err = xe_gt_suspend(gt); @@ -146,12 +147,13 @@ int xe_pm_suspend(struct xe_device *xe) xe_display_pm_suspend_late(xe); + xe_i2c_pm_suspend(xe); + drm_dbg(&xe->drm, "Device suspended\n"); return 0; err_display: xe_display_pm_resume(xe); -err_pxp: xe_pxp_pm_resume(xe->pxp); err: drm_dbg(&xe->drm, "Device suspend failed %d\n", err); @@ -191,6 +193,8 @@ int xe_pm_resume(struct xe_device *xe) if (err) goto err; + xe_i2c_pm_resume(xe, xe->d3cold.allowed); + xe_irq_resume(xe); for_each_gt(gt, xe, id) @@ -488,6 +492,8 @@ int xe_pm_runtime_suspend(struct xe_device *xe) xe_display_pm_runtime_suspend_late(xe); + xe_i2c_pm_suspend(xe); + xe_rpm_lockmap_release(xe); xe_pm_write_callback_task(xe, NULL); return 0; @@ -535,6 +541,8 @@ int xe_pm_runtime_resume(struct xe_device *xe) goto out; } + xe_i2c_pm_resume(xe, xe->d3cold.allowed); + xe_irq_resume(xe); for_each_gt(gt, xe, id) @@ -753,11 +761,13 @@ void xe_pm_assert_unbounded_bridge(struct xe_device *xe) } /** - * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold + * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold * @xe: xe device instance - * @threshold: VRAM size in bites for the D3cold threshold + * @threshold: VRAM size in MiB for the D3cold threshold * - * Returns 0 for success, negative error code otherwise. + * Return: + * * 0 - success + * * -EINVAL - invalid argument */ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold) { diff --git a/drivers/gpu/drm/xe/xe_pmu.c b/drivers/gpu/drm/xe/xe_pmu.c index 69df0e3520a5..cab51d826345 100644 --- a/drivers/gpu/drm/xe/xe_pmu.c +++ b/drivers/gpu/drm/xe/xe_pmu.c @@ -157,10 +157,13 @@ static bool event_gt_forcewake(struct perf_event *event) return true; } -static bool event_supported(struct xe_pmu *pmu, unsigned int gt, +static bool event_supported(struct xe_pmu *pmu, unsigned int gt_id, unsigned int id) { - if (gt >= XE_MAX_GT_PER_TILE) + struct xe_device *xe = container_of(pmu, typeof(*xe), pmu); + struct xe_gt *gt = xe_device_get_gt(xe, gt_id); + + if (!gt) return false; return id < sizeof(pmu->supported_events) * BITS_PER_BYTE && diff --git a/drivers/gpu/drm/xe/xe_pxp_submit.c b/drivers/gpu/drm/xe/xe_pxp_submit.c index d92ec0f515b0..ca95f2a4d4ef 100644 --- a/drivers/gpu/drm/xe/xe_pxp_submit.c +++ b/drivers/gpu/drm/xe/xe_pxp_submit.c @@ -101,7 +101,7 @@ static int allocate_gsc_client_resources(struct xe_gt *gt, xe_assert(xe, hwe); /* PXP instructions must be issued from PPGTT */ - vm = xe_vm_create(xe, XE_VM_FLAG_GSC); + vm = xe_vm_create(xe, XE_VM_FLAG_GSC, NULL); if (IS_ERR(vm)) return PTR_ERR(vm); diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c index e8e1743dcb1e..d517ec9ddcbf 100644 --- a/drivers/gpu/drm/xe/xe_query.c +++ b/drivers/gpu/drm/xe/xe_query.c @@ -141,7 +141,7 @@ query_engine_cycles(struct xe_device *xe, return -EINVAL; eci = &resp.eci; - if (eci->gt_id >= XE_MAX_GT_PER_TILE) + if (eci->gt_id >= xe->info.max_gt_per_tile) return -EINVAL; gt = xe_device_get_gt(xe, eci->gt_id); @@ -368,6 +368,7 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query struct drm_xe_query_gt_list __user *query_ptr = u64_to_user_ptr(query->data); struct drm_xe_query_gt_list *gt_list; + int iter = 0; u8 id; if (query->size == 0) { @@ -385,12 +386,12 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query for_each_gt(gt, xe, id) { if (xe_gt_is_media_type(gt)) - 
gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MEDIA; + gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MEDIA; else - gt_list->gt_list[id].type = DRM_XE_QUERY_GT_TYPE_MAIN; - gt_list->gt_list[id].tile_id = gt_to_tile(gt)->id; - gt_list->gt_list[id].gt_id = gt->info.id; - gt_list->gt_list[id].reference_clock = gt->info.reference_clock; + gt_list->gt_list[iter].type = DRM_XE_QUERY_GT_TYPE_MAIN; + gt_list->gt_list[iter].tile_id = gt_to_tile(gt)->id; + gt_list->gt_list[iter].gt_id = gt->info.id; + gt_list->gt_list[iter].reference_clock = gt->info.reference_clock; /* * The mem_regions indexes in the mask below need to * directly identify the struct @@ -406,19 +407,21 @@ static int query_gt_list(struct xe_device *xe, struct drm_xe_device_query *query * assumption. */ if (!IS_DGFX(xe)) - gt_list->gt_list[id].near_mem_regions = 0x1; + gt_list->gt_list[iter].near_mem_regions = 0x1; else - gt_list->gt_list[id].near_mem_regions = + gt_list->gt_list[iter].near_mem_regions = BIT(gt_to_tile(gt)->id) << 1; - gt_list->gt_list[id].far_mem_regions = xe->info.mem_region_mask ^ - gt_list->gt_list[id].near_mem_regions; + gt_list->gt_list[iter].far_mem_regions = xe->info.mem_region_mask ^ + gt_list->gt_list[iter].near_mem_regions; - gt_list->gt_list[id].ip_ver_major = + gt_list->gt_list[iter].ip_ver_major = REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid); - gt_list->gt_list[id].ip_ver_minor = + gt_list->gt_list[iter].ip_ver_minor = REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid); - gt_list->gt_list[id].ip_ver_rev = + gt_list->gt_list[iter].ip_ver_rev = REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid); + + iter++; } if (copy_to_user(query_ptr, gt_list, size)) { diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c index bc1689db4cd7..7b50c7c1ee21 100644 --- a/drivers/gpu/drm/xe/xe_ring_ops.c +++ b/drivers/gpu/drm/xe/xe_ring_ops.c @@ -110,13 +110,14 @@ static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i) return i; } -static int emit_flush_invalidate(u32 *dw, int i) +static int emit_flush_invalidate(u32 addr, u32 val, u32 *dw, int i) { dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW | - MI_FLUSH_IMM_DW | MI_FLUSH_DW_STORE_INDEX; - dw[i++] = LRC_PPHWSP_FLUSH_INVAL_SCRATCH_ADDR; - dw[i++] = 0; + MI_FLUSH_IMM_DW; + + dw[i++] = addr | MI_FLUSH_DW_USE_GTT; dw[i++] = 0; + dw[i++] = val; return i; } @@ -397,23 +398,20 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job, static void emit_migration_job_gen12(struct xe_sched_job *job, struct xe_lrc *lrc, u32 seqno) { + u32 saddr = xe_lrc_start_seqno_ggtt_addr(lrc); u32 dw[MAX_JOB_SIZE_DW], i = 0; i = emit_copy_timestamp(lrc, dw, i); - i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc), - seqno, dw, i); + i = emit_store_imm_ggtt(saddr, seqno, dw, i); dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */ i = emit_bb_start(job->ptrs[0].batch_addr, BIT(8), dw, i); - if (!IS_SRIOV_VF(gt_to_xe(job->q->gt))) { - /* XXX: Do we need this? Leaving for now. 
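/*
 * The query_gt_list change above matters because for_each_gt() ids may now
 * be sparse (tile-based numbering), while the uAPI array must stay densely
 * packed, hence the separate iter cursor. Minimal illustration (hypothetical
 * types, not part of the patch):
 */
#include <stddef.h>

struct entry { unsigned int gt_id; };

static size_t pack_present(const unsigned int *ids, const int *present,
			   size_t n, struct entry *out)
{
	size_t iter = 0;

	for (size_t i = 0; i < n; i++) {
		if (!present[i])
			continue;	/* a skipped id must not leave a hole */
		out[iter++].gt_id = ids[i];
	}
	return iter;
}

int main(void)
{
	const unsigned int ids[] = { 0, 1, 2, 3 };
	const int present[] = { 1, 0, 1, 0 };	/* e.g. no media GTs */
	struct entry out[4];

	return pack_present(ids, present, 4, out) != 2;
}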
*/ - dw[i++] = preparser_disable(true); - i = emit_flush_invalidate(dw, i); - dw[i++] = preparser_disable(false); - } + dw[i++] = preparser_disable(true); + i = emit_flush_invalidate(saddr, seqno, dw, i); + dw[i++] = preparser_disable(false); i = emit_bb_start(job->ptrs[1].batch_addr, BIT(8), dw, i); diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c index 29e694bb1219..95571b87aa73 100644 --- a/drivers/gpu/drm/xe/xe_rtp.c +++ b/drivers/gpu/drm/xe/xe_rtp.c @@ -56,37 +56,61 @@ static bool rule_matches(const struct xe_device *xe, xe->info.subplatform == r->subplatform; break; case XE_RTP_MATCH_GRAPHICS_VERSION: + if (drm_WARN_ON(&xe->drm, !gt)) + return false; + match = xe->info.graphics_verx100 == r->ver_start && (!has_samedia(xe) || !xe_gt_is_media_type(gt)); break; case XE_RTP_MATCH_GRAPHICS_VERSION_RANGE: + if (drm_WARN_ON(&xe->drm, !gt)) + return false; + match = xe->info.graphics_verx100 >= r->ver_start && xe->info.graphics_verx100 <= r->ver_end && (!has_samedia(xe) || !xe_gt_is_media_type(gt)); break; case XE_RTP_MATCH_GRAPHICS_VERSION_ANY_GT: + if (drm_WARN_ON(&xe->drm, !gt)) + return false; + match = xe->info.graphics_verx100 == r->ver_start; break; case XE_RTP_MATCH_GRAPHICS_STEP: + if (drm_WARN_ON(&xe->drm, !gt)) + return false; + match = xe->info.step.graphics >= r->step_start && xe->info.step.graphics < r->step_end && (!has_samedia(xe) || !xe_gt_is_media_type(gt)); break; case XE_RTP_MATCH_MEDIA_VERSION: + if (drm_WARN_ON(&xe->drm, !gt)) + return false; + match = xe->info.media_verx100 == r->ver_start && (!has_samedia(xe) || xe_gt_is_media_type(gt)); break; case XE_RTP_MATCH_MEDIA_VERSION_RANGE: + if (drm_WARN_ON(&xe->drm, !gt)) + return false; + match = xe->info.media_verx100 >= r->ver_start && xe->info.media_verx100 <= r->ver_end && (!has_samedia(xe) || xe_gt_is_media_type(gt)); break; case XE_RTP_MATCH_MEDIA_STEP: + if (drm_WARN_ON(&xe->drm, !gt)) + return false; + match = xe->info.step.media >= r->step_start && xe->info.step.media < r->step_end && (!has_samedia(xe) || xe_gt_is_media_type(gt)); break; case XE_RTP_MATCH_MEDIA_VERSION_ANY_GT: + if (drm_WARN_ON(&xe->drm, !gt)) + return false; + match = xe->info.media_verx100 == r->ver_start; break; case XE_RTP_MATCH_INTEGRATED: @@ -108,6 +132,9 @@ static bool rule_matches(const struct xe_device *xe, match = hwe->class != r->engine_class; break; case XE_RTP_MATCH_FUNC: + if (drm_WARN_ON(&xe->drm, !gt)) + return false; + match = r->match_func(gt, hwe); break; default: @@ -186,6 +213,11 @@ static void rtp_get_context(struct xe_rtp_process_ctx *ctx, struct xe_device **xe) { switch (ctx->type) { + case XE_RTP_PROCESS_TYPE_DEVICE: + *hwe = NULL; + *gt = NULL; + *xe = ctx->xe; + break; case XE_RTP_PROCESS_TYPE_GT: *hwe = NULL; *gt = ctx->gt; @@ -326,21 +358,6 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt, hwe->engine_id == __ffs(render_compute_mask); } -bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt, - const struct xe_hw_engine *hwe) -{ - unsigned int dss_per_gslice = 4; - unsigned int dss; - - if (drm_WARN(&gt_to_xe(gt)->drm, xe_dss_mask_empty(gt->fuse_topo.g_dss_mask), - "Checking gslice for platform without geometry pipeline\n")) - return false; - - dss = xe_dss_mask_group_ffs(gt->fuse_topo.g_dss_mask, 0, 0); - - return dss >= dss_per_gslice; -} - bool xe_rtp_match_not_sriov_vf(const struct xe_gt *gt, const struct xe_hw_engine *hwe) { diff --git a/drivers/gpu/drm/xe/xe_rtp.h b/drivers/gpu/drm/xe/xe_rtp.h index 4fe736a11c42..5ed6c14b9ae3 100644 --- a/drivers/gpu/drm/xe/xe_rtp.h
+++ b/drivers/gpu/drm/xe/xe_rtp.h @@ -422,7 +422,8 @@ struct xe_reg_sr; #define XE_RTP_PROCESS_CTX_INITIALIZER(arg__) _Generic((arg__), \ struct xe_hw_engine * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_ENGINE }, \ - struct xe_gt * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT }) + struct xe_gt * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_GT }, \ + struct xe_device * : (struct xe_rtp_process_ctx){ { (void *)(arg__) }, XE_RTP_PROCESS_TYPE_DEVICE }) void xe_rtp_process_ctx_enable_active_tracking(struct xe_rtp_process_ctx *ctx, unsigned long *active_entries, @@ -466,17 +467,6 @@ bool xe_rtp_match_first_render_or_compute(const struct xe_gt *gt, const struct xe_hw_engine *hwe); /* - * xe_rtp_match_first_gslice_fused_off - Match when first gslice is fused off - * - * @gt: GT structure - * @hwe: Engine instance - * - * Returns: true if first gslice is fused off, false otherwise. - */ -bool xe_rtp_match_first_gslice_fused_off(const struct xe_gt *gt, - const struct xe_hw_engine *hwe); - -/* * xe_rtp_match_not_sriov_vf - Match when not on SR-IOV VF device * * @gt: GT structure diff --git a/drivers/gpu/drm/xe/xe_rtp_types.h b/drivers/gpu/drm/xe/xe_rtp_types.h index 1b76b947c706..f4cf30e298cf 100644 --- a/drivers/gpu/drm/xe/xe_rtp_types.h +++ b/drivers/gpu/drm/xe/xe_rtp_types.h @@ -110,12 +110,14 @@ struct xe_rtp_entry { }; enum xe_rtp_process_type { + XE_RTP_PROCESS_TYPE_DEVICE, XE_RTP_PROCESS_TYPE_GT, XE_RTP_PROCESS_TYPE_ENGINE, }; struct xe_rtp_process_ctx { union { + struct xe_device *xe; struct xe_gt *gt; struct xe_hw_engine *hwe; }; diff --git a/drivers/gpu/drm/xe/xe_shrinker.c b/drivers/gpu/drm/xe/xe_shrinker.c index 125c836e0ee4..90244fe59b59 100644 --- a/drivers/gpu/drm/xe/xe_shrinker.c +++ b/drivers/gpu/drm/xe/xe_shrinker.c @@ -54,10 +54,10 @@ xe_shrinker_mod_pages(struct xe_shrinker *shrinker, long shrinkable, long purgea write_unlock(&shrinker->lock); } -static s64 xe_shrinker_walk(struct xe_device *xe, - struct ttm_operation_ctx *ctx, - const struct xe_bo_shrink_flags flags, - unsigned long to_scan, unsigned long *scanned) +static s64 __xe_shrinker_walk(struct xe_device *xe, + struct ttm_operation_ctx *ctx, + const struct xe_bo_shrink_flags flags, + unsigned long to_scan, unsigned long *scanned) { unsigned int mem_type; s64 freed = 0, lret; @@ -66,11 +66,15 @@ static s64 xe_shrinker_walk(struct xe_device *xe, struct ttm_resource_manager *man = ttm_manager_type(&xe->ttm, mem_type); struct ttm_bo_lru_cursor curs; struct ttm_buffer_object *ttm_bo; + struct ttm_lru_walk_arg arg = { + .ctx = ctx, + .trylock_only = true, + }; if (!man || !man->use_tt) continue; - ttm_bo_lru_for_each_reserved_guarded(&curs, man, ctx, ttm_bo) { + ttm_bo_lru_for_each_reserved_guarded(&curs, man, &arg, ttm_bo) { if (!ttm_bo_shrink_suitable(ttm_bo, ctx)) continue; @@ -82,6 +86,50 @@ static s64 xe_shrinker_walk(struct xe_device *xe, if (*scanned >= to_scan) break; } + /* Trylocks should never error, just fail. */ + xe_assert(xe, !IS_ERR(ttm_bo)); + } + + return freed; +} + +/* + * Try shrinking idle objects without writeback first, then if not sufficient, + * try also non-idle objects and finally if that's not sufficient either, + * add writeback. This avoids stalls and explicit writebacks with light or + * moderate memory pressure. 
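/*
 * Skeleton of the staged walk described in the comment above: each pass
 * only escalates (wait for the GPU, then writeback) when the previous one
 * did not reach the scan target. Hypothetical types, error handling kept
 * minimal:
 */
#include <stdbool.h>

typedef long (*walk_fn)(bool wait_gpu, bool writeback,
			unsigned long to_scan, unsigned long *scanned);

static long staged_shrink(walk_fn walk, unsigned long to_scan)
{
	unsigned long scanned = 0;
	long freed, ret;

	/* Pass 1: idle objects only, no writeback. */
	ret = walk(false, false, to_scan, &scanned);
	if (ret < 0 || scanned >= to_scan)
		return ret;
	freed = ret;

	/* Pass 2: also wait for busy objects. */
	ret = walk(true, false, to_scan, &scanned);
	if (ret < 0)
		return ret;
	freed += ret;
	if (scanned >= to_scan)
		return freed;

	/* Pass 3: last resort, allow writeback. */
	ret = walk(true, true, to_scan, &scanned);
	return ret < 0 ? ret : freed + ret;
}

static long fake_walk(bool wait_gpu, bool writeback,
		      unsigned long to_scan, unsigned long *scanned)
{
	(void)wait_gpu;
	(void)writeback;
	*scanned += to_scan / 2;	/* pretend we scanned half the goal */
	return 10;			/* pages freed */
}

int main(void)
{
	return staged_shrink(fake_walk, 100) < 0;
}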
+ */ +static s64 xe_shrinker_walk(struct xe_device *xe, + struct ttm_operation_ctx *ctx, + const struct xe_bo_shrink_flags flags, + unsigned long to_scan, unsigned long *scanned) +{ + bool no_wait_gpu = true; + struct xe_bo_shrink_flags save_flags = flags; + s64 lret, freed; + + swap(no_wait_gpu, ctx->no_wait_gpu); + save_flags.writeback = false; + lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); + swap(no_wait_gpu, ctx->no_wait_gpu); + if (lret < 0 || *scanned >= to_scan) + return lret; + + freed = lret; + if (!ctx->no_wait_gpu) { + lret = __xe_shrinker_walk(xe, ctx, save_flags, to_scan, scanned); + if (lret < 0) + return lret; + freed += lret; + if (*scanned >= to_scan) + return freed; + } + + if (flags.writeback) { + lret = __xe_shrinker_walk(xe, ctx, flags, to_scan, scanned); + if (lret < 0) + return lret; + freed += lret; } return freed; @@ -193,6 +241,7 @@ static unsigned long xe_shrinker_scan(struct shrinker *shrink, struct shrink_con runtime_pm = xe_shrinker_runtime_pm_get(shrinker, true, 0, can_backup); shrink_flags.purge = false; + lret = xe_shrinker_walk(shrinker->xe, &ctx, shrink_flags, nr_to_scan, &nr_scanned); if (lret >= 0) diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.c b/drivers/gpu/drm/xe/xe_sriov_pf.c index 0f721ae17b26..27ddf3cc80e9 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_sriov_pf.c @@ -3,13 +3,18 @@ * Copyright © 2023-2024 Intel Corporation */ +#include <linux/debugfs.h> +#include <drm/drm_debugfs.h> #include <drm/drm_managed.h> #include "xe_assert.h" #include "xe_device.h" +#include "xe_gt_sriov_pf.h" #include "xe_module.h" #include "xe_sriov.h" #include "xe_sriov_pf.h" +#include "xe_sriov_pf_helpers.h" +#include "xe_sriov_pf_service.h" #include "xe_sriov_printk.h" static unsigned int wanted_max_vfs(struct xe_device *xe) @@ -80,9 +85,48 @@ bool xe_sriov_pf_readiness(struct xe_device *xe) */ int xe_sriov_pf_init_early(struct xe_device *xe) { + int err; + xe_assert(xe, IS_SRIOV_PF(xe)); - return drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock); + xe->sriov.pf.vfs = drmm_kcalloc(&xe->drm, 1 + xe_sriov_pf_get_totalvfs(xe), + sizeof(*xe->sriov.pf.vfs), GFP_KERNEL); + if (!xe->sriov.pf.vfs) + return -ENOMEM; + + err = drmm_mutex_init(&xe->drm, &xe->sriov.pf.master_lock); + if (err) + return err; + + xe_sriov_pf_service_init(xe); + + return 0; +} + +/** + * xe_sriov_pf_wait_ready() - Wait until PF is ready to operate. + * @xe: the &xe_device to test + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. 
+ */ +int xe_sriov_pf_wait_ready(struct xe_device *xe) +{ + struct xe_gt *gt; + unsigned int id; + int err; + + if (xe_device_wedged(xe)) + return -ECANCELED; + + for_each_gt(gt, xe, id) { + err = xe_gt_sriov_pf_wait_ready(gt); + if (err) + return err; + } + + return 0; } /** @@ -102,3 +146,45 @@ void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p) drm_printf(p, "supported: %u\n", xe->sriov.pf.driver_max_vfs); drm_printf(p, "enabled: %u\n", pci_num_vf(pdev)); } + +static int simple_show(struct seq_file *m, void *data) +{ + struct drm_printer p = drm_seq_file_printer(m); + struct drm_info_node *node = m->private; + struct dentry *parent = node->dent->d_parent; + struct xe_device *xe = parent->d_inode->i_private; + void (*print)(struct xe_device *, struct drm_printer *) = node->info_ent->data; + + print(xe, &p); + return 0; +} + +static const struct drm_info_list debugfs_list[] = { + { .name = "vfs", .show = simple_show, .data = xe_sriov_pf_print_vfs_summary }, + { .name = "versions", .show = simple_show, .data = xe_sriov_pf_service_print_versions }, +}; + +/** + * xe_sriov_pf_debugfs_register - Register PF debugfs attributes. + * @xe: the &xe_device + * @root: the root &dentry + * + * Prepare debugfs attributes exposed by the PF. + */ +void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root) +{ + struct drm_minor *minor = xe->drm.primary; + struct dentry *parent; + + /* + * /sys/kernel/debug/dri/0/ + * ├── pf + * │  ├── ... + */ + parent = debugfs_create_dir("pf", root); + if (IS_ERR(parent)) + return; + parent->d_inode->i_private = xe; + + drm_debugfs_create_files(debugfs_list, ARRAY_SIZE(debugfs_list), parent, minor); +} diff --git a/drivers/gpu/drm/xe/xe_sriov_pf.h b/drivers/gpu/drm/xe/xe_sriov_pf.h index d1220e70e1c0..e3b34f8f5e04 100644 --- a/drivers/gpu/drm/xe/xe_sriov_pf.h +++ b/drivers/gpu/drm/xe/xe_sriov_pf.h @@ -8,12 +8,15 @@ #include <linux/types.h> +struct dentry; struct drm_printer; struct xe_device; #ifdef CONFIG_PCI_IOV bool xe_sriov_pf_readiness(struct xe_device *xe); int xe_sriov_pf_init_early(struct xe_device *xe); +int xe_sriov_pf_wait_ready(struct xe_device *xe); +void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root); void xe_sriov_pf_print_vfs_summary(struct xe_device *xe, struct drm_printer *p); #else static inline bool xe_sriov_pf_readiness(struct xe_device *xe) @@ -25,6 +28,10 @@ static inline int xe_sriov_pf_init_early(struct xe_device *xe) { return 0; } + +static inline void xe_sriov_pf_debugfs_register(struct xe_device *xe, struct dentry *root) +{ +} #endif #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_service.c b/drivers/gpu/drm/xe/xe_sriov_pf_service.c new file mode 100644 index 000000000000..eee3b2a1ba41 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_service.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2023-2025 Intel Corporation + */ + +#include "abi/guc_relay_actions_abi.h" + +#include "xe_device_types.h" +#include "xe_sriov.h" +#include "xe_sriov_pf_helpers.h" +#include "xe_sriov_printk.h" + +#include "xe_sriov_pf_service.h" +#include "xe_sriov_pf_service_types.h" + +/** + * xe_sriov_pf_service_init - Early initialization of the SR-IOV PF service. + * @xe: the &xe_device to initialize + * + * Performs early initialization of the SR-IOV PF service. + * + * This function can only be called on PF. 
+ */ +void xe_sriov_pf_service_init(struct xe_device *xe) +{ + BUILD_BUG_ON(!GUC_RELAY_VERSION_BASE_MAJOR && !GUC_RELAY_VERSION_BASE_MINOR); + BUILD_BUG_ON(GUC_RELAY_VERSION_BASE_MAJOR > GUC_RELAY_VERSION_LATEST_MAJOR); + + xe_assert(xe, IS_SRIOV_PF(xe)); + + /* base versions may differ between platforms */ + xe->sriov.pf.service.version.base.major = GUC_RELAY_VERSION_BASE_MAJOR; + xe->sriov.pf.service.version.base.minor = GUC_RELAY_VERSION_BASE_MINOR; + + /* latest version is same for all platforms */ + xe->sriov.pf.service.version.latest.major = GUC_RELAY_VERSION_LATEST_MAJOR; + xe->sriov.pf.service.version.latest.minor = GUC_RELAY_VERSION_LATEST_MINOR; +} + +/* Return: 0 on success or a negative error code on failure. */ +static int pf_negotiate_version(struct xe_device *xe, + u32 wanted_major, u32 wanted_minor, + u32 *major, u32 *minor) +{ + struct xe_sriov_pf_service_version base = xe->sriov.pf.service.version.base; + struct xe_sriov_pf_service_version latest = xe->sriov.pf.service.version.latest; + + xe_assert(xe, IS_SRIOV_PF(xe)); + xe_assert(xe, base.major); + xe_assert(xe, base.major <= latest.major); + xe_assert(xe, (base.major < latest.major) || (base.minor <= latest.minor)); + + /* VF doesn't care - return our latest */ + if (wanted_major == VF2PF_HANDSHAKE_MAJOR_ANY && + wanted_minor == VF2PF_HANDSHAKE_MINOR_ANY) { + *major = latest.major; + *minor = latest.minor; + return 0; + } + + /* VF wants newer than ours - return our latest */ + if (wanted_major > latest.major) { + *major = latest.major; + *minor = latest.minor; + return 0; + } + + /* VF wants older than min required - reject */ + if (wanted_major < base.major || + (wanted_major == base.major && wanted_minor < base.minor)) { + return -EPERM; + } + + /* previous major - return wanted, as we should still support it */ + if (wanted_major < latest.major) { + /* XXX: we are not prepared for multi-versions yet */ + xe_assert(xe, base.major == latest.major); + return -ENOPKG; + } + + /* same major - return common minor */ + *major = wanted_major; + *minor = min_t(u32, latest.minor, wanted_minor); + return 0; +} + +static void pf_connect(struct xe_device *xe, u32 vfid, u32 major, u32 minor) +{ + xe_sriov_pf_assert_vfid(xe, vfid); + xe_assert(xe, major || minor); + + xe->sriov.pf.vfs[vfid].version.major = major; + xe->sriov.pf.vfs[vfid].version.minor = minor; +} + +static void pf_disconnect(struct xe_device *xe, u32 vfid) +{ + xe_sriov_pf_assert_vfid(xe, vfid); + + xe->sriov.pf.vfs[vfid].version.major = 0; + xe->sriov.pf.vfs[vfid].version.minor = 0; +} + +/** + * xe_sriov_pf_service_is_negotiated - Check if VF has negotiated given ABI version. + * @xe: the &xe_device + * @vfid: the VF identifier + * @major: the major version to check + * @minor: the minor version to check + * + * Checks whether the VF has negotiated an ABI version compatible with the + * given one: the major version must match exactly and the negotiated minor + * version must be at least @minor. + * + * This function can only be called on PF. + * + * Returns: true if the VF can use the given ABI version functionality. + */ +bool xe_sriov_pf_service_is_negotiated(struct xe_device *xe, u32 vfid, u32 major, u32 minor) +{ + xe_sriov_pf_assert_vfid(xe, vfid); + + return major == xe->sriov.pf.vfs[vfid].version.major && + minor <= xe->sriov.pf.vfs[vfid].version.minor; +} + +/** + * xe_sriov_pf_service_handshake_vf - Confirm a connection with the VF.
+ * @xe: the &xe_device + * @vfid: the VF identifier + * @wanted_major: the major service version expected by the VF + * @wanted_minor: the minor service version expected by the VF + * @major: the major service version to be used by the VF + * @minor: the minor service version to be used by the VF + * + * Negotiate a VF/PF ABI version to allow the VF to use the PF services. + * + * This function can only be called on PF. + * + * Return: 0 on success or a negative error code on failure. + */ +int xe_sriov_pf_service_handshake_vf(struct xe_device *xe, u32 vfid, + u32 wanted_major, u32 wanted_minor, + u32 *major, u32 *minor) +{ + int err; + + xe_sriov_dbg_verbose(xe, "VF%u wants ABI version %u.%u\n", + vfid, wanted_major, wanted_minor); + + err = pf_negotiate_version(xe, wanted_major, wanted_minor, major, minor); + + if (err < 0) { + xe_sriov_notice(xe, "VF%u failed to negotiate ABI %u.%u (%pe)\n", + vfid, wanted_major, wanted_minor, ERR_PTR(err)); + pf_disconnect(xe, vfid); + } else { + xe_sriov_dbg(xe, "VF%u negotiated ABI version %u.%u\n", + vfid, *major, *minor); + pf_connect(xe, vfid, *major, *minor); + } + + return err; +} + +/** + * xe_sriov_pf_service_reset_vf - Reset a connection with the VF. + * @xe: the &xe_device + * @vfid: the VF identifier + * + * Reset the VF/PF ABI version negotiated by the VF driver. + * + * After that point, the VF driver will have to perform a new version handshake + * to continue using the PF services. + * + * This function can only be called on PF. + */ +void xe_sriov_pf_service_reset_vf(struct xe_device *xe, unsigned int vfid) +{ + pf_disconnect(xe, vfid); +} + +static void print_pf_version(struct drm_printer *p, const char *name, + const struct xe_sriov_pf_service_version *version) +{ + drm_printf(p, "%s:\t%u.%u\n", name, version->major, version->minor); +} + +/** + * xe_sriov_pf_service_print_versions - Print ABI versions negotiated with VFs. + * @xe: the &xe_device + * @p: the &drm_printer + * + * This function is for PF use only.
+ */ +void xe_sriov_pf_service_print_versions(struct xe_device *xe, struct drm_printer *p) +{ + unsigned int n, total_vfs = xe_sriov_pf_get_totalvfs(xe); + struct xe_sriov_pf_service_version *version; + char name[8]; + + xe_assert(xe, IS_SRIOV_PF(xe)); + + print_pf_version(p, "base", &xe->sriov.pf.service.version.base); + print_pf_version(p, "latest", &xe->sriov.pf.service.version.latest); + + for (n = 1; n <= total_vfs; n++) { + version = &xe->sriov.pf.vfs[n].version; + if (!version->major && !version->minor) + continue; + + print_pf_version(p, xe_sriov_function_name(n, name, sizeof(name)), version); + } +} + +#if IS_BUILTIN(CONFIG_DRM_XE_KUNIT_TEST) +#include "tests/xe_sriov_pf_service_kunit.c" +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_service.h b/drivers/gpu/drm/xe/xe_sriov_pf_service.h new file mode 100644 index 000000000000..d38c18f5ed10 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_service.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PF_SERVICE_H_ +#define _XE_SRIOV_PF_SERVICE_H_ + +#include <linux/types.h> + +struct drm_printer; +struct xe_device; + +void xe_sriov_pf_service_init(struct xe_device *xe); +void xe_sriov_pf_service_print_versions(struct xe_device *xe, struct drm_printer *p); + +int xe_sriov_pf_service_handshake_vf(struct xe_device *xe, u32 vfid, + u32 wanted_major, u32 wanted_minor, + u32 *major, u32 *minor); +bool xe_sriov_pf_service_is_negotiated(struct xe_device *xe, u32 vfid, u32 major, u32 minor); +void xe_sriov_pf_service_reset_vf(struct xe_device *xe, unsigned int vfid); + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_service_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_service_types.h new file mode 100644 index 000000000000..0835dde358c1 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_service_types.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023-2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PF_SERVICE_TYPES_H_ +#define _XE_SRIOV_PF_SERVICE_TYPES_H_ + +#include <linux/types.h> + +/** + * struct xe_sriov_pf_service_version - VF/PF ABI Version. + * @major: the major version of the VF/PF ABI + * @minor: the minor version of the VF/PF ABI + * + * See `GuC Relay Communication`_. + */ +struct xe_sriov_pf_service_version { + u16 major; + u16 minor; +}; + +/** + * struct xe_sriov_pf_service - Data used by the PF service. + * @version: information about VF/PF ABI versions for current platform. + * @version.base: lowest VF/PF ABI version that could be negotiated with VF. + * @version.latest: latest VF/PF ABI version supported by the PF driver. 
+ */ +struct xe_sriov_pf_service { + struct { + struct xe_sriov_pf_service_version base; + struct xe_sriov_pf_service_version latest; + } version; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_sriov_pf_types.h new file mode 100644 index 000000000000..956a88f9f213 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_pf_types.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023-2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_PF_TYPES_H_ +#define _XE_SRIOV_PF_TYPES_H_ + +#include <linux/mutex.h> +#include <linux/types.h> + +#include "xe_sriov_pf_service_types.h" + +/** + * struct xe_sriov_metadata - per-VF device level metadata + */ +struct xe_sriov_metadata { + /** @version: negotiated VF/PF ABI version */ + struct xe_sriov_pf_service_version version; +}; + +/** + * struct xe_device_pf - Xe PF related data + * + * The data in this structure is valid only if driver is running in the + * @XE_SRIOV_MODE_PF mode. + */ +struct xe_device_pf { + /** @device_total_vfs: Maximum number of VFs supported by the device. */ + u16 device_total_vfs; + + /** @driver_max_vfs: Maximum number of VFs supported by the driver. */ + u16 driver_max_vfs; + + /** @master_lock: protects all VFs configurations across GTs */ + struct mutex master_lock; + + /** @service: device level service data. */ + struct xe_sriov_pf_service service; + + /** @vfs: metadata for all VFs. */ + struct xe_sriov_metadata *vfs; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_sriov_types.h b/drivers/gpu/drm/xe/xe_sriov_types.h index ca94382a721e..1a138108d139 100644 --- a/drivers/gpu/drm/xe/xe_sriov_types.h +++ b/drivers/gpu/drm/xe/xe_sriov_types.h @@ -7,9 +7,6 @@ #define _XE_SRIOV_TYPES_H_ #include <linux/build_bug.h> -#include <linux/mutex.h> -#include <linux/types.h> -#include <linux/workqueue_types.h> /** * VFID - Virtual Function Identifier @@ -40,37 +37,4 @@ enum xe_sriov_mode { }; static_assert(XE_SRIOV_MODE_NONE); -/** - * struct xe_device_pf - Xe PF related data - * - * The data in this structure is valid only if driver is running in the - * @XE_SRIOV_MODE_PF mode. - */ -struct xe_device_pf { - /** @device_total_vfs: Maximum number of VFs supported by the device. */ - u16 device_total_vfs; - - /** @driver_max_vfs: Maximum number of VFs supported by the driver. */ - u16 driver_max_vfs; - - /** @master_lock: protects all VFs configurations across GTs */ - struct mutex master_lock; -}; - -/** - * struct xe_device_vf - Xe Virtual Function related data - * - * The data in this structure is valid only if driver is running in the - * @XE_SRIOV_MODE_VF mode. - */ -struct xe_device_vf { - /** @migration: VF Migration state data */ - struct { - /** @migration.worker: VF migration recovery worker */ - struct work_struct worker; - /** @migration.gt_flags: Per-GT request flags for VF migration recovery */ - unsigned long gt_flags; - } migration; -}; - #endif diff --git a/drivers/gpu/drm/xe/xe_sriov_vf.c b/drivers/gpu/drm/xe/xe_sriov_vf.c index 6526fe450e55..26e243c28994 100644 --- a/drivers/gpu/drm/xe/xe_sriov_vf.c +++ b/drivers/gpu/drm/xe/xe_sriov_vf.c @@ -147,127 +147,113 @@ void xe_sriov_vf_init_early(struct xe_device *xe) xe_sriov_info(xe, "migration not supported by this module version\n"); } -/** - * vf_post_migration_requery_guc - Re-query GuC for current VF provisioning. 
+static bool gt_vf_post_migration_needed(struct xe_gt *gt) +{ + return test_bit(gt->info.id, &gt_to_xe(gt)->sriov.vf.migration.gt_flags); +} + +/* + * Notify the GuCs marked in @gt_flags that resource fixups have been applied. * @xe: the &xe_device struct instance - * - * After migration, we need to re-query all VF configuration to make sure - * they match previous provisioning. Note that most of VF provisioning - * shall be the same, except GGTT range, since GGTT is not virtualized per-VF. - * - * Returns: 0 if the operation completed successfully, or a negative error - * code otherwise. + * @gt_flags: flags marking to which GTs the notification shall be sent */ -static int vf_post_migration_requery_guc(struct xe_device *xe) +static int vf_post_migration_notify_resfix_done(struct xe_device *xe, unsigned long gt_flags) { struct xe_gt *gt; unsigned int id; - int err, ret = 0; + int err = 0; for_each_gt(gt, xe, id) { - err = xe_gt_sriov_vf_query_config(gt); - ret = ret ?: err; + if (!test_bit(id, &gt_flags)) + continue; + /* skip asking GuC for RESFIX exit if a new recovery request arrived */ + if (gt_vf_post_migration_needed(gt)) + continue; + err = xe_gt_sriov_vf_notify_resfix_done(gt); + if (err) + break; + clear_bit(id, &gt_flags); } - return ret; + if (gt_flags && !err) + drm_dbg(&xe->drm, "another recovery imminent, skipped some notifications\n"); + return err; } -static void vf_post_migration_fixup_ctb(struct xe_device *xe) +static int vf_get_next_migrated_gt_id(struct xe_device *xe) { struct xe_gt *gt; unsigned int id; - xe_assert(xe, IS_SRIOV_VF(xe)); - for_each_gt(gt, xe, id) { - s32 shift = xe_gt_sriov_vf_ggtt_shift(gt); - - xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift); + if (test_and_clear_bit(id, &xe->sriov.vf.migration.gt_flags)) + return id; } + return -1; } -/* - * vf_post_migration_imminent - Check if post-restore recovery is coming. - * @xe: the &xe_device struct instance +/** + * gt_vf_post_migration_fixups - Perform post-migration fixups on a single GT. * - * Return: True if migration recovery worker will soon be running. Any worker currently - * executing does not affect the result. + * After migration, GuC needs to be re-queried for VF configuration to check + * if it matches previous provisioning. Most of VF provisioning shall be the + * same, except GGTT range, since GGTT is not virtualized per-VF. If GGTT + * range has changed, we have to perform fixups - shift all GGTT references + * used anywhere within the driver. After the fixups in this function succeed, + * it is allowed to ask the GuC bound to this GT to continue normal operation. + * + * Returns: 0 if the operation completed successfully, or a negative error + * code otherwise. */ -static bool vf_post_migration_imminent(struct xe_device *xe) +static int gt_vf_post_migration_fixups(struct xe_gt *gt) { - return xe->sriov.vf.migration.gt_flags != 0 || - work_pending(&xe->sriov.vf.migration.worker); -} - -static bool vf_post_migration_fixup_ggtt_nodes(struct xe_device *xe) -{ - bool need_fixups = false; - struct xe_tile *tile; - unsigned int id; - - for_each_tile(tile, xe, id) { - struct xe_gt *gt = tile->primary_gt; - s64 shift; - - shift = xe_gt_sriov_vf_ggtt_shift(gt); - if (shift) { - need_fixups = true; - xe_tile_sriov_vf_fixup_ggtt_nodes(tile, shift); - } - } - return need_fixups; -} + s64 shift; + int err; -/* - * Notify all GuCs about resource fixups apply finished.
- */ -static void vf_post_migration_notify_resfix_done(struct xe_device *xe) -{ - struct xe_gt *gt; - unsigned int id; + err = xe_gt_sriov_vf_query_config(gt); + if (err) + return err; - for_each_gt(gt, xe, id) { - if (vf_post_migration_imminent(xe)) - goto skip; - xe_gt_sriov_vf_notify_resfix_done(gt); + shift = xe_gt_sriov_vf_ggtt_shift(gt); + if (shift) { + xe_tile_sriov_vf_fixup_ggtt_nodes(gt_to_tile(gt), shift); + /* FIXME: add the recovery steps */ + xe_guc_ct_fixup_messages_with_ggtt(&gt->uc.guc.ct, shift); } - return; - -skip: - drm_dbg(&xe->drm, "another recovery imminent, skipping notifications\n"); + return 0; } static void vf_post_migration_recovery(struct xe_device *xe) { - bool need_fixups; - int err; + unsigned long fixed_gts = 0; + int id, err; drm_dbg(&xe->drm, "migration recovery in progress\n"); xe_pm_runtime_get(xe); - err = vf_post_migration_requery_guc(xe); - if (vf_post_migration_imminent(xe)) - goto defer; - if (unlikely(err)) - goto fail; + if (!vf_migration_supported(xe)) { xe_sriov_err(xe, "migration not supported by this module version\n"); err = -ENOTRECOVERABLE; goto fail; } - need_fixups = vf_post_migration_fixup_ggtt_nodes(xe); - /* FIXME: add the recovery steps */ - if (need_fixups) - vf_post_migration_fixup_ctb(xe); + while (id = vf_get_next_migrated_gt_id(xe), id >= 0) { + struct xe_gt *gt = xe_device_get_gt(xe, id); + + err = gt_vf_post_migration_fixups(gt); + if (err) + goto fail; + + set_bit(id, &fixed_gts); + } + + err = vf_post_migration_notify_resfix_done(xe, fixed_gts); + if (err) + goto fail; - vf_post_migration_notify_resfix_done(xe); xe_pm_runtime_put(xe); drm_notice(&xe->drm, "migration recovery ended\n"); return; -defer: - xe_pm_runtime_put(xe); - drm_dbg(&xe->drm, "migration recovery deferred\n"); - return; fail: xe_pm_runtime_put(xe); drm_err(&xe->drm, "migration recovery failed (%pe)\n", ERR_PTR(err)); @@ -282,18 +268,23 @@ static void migration_worker_func(struct work_struct *w) vf_post_migration_recovery(xe); } -static bool vf_ready_to_recovery_on_all_gts(struct xe_device *xe) +/* + * Check if post-restore recovery is coming on any of the GTs. + * @xe: the &xe_device struct instance + * + * Return: True if migration recovery worker will soon be running. Any worker currently + * executing does not affect the result. + */ +static bool vf_ready_to_recovery_on_any_gts(struct xe_device *xe) { struct xe_gt *gt; unsigned int id; for_each_gt(gt, xe, id) { - if (!test_bit(id, &xe->sriov.vf.migration.gt_flags)) { - xe_gt_sriov_dbg_verbose(gt, "still not ready to recover\n"); - return false; - } + if (test_bit(id, &xe->sriov.vf.migration.gt_flags)) + return true; } - return true; + return false; } /** @@ -308,13 +299,9 @@ void xe_sriov_vf_start_migration_recovery(struct xe_device *xe) xe_assert(xe, IS_SRIOV_VF(xe)); - if (!vf_ready_to_recovery_on_all_gts(xe)) + if (!vf_ready_to_recovery_on_any_gts(xe)) return; - WRITE_ONCE(xe->sriov.vf.migration.gt_flags, 0); - /* Ensure other threads see that no flags are set now. */ - smp_mb(); - started = queue_work(xe->sriov.wq, &xe->sriov.vf.migration.worker); drm_info(&xe->drm, "VF migration recovery %s\n", started ?
"scheduled" : "already in progress"); diff --git a/drivers/gpu/drm/xe/xe_sriov_vf_types.h b/drivers/gpu/drm/xe/xe_sriov_vf_types.h new file mode 100644 index 000000000000..8300416a6226 --- /dev/null +++ b/drivers/gpu/drm/xe/xe_sriov_vf_types.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2023-2025 Intel Corporation + */ + +#ifndef _XE_SRIOV_VF_TYPES_H_ +#define _XE_SRIOV_VF_TYPES_H_ + +#include <linux/types.h> +#include <linux/workqueue_types.h> + +/** + * struct xe_sriov_vf_relay_version - PF ABI version details. + */ +struct xe_sriov_vf_relay_version { + /** @major: major version. */ + u16 major; + /** @minor: minor version. */ + u16 minor; +}; + +/** + * struct xe_device_vf - Xe Virtual Function related data + * + * The data in this structure is valid only if driver is running in the + * @XE_SRIOV_MODE_VF mode. + */ +struct xe_device_vf { + /** @pf_version: negotiated VF/PF ABI version. */ + struct xe_sriov_vf_relay_version pf_version; + + /** @migration: VF Migration state data */ + struct { + /** @migration.worker: VF migration recovery worker */ + struct work_struct worker; + /** @migration.gt_flags: Per-GT request flags for VF migration recovery */ + unsigned long gt_flags; + } migration; +}; + +#endif diff --git a/drivers/gpu/drm/xe/xe_step.c b/drivers/gpu/drm/xe/xe_step.c index c77b5c317fa0..10e88f2c9615 100644 --- a/drivers/gpu/drm/xe/xe_step.c +++ b/drivers/gpu/drm/xe/xe_step.c @@ -5,6 +5,7 @@ #include "xe_step.h" +#include <kunit/visibility.h> #include <linux/bitfield.h> #include "xe_device.h" @@ -255,3 +256,4 @@ const char *xe_step_name(enum xe_step step) return "**"; } } +EXPORT_SYMBOL_IF_KUNIT(xe_step_name); diff --git a/drivers/gpu/drm/xe/xe_survivability_mode.c b/drivers/gpu/drm/xe/xe_survivability_mode.c index 1f710b3fc599..41705f5d52e3 100644 --- a/drivers/gpu/drm/xe/xe_survivability_mode.c +++ b/drivers/gpu/drm/xe/xe_survivability_mode.c @@ -14,6 +14,7 @@ #include "xe_device.h" #include "xe_gt.h" #include "xe_heci_gsc.h" +#include "xe_i2c.h" #include "xe_mmio.h" #include "xe_pcode_api.h" #include "xe_vsec.h" @@ -173,20 +174,22 @@ static int enable_survivability_mode(struct pci_dev *pdev) survivability->mode = true; ret = xe_heci_gsc_init(xe); - if (ret) { - /* - * But if it fails, device can't enter survivability - * so move it back for correct error handling - */ - survivability->mode = false; - return ret; - } + if (ret) + goto err; xe_vsec_init(xe); + ret = xe_i2c_probe(xe); + if (ret) + goto err; + dev_err(dev, "In Survivability Mode\n"); return 0; + +err: + survivability->mode = false; + return ret; } /** diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 26418e9bdff0..a7ff5975873f 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -3,13 +3,17 @@ * Copyright © 2024 Intel Corporation */ +#include <drm/drm_drv.h> + #include "xe_bo.h" #include "xe_gt_stats.h" #include "xe_gt_tlb_invalidation.h" #include "xe_migrate.h" #include "xe_module.h" +#include "xe_pm.h" #include "xe_pt.h" #include "xe_svm.h" +#include "xe_tile.h" #include "xe_ttm_vram_mgr.h" #include "xe_vm.h" #include "xe_vm_types.h" @@ -295,7 +299,7 @@ static void xe_svm_garbage_collector_work_func(struct work_struct *w) up_write(&vm->lock); } -#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) +#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) static struct xe_vram_region *page_to_vr(struct page *page) { @@ -483,16 +487,18 @@ static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr, return xe_svm_copy(pages, dma_addr, npages, 
XE_SVM_COPY_TO_SRAM); } -static struct xe_bo *to_xe_bo(struct drm_gpusvm_devmem *devmem_allocation) +static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation) { return container_of(devmem_allocation, struct xe_bo, devmem_allocation); } -static void xe_svm_devmem_release(struct drm_gpusvm_devmem *devmem_allocation) +static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation) { struct xe_bo *bo = to_xe_bo(devmem_allocation); + struct xe_device *xe = xe_bo_device(bo); xe_bo_put_async(bo); + xe_pm_runtime_put(xe); } static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset) @@ -505,7 +511,7 @@ static struct drm_buddy *tile_to_buddy(struct xe_tile *tile) return &tile->mem.vram.ttm.mm; } -static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocation, +static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation, unsigned long npages, unsigned long *pfn) { struct xe_bo *bo = to_xe_bo(devmem_allocation); @@ -528,7 +534,7 @@ static int xe_svm_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocatio return 0; } -static const struct drm_gpusvm_devmem_ops gpusvm_devmem_ops = { +static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = { .devmem_release = xe_svm_devmem_release, .populate_devmem_pfn = xe_svm_populate_devmem_pfn, .copy_to_devmem = xe_svm_copy_to_devmem, @@ -676,75 +682,69 @@ u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *v min(end, xe_vma_end(vma))); } -#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) +#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) static struct xe_vram_region *tile_to_vr(struct xe_tile *tile) { return &tile->mem.vram; } -/** - * xe_svm_alloc_vram()- Allocate device memory pages for range, - * migrating existing data. - * @vm: The VM. - * @tile: tile to allocate vram from - * @range: SVM range - * @ctx: DRM GPU SVM context - * - * Return: 0 on success, error code on failure. 
- */ -int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile, - struct xe_svm_range *range, - const struct drm_gpusvm_ctx *ctx) +static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap, + unsigned long start, unsigned long end, + struct mm_struct *mm, + unsigned long timeslice_ms) { - struct mm_struct *mm = vm->svm.gpusvm.mm; + struct xe_tile *tile = container_of(dpagemap, typeof(*tile), mem.vram.dpagemap); + struct xe_device *xe = tile_to_xe(tile); + struct device *dev = xe->drm.dev; struct xe_vram_region *vr = tile_to_vr(tile); struct drm_buddy_block *block; struct list_head *blocks; struct xe_bo *bo; - ktime_t end = 0; - int err; + ktime_t time_end = 0; + int err, idx; - range_debug(range, "ALLOCATE VRAM"); + if (!drm_dev_enter(&xe->drm, &idx)) + return -ENODEV; - if (!mmget_not_zero(mm)) - return -EFAULT; - mmap_read_lock(mm); + xe_pm_runtime_get(xe); -retry: - bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, - xe_svm_range_size(range), + retry: + bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL, end - start, ttm_bo_type_device, XE_BO_FLAG_VRAM_IF_DGFX(tile) | XE_BO_FLAG_CPU_ADDR_MIRROR); if (IS_ERR(bo)) { err = PTR_ERR(bo); - if (xe_vm_validate_should_retry(NULL, err, &end)) + if (xe_vm_validate_should_retry(NULL, err, &time_end)) goto retry; - goto unlock; + goto out_pm_put; } - drm_gpusvm_devmem_init(&bo->devmem_allocation, - vm->xe->drm.dev, mm, - &gpusvm_devmem_ops, - &tile->mem.vram.dpagemap, - xe_svm_range_size(range)); + drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm, + &dpagemap_devmem_ops, + &tile->mem.vram.dpagemap, + end - start); blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks; list_for_each_entry(block, blocks, link) block->private = vr; xe_bo_get(bo); - err = drm_gpusvm_migrate_to_devmem(&vm->svm.gpusvm, &range->base, - &bo->devmem_allocation, ctx); + + /* Ensure the device has a pm ref while there are device pages active. */ + xe_pm_runtime_get_noresume(xe); + err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation, mm, + start, end, timeslice_ms, + xe_svm_devm_owner(xe)); if (err) xe_svm_devmem_release(&bo->devmem_allocation); xe_bo_unlock(bo); xe_bo_put(bo); -unlock: - mmap_read_unlock(mm); - mmput(mm); +out_pm_put: + xe_pm_runtime_put(xe); + drm_dev_exit(idx); return err; } @@ -810,13 +810,13 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma, struct drm_gpusvm_ctx ctx = { .read_only = xe_vma_read_only(vma), .devmem_possible = IS_DGFX(vm->xe) && - IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR), + IS_ENABLED(CONFIG_DRM_XE_PAGEMAP), .check_pages_threshold = IS_DGFX(vm->xe) && - IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? SZ_64K : 0, + IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0, .devmem_only = atomic && IS_DGFX(vm->xe) && - IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR), + IS_ENABLED(CONFIG_DRM_XE_PAGEMAP), .timeslice_ms = atomic && IS_DGFX(vm->xe) && - IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) ? + IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? 
vm->xe->atomic_svm_timeslice_ms : 0, }; struct xe_svm_range *range; @@ -852,7 +852,7 @@ retry: if (--migrate_try_count >= 0 && xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) { - err = xe_svm_alloc_vram(vm, tile, range, &ctx); + err = xe_svm_alloc_vram(tile, range, &ctx); ctx.timeslice_ms <<= 1; /* Double timeslice if we have to retry */ if (err) { if (migrate_try_count || !ctx.devmem_only) { @@ -944,7 +944,7 @@ bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end) */ int xe_svm_bo_evict(struct xe_bo *bo) { - return drm_gpusvm_evict_to_ram(&bo->devmem_allocation); + return drm_pagemap_evict_to_ram(&bo->devmem_allocation); } /** @@ -997,7 +997,31 @@ int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range, return err; } -#if IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR) +#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) + +/** + * xe_svm_alloc_vram()- Allocate device memory pages for range, + * migrating existing data. + * @tile: tile to allocate vram from + * @range: SVM range + * @ctx: DRM GPU SVM context + * + * Return: 0 on success, error code on failure. + */ +int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range, + const struct drm_gpusvm_ctx *ctx) +{ + struct drm_pagemap *dpagemap; + + xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem); + range_debug(range, "ALLOCATE VRAM"); + + dpagemap = xe_tile_local_pagemap(tile); + return drm_pagemap_populate_mm(dpagemap, xe_svm_range_start(range), + xe_svm_range_end(range), + range->base.gpusvm->mm, + ctx->timeslice_ms); +} static struct drm_pagemap_device_addr xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap, @@ -1023,6 +1047,7 @@ xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap, static const struct drm_pagemap_ops xe_drm_pagemap_ops = { .device_map = xe_drm_pagemap_device_map, + .populate_mm = xe_drm_pagemap_populate_mm, }; /** @@ -1054,7 +1079,7 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr) vr->pagemap.range.start = res->start; vr->pagemap.range.end = res->end; vr->pagemap.nr_range = 1; - vr->pagemap.ops = drm_gpusvm_pagemap_ops_get(); + vr->pagemap.ops = drm_pagemap_pagemap_ops_get(); vr->pagemap.owner = xe_svm_devm_owner(xe); addr = devm_memremap_pages(dev, &vr->pagemap); @@ -1075,7 +1100,7 @@ int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr) return 0; } #else -int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile, +int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx) { diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h index 19ce4f2754a7..da9a69ea0bb1 100644 --- a/drivers/gpu/drm/xe/xe_svm.h +++ b/drivers/gpu/drm/xe/xe_svm.h @@ -70,8 +70,7 @@ int xe_svm_bo_evict(struct xe_bo *bo); void xe_svm_range_debug(struct xe_svm_range *range, const char *operation); -int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile, - struct xe_svm_range *range, +int xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range, const struct drm_gpusvm_ctx *ctx); struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr, @@ -237,10 +236,9 @@ void xe_svm_range_debug(struct xe_svm_range *range, const char *operation) { } -static inline -int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile, - struct xe_svm_range *range, - const struct drm_gpusvm_ctx *ctx) +static inline int +xe_svm_alloc_vram(struct xe_tile *tile, struct xe_svm_range *range, + const struct drm_gpusvm_ctx *ctx) { return -EOPNOTSUPP; } diff --git a/drivers/gpu/drm/xe/xe_sync.c 
b/drivers/gpu/drm/xe/xe_sync.c index f87276df18f2..82872a51f098 100644 --- a/drivers/gpu/drm/xe/xe_sync.c +++ b/drivers/gpu/drm/xe/xe_sync.c @@ -77,6 +77,7 @@ static void user_fence_worker(struct work_struct *w) { struct xe_user_fence *ufence = container_of(w, struct xe_user_fence, worker); + WRITE_ONCE(ufence->signalled, 1); if (mmget_not_zero(ufence->mm)) { kthread_use_mm(ufence->mm); if (copy_to_user(ufence->addr, &ufence->value, sizeof(ufence->value))) @@ -91,7 +92,6 @@ static void user_fence_worker(struct work_struct *w) * Wake up waiters only after updating the ufence state, allowing the UMD * to safely reuse the same ufence without encountering -EBUSY errors. */ - WRITE_ONCE(ufence->signalled, 1); wake_up_all(&ufence->xe->ufence_wq); user_fence_put(ufence); } diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c index 672faa0b67f1..86e9811e60ba 100644 --- a/drivers/gpu/drm/xe/xe_tile.c +++ b/drivers/gpu/drm/xe/xe_tile.c @@ -10,6 +10,7 @@ #include "xe_device.h" #include "xe_ggtt.h" #include "xe_gt.h" +#include "xe_memirq.h" #include "xe_migrate.h" #include "xe_pcode.h" #include "xe_sa.h" @@ -174,6 +175,12 @@ int xe_tile_init_noalloc(struct xe_tile *tile) int xe_tile_init(struct xe_tile *tile) { + int err; + + err = xe_memirq_init(&tile->memirq); + if (err) + return err; + tile->mem.kernel_bb_pool = xe_sa_bo_manager_init(tile, SZ_1M, 16); if (IS_ERR(tile->mem.kernel_bb_pool)) return PTR_ERR(tile->mem.kernel_bb_pool); diff --git a/drivers/gpu/drm/xe/xe_tile.h b/drivers/gpu/drm/xe/xe_tile.h index eb939316d55b..cc33e8733983 100644 --- a/drivers/gpu/drm/xe/xe_tile.h +++ b/drivers/gpu/drm/xe/xe_tile.h @@ -16,4 +16,21 @@ int xe_tile_init(struct xe_tile *tile); void xe_tile_migrate_wait(struct xe_tile *tile); +#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) +static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile) +{ + return &tile->mem.vram.dpagemap; +} +#else +static inline struct drm_pagemap *xe_tile_local_pagemap(struct xe_tile *tile) +{ + return NULL; +} +#endif + +static inline bool xe_tile_is_root(struct xe_tile *tile) +{ + return tile->id == 0; +} + #endif diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h index ccebd5f0878e..86323cf3be2c 100644 --- a/drivers/gpu/drm/xe/xe_trace_bo.h +++ b/drivers/gpu/drm/xe/xe_trace_bo.h @@ -33,7 +33,7 @@ DECLARE_EVENT_CLASS(xe_bo, TP_fast_assign( __assign_str(dev); - __entry->size = bo->size; + __entry->size = xe_bo_size(bo); __entry->flags = bo->flags; __entry->vm = bo->vm; ), @@ -73,7 +73,7 @@ TRACE_EVENT(xe_bo_move, TP_fast_assign( __entry->bo = bo; - __entry->size = bo->size; + __entry->size = xe_bo_size(bo); __assign_str(new_placement_name); __assign_str(old_placement_name); __assign_str(device_id); diff --git a/drivers/gpu/drm/xe/xe_uc.c b/drivers/gpu/drm/xe/xe_uc.c index 3a8751a8b92d..465bda355443 100644 --- a/drivers/gpu/drm/xe/xe_uc.c +++ b/drivers/gpu/drm/xe/xe_uc.c @@ -33,6 +33,22 @@ uc_to_xe(struct xe_uc *uc) } /* Should be called once at driver load only */ +int xe_uc_init_noalloc(struct xe_uc *uc) +{ + int ret; + + ret = xe_guc_init_noalloc(&uc->guc); + if (ret) + goto err; + + /* HuC and GSC have no early dependencies and will be initialized during xe_uc_init(). 
*/ + return 0; + +err: + xe_gt_err(uc_to_gt(uc), "Failed to early initialize uC (%pe)\n", ERR_PTR(ret)); + return ret; +} + int xe_uc_init(struct xe_uc *uc) { int ret; @@ -56,15 +72,17 @@ int xe_uc_init(struct xe_uc *uc) if (!xe_device_uc_enabled(uc_to_xe(uc))) return 0; - if (IS_SRIOV_VF(uc_to_xe(uc))) - return 0; + if (!IS_SRIOV_VF(uc_to_xe(uc))) { + ret = xe_wopcm_init(&uc->wopcm); + if (ret) + goto err; + } - ret = xe_wopcm_init(&uc->wopcm); + ret = xe_guc_min_load_for_hwconfig(&uc->guc); if (ret) goto err; return 0; - err: xe_gt_err(uc_to_gt(uc), "Failed to initialize uC (%pe)\n", ERR_PTR(ret)); return ret; @@ -126,28 +144,7 @@ int xe_uc_sanitize_reset(struct xe_uc *uc) return uc_reset(uc); } -/** - * xe_uc_init_hwconfig - minimally init Uc, read and parse hwconfig - * @uc: The UC object - * - * Return: 0 on success, negative error code on error. - */ -int xe_uc_init_hwconfig(struct xe_uc *uc) -{ - int ret; - - /* GuC submission not enabled, nothing to do */ - if (!xe_device_uc_enabled(uc_to_xe(uc))) - return 0; - - ret = xe_guc_min_load_for_hwconfig(&uc->guc); - if (ret) - return ret; - - return 0; -} - -static int vf_uc_init_hw(struct xe_uc *uc) +static int vf_uc_load_hw(struct xe_uc *uc) { int err; @@ -161,22 +158,30 @@ static int vf_uc_init_hw(struct xe_uc *uc) err = xe_gt_sriov_vf_connect(uc_to_gt(uc)); if (err) - return err; + goto err_out; uc->guc.submission_state.enabled = true; + err = xe_guc_opt_in_features_enable(&uc->guc); + if (err) + goto err_out; + err = xe_gt_record_default_lrcs(uc_to_gt(uc)); if (err) - return err; + goto err_out; return 0; + +err_out: + xe_guc_sanitize(&uc->guc); + return err; } /* * Should be called during driver load, after every GT reset, and after every * suspend to reload / auth the firmwares. */ -int xe_uc_init_hw(struct xe_uc *uc) +int xe_uc_load_hw(struct xe_uc *uc) { int ret; @@ -185,7 +190,7 @@ int xe_uc_init_hw(struct xe_uc *uc) return 0; if (IS_SRIOV_VF(uc_to_xe(uc))) - return vf_uc_init_hw(uc); + return vf_uc_load_hw(uc); ret = xe_huc_upload(&uc->huc); if (ret) @@ -201,15 +206,15 @@ int xe_uc_init_hw(struct xe_uc *uc) ret = xe_gt_record_default_lrcs(uc_to_gt(uc)); if (ret) - return ret; + goto err_out; ret = xe_guc_post_load_init(&uc->guc); if (ret) - return ret; + goto err_out; ret = xe_guc_pc_start(&uc->guc.pc); if (ret) - return ret; + goto err_out; xe_guc_engine_activity_enable_stats(&uc->guc); @@ -221,11 +226,10 @@ int xe_uc_init_hw(struct xe_uc *uc) xe_gsc_load_start(&uc->gsc); return 0; -} -int xe_uc_fini_hw(struct xe_uc *uc) -{ - return xe_uc_sanitize_reset(uc); +err_out: + xe_guc_sanitize(&uc->guc); + return ret; } int xe_uc_reset_prepare(struct xe_uc *uc) diff --git a/drivers/gpu/drm/xe/xe_uc.h b/drivers/gpu/drm/xe/xe_uc.h index c23e6f5e2514..21c9306098cf 100644 --- a/drivers/gpu/drm/xe/xe_uc.h +++ b/drivers/gpu/drm/xe/xe_uc.h @@ -8,11 +8,10 @@ struct xe_uc; +int xe_uc_init_noalloc(struct xe_uc *uc); int xe_uc_init(struct xe_uc *uc); -int xe_uc_init_hwconfig(struct xe_uc *uc); int xe_uc_init_post_hwconfig(struct xe_uc *uc); -int xe_uc_init_hw(struct xe_uc *uc); -int xe_uc_fini_hw(struct xe_uc *uc); +int xe_uc_load_hw(struct xe_uc *uc); void xe_uc_gucrc_disable(struct xe_uc *uc); int xe_uc_reset_prepare(struct xe_uc *uc); void xe_uc_stop_prepare(struct xe_uc *uc); diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c index 6d0869518652..9bbdde604923 100644 --- a/drivers/gpu/drm/xe/xe_uc_fw.c +++ b/drivers/gpu/drm/xe/xe_uc_fw.c @@ -115,10 +115,11 @@ struct fw_blobs_by_type { #define XE_GT_TYPE_ANY 
XE_GT_TYPE_UNINITIALIZED #define XE_GUC_FIRMWARE_DEFS(fw_def, mmp_ver, major_ver) \ - fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 44, 1)) \ - fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 44, 1)) \ + fw_def(PANTHERLAKE, GT_TYPE_ANY, major_ver(xe, guc, ptl, 70, 47, 0)) \ + fw_def(BATTLEMAGE, GT_TYPE_ANY, major_ver(xe, guc, bmg, 70, 45, 2)) \ + fw_def(LUNARLAKE, GT_TYPE_ANY, major_ver(xe, guc, lnl, 70, 45, 2)) \ fw_def(METEORLAKE, GT_TYPE_ANY, major_ver(i915, guc, mtl, 70, 44, 1)) \ - fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 44, 1)) \ + fw_def(DG2, GT_TYPE_ANY, major_ver(i915, guc, dg2, 70, 45, 2)) \ fw_def(DG1, GT_TYPE_ANY, major_ver(i915, guc, dg1, 70, 44, 1)) \ fw_def(ALDERLAKE_N, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) \ fw_def(ALDERLAKE_P, GT_TYPE_ANY, major_ver(i915, guc, adlp, 70, 44, 1)) \ @@ -127,6 +128,7 @@ struct fw_blobs_by_type { fw_def(TIGERLAKE, GT_TYPE_ANY, major_ver(i915, guc, tgl, 70, 44, 1)) #define XE_HUC_FIRMWARE_DEFS(fw_def, mmp_ver, no_ver) \ + fw_def(PANTHERLAKE, GT_TYPE_ANY, no_ver(xe, huc, ptl)) \ fw_def(BATTLEMAGE, GT_TYPE_ANY, no_ver(xe, huc, bmg)) \ fw_def(LUNARLAKE, GT_TYPE_ANY, no_ver(xe, huc, lnl)) \ fw_def(METEORLAKE, GT_TYPE_ANY, no_ver(i915, huc_gsc, mtl)) \ diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c index 04d1a43b81e3..d60c4b115304 100644 --- a/drivers/gpu/drm/xe/xe_vm.c +++ b/drivers/gpu/drm/xe/xe_vm.c @@ -1610,8 +1610,12 @@ static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile, for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) { vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i); - if (IS_ERR(vm->scratch_pt[id][i])) - return PTR_ERR(vm->scratch_pt[id][i]); + if (IS_ERR(vm->scratch_pt[id][i])) { + int err = PTR_ERR(vm->scratch_pt[id][i]); + + vm->scratch_pt[id][i] = NULL; + return err; + } xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]); } @@ -1640,7 +1644,7 @@ static void xe_vm_free_scratch(struct xe_vm *vm) } } -struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) +struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef) { struct drm_gem_object *vm_resv_obj; struct xe_vm *vm; @@ -1661,9 +1665,10 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) vm->xe = xe; vm->size = 1ull << xe->info.va_bits; - vm->flags = flags; + if (xef) + vm->xef = xe_file_get(xef); /** * GSC VMs are kernel-owned, only used for PXP ops and can sometimes be * manipulated under the PXP mutex. 
However, the PXP mutex can be taken @@ -1794,6 +1799,20 @@ struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) if (number_tiles > 1) vm->composite_fence_ctx = dma_fence_context_alloc(1); + if (xef && xe->info.has_asid) { + u32 asid; + + down_write(&xe->usm.lock); + err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, + XA_LIMIT(1, XE_MAX_ASID - 1), + &xe->usm.next_asid, GFP_KERNEL); + up_write(&xe->usm.lock); + if (err < 0) + goto err_unlock_close; + + vm->usm.asid = asid; + } + trace_xe_vm_create(vm); return vm; @@ -1814,6 +1833,8 @@ err_no_resv: for_each_tile(tile, xe, id) xe_range_fence_tree_fini(&vm->rftree[id]); ttm_lru_bulk_move_fini(&xe->ttm, &vm->lru_bulk_move); + if (vm->xef) + xe_file_put(vm->xef); kfree(vm); if (flags & XE_VM_FLAG_LR_MODE) xe_pm_runtime_put(xe); @@ -2059,9 +2080,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, struct xe_device *xe = to_xe_device(dev); struct xe_file *xef = to_xe_file(file); struct drm_xe_vm_create *args = data; - struct xe_tile *tile; struct xe_vm *vm; - u32 id, asid; + u32 id; int err; u32 flags = 0; @@ -2097,29 +2117,10 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data, if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) flags |= XE_VM_FLAG_FAULT_MODE; - vm = xe_vm_create(xe, flags); + vm = xe_vm_create(xe, flags, xef); if (IS_ERR(vm)) return PTR_ERR(vm); - if (xe->info.has_asid) { - down_write(&xe->usm.lock); - err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, - XA_LIMIT(1, XE_MAX_ASID - 1), - &xe->usm.next_asid, GFP_KERNEL); - up_write(&xe->usm.lock); - if (err < 0) - goto err_close_and_put; - - vm->usm.asid = asid; - } - - vm->xef = xe_file_get(xef); - - /* Record BO memory for VM pagetable created against client */ - for_each_tile(tile, xe, id) - if (vm->pt_root[id]) - xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo); - #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM) /* Warning: Security issue - never enable by default */ args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); @@ -2380,7 +2381,7 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops, ctx.read_only = xe_vma_read_only(vma); ctx.devmem_possible = IS_DGFX(vm->xe) && - IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR); + IS_ENABLED(CONFIG_DRM_XE_PAGEMAP); for_each_tile(tile, vm->xe, id) tile_mask |= 0x1 << id; @@ -2887,7 +2888,7 @@ static int check_ufence(struct xe_vma *vma) static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op) { - bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_DEVMEM_MIRROR); + bool devmem_possible = IS_DGFX(vm->xe) && IS_ENABLED(CONFIG_DRM_XE_PAGEMAP); struct xe_vma *vma = gpuva_to_vma(op->base.prefetch.va); int err = 0; @@ -2913,7 +2914,7 @@ static int prefetch_ranges(struct xe_vm *vm, struct xe_vma_op *op) if (xe_svm_range_needs_migrate_to_vram(svm_range, vma, region)) { tile = &vm->xe->tiles[region_to_mem_type[region] - XE_PL_VRAM0]; - err = xe_svm_alloc_vram(vm, tile, svm_range, &ctx); + err = xe_svm_alloc_vram(tile, svm_range, &ctx); if (err) { drm_dbg(&vm->xe->drm, "VRAM allocation failed, retry from userspace, asid=%u, gpusvm=%p, errno=%pe\n", vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err)); @@ -3421,6 +3422,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm, free_bind_ops: if (args->num_binds > 1) kvfree(*bind_ops); + *bind_ops = NULL; return err; } @@ -3466,9 +3468,9 @@ static int xe_vm_bind_ioctl_validate_bo(struct xe_device *xe, struct xe_bo *bo, { u16 coh_mode; - if (XE_IOCTL_DBG(xe, range > bo->size) || + if (XE_IOCTL_DBG(xe, 
range > xe_bo_size(bo)) || XE_IOCTL_DBG(xe, obj_offset > - bo->size - range)) { + xe_bo_size(bo) - range)) { return -EINVAL; } @@ -3527,7 +3529,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) struct xe_exec_queue *q = NULL; u32 num_syncs, num_ufence = 0; struct xe_sync_entry *syncs = NULL; - struct drm_xe_vm_bind_op *bind_ops; + struct drm_xe_vm_bind_op *bind_ops = NULL; struct xe_vma_ops vops; struct dma_fence *fence; int err; @@ -3771,7 +3773,7 @@ struct dma_fence *xe_vm_bind_kernel_bo(struct xe_vm *vm, struct xe_bo *bo, xe_vma_ops_init(&vops, vm, q, NULL, 0); - ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, bo->size, + ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, xe_bo_size(bo), DRM_XE_VM_BIND_OP_MAP, 0, 0, vm->xe->pat.idx[cache_lvl]); if (IS_ERR(ops)) { diff --git a/drivers/gpu/drm/xe/xe_vm.h b/drivers/gpu/drm/xe/xe_vm.h index 3475a118f666..2ecb417c19a2 100644 --- a/drivers/gpu/drm/xe/xe_vm.h +++ b/drivers/gpu/drm/xe/xe_vm.h @@ -26,7 +26,7 @@ struct xe_sync_entry; struct xe_svm_range; struct drm_exec; -struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags); +struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags, struct xe_file *xef); struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id); int xe_vma_cmp_vma_cb(const void *key, const struct rb_node *node); @@ -315,22 +315,14 @@ void xe_vm_snapshot_free(struct xe_vm_snapshot *snap); * Register this task as currently making bos resident for the vm. Intended * to avoid eviction by the same task of shared bos bound to the vm. * Call with the vm's resv lock held. - * - * Return: A pin cookie that should be used for xe_vm_clear_validating(). */ -static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm, - bool allow_res_evict) +static inline void xe_vm_set_validating(struct xe_vm *vm, bool allow_res_evict) { - struct pin_cookie cookie = {}; - if (vm && !allow_res_evict) { xe_vm_assert_held(vm); - cookie = lockdep_pin_lock(&xe_vm_resv(vm)->lock.base); /* Pairs with READ_ONCE in xe_vm_is_validating() */ WRITE_ONCE(vm->validating, current); } - - return cookie; } /** @@ -338,17 +330,14 @@ static inline struct pin_cookie xe_vm_set_validating(struct xe_vm *vm, * @vm: Pointer to the vm or NULL * @allow_res_evict: Eviction from @vm was allowed. Must be set to the same * value as for xe_vm_set_validation(). - * @cookie: Cookie obtained from xe_vm_set_validating(). * * Register this task as currently making bos resident for the vm. Intended * to avoid eviction by the same task of shared bos bound to the vm. * Call with the vm's resv lock held. */ -static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict, - struct pin_cookie cookie) +static inline void xe_vm_clear_validating(struct xe_vm *vm, bool allow_res_evict) { if (vm && !allow_res_evict) { - lockdep_unpin_lock(&xe_vm_resv(vm)->lock.base, cookie); /* Pairs with READ_ONCE in xe_vm_is_validating() */ WRITE_ONCE(vm->validating, NULL); } diff --git a/drivers/gpu/drm/xe/xe_vm_types.h b/drivers/gpu/drm/xe/xe_vm_types.h index bed6088e1bb3..8a07feef503b 100644 --- a/drivers/gpu/drm/xe/xe_vm_types.h +++ b/drivers/gpu/drm/xe/xe_vm_types.h @@ -266,7 +266,7 @@ struct xe_vm { * up for revalidation. Protected from access with the * @invalidated_lock. Removing items from the list * additionally requires @lock in write mode, and adding - * items to the list requires either the @userptr.notifer_lock in + * items to the list requires either the @userptr.notifier_lock in * write mode, OR @lock in write mode. 
*/ struct list_head invalidated; diff --git a/drivers/gpu/drm/xe/xe_vsec.c b/drivers/gpu/drm/xe/xe_vsec.c index 3e573b0b7ebd..8f23a27871b6 100644 --- a/drivers/gpu/drm/xe/xe_vsec.c +++ b/drivers/gpu/drm/xe/xe_vsec.c @@ -24,6 +24,7 @@ #define BMG_DEVICE_ID 0xE2F8 static struct intel_vsec_header bmg_telemetry = { + .rev = 1, .length = 0x10, .id = VSEC_ID_TELEMETRY, .num_entries = 2, @@ -32,28 +33,19 @@ static struct intel_vsec_header bmg_telemetry = { .offset = BMG_DISCOVERY_OFFSET, }; -static struct intel_vsec_header bmg_punit_crashlog = { +static struct intel_vsec_header bmg_crashlog = { + .rev = 1, .length = 0x10, .id = VSEC_ID_CRASHLOG, - .num_entries = 1, - .entry_size = 4, + .num_entries = 2, + .entry_size = 6, .tbir = 0, .offset = BMG_DISCOVERY_OFFSET + 0x60, }; -static struct intel_vsec_header bmg_oobmsm_crashlog = { - .length = 0x10, - .id = VSEC_ID_CRASHLOG, - .num_entries = 1, - .entry_size = 4, - .tbir = 0, - .offset = BMG_DISCOVERY_OFFSET + 0x78, -}; - static struct intel_vsec_header *bmg_capabilities[] = { &bmg_telemetry, - &bmg_punit_crashlog, - &bmg_oobmsm_crashlog, + &bmg_crashlog, NULL }; diff --git a/drivers/gpu/drm/xe/xe_wa.c b/drivers/gpu/drm/xe/xe_wa.c index 4a76de391abb..22a98600fd8f 100644 --- a/drivers/gpu/drm/xe/xe_wa.c +++ b/drivers/gpu/drm/xe/xe_wa.c @@ -10,6 +10,7 @@ #include <linux/compiler_types.h> #include <linux/fault-inject.h> +#include <generated/xe_device_wa_oob.h> #include <generated/xe_wa_oob.h> #include "regs/xe_engine_regs.h" @@ -285,6 +286,18 @@ static const struct xe_rtp_entry_sr gt_was[] = { XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)), XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), }, + { XE_RTP_NAME("16021865536"), + XE_RTP_RULES(MEDIA_VERSION(3002), + ENGINE_CLASS(VIDEO_DECODE)), + XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F10(0), IECPUNIT_CLKGATE_DIS)), + XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), + }, + { XE_RTP_NAME("16021867713"), + XE_RTP_RULES(MEDIA_VERSION(3002), + ENGINE_CLASS(VIDEO_DECODE)), + XE_RTP_ACTIONS(SET(VDBOX_CGCTL3F1C(0), MFXPIPE_CLKGATE_DIS)), + XE_RTP_ENTRY_FLAG(FOREACH_ENGINE), + }, { XE_RTP_NAME("14021486841"), XE_RTP_RULES(MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), ENGINE_CLASS(VIDEO_DECODE)), @@ -644,6 +657,10 @@ static const struct xe_rtp_entry_sr engine_was[] = { XE_RTP_ACTIONS(SET(RING_PSMI_CTL(0), RC_SEMA_IDLE_MSG_DISABLE, XE_RTP_ACTION_FLAG(ENGINE_BASE))) }, + { XE_RTP_NAME("14021402888"), + XE_RTP_RULES(GRAPHICS_VERSION(3003), FUNC(xe_rtp_match_first_render_or_compute)), + XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE)) + }, }; static const struct xe_rtp_entry_sr lrc_was[] = { @@ -860,9 +877,34 @@ static __maybe_unused const struct xe_rtp_entry oob_was[] = { static_assert(ARRAY_SIZE(oob_was) - 1 == _XE_WA_OOB_COUNT); +static __maybe_unused const struct xe_rtp_entry device_oob_was[] = { +#include <generated/xe_device_wa_oob.c> + {} +}; + +static_assert(ARRAY_SIZE(device_oob_was) - 1 == _XE_DEVICE_WA_OOB_COUNT); + __diag_pop(); /** + * xe_wa_process_device_oob - process OOB workaround table + * @xe: device instance to process workarounds for + * + * process OOB workaround table for this device, marking in @xe the + * workarounds that are active. 
+ */ + +void xe_wa_process_device_oob(struct xe_device *xe) +{ + struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(xe); + + xe_rtp_process_ctx_enable_active_tracking(&ctx, xe->wa_active.oob, ARRAY_SIZE(device_oob_was)); + + xe->wa_active.oob_initialized = true; + xe_rtp_process(&ctx, device_oob_was); +} + +/** * xe_wa_process_oob - process OOB workaround table * @gt: GT instance to process workarounds for * @@ -931,6 +973,28 @@ void xe_wa_process_lrc(struct xe_hw_engine *hwe) } /** + * xe_wa_device_init - initialize device with workaround oob bookkeeping + * @xe: Xe device instance to initialize + * + * Returns 0 for success, negative with error code otherwise + */ +int xe_wa_device_init(struct xe_device *xe) +{ + unsigned long *p; + + p = drmm_kzalloc(&xe->drm, + sizeof(*p) * BITS_TO_LONGS(ARRAY_SIZE(device_oob_was)), + GFP_KERNEL); + + if (!p) + return -ENOMEM; + + xe->wa_active.oob = p; + + return 0; +} + +/** * xe_wa_init - initialize gt with workaround bookkeeping * @gt: GT instance to initialize * @@ -964,6 +1028,16 @@ int xe_wa_init(struct xe_gt *gt) } ALLOW_ERROR_INJECTION(xe_wa_init, ERRNO); /* See xe_pci_probe() */ +void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p) +{ + size_t idx; + + drm_printf(p, "Device OOB Workarounds\n"); + for_each_set_bit(idx, xe->wa_active.oob, ARRAY_SIZE(device_oob_was)) + if (device_oob_was[idx].name) + drm_printf_indent(p, 1, "%s\n", device_oob_was[idx].name); +} + void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p) { size_t idx; diff --git a/drivers/gpu/drm/xe/xe_wa.h b/drivers/gpu/drm/xe/xe_wa.h index 52337405b5bc..f3880c65cb8d 100644 --- a/drivers/gpu/drm/xe/xe_wa.h +++ b/drivers/gpu/drm/xe/xe_wa.h @@ -13,17 +13,19 @@ struct xe_gt; struct xe_hw_engine; struct xe_tile; +int xe_wa_device_init(struct xe_device *xe); int xe_wa_init(struct xe_gt *gt); +void xe_wa_process_device_oob(struct xe_device *xe); void xe_wa_process_oob(struct xe_gt *gt); void xe_wa_process_gt(struct xe_gt *gt); void xe_wa_process_engine(struct xe_hw_engine *hwe); void xe_wa_process_lrc(struct xe_hw_engine *hwe); void xe_wa_apply_tile_workarounds(struct xe_tile *tile); +void xe_wa_device_dump(struct xe_device *xe, struct drm_printer *p); void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p); /** - * XE_WA - Out-of-band workarounds, that don't fit the lifecycle any - * other more specific type + * XE_WA - Out-of-band workarounds, to be queried and called as needed. * @gt__: gt instance * @id__: XE_OOB_<id__>, as generated by build system in generated/xe_wa_oob.h */ @@ -32,4 +34,20 @@ void xe_wa_dump(struct xe_gt *gt, struct drm_printer *p); test_bit(XE_WA_OOB_ ## id__, (gt__)->wa_active.oob); \ }) +/** + * XE_DEVICE_WA - Out-of-band Device workarounds, to be queried and called + * as needed. 
diff --git a/drivers/gpu/drm/xe/xe_wa_oob.rules b/drivers/gpu/drm/xe/xe_wa_oob.rules
index 0ee74a5b2407..e990f20eccfe 100644
--- a/drivers/gpu/drm/xe/xe_wa_oob.rules
+++ b/drivers/gpu/drm/xe/xe_wa_oob.rules
@@ -34,14 +34,16 @@
 14022293748	GRAPHICS_VERSION_RANGE(2001, 2002)
 		GRAPHICS_VERSION(2004)
 		GRAPHICS_VERSION_RANGE(3000, 3001)
+		GRAPHICS_VERSION(3003)
 22019794406	GRAPHICS_VERSION_RANGE(2001, 2002)
 		GRAPHICS_VERSION(2004)
 		GRAPHICS_VERSION_RANGE(3000, 3001)
+		GRAPHICS_VERSION(3003)
 22019338487	MEDIA_VERSION(2000)
-		GRAPHICS_VERSION(2001)
+		GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
 		MEDIA_VERSION(3000), MEDIA_STEP(A0, B0), FUNC(xe_rtp_match_not_sriov_vf)
 22019338487_display	PLATFORM(LUNARLAKE)
-16023588340	GRAPHICS_VERSION(2001)
+16023588340	GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_not_sriov_vf)
 14019789679	GRAPHICS_VERSION(1255)
 		GRAPHICS_VERSION_RANGE(1270, 2004)
 no_media_l3	MEDIA_VERSION(3000)
@@ -58,9 +60,15 @@ no_media_l3	MEDIA_VERSION(3000)
 		GRAPHICS_VERSION(1260), GRAPHICS_STEP(A0, B0)
 16023105232	GRAPHICS_VERSION_RANGE(2001, 3001)
 		MEDIA_VERSION_RANGE(1301, 3000)
+		MEDIA_VERSION(3002)
+		GRAPHICS_VERSION(3003)
 16026508708	GRAPHICS_VERSION_RANGE(1200, 3001)
 		MEDIA_VERSION_RANGE(1300, 3000)
+		MEDIA_VERSION(3002)
+		GRAPHICS_VERSION(3003)
 
 # SoC workaround - currently applies to all platforms with the following
 # primary GT GMDID
 14022085890	GRAPHICS_VERSION(2001)
+
+15015404425_disable	PLATFORM(PANTHERLAKE), MEDIA_STEP(B0, FOREVER)
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index dfa78a49a6d9..806ec66ee7f7 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -54,6 +54,7 @@ static const struct drm_framebuffer_funcs fb_funcs = {
 
 static struct drm_framebuffer *
 fb_create(struct drm_device *dev, struct drm_file *filp,
+	  const struct drm_format_info *info,
 	  const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct xen_drm_front_drm_info *drm_info = dev->dev_private;
@@ -61,7 +62,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
 	struct drm_gem_object *gem_obj;
 	int ret;
 
-	fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
+	fb = drm_gem_fb_create_with_funcs(dev, filp, info, mode_cmd, &fb_funcs);
 	if (IS_ERR(fb))
 		return fb;
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 02e1feaa6115..34ddbf98e81d 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -1720,7 +1720,8 @@ disconnected:
 	return connector_status_disconnected;
 }
 
-static enum drm_connector_status zynqmp_dp_bridge_detect(struct drm_bridge *bridge)
+static enum drm_connector_status
+zynqmp_dp_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
 {
 	struct zynqmp_dp *dp = bridge_to_dp(bridge);
 
@@ -1869,20 +1870,14 @@ static int zynqmp_dp_test_setup(struct zynqmp_dp *dp)
 static ssize_t zynqmp_dp_pattern_read(struct file *file, char __user *user_buf,
 				      size_t count, loff_t *ppos)
 {
-	struct dentry *dentry = file->f_path.dentry;
 	struct zynqmp_dp *dp = file->private_data;
 	char buf[16];
 	ssize_t ret;
 
-	ret = debugfs_file_get(dentry);
-	if (unlikely(ret))
-		return ret;
-
 	scoped_guard(mutex, &dp->lock)
 		ret = snprintf(buf, sizeof(buf), "%s\n",
 			       test_pattern_str[dp->test.pattern]);
 
-	debugfs_file_put(dentry);
 	return simple_read_from_buffer(user_buf, count, ppos, buf, ret);
 }
 
@@ -1890,27 +1885,20 @@ static ssize_t zynqmp_dp_pattern_write(struct file *file,
 				       const char __user *user_buf,
 				       size_t count, loff_t *ppos)
 {
-	struct dentry *dentry = file->f_path.dentry;
 	struct zynqmp_dp *dp = file->private_data;
 	char buf[16];
 	ssize_t ret;
 	int pattern;
 
-	ret = debugfs_file_get(dentry);
-	if (unlikely(ret))
-		return ret;
-
 	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf,
 				     count);
 	if (ret < 0)
-		goto out;
+		return ret;
 
 	buf[ret] = '\0';
 
 	pattern = sysfs_match_string(test_pattern_str, buf);
-	if (pattern < 0) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (pattern < 0)
+		return -EINVAL;
 
 	mutex_lock(&dp->lock);
 	dp->test.pattern = pattern;
@@ -1919,8 +1907,6 @@ static ssize_t zynqmp_dp_pattern_write(struct file *file,
 					  dp->test.custom) ?: ret;
 	mutex_unlock(&dp->lock);
 
-out:
-	debugfs_file_put(dentry);
 	return ret;
 }
 
@@ -2026,20 +2012,13 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_zynqmp_dp_active, zynqmp_dp_active_get,
 static ssize_t zynqmp_dp_custom_read(struct file *file, char __user *user_buf,
 				     size_t count, loff_t *ppos)
 {
-	struct dentry *dentry = file->f_path.dentry;
 	struct zynqmp_dp *dp = file->private_data;
 	ssize_t ret;
 
-	ret = debugfs_file_get(dentry);
-	if (unlikely(ret))
-		return ret;
-
 	mutex_lock(&dp->lock);
 	ret = simple_read_from_buffer(user_buf, count, ppos, &dp->test.custom,
 				      sizeof(dp->test.custom));
 	mutex_unlock(&dp->lock);
-
-	debugfs_file_put(dentry);
 	return ret;
 }
 
@@ -2047,18 +2026,13 @@ static ssize_t zynqmp_dp_custom_write(struct file *file,
 				      const char __user *user_buf,
 				      size_t count, loff_t *ppos)
 {
-	struct dentry *dentry = file->f_path.dentry;
 	struct zynqmp_dp *dp = file->private_data;
 	ssize_t ret;
 	char buf[sizeof(dp->test.custom)];
 
-	ret = debugfs_file_get(dentry);
-	if (unlikely(ret))
-		return ret;
-
 	ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
 	if (ret < 0)
-		goto out;
+		return ret;
 
 	mutex_lock(&dp->lock);
 	memcpy(dp->test.custom, buf, ret);
@@ -2066,9 +2040,6 @@ static ssize_t zynqmp_dp_custom_write(struct file *file,
 	ret = zynqmp_dp_set_test_pattern(dp, dp->test.pattern,
 					 dp->test.custom) ?: ret;
 	mutex_unlock(&dp->lock);
-
-out:
-	debugfs_file_put(dentry);
 	return ret;
 }
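The zynqmp_dp.c hunks drop the open-coded debugfs_file_get()/debugfs_file_put() pairs, which also lets the goto out error paths collapse into direct returns. The apparent rationale, stated here as an assumption: for files registered through the regular debugfs API, the debugfs proxy file_operations already take that reference around each ->read()/->write() call, so taking it again inside the handler is redundant. A sketch of the resulting handler shape under that assumption (names mirror the diff; example_pattern_read is hypothetical):

static ssize_t example_pattern_read(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	struct zynqmp_dp *dp = file->private_data;
	char buf[16];
	ssize_t ret;

	/* No debugfs_file_get()/put() here: assuming the debugfs proxy
	 * holds the reference for the duration of this call, early
	 * returns cannot leak it. */
	scoped_guard(mutex, &dp->lock)
		ret = snprintf(buf, sizeof(buf), "%s\n",
			       test_pattern_str[dp->test.pattern]);

	return simple_read_from_buffer(user_buf, count, ppos, buf, ret);
}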
diff --git a/drivers/gpu/drm/xlnx/zynqmp_kms.c b/drivers/gpu/drm/xlnx/zynqmp_kms.c
index b47463473472..2bee0a2275ed 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_kms.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_kms.c
@@ -373,6 +373,7 @@ static int zynqmp_dpsub_dumb_create(struct drm_file *file_priv,
 
 static struct drm_framebuffer *
 zynqmp_dpsub_fb_create(struct drm_device *drm, struct drm_file *file_priv,
+		       const struct drm_format_info *info,
 		       const struct drm_mode_fb_cmd2 *mode_cmd)
 {
 	struct zynqmp_dpsub *dpsub = to_zynqmp_dpsub(drm);
@@ -383,7 +384,7 @@ zynqmp_dpsub_fb_create(struct drm_device *drm, struct drm_file *file_priv,
 	for (i = 0; i < ARRAY_SIZE(cmd.pitches); ++i)
 		cmd.pitches[i] = ALIGN(cmd.pitches[i], dpsub->dma_align);
 
-	return drm_gem_fb_create(drm, file_priv, &cmd);
+	return drm_gem_fb_create(drm, file_priv, info, &cmd);
 }
 
 static const struct drm_mode_config_funcs zynqmp_dpsub_mode_config_funcs = {
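The xen and xlnx fb_create changes are two instances of the same tree-wide signature update: the DRM core now resolves mode_cmd->pixel_format into a struct drm_format_info once and hands it to the driver's .fb_create hook, rather than each driver re-deriving it. A minimal sketch of a hook under the new signature (example_fb_create is hypothetical; drm_gem_fb_create matches the call sites in the diff):

static struct drm_framebuffer *
example_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		  const struct drm_format_info *info,
		  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	/* The core already looked up *info from mode_cmd->pixel_format;
	 * the driver simply forwards it to the GEM framebuffer helper. */
	return drm_gem_fb_create(dev, file_priv, info, mode_cmd);
}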
