Diffstat (limited to 'drivers/gpu')
574 files changed, 26666 insertions, 22671 deletions
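Editor's note: the bulk of this pull follows a few recurring API migrations rather than driver-specific logic changes. struct drm_device loses its pdev field, so drivers recover the PCI device from the generic dev->dev pointer; the GPU scheduler's ring_mirror_list/node pair becomes pending_list/list; TTM buffer sizes move from bo->num_pages to bo->base.size (or bo->mem.num_pages where a page count is still needed); and the legacy .gamma_set hook plus drm_atomic_helper_legacy_gamma_set are dropped from atomic drivers. A minimal sketch of the PCI-device pattern follows; the function name example_save_pci_state is hypothetical, for illustration only:

#include <linux/pci.h>
#include <drm/drm_device.h>

/* Hypothetical helper: with drm_device.pdev removed, drivers derive
 * the struct pci_dev from the embedded struct device instead. */
static void example_save_pci_state(struct drm_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	pci_save_state(pdev);	/* was: pci_save_state(dev->pdev); */
}

The same to_pci_dev(dev->dev) conversion appears throughout the amdgpu, ast, and bochs hunks below wherever dev->pdev was previously dereferenced.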
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 0973f408d75f..8bf103de1594 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -214,10 +214,6 @@ config DRM_GEM_SHMEM_HELPER help Choose this if you need the GEM shmem helper functions -config DRM_VM - bool - depends on DRM && MMU - config DRM_SCHED tristate depends on DRM @@ -391,7 +387,6 @@ source "drivers/gpu/drm/xlnx/Kconfig" menuconfig DRM_LEGACY bool "Enable legacy drivers (DANGEROUS)" depends on DRM && MMU - select DRM_VM help Enable legacy DRI1 drivers. Those drivers expose unsafe and dangerous APIs to user-space, which can be used to circumvent access diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index fefaff4c832d..926adef289db 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -5,7 +5,7 @@ drm-y := drm_auth.o drm_cache.o \ drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \ - drm_memory.o drm_drv.o \ + drm_drv.o \ drm_sysfs.o drm_hashtab.o drm_mm.o \ drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \ drm_encoder_slave.o \ @@ -20,9 +20,9 @@ drm-y := drm_auth.o drm_cache.o \ drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \ drm_managed.o drm_vblank_work.o -drm-$(CONFIG_DRM_LEGACY) += drm_legacy_misc.o drm_bufs.o drm_context.o drm_dma.o drm_scatter.o drm_lock.o +drm-$(CONFIG_DRM_LEGACY) += drm_bufs.o drm_context.o drm_dma.o drm_legacy_misc.o drm_lock.o \ + drm_memory.o drm_scatter.o drm_vm.o drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o -drm-$(CONFIG_DRM_VM) += drm_vm.o drm-$(CONFIG_COMPAT) += drm_ioc32.o drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 714d7180ab43..f4ff8ddb52d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -55,7 +55,6 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_execbuf_util.h> #include <drm/amdgpu_drm.h> @@ -1288,6 +1287,8 @@ int amdgpu_enable_vblank_kms(struct drm_crtc *crtc); void amdgpu_disable_vblank_kms(struct drm_crtc *crtc); long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); +int amdgpu_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *filp); /* * functions used by amdgpu_encoder.c diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 2d991da2cead..0849b68e784f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -453,7 +453,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, struct amdgpu_bo *bo = mem->bo; uint64_t va = mem->va; struct list_head *list_bo_va = &mem->bo_va_list; - unsigned long bo_size = bo->tbo.mem.size; + unsigned long bo_size = bo->tbo.base.size; if (!va) { pr_err("Invalid VA when adding BO to VM\n"); @@ -1281,7 +1281,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( struct kgd_dev *kgd, struct kgd_mem *mem, uint64_t *size) { struct amdkfd_process_info *process_info = mem->process_info; - unsigned long bo_size = mem->bo->tbo.mem.size; + unsigned long bo_size = mem->bo->tbo.base.size; struct kfd_bo_va_list *entry, *tmp; struct bo_vm_reservation_context ctx; struct ttm_validate_buffer *bo_list_entry; @@ -1402,7 +1402,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( mutex_lock(&mem->lock); domain 
= mem->domain; - bo_size = bo->tbo.mem.size; + bo_size = bo->tbo.base.size; pr_debug("Map VA 0x%llx - 0x%llx to vm %p domain %s\n", mem->va, @@ -1506,7 +1506,7 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdkfd_process_info *process_info = ((struct amdgpu_vm *)vm)->process_info; - unsigned long bo_size = mem->bo->tbo.mem.size; + unsigned long bo_size = mem->bo->tbo.base.size; struct kfd_bo_va_list *entry; struct bo_vm_reservation_context ctx; int ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 6069cfab5216..0a25fecf488a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1428,7 +1428,7 @@ static void amdgpu_ib_preempt_job_recovery(struct drm_gpu_scheduler *sched) struct dma_fence *fence; spin_lock(&sched->job_list_lock); - list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + list_for_each_entry(s_job, &sched->pending_list, list) { fence = sched->ops->run_job(s_job); dma_fence_put(fence); } @@ -1460,10 +1460,10 @@ static void amdgpu_ib_preempt_mark_partial_job(struct amdgpu_ring *ring) no_preempt: spin_lock(&sched->job_list_lock); - list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { + list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { if (dma_fence_is_signaled(&s_job->s_fence->finished)) { /* remove job from ring_mirror_list */ - list_del_init(&s_job->node); + list_del_init(&s_job->list); sched->ops->free_job(s_job); continue; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 4d434803fb49..bfaa99354bbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -1106,8 +1106,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb) */ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) { - u64 space_needed = roundup_pow_of_two(adev->gmc.real_vram_size); - u32 rbar_size = order_base_2(((space_needed >> 20) | 1)) - 1; + int rbar_size = pci_rebar_bytes_to_size(adev->gmc.real_vram_size); struct pci_bus *root; struct resource *res; unsigned i; @@ -1138,6 +1137,10 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev) if (!res) return 0; + /* Limit the BAR size to what is available */ + rbar_size = min(fls(pci_rebar_get_possible_sizes(adev->pdev, 0)) - 1, + rbar_size); + /* Disable memory decoding while we change the BAR addresses and size */ pci_read_config_word(adev->pdev, PCI_COMMAND, &cmd); pci_write_config_word(adev->pdev, PCI_COMMAND, @@ -1423,9 +1426,9 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - pci_set_power_state(dev->pdev, PCI_D0); - amdgpu_device_load_pci_state(dev->pdev); - r = pci_enable_device(dev->pdev); + pci_set_power_state(pdev, PCI_D0); + amdgpu_device_load_pci_state(pdev); + r = pci_enable_device(pdev); if (r) DRM_WARN("pci_enable_device failed (%d)\n", r); amdgpu_device_resume(dev, true); @@ -1437,10 +1440,10 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, drm_kms_helper_poll_disable(dev); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; amdgpu_device_suspend(dev, true); - amdgpu_device_cache_pci_state(dev->pdev); + amdgpu_device_cache_pci_state(pdev); /* Shut down the device */ - pci_disable_device(dev->pdev); - pci_set_power_state(dev->pdev, PCI_D3cold); + 
pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3cold); dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } @@ -1703,8 +1706,7 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) adev->enable_virtual_display = false; if (amdgpu_virtual_display) { - struct drm_device *ddev = adev_to_drm(adev); - const char *pci_address_name = pci_name(ddev->pdev); + const char *pci_address_name = pci_name(adev->pdev); char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname; pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); @@ -3400,7 +3402,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, } } - pci_enable_pcie_error_reporting(adev->ddev.pdev); + pci_enable_pcie_error_reporting(adev->pdev); /* Post card if necessary */ if (amdgpu_device_need_post(adev)) { @@ -4158,8 +4160,8 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev) continue; spin_lock(&ring->sched.job_list_lock); - job = list_first_entry_or_null(&ring->sched.ring_mirror_list, - struct drm_sched_job, node); + job = list_first_entry_or_null(&ring->sched.pending_list, + struct drm_sched_job, list); spin_unlock(&ring->sched.job_list_lock); if (job) return true; @@ -4955,8 +4957,8 @@ pci_ers_result_t amdgpu_pci_error_detected(struct pci_dev *pdev, pci_channel_sta case pci_channel_io_normal: return PCI_ERS_RESULT_CAN_RECOVER; /* Fatal error, prepare for slot reset */ - case pci_channel_io_frozen: - /* + case pci_channel_io_frozen: + /* * Cancel and wait for all TDRs in progress if failing to * set adev->in_gpu_reset in amdgpu_device_lock_adev * @@ -5047,7 +5049,7 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) goto out; } - adev->in_pci_err_recovery = true; + adev->in_pci_err_recovery = true; r = amdgpu_device_pre_asic_reset(adev, NULL, &need_full_reset); adev->in_pci_err_recovery = false; if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 0f4cf8dc8f93..47e0b48dc26f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -281,7 +281,7 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, case TTM_PL_TT: sgt = drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, - bo->tbo.num_pages); + bo->tbo.ttm->num_pages); if (IS_ERR(sgt)) return sgt; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 3f0f3ce2611c..effa9062f541 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1206,7 +1206,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, if (ret) return ret; - ddev->pdev = pdev; pci_set_drvdata(pdev, ddev); ret = amdgpu_driver_load_kms(adev, ent->driver_data); @@ -1536,8 +1535,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) return 0; } -int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); - const struct drm_ioctl_desc amdgpu_ioctls_kms[] = { DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c index 0bf7d36c6686..51cd49c6f38f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c @@ -271,7 +271,7 @@ static int amdgpufb_create(struct drm_fb_helper *helper, DRM_INFO("fb depth is %d\n", fb->format->depth); DRM_INFO(" pitch is %d\n", 
fb->pitches[0]); - vga_switcheroo_client_fb_set(adev_to_drm(adev)->pdev, info); + vga_switcheroo_client_fb_set(adev->pdev, info); return 0; out: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index d0a1fee1f5f6..a5c42c3004a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -619,7 +619,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, int r = 0; if (args->va_address < AMDGPU_VA_RESERVED_SIZE) { - dev_dbg(&dev->pdev->dev, + dev_dbg(dev->dev, "va_address 0x%LX is in reserved area 0x%LX\n", args->va_address, AMDGPU_VA_RESERVED_SIZE); return -EINVAL; @@ -627,7 +627,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, if (args->va_address >= AMDGPU_GMC_HOLE_START && args->va_address < AMDGPU_GMC_HOLE_END) { - dev_dbg(&dev->pdev->dev, + dev_dbg(dev->dev, "va_address 0x%LX is in VA hole 0x%LX-0x%LX\n", args->va_address, AMDGPU_GMC_HOLE_START, AMDGPU_GMC_HOLE_END); @@ -639,14 +639,14 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE; vm_size -= AMDGPU_VA_RESERVED_SIZE; if (args->va_address + args->map_size > vm_size) { - dev_dbg(&dev->pdev->dev, + dev_dbg(dev->dev, "va_address 0x%llx is in top reserved area 0x%llx\n", args->va_address + args->map_size, vm_size); return -EINVAL; } if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) { - dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n", + dev_dbg(dev->dev, "invalid flags combination 0x%08X\n", args->flags); return -EINVAL; } @@ -658,7 +658,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, case AMDGPU_VA_OP_REPLACE: break; default: - dev_dbg(&dev->pdev->dev, "unsupported operation %d\n", + dev_dbg(dev->dev, "unsupported operation %d\n", args->operation); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 6e679db5e46f..fe1a39ffda72 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -120,7 +120,7 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); - if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached) + if (bo->ttm->num_pages != 1 || bo->ttm->caching == ttm_cached) return AMDGPU_BO_INVALID_OFFSET; if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c index 47cad23a6b9e..bca4dddd5a15 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c @@ -176,7 +176,7 @@ struct amdgpu_i2c_chan *amdgpu_i2c_create(struct drm_device *dev, i2c->rec = *rec; i2c->adapter.owner = THIS_MODULE; i2c->adapter.class = I2C_CLASS_DDC; - i2c->adapter.dev.parent = &dev->pdev->dev; + i2c->adapter.dev.parent = dev->dev; i2c->dev = dev; i2c_set_adapdata(&i2c->adapter, i2c); mutex_init(&i2c->mutex); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index dcfe8a3b03ff..ff48101bab55 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -271,7 +271,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) } /* Signal all jobs already scheduled to HW */ - list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + list_for_each_entry(s_job, &sched->pending_list, list) { struct drm_sched_fence *s_fence = 
s_job->s_fence; dma_fence_set_error(&s_fence->finished, -EHWPOISON); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index b16b32797624..3c37cf1ae8b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -142,7 +142,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) (amdgpu_is_atpx_hybrid() || amdgpu_has_atpx_dgpu_power_cntl()) && ((flags & AMD_IS_APU) == 0) && - !pci_is_thunderbolt_attached(dev->pdev)) + !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) flags |= AMD_IS_PX; parent = pci_upstream_bridge(adev->pdev); @@ -156,7 +156,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) */ r = amdgpu_device_init(adev, flags); if (r) { - dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); + dev_err(dev->dev, "Fatal error during GPU init\n"); goto out; } @@ -199,7 +199,7 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) acpi_status = amdgpu_acpi_init(adev); if (acpi_status) - dev_dbg(&dev->pdev->dev, "Error during ACPI methods call\n"); + dev_dbg(dev->dev, "Error during ACPI methods call\n"); if (adev->runpm) { /* only need to skip on ATPX */ @@ -735,10 +735,10 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) if (!dev_info) return -ENOMEM; - dev_info->device_id = dev->pdev->device; + dev_info->device_id = adev->pdev->device; dev_info->chip_rev = adev->rev_id; dev_info->external_rev = adev->external_rev_id; - dev_info->pci_rev = dev->pdev->revision; + dev_info->pci_rev = adev->pdev->revision; dev_info->family = adev->family; dev_info->num_shader_engines = adev->gfx.config.max_shader_engines; dev_info->num_shader_arrays_per_engine = adev->gfx.config.max_sh_per_se; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 25ec4d57333f..6cc9919b12cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -787,7 +787,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) if (r < 0) return r; - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); + r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 79120ec41396..9ac37569823f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -174,12 +174,12 @@ static inline void amdgpu_bo_unreserve(struct amdgpu_bo *bo) static inline unsigned long amdgpu_bo_size(struct amdgpu_bo *bo) { - return bo->tbo.num_pages << PAGE_SHIFT; + return bo->tbo.base.size; } static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo) { - return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE; + return bo->tbo.base.size / AMDGPU_GPU_PAGE_SIZE; } static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index ce8dc995c10c..792d20261846 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create, TP_fast_assign( __entry->bo = bo; - __entry->pages = bo->tbo.num_pages; + __entry->pages = bo->tbo.mem.num_pages; __entry->type = bo->tbo.mem.mem_type; __entry->prefer = bo->preferred_domains; __entry->allow = bo->allowed_domains; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b848f9e97613..9fd2157b133a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -46,7 +46,6 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_module.h> #include <drm/drm_debugfs.h> #include <drm/amdgpu_drm.h> @@ -551,25 +550,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = &bo->mem; int r; - if ((old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_VRAM) || - (old_mem->mem_type == TTM_PL_VRAM && - new_mem->mem_type == TTM_PL_SYSTEM)) { - hop->fpfn = 0; - hop->lpfn = 0; - hop->mem_type = TTM_PL_TT; - hop->flags = 0; - return -EMULTIHOP; - } - if (new_mem->mem_type == TTM_PL_TT) { r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); if (r) return r; } - amdgpu_bo_move_notify(bo, evict, new_mem); - /* Can't move a pinned BO */ abo = ttm_to_amdgpu_bo(bo); if (WARN_ON_ONCE(abo->tbo.pin_count > 0)) @@ -579,24 +565,23 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { ttm_bo_move_null(bo, new_mem); - return 0; + goto out; } if (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_TT) { ttm_bo_move_null(bo, new_mem); - return 0; + goto out; } - if (old_mem->mem_type == TTM_PL_TT && new_mem->mem_type == TTM_PL_SYSTEM) { r = ttm_bo_wait_ctx(bo, ctx); if (r) - goto fail; + return r; amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); ttm_resource_free(bo, &bo->mem); ttm_bo_assign_mem(bo, new_mem); - return 0; + goto out; } if (old_mem->mem_type == AMDGPU_PL_GDS || @@ -607,27 +592,37 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, new_mem->mem_type == AMDGPU_PL_OA) { /* Nothing to save here */ ttm_bo_move_null(bo, new_mem); - return 0; + goto out; } - if (!adev->mman.buffer_funcs_enabled) { + if (adev->mman.buffer_funcs_enabled) { + if (((old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_VRAM) || + (old_mem->mem_type == TTM_PL_VRAM && + new_mem->mem_type == TTM_PL_SYSTEM))) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = 0; + return -EMULTIHOP; + } + + r = amdgpu_move_blit(bo, evict, new_mem, old_mem); + } else { r = -ENODEV; - goto memcpy; } - r = amdgpu_move_blit(bo, evict, new_mem, old_mem); if (r) { -memcpy: /* Check that all memory is CPU accessible */ if (!amdgpu_mem_visible(adev, old_mem) || !amdgpu_mem_visible(adev, new_mem)) { pr_err("Move buffer fallback to memcpy unavailable\n"); - goto fail; + return r; } r = ttm_bo_move_memcpy(bo, ctx, new_mem); if (r) - goto fail; + return r; } if (bo->type == ttm_bo_type_device && @@ -639,14 +634,11 @@ memcpy: abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; } +out: /* update statistics */ - atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); + atomic64_add(bo->base.size, &adev->num_bytes_moved); + amdgpu_bo_move_notify(bo, evict, new_mem); return 0; -fail: - swap(*new_mem, bo->mem); - amdgpu_bo_move_notify(bo, false, new_mem); - swap(*new_mem, bo->mem); - return r; } /* @@ -925,8 +917,8 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, goto release_sg; /* convert SG to linear array of pages and dma addresses */ - drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, - gtt->ttm.dma_address, ttm->num_pages); + drm_prime_sg_to_dma_addr_array(ttm->sg, 
gtt->ttm.dma_address, + ttm->num_pages); return 0; @@ -1272,9 +1264,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev, ttm->sg = sgt; } - drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, - gtt->ttm.dma_address, - ttm->num_pages); + drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, + ttm->num_pages); return 0; } @@ -2131,7 +2122,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, return r; } - num_pages = bo->tbo.num_pages; + num_pages = bo->tbo.mem.num_pages; mm_node = bo->tbo.mem.mm_node; num_loops = 0; while (num_pages) { @@ -2161,7 +2152,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, } } - num_pages = bo->tbo.num_pages; + num_pages = bo->tbo.mem.num_pages; mm_node = bo->tbo.mem.mm_node; while (num_pages) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 0d5284b936e4..ea6a62f67e38 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -1160,6 +1160,6 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_bo_free_kernel(&bo, NULL, NULL); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 4a77c7424dfc..99b82f3c2617 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -496,6 +496,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_job *job; struct amdgpu_ib *ib; uint64_t addr; + void *msg = NULL; int i, r; r = amdgpu_job_alloc_with_ib(adev, 64, @@ -505,6 +506,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, ib = &job->ibs[0]; addr = amdgpu_bo_gpu_offset(bo); + msg = amdgpu_bo_kptr(bo); ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0); ib->ptr[1] = addr; ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0); @@ -523,7 +525,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, amdgpu_bo_fence(bo, f, false); amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); if (fence) *fence = dma_fence_get(f); @@ -536,7 +538,7 @@ err_free: err: amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg); return r; } @@ -890,6 +892,7 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) error: dma_fence_put(fence); amdgpu_bo_unreserve(bo); - amdgpu_bo_unref(&bo); + amdgpu_bo_free_kernel(&bo, NULL, NULL); + return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 0768c8686983..ad91c0c3c423 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -653,9 +653,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, if (!bo->parent) continue; - ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move); + ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem, + &vm->lru_bulk_move); if (bo->shadow) ttm_bo_move_to_lru_tail(&bo->shadow->tbo, + &bo->shadow->tbo.mem, &vm->lru_bulk_move); } spin_unlock(&ttm_bo_glob.lru_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c index 985e454463e1..7f30629f21a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v10_1.c @@ -554,7 +554,7 @@ static int mes_v10_1_allocate_eop_buf(struct amdgpu_device *adev) return r; } - memset(eop, 0, adev->mes.eop_gpu_obj->tbo.mem.size); + memset(eop, 0, 
adev->mes.eop_gpu_obj->tbo.base.size); amdgpu_bo_kunmap(adev->mes.eop_gpu_obj); amdgpu_bo_unreserve(adev->mes.eop_gpu_obj); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7c100650c2f4..c64e469ac6b0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5435,7 +5435,6 @@ static void dm_disable_vblank(struct drm_crtc *crtc) static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = { .reset = dm_crtc_reset_state, .destroy = amdgpu_dm_crtc_destroy, - .gamma_set = drm_atomic_helper_legacy_gamma_set, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .atomic_duplicate_state = dm_crtc_duplicate_state, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 5340c41b85f0..5b0a4a7479e2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -23,6 +23,7 @@ * */ +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_dp_mst_helper.h> #include <drm/drm_dp_helper.h> @@ -251,8 +252,10 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) static struct drm_encoder * dm_mst_atomic_best_encoder(struct drm_connector *connector, - struct drm_connector_state *connector_state) + struct drm_atomic_state *state) { + struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, + connector); struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc); diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c index 042d7b54a6de..895cdd991af6 100644 --- a/drivers/gpu/drm/arc/arcpgu_crtc.c +++ b/drivers/gpu/drm/arc/arcpgu_crtc.c @@ -162,15 +162,10 @@ static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = { .atomic_update = arc_pgu_plane_atomic_update, }; -static void arc_pgu_plane_destroy(struct drm_plane *plane) -{ - drm_plane_cleanup(plane); -} - static const struct drm_plane_funcs arc_pgu_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = arc_pgu_plane_destroy, + .destroy = drm_plane_cleanup, .reset = drm_atomic_helper_plane_reset, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, @@ -213,7 +208,7 @@ int arc_pgu_setup_crtc(struct drm_device *drm) ret = drm_crtc_init_with_planes(drm, &arcpgu->crtc, primary, NULL, &arc_pgu_crtc_funcs, NULL); if (ret) { - arc_pgu_plane_destroy(primary); + drm_plane_cleanup(primary); return ret; } diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c index f164818ec477..077d006b1fbf 100644 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ b/drivers/gpu/drm/arc/arcpgu_drv.c @@ -145,7 +145,7 @@ static void arcpgu_debugfs_init(struct drm_minor *minor) } #endif -static struct drm_driver arcpgu_drm_driver = { +static const struct drm_driver arcpgu_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, .name = "arcpgu", .desc = "ARC PGU Controller", diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index 4b485eb512e2..59172acb9738 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c 
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -550,7 +550,6 @@ static void komeda_crtc_vblank_disable(struct drm_crtc *crtc) } static const struct drm_crtc_funcs komeda_crtc_funcs = { - .gamma_set = drm_atomic_helper_legacy_gamma_set, .destroy = drm_crtc_cleanup, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c index 1f8195bad536..ca891ae14d36 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c @@ -152,7 +152,6 @@ static int komeda_parse_dt(struct device *dev, struct komeda_dev *mdev) ret = of_reserved_mem_device_init(dev); if (ret && ret != -ENODEV) return ret; - ret = 0; for_each_available_child_of_node(np, child) { if (of_node_name_eq(child, "pipeline")) { diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index 6b99df696384..034ee08482e0 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -81,10 +81,10 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_commit_modeset_enables(dev, old_state); - drm_atomic_helper_wait_for_flip_done(dev, old_state); - drm_atomic_helper_commit_hw_done(old_state); + drm_atomic_helper_wait_for_flip_done(dev, old_state); + drm_atomic_helper_cleanup_planes(dev, old_state); } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c index 452e505a1fd3..719a79728e24 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.c @@ -137,9 +137,10 @@ komeda_pipeline_get_first_component(struct komeda_pipeline *pipe, u32 comp_mask) { struct komeda_component *c = NULL; + unsigned long comp_mask_local = (unsigned long)comp_mask; int id; - id = find_first_bit((unsigned long *)&comp_mask, 32); + id = find_first_bit(&comp_mask_local, 32); if (id < 32) c = komeda_pipeline_get_component(pipe, id); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c index 8f32ae7c25d0..5c085116de3f 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c @@ -704,10 +704,10 @@ komeda_compiz_set_input(struct komeda_compiz *compiz, cin->layer_alpha = dflow->layer_alpha; old_st = komeda_component_get_old_state(&compiz->base, drm_st); - WARN_ON(!old_st); /* compare with old to check if this input has been changed */ - if (memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin))) + if (WARN_ON(!old_st) || + memcmp(&(to_compiz_st(old_st)->cins[idx]), cin, sizeof(*cin))) c_st->changed_active_inputs |= BIT(idx); komeda_component_add_input(c_st, &dflow->input, idx); diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c index 108e7a31bd26..494075ddbef6 100644 --- a/drivers/gpu/drm/arm/malidp_crtc.c +++ b/drivers/gpu/drm/arm/malidp_crtc.c @@ -510,7 +510,6 @@ static void malidp_crtc_disable_vblank(struct drm_crtc *crtc) } static const struct drm_crtc_funcs malidp_crtc_funcs = { - .gamma_set = drm_atomic_helper_legacy_gamma_set, .destroy = drm_crtc_cleanup, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, diff --git a/drivers/gpu/drm/armada/armada_crtc.c 
b/drivers/gpu/drm/armada/armada_crtc.c index 3ebcf5a52c8b..b7bb90ae787f 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -820,7 +820,6 @@ static const struct drm_crtc_funcs armada_crtc_funcs = { .cursor_set = armada_drm_crtc_cursor_set, .cursor_move = armada_drm_crtc_cursor_move, .destroy = armada_drm_crtc_destroy, - .gamma_set = drm_atomic_helper_legacy_gamma_set, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, diff --git a/drivers/gpu/drm/ast/ast_cursor.c b/drivers/gpu/drm/ast/ast_cursor.c index 742d43a7edf4..fac1ee79c372 100644 --- a/drivers/gpu/drm/ast/ast_cursor.c +++ b/drivers/gpu/drm/ast/ast_cursor.c @@ -39,7 +39,6 @@ static void ast_cursor_fini(struct ast_private *ast) for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) { gbo = ast->cursor.gbo[i]; - drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]); drm_gem_vram_unpin(gbo); drm_gem_vram_put(gbo); } @@ -53,14 +52,13 @@ static void ast_cursor_release(struct drm_device *dev, void *ptr) } /* - * Allocate cursor BOs and pins them at the end of VRAM. + * Allocate cursor BOs and pin them at the end of VRAM. */ int ast_cursor_init(struct ast_private *ast) { struct drm_device *dev = &ast->base; size_t size, i; struct drm_gem_vram_object *gbo; - struct dma_buf_map map; int ret; size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); @@ -77,15 +75,7 @@ int ast_cursor_init(struct ast_private *ast) drm_gem_vram_put(gbo); goto err_drm_gem_vram_put; } - ret = drm_gem_vram_vmap(gbo, &map); - if (ret) { - drm_gem_vram_unpin(gbo); - drm_gem_vram_put(gbo); - goto err_drm_gem_vram_put; - } - ast->cursor.gbo[i] = gbo; - ast->cursor.map[i] = map; } return drmm_add_action_or_reset(dev, ast_cursor_release, NULL); @@ -94,7 +84,6 @@ err_drm_gem_vram_put: while (i) { --i; gbo = ast->cursor.gbo[i]; - drm_gem_vram_vunmap(gbo, &ast->cursor.map[i]); drm_gem_vram_unpin(gbo); drm_gem_vram_put(gbo); } @@ -168,38 +157,37 @@ static void update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int h int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb) { struct drm_device *dev = &ast->base; - struct drm_gem_vram_object *gbo; - struct dma_buf_map map; - int ret; - void *src; + struct drm_gem_vram_object *dst_gbo = ast->cursor.gbo[ast->cursor.next_index]; + struct drm_gem_vram_object *src_gbo = drm_gem_vram_of_gem(fb->obj[0]); + struct dma_buf_map src_map, dst_map; void __iomem *dst; + void *src; + int ret; if (drm_WARN_ON_ONCE(dev, fb->width > AST_MAX_HWC_WIDTH) || drm_WARN_ON_ONCE(dev, fb->height > AST_MAX_HWC_HEIGHT)) return -EINVAL; - gbo = drm_gem_vram_of_gem(fb->obj[0]); - - ret = drm_gem_vram_pin(gbo, 0); + ret = drm_gem_vram_vmap(src_gbo, &src_map); if (ret) return ret; - ret = drm_gem_vram_vmap(gbo, &map); - if (ret) - goto err_drm_gem_vram_unpin; - src = map.vaddr; /* TODO: Use mapping abstraction properly */ + src = src_map.vaddr; /* TODO: Use mapping abstraction properly */ - dst = ast->cursor.map[ast->cursor.next_index].vaddr_iomem; + ret = drm_gem_vram_vmap(dst_gbo, &dst_map); + if (ret) + goto err_drm_gem_vram_vunmap; + dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */ /* do data transfer to cursor BO */ update_cursor_image(dst, src, fb->width, fb->height); - drm_gem_vram_vunmap(gbo, &map); - drm_gem_vram_unpin(gbo); + drm_gem_vram_vunmap(dst_gbo, &dst_map); + drm_gem_vram_vunmap(src_gbo, &src_map); return 0; -err_drm_gem_vram_unpin: - 
drm_gem_vram_unpin(gbo); +err_drm_gem_vram_vunmap: + drm_gem_vram_vunmap(src_gbo, &src_map); return ret; } @@ -251,17 +239,26 @@ static void ast_cursor_set_location(struct ast_private *ast, u16 x, u16 y, void ast_cursor_show(struct ast_private *ast, int x, int y, unsigned int offset_x, unsigned int offset_y) { + struct drm_device *dev = &ast->base; + struct drm_gem_vram_object *gbo = ast->cursor.gbo[ast->cursor.next_index]; + struct dma_buf_map map; u8 x_offset, y_offset; u8 __iomem *dst; u8 __iomem *sig; u8 jreg; + int ret; - dst = ast->cursor.map[ast->cursor.next_index].vaddr; + ret = drm_gem_vram_vmap(gbo, &map); + if (drm_WARN_ONCE(dev, ret, "drm_gem_vram_vmap() failed, ret=%d\n", ret)) + return; + dst = map.vaddr_iomem; /* TODO: Use mapping abstraction properly */ sig = dst + AST_HWC_SIZE; writel(x, sig + AST_HWC_SIGNATURE_X); writel(y, sig + AST_HWC_SIGNATURE_Y); + drm_gem_vram_vunmap(gbo, &map); + if (x < 0) { x_offset = (-x) + offset_x; x = 0; diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 667b450606ef..ea8164e7a6dc 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -147,7 +147,7 @@ static int ast_drm_freeze(struct drm_device *dev) error = drm_mode_config_helper_suspend(dev); if (error) return error; - pci_save_state(dev->pdev); + pci_save_state(to_pci_dev(dev->dev)); return 0; } @@ -162,7 +162,7 @@ static int ast_drm_resume(struct drm_device *dev) { int ret; - if (pci_enable_device(dev->pdev)) + if (pci_enable_device(to_pci_dev(dev->dev))) return -EIO; ret = ast_drm_thaw(dev); diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index ccaff81924ee..f871fc36c2f7 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -28,7 +28,6 @@ #ifndef __AST_DRV_H__ #define __AST_DRV_H__ -#include <linux/dma-buf-map.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/io.h> @@ -133,7 +132,6 @@ struct ast_private { struct { struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM]; - struct dma_buf_map map[AST_DEFAULT_HWC_NUM]; unsigned int next_index; } cursor; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 1b13199858cb..0ac3c2039c4b 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -67,8 +67,9 @@ uint8_t ast_get_index_reg_mask(struct ast_private *ast, static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) { - struct device_node *np = dev->pdev->dev.of_node; + struct device_node *np = dev->dev->of_node; struct ast_private *ast = to_ast_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); uint32_t data, jregd0, jregd1; /* Defaults */ @@ -85,7 +86,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) } /* Not all families have a P2A bridge */ - if (dev->pdev->device != PCI_CHIP_AST2000) + if (pdev->device != PCI_CHIP_AST2000) return; /* @@ -119,6 +120,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev) static int ast_detect_chip(struct drm_device *dev, bool *need_post) { struct ast_private *ast = to_ast_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); uint32_t jreg, scu_rev; /* @@ -143,19 +145,19 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) ast_detect_config_mode(dev, &scu_rev); /* Identify chipset */ - if (dev->pdev->revision >= 0x50) { + if (pdev->revision >= 0x50) { ast->chip = AST2600; drm_info(dev, "AST 2600 detected\n"); - } else if (dev->pdev->revision >= 0x40) { + } else if 
(pdev->revision >= 0x40) { ast->chip = AST2500; drm_info(dev, "AST 2500 detected\n"); - } else if (dev->pdev->revision >= 0x30) { + } else if (pdev->revision >= 0x30) { ast->chip = AST2400; drm_info(dev, "AST 2400 detected\n"); - } else if (dev->pdev->revision >= 0x20) { + } else if (pdev->revision >= 0x20) { ast->chip = AST2300; drm_info(dev, "AST 2300 detected\n"); - } else if (dev->pdev->revision >= 0x10) { + } else if (pdev->revision >= 0x10) { switch (scu_rev & 0x0300) { case 0x0200: ast->chip = AST1100; @@ -265,7 +267,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) static int ast_get_dram_info(struct drm_device *dev) { - struct device_node *np = dev->pdev->dev.of_node; + struct device_node *np = dev->dev->of_node; struct ast_private *ast = to_ast_private(dev); uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; uint32_t denum, num, div, ref_pll, dsel; @@ -409,10 +411,9 @@ struct ast_private *ast_device_create(const struct drm_driver *drv, return ast; dev = &ast->base; - dev->pdev = pdev; pci_set_drvdata(pdev, dev); - ast->regs = pci_iomap(dev->pdev, 1, 0); + ast->regs = pci_iomap(pdev, 1, 0); if (!ast->regs) return ERR_PTR(-EIO); @@ -421,14 +422,14 @@ struct ast_private *ast_device_create(const struct drm_driver *drv, * assume the chip has MMIO enabled by default (rev 0x20 * and higher). */ - if (!(pci_resource_flags(dev->pdev, 2) & IORESOURCE_IO)) { + if (!(pci_resource_flags(pdev, 2) & IORESOURCE_IO)) { drm_info(dev, "platform has no IO space, trying MMIO\n"); ast->ioregs = ast->regs + AST_IO_MM_OFFSET; } /* "map" IO regs if the above hasn't done so already */ if (!ast->ioregs) { - ast->ioregs = pci_iomap(dev->pdev, 2, 0); + ast->ioregs = pci_iomap(pdev, 2, 0); if (!ast->ioregs) return ERR_PTR(-EIO); } diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c index 8392ebde504b..7592f1b9e1f1 100644 --- a/drivers/gpu/drm/ast/ast_mm.c +++ b/drivers/gpu/drm/ast/ast_mm.c @@ -77,31 +77,32 @@ static u32 ast_get_vram_size(struct ast_private *ast) static void ast_mm_release(struct drm_device *dev, void *ptr) { struct ast_private *ast = to_ast_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); arch_phys_wc_del(ast->fb_mtrr); - arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), - pci_resource_len(dev->pdev, 0)); + arch_io_free_memtype_wc(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); } int ast_mm_init(struct ast_private *ast) { struct drm_device *dev = &ast->base; + struct pci_dev *pdev = to_pci_dev(dev->dev); u32 vram_size; int ret; vram_size = ast_get_vram_size(ast); - ret = drmm_vram_helper_init(dev, pci_resource_start(dev->pdev, 0), - vram_size); + ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), vram_size); if (ret) { drm_err(dev, "Error initializing VRAM MM; %d\n", ret); return ret; } - arch_io_reserve_memtype_wc(pci_resource_start(dev->pdev, 0), - pci_resource_len(dev->pdev, 0)); - ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), - pci_resource_len(dev->pdev, 0)); + arch_io_reserve_memtype_wc(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); return drmm_add_action_or_reset(dev, ast_mm_release, NULL); } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 9db371f4054f..988b270fea5e 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -903,7 +903,6 @@ static void ast_crtc_atomic_destroy_state(struct drm_crtc *crtc, static const struct 
drm_crtc_funcs ast_crtc_funcs = { .reset = ast_crtc_reset, - .gamma_set = drm_atomic_helper_legacy_gamma_set, .destroy = drm_crtc_cleanup, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, @@ -1107,6 +1106,7 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = { int ast_mode_config_init(struct ast_private *ast) { struct drm_device *dev = &ast->base; + struct pci_dev *pdev = to_pci_dev(dev->dev); int ret; ret = ast_cursor_init(ast); @@ -1122,7 +1122,7 @@ int ast_mode_config_init(struct ast_private *ast) dev->mode_config.min_height = 0; dev->mode_config.preferred_depth = 24; dev->mode_config.prefer_shadow = 1; - dev->mode_config.fb_base = pci_resource_start(dev->pdev, 0); + dev->mode_config.fb_base = pci_resource_start(pdev, 0); if (ast->chip == AST2100 || ast->chip == AST2200 || @@ -1259,7 +1259,7 @@ static struct ast_i2c_chan *ast_i2c_create(struct drm_device *dev) i2c->adapter.owner = THIS_MODULE; i2c->adapter.class = I2C_CLASS_DDC; - i2c->adapter.dev.parent = &dev->pdev->dev; + i2c->adapter.dev.parent = dev->dev; i2c->dev = dev; i2c_set_adapdata(&i2c->adapter, i2c); snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 8902c2f84bf9..0607658dde51 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -71,6 +71,7 @@ static void ast_set_def_ext_reg(struct drm_device *dev) { struct ast_private *ast = to_ast_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); u8 i, index, reg; const u8 *ext_reg_info; @@ -80,7 +81,7 @@ ast_set_def_ext_reg(struct drm_device *dev) if (ast->chip == AST2300 || ast->chip == AST2400 || ast->chip == AST2500) { - if (dev->pdev->revision >= 0x20) + if (pdev->revision >= 0x20) ext_reg_info = extreginfo_ast2300; else ext_reg_info = extreginfo_ast2300a0; @@ -366,11 +367,12 @@ static void ast_init_dram_reg(struct drm_device *dev) void ast_post_gpu(struct drm_device *dev) { struct ast_private *ast = to_ast_private(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); u32 reg; - pci_read_config_dword(dev->pdev, 0x04, ®); + pci_read_config_dword(pdev, 0x04, ®); reg |= 0x3; - pci_write_config_dword(dev->pdev, 0x04, reg); + pci_write_config_dword(pdev, 0x04, reg); ast_enable_vga(dev); ast_open_key(ast); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index c58fa00b4848..05ad75d155e8 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -473,7 +473,6 @@ static const struct drm_crtc_funcs atmel_hlcdc_crtc_funcs = { .atomic_destroy_state = atmel_hlcdc_crtc_destroy_state, .enable_vblank = atmel_hlcdc_crtc_enable_vblank, .disable_vblank = atmel_hlcdc_crtc_disable_vblank, - .gamma_set = drm_atomic_helper_legacy_gamma_set, }; int atmel_hlcdc_crtc_create(struct drm_device *dev) diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index fd454225fd19..b469624fe40d 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -121,7 +121,6 @@ static int bochs_pci_probe(struct pci_dev *pdev, if (ret) goto err_free_dev; - dev->pdev = pdev; pci_set_drvdata(pdev, dev); ret = bochs_load(dev); diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c index dce4672e3fc8..2d7380a9890e 100644 --- a/drivers/gpu/drm/bochs/bochs_hw.c +++ b/drivers/gpu/drm/bochs/bochs_hw.c @@ -110,7 +110,7 @@ int bochs_hw_load_edid(struct 
bochs_device *bochs) int bochs_hw_init(struct drm_device *dev) { struct bochs_device *bochs = dev->dev_private; - struct pci_dev *pdev = dev->pdev; + struct pci_dev *pdev = to_pci_dev(dev->dev); unsigned long addr, size, mem, ioaddr, iosize; u16 id; @@ -201,7 +201,7 @@ void bochs_hw_fini(struct drm_device *dev) release_region(VBE_DISPI_IOPORT_INDEX, 2); if (bochs->fb_map) iounmap(bochs->fb_map); - pci_release_regions(dev->pdev); + pci_release_regions(to_pci_dev(dev->dev)); kfree(bochs->edid); } diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index a0d392c338da..76555ae64e9c 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -1292,8 +1292,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id) err_unregister_cec: i2c_unregister_device(adv7511->i2c_cec); - if (adv7511->cec_clk) - clk_disable_unprepare(adv7511->cec_clk); + clk_disable_unprepare(adv7511->cec_clk); err_i2c_unregister_packet: i2c_unregister_device(adv7511->i2c_packet); err_i2c_unregister_edid: @@ -1311,8 +1310,7 @@ static int adv7511_remove(struct i2c_client *i2c) if (adv7511->type == ADV7533 || adv7511->type == ADV7535) adv7533_detach_dsi(adv7511); i2c_unregister_device(adv7511->i2c_cec); - if (adv7511->cec_clk) - clk_disable_unprepare(adv7511->cec_clk); + clk_disable_unprepare(adv7511->cec_clk); adv7511_uninit_regulators(adv7511); diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig index 511d67b16d14..ef8c230e0f62 100644 --- a/drivers/gpu/drm/bridge/cadence/Kconfig +++ b/drivers/gpu/drm/bridge/cadence/Kconfig @@ -13,7 +13,7 @@ config DRM_CDNS_MHDP8546 if DRM_CDNS_MHDP8546 config DRM_CDNS_MHDP8546_J721E - depends on ARCH_K3_J721E_SOC || COMPILE_TEST + depends on ARCH_K3 || COMPILE_TEST bool "J721E Cadence DPI/DP wrapper support" default y help diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c index 4d278573cdb9..05eb759da6fc 100644 --- a/drivers/gpu/drm/bridge/display-connector.c +++ b/drivers/gpu/drm/bridge/display-connector.c @@ -11,6 +11,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> +#include <linux/regulator/consumer.h> #include <drm/drm_bridge.h> #include <drm/drm_edid.h> @@ -20,6 +21,8 @@ struct display_connector { struct gpio_desc *hpd_gpio; int hpd_irq; + + struct regulator *dp_pwr; }; static inline struct display_connector * @@ -172,11 +175,12 @@ static int display_connector_probe(struct platform_device *pdev) of_property_read_string(pdev->dev.of_node, "label", &label); /* - * Get the HPD GPIO for DVI and HDMI connectors. If the GPIO can provide + * Get the HPD GPIO for DVI, HDMI and DP connectors. If the GPIO can provide * edge interrupts, register an interrupt handler. */ if (type == DRM_MODE_CONNECTOR_DVII || - type == DRM_MODE_CONNECTOR_HDMIA) { + type == DRM_MODE_CONNECTOR_HDMIA || + type == DRM_MODE_CONNECTOR_DisplayPort) { conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN); if (IS_ERR(conn->hpd_gpio)) { @@ -223,6 +227,38 @@ static int display_connector_probe(struct platform_device *pdev) } } + /* Get the DP PWR for DP connector. 
*/ + if (type == DRM_MODE_CONNECTOR_DisplayPort) { + int ret; + + conn->dp_pwr = devm_regulator_get_optional(&pdev->dev, "dp-pwr"); + + if (IS_ERR(conn->dp_pwr)) { + ret = PTR_ERR(conn->dp_pwr); + + switch (ret) { + case -ENODEV: + conn->dp_pwr = NULL; + break; + + case -EPROBE_DEFER: + return -EPROBE_DEFER; + + default: + dev_err(&pdev->dev, "failed to get DP PWR regulator: %d\n", ret); + return ret; + } + } + + if (conn->dp_pwr) { + ret = regulator_enable(conn->dp_pwr); + if (ret) { + dev_err(&pdev->dev, "failed to enable DP PWR regulator: %d\n", ret); + return ret; + } + } + } + conn->bridge.funcs = &display_connector_bridge_funcs; conn->bridge.of_node = pdev->dev.of_node; @@ -251,6 +287,9 @@ static int display_connector_remove(struct platform_device *pdev) { struct display_connector *conn = platform_get_drvdata(pdev); + if (conn->dp_pwr) + regulator_disable(conn->dp_pwr); + drm_bridge_remove(&conn->bridge); if (!IS_ERR(conn->bridge.ddc)) @@ -275,6 +314,9 @@ static const struct of_device_id display_connector_match[] = { }, { .compatible = "vga-connector", .data = (void *)DRM_MODE_CONNECTOR_VGA, + }, { + .compatible = "dp-connector", + .data = (void *)DRM_MODE_CONNECTOR_DisplayPort, }, {}, }; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 748df1cacd2b..dda4fa9a1a08 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -2327,12 +2327,6 @@ static enum drm_connector_status dw_hdmi_detect(struct dw_hdmi *hdmi) { enum drm_connector_status result; - mutex_lock(&hdmi->mutex); - hdmi->force = DRM_FORCE_UNSPECIFIED; - dw_hdmi_update_power(hdmi); - dw_hdmi_update_phy_mask(hdmi); - mutex_unlock(&hdmi->mutex); - result = hdmi->phy.ops->read_hpd(hdmi, hdmi->phy.data); mutex_lock(&hdmi->mutex); @@ -3446,8 +3440,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev, err_iahb: clk_disable_unprepare(hdmi->iahb_clk); - if (hdmi->cec_clk) - clk_disable_unprepare(hdmi->cec_clk); + clk_disable_unprepare(hdmi->cec_clk); err_isfr: clk_disable_unprepare(hdmi->isfr_clk); err_res: @@ -3471,8 +3464,7 @@ void dw_hdmi_remove(struct dw_hdmi *hdmi) clk_disable_unprepare(hdmi->iahb_clk); clk_disable_unprepare(hdmi->isfr_clk); - if (hdmi->cec_clk) - clk_disable_unprepare(hdmi->cec_clk); + clk_disable_unprepare(hdmi->cec_clk); if (hdmi->i2c) i2c_del_adapter(&hdmi->i2c->adap); diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c index 86b06975bfdd..e21078b2f8b5 100644 --- a/drivers/gpu/drm/bridge/thc63lvd1024.c +++ b/drivers/gpu/drm/bridge/thc63lvd1024.c @@ -202,7 +202,7 @@ static int thc63_probe(struct platform_device *pdev) thc63->dev = &pdev->dev; platform_set_drvdata(pdev, thc63); - thc63->vcc = devm_regulator_get_optional(thc63->dev, "vcc"); + thc63->vcc = devm_regulator_get(thc63->dev, "vcc"); if (IS_ERR(thc63->vcc)) { if (PTR_ERR(thc63->vcc) == -EPROBE_DEFER) return -EPROBE_DEFER; diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c index 4c7ad46fdd21..5311d03d49cc 100644 --- a/drivers/gpu/drm/drm_agpsupport.c +++ b/drivers/gpu/drm/drm_agpsupport.c @@ -45,13 +45,9 @@ #include "drm_legacy.h" -/** +/* * Get AGP information. * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a (output) drm_agp_info structure. * \return zero on success or a negative number on failure. 
* * Verifies the AGP device has been initialized and acquired and fills in the @@ -92,7 +88,7 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data, return 0; } -/** +/* * Acquire the AGP device. * * \param dev DRM device that is to acquire AGP. @@ -103,11 +99,13 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data, */ int drm_agp_acquire(struct drm_device *dev) { + struct pci_dev *pdev = to_pci_dev(dev->dev); + if (!dev->agp) return -ENODEV; if (dev->agp->acquired) return -EBUSY; - dev->agp->bridge = agp_backend_acquire(dev->pdev); + dev->agp->bridge = agp_backend_acquire(pdev); if (!dev->agp->bridge) return -ENODEV; dev->agp->acquired = 1; @@ -115,13 +113,9 @@ int drm_agp_acquire(struct drm_device *dev) } EXPORT_SYMBOL(drm_agp_acquire); -/** +/* * Acquire the AGP device (ioctl). * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg user argument. * \return zero on success or a negative number on failure. * * Verifies the AGP device hasn't been acquired before and calls @@ -133,7 +127,7 @@ int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, return drm_agp_acquire((struct drm_device *) file_priv->minor->dev); } -/** +/* * Release the AGP device. * * \param dev DRM device that is to release AGP. @@ -157,7 +151,7 @@ int drm_agp_release_ioctl(struct drm_device *dev, void *data, return drm_agp_release(dev); } -/** +/* * Enable the AGP bus. * * \param dev DRM device that has previously acquired AGP. @@ -187,13 +181,9 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data, return drm_agp_enable(dev, *mode); } -/** +/* * Allocate AGP memory. * - * \param inode device inode. - * \param file_priv file private pointer. - * \param cmd command. - * \param arg pointer to a drm_agp_buffer structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and has been acquired, allocates the @@ -242,7 +232,7 @@ int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, return drm_agp_alloc(dev, request); } -/** +/* * Search for the AGP memory entry associated with a handle. * * \param dev DRM device structure. @@ -263,13 +253,9 @@ static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device *dev, return NULL; } -/** +/* * Unbind AGP memory from the GATT (ioctl). * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_agp_binding structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and acquired, looks-up the AGP memory @@ -285,7 +271,7 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) entry = drm_agp_lookup_entry(dev, request->handle); if (!entry || !entry->bound) return -EINVAL; - ret = drm_unbind_agp(entry->memory); + ret = agp_unbind_memory(entry->memory); if (ret == 0) entry->bound = 0; return ret; @@ -301,13 +287,9 @@ int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, return drm_agp_unbind(dev, request); } -/** +/* * Bind AGP memory into the GATT (ioctl) * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_agp_binding structure. * \return zero on success or a negative number on failure. 
* * Verifies the AGP device is present and has been acquired and that no memory @@ -326,7 +308,7 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) if (!entry || entry->bound) return -EINVAL; page = DIV_ROUND_UP(request->offset, PAGE_SIZE); - retcode = drm_bind_agp(entry->memory, page); + retcode = agp_bind_memory(entry->memory, page); if (retcode) return retcode; entry->bound = dev->agp->base + (page << PAGE_SHIFT); @@ -345,13 +327,9 @@ int drm_agp_bind_ioctl(struct drm_device *dev, void *data, return drm_agp_bind(dev, request); } -/** +/* * Free AGP memory (ioctl). * - * \param inode device inode. - * \param file_priv DRM file private. - * \param cmd command. - * \param arg pointer to a drm_agp_buffer structure. * \return zero on success or a negative number on failure. * * Verifies the AGP device is present and has been acquired and looks up the @@ -369,11 +347,11 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) if (!entry) return -EINVAL; if (entry->bound) - drm_unbind_agp(entry->memory); + agp_unbind_memory(entry->memory); list_del(&entry->head); - drm_free_agp(entry->memory, entry->pages); + agp_free_memory(entry->memory); kfree(entry); return 0; } @@ -388,7 +366,7 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data, return drm_agp_free(dev, request); } -/** +/* * Initialize the AGP resources. * * \return pointer to a drm_agp_head structure. @@ -402,14 +380,15 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data, */ struct drm_agp_head *drm_agp_init(struct drm_device *dev) { + struct pci_dev *pdev = to_pci_dev(dev->dev); struct drm_agp_head *head = NULL; head = kzalloc(sizeof(*head), GFP_KERNEL); if (!head) return NULL; - head->bridge = agp_find_bridge(dev->pdev); + head->bridge = agp_find_bridge(pdev); if (!head->bridge) { - head->bridge = agp_backend_acquire(dev->pdev); + head->bridge = agp_backend_acquire(pdev); if (!head->bridge) { kfree(head); return NULL; @@ -453,8 +432,8 @@ void drm_legacy_agp_clear(struct drm_device *dev) list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) { if (entry->bound) - drm_unbind_agp(entry->memory); - drm_free_agp(entry->memory, entry->pages); + agp_unbind_memory(entry->memory); + agp_free_memory(entry->memory); kfree(entry); } INIT_LIST_HEAD(&dev->agp->memory); diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index b2d20eb6c807..dda60051854b 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -964,7 +964,8 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state, struct __drm_connnectors_state *c; int alloc = max(index + 1, config->num_connector); - c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL); + c = krealloc_array(state->connectors, alloc, + sizeof(*state->connectors), GFP_KERNEL); if (!c) return ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index ddd0e3239150..a84dc427cf82 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -122,7 +122,8 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, continue; if (funcs->atomic_best_encoder) - new_encoder = funcs->atomic_best_encoder(connector, new_conn_state); + new_encoder = funcs->atomic_best_encoder(connector, + state); else if (funcs->best_encoder) new_encoder = funcs->best_encoder(connector); else @@ -345,8 +346,7 @@ update_connector_routing(struct drm_atomic_state *state, funcs = connector->helper_private; 
if (funcs->atomic_best_encoder) - new_encoder = funcs->atomic_best_encoder(connector, - new_connector_state); + new_encoder = funcs->atomic_best_encoder(connector, state); else if (funcs->best_encoder) new_encoder = funcs->best_encoder(connector); else @@ -1313,7 +1313,7 @@ static void drm_atomic_helper_commit_writebacks(struct drm_device *dev, if (new_conn_state->writeback_job && new_conn_state->writeback_job->fb) { WARN_ON(connector->connector_type != DRM_MODE_CONNECTOR_WRITEBACK); - funcs->atomic_commit(connector, new_conn_state); + funcs->atomic_commit(connector, old_state); } } } @@ -2040,6 +2040,9 @@ crtc_or_fake_commit(struct drm_atomic_state *state, struct drm_crtc *crtc) * should always call this function from their * &drm_mode_config_funcs.atomic_commit hook. * + * Drivers that need to extend the commit setup to private objects can use the + * &drm_mode_config_helper_funcs.atomic_commit_setup hook. + * * To be able to use this support drivers need to use a few more helper * functions. drm_atomic_helper_wait_for_dependencies() must be called before * actually committing the hardware state, and for nonblocking commits this call @@ -2083,8 +2086,11 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; struct drm_crtc_commit *commit; + const struct drm_mode_config_helper_funcs *funcs; int i, ret; + funcs = state->dev->mode_config.helper_private; + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { commit = kzalloc(sizeof(*commit), GFP_KERNEL); if (!commit) @@ -2169,6 +2175,9 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state, new_plane_state->commit = drm_crtc_commit_get(commit); } + if (funcs && funcs->atomic_commit_setup) + return funcs->atomic_commit_setup(state); + return 0; } EXPORT_SYMBOL(drm_atomic_helper_setup_commit); @@ -3500,76 +3509,6 @@ fail: EXPORT_SYMBOL(drm_atomic_helper_page_flip_target); /** - * drm_atomic_helper_legacy_gamma_set - set the legacy gamma correction table - * @crtc: CRTC object - * @red: red correction table - * @green: green correction table - * @blue: green correction table - * @size: size of the tables - * @ctx: lock acquire context - * - * Implements support for legacy gamma correction table for drivers - * that support color management through the DEGAMMA_LUT/GAMMA_LUT - * properties. See drm_crtc_enable_color_mgmt() and the containing chapter for - * how the atomic color management and gamma tables work. - */ -int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, - u16 *red, u16 *green, u16 *blue, - uint32_t size, - struct drm_modeset_acquire_ctx *ctx) -{ - struct drm_device *dev = crtc->dev; - struct drm_atomic_state *state; - struct drm_crtc_state *crtc_state; - struct drm_property_blob *blob = NULL; - struct drm_color_lut *blob_data; - int i, ret = 0; - bool replaced; - - state = drm_atomic_state_alloc(crtc->dev); - if (!state) - return -ENOMEM; - - blob = drm_property_create_blob(dev, - sizeof(struct drm_color_lut) * size, - NULL); - if (IS_ERR(blob)) { - ret = PTR_ERR(blob); - blob = NULL; - goto fail; - } - - /* Prepare GAMMA_LUT with the legacy values. 
*/ - blob_data = blob->data; - for (i = 0; i < size; i++) { - blob_data[i].red = red[i]; - blob_data[i].green = green[i]; - blob_data[i].blue = blue[i]; - } - - state->acquire_ctx = ctx; - crtc_state = drm_atomic_get_crtc_state(state, crtc); - if (IS_ERR(crtc_state)) { - ret = PTR_ERR(crtc_state); - goto fail; - } - - /* Reset DEGAMMA_LUT and CTM properties. */ - replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); - replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); - replaced |= drm_property_replace_blob(&crtc_state->gamma_lut, blob); - crtc_state->color_mgmt_changed |= replaced; - - ret = drm_atomic_commit(state); - -fail: - drm_atomic_state_put(state); - drm_property_blob_put(blob); - return ret; -} -EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set); - -/** * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to * the input end of a bridge * @bridge: bridge control structure diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index ae2234aae93d..26e2f2ffd255 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -185,21 +185,15 @@ * plane does not expose the "alpha" property, then this is * assumed to be 1.0 * - * IN_FORMATS: - * Blob property which contains the set of buffer format and modifier - * pairs supported by this plane. The blob is a drm_format_modifier_blob - * struct. Without this property the plane doesn't support buffers with - * modifiers. Userspace cannot change this property. - * * Note that all the property extensions described here apply either to the * plane or the CRTC (e.g. for the background color, which currently is not * exposed and assumed to be black). * * SCALING_FILTER: - * * Indicates scaling filter to be used for plane scaler * * The value of this property can be one of the following: + * * Default: * Driver's default scaling filter * Nearest Neighbor: diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c index 7a01d0918861..e3d77dfefb0a 100644 --- a/drivers/gpu/drm/drm_bufs.c +++ b/drivers/gpu/drm/drm_bufs.c @@ -77,6 +77,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev, if ((entry->map->offset & 0xffffffff) == (map->offset & 0xffffffff)) return entry; + break; default: /* Make gcc happy */ ; } @@ -325,7 +326,7 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset, * As we're limiting the address to 2^32-1 (or less), * casting it down to 32 bits is no problem, but we * need to point to a 64bit variable first. 
*/ - map->handle = dma_alloc_coherent(&dev->pdev->dev, + map->handle = dma_alloc_coherent(dev->dev, map->size, &map->offset, GFP_KERNEL); @@ -555,7 +556,7 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map) case _DRM_SCATTER_GATHER: break; case _DRM_CONSISTENT: - dma_free_coherent(&dev->pdev->dev, + dma_free_coherent(dev->dev, map->size, map->handle, map->offset); diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 0fe3c496002a..79a50ef1250f 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -30,6 +30,8 @@ #include <linux/export.h> #include <linux/highmem.h> +#include <linux/mem_encrypt.h> +#include <xen/xen.h> #include <drm/drm_cache.h> @@ -176,3 +178,34 @@ drm_clflush_virt_range(void *addr, unsigned long length) #endif } EXPORT_SYMBOL(drm_clflush_virt_range); + +bool drm_need_swiotlb(int dma_bits) +{ + struct resource *tmp; + resource_size_t max_iomem = 0; + + /* + * Xen paravirtual hosts require swiotlb regardless of requested dma + * transfer size. + * + * NOTE: Really, what it requires is use of the dma_alloc_coherent + * allocator used in ttm_dma_populate() instead of + * ttm_populate_and_map_pages(), which bounce buffers so much in + * Xen it leads to swiotlb buffer exhaustion. + */ + if (xen_pv_domain()) + return true; + + /* + * Enforce dma_alloc_coherent when memory encryption is active as well + * for the same reasons as for Xen paravirtual hosts. + */ + if (mem_encrypt_active()) + return true; + + for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) + max_iomem = max(max_iomem, tmp->end); + + return max_iomem > ((u64)1 << dma_bits); +} +EXPORT_SYMBOL(drm_need_swiotlb); diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index fe573acf1067..ce45e380f4a2 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -314,9 +314,6 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map struct dma_buf_map *map = &buffer->map; int ret; - if (dma_buf_map_is_set(map)) - goto out; - /* * FIXME: The dependency on GEM here isn't required, we could * convert the driver handle to a dma-buf instead and use the @@ -329,7 +326,6 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, struct dma_buf_map *map if (ret) return ret; -out: *map_copy = *map; return 0; diff --git a/drivers/gpu/drm/drm_color_mgmt.c b/drivers/gpu/drm/drm_color_mgmt.c index 3bcabc2f6e0e..bb14f488c8f6 100644 --- a/drivers/gpu/drm/drm_color_mgmt.c +++ b/drivers/gpu/drm/drm_color_mgmt.c @@ -22,6 +22,7 @@ #include <linux/uaccess.h> +#include <drm/drm_atomic.h> #include <drm/drm_color_mgmt.h> #include <drm/drm_crtc.h> #include <drm/drm_device.h> @@ -89,9 +90,8 @@ * modes) appropriately. * * There is also support for a legacy gamma table, which is set up by calling - * drm_mode_crtc_set_gamma_size(). Drivers which support both should use - * drm_atomic_helper_legacy_gamma_set() to alias the legacy gamma ramp with the - * "GAMMA_LUT" property above. + * drm_mode_crtc_set_gamma_size(). The DRM core will then alias the legacy gamma + * ramp with "GAMMA_LUT" or, if that is unavailable, "DEGAMMA_LUT". * * Support for different non RGB color encodings is controlled through * &drm_plane specific COLOR_ENCODING and COLOR_RANGE properties. They @@ -156,9 +156,6 @@ EXPORT_SYMBOL(drm_color_ctm_s31_32_to_qm_n); * optional. The gamma and degamma properties are only attached if * their size is not 0 and ctm_property is only attached if has_ctm is * true. 
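
A minimal usage sketch, assuming a driver-owned "crtc" and purely
illustrative 256-entry tables (real LUT sizes depend on the hardware):

	drm_mode_crtc_set_gamma_size(crtc, 256);
	drm_crtc_enable_color_mgmt(crtc, 256, true, 256);
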
- *
- * Drivers should use drm_atomic_helper_legacy_gamma_set() to implement the
- * legacy &drm_crtc_funcs.gamma_set callback.
 */
void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
				uint degamma_lut_size,
@@ -232,6 +229,116 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
 EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
 
 /**
+ * drm_crtc_supports_legacy_gamma - does the crtc support the legacy gamma correction table
+ * @crtc: CRTC object
+ *
+ * Returns true if the given crtc supports setting the legacy gamma
+ * correction table.
+ */
+static bool drm_crtc_supports_legacy_gamma(struct drm_crtc *crtc)
+{
+	u32 gamma_id = crtc->dev->mode_config.gamma_lut_property->base.id;
+	u32 degamma_id = crtc->dev->mode_config.degamma_lut_property->base.id;
+
+	if (!crtc->gamma_size)
+		return false;
+
+	if (crtc->funcs->gamma_set)
+		return true;
+
+	return !!(drm_mode_obj_find_prop_id(&crtc->base, gamma_id) ||
+		  drm_mode_obj_find_prop_id(&crtc->base, degamma_id));
+}
+
+/**
+ * drm_crtc_legacy_gamma_set - set the legacy gamma correction table
+ * @crtc: CRTC object
+ * @red: red correction table
+ * @green: green correction table
+ * @blue: blue correction table
+ * @size: size of the tables
+ * @ctx: lock acquire context
+ *
+ * Implements support for legacy gamma correction table for drivers
+ * that have set drm_crtc_funcs.gamma_set or that support color management
+ * through the DEGAMMA_LUT/GAMMA_LUT properties. See
+ * drm_crtc_enable_color_mgmt() and the containing chapter for
+ * how the atomic color management and gamma tables work.
+ *
+ * This function sets the gamma using drm_crtc_funcs.gamma_set if set, or
+ * alternatively using crtc color management properties.
+ */
+static int drm_crtc_legacy_gamma_set(struct drm_crtc *crtc,
+				     u16 *red, u16 *green, u16 *blue,
+				     u32 size,
+				     struct drm_modeset_acquire_ctx *ctx)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_atomic_state *state;
+	struct drm_crtc_state *crtc_state;
+	struct drm_property_blob *blob;
+	struct drm_color_lut *blob_data;
+	u32 gamma_id = dev->mode_config.gamma_lut_property->base.id;
+	u32 degamma_id = dev->mode_config.degamma_lut_property->base.id;
+	bool use_gamma_lut;
+	int i, ret = 0;
+	bool replaced;
+
+	if (crtc->funcs->gamma_set)
+		return crtc->funcs->gamma_set(crtc, red, green, blue, size, ctx);
+
+	if (drm_mode_obj_find_prop_id(&crtc->base, gamma_id))
+		use_gamma_lut = true;
+	else if (drm_mode_obj_find_prop_id(&crtc->base, degamma_id))
+		use_gamma_lut = false;
+	else
+		return -ENODEV;
+
+	state = drm_atomic_state_alloc(crtc->dev);
+	if (!state)
+		return -ENOMEM;
+
+	blob = drm_property_create_blob(dev,
+					sizeof(struct drm_color_lut) * size,
+					NULL);
+	if (IS_ERR(blob)) {
+		ret = PTR_ERR(blob);
+		blob = NULL;
+		goto fail;
+	}
+
+	/* Prepare GAMMA_LUT with the legacy values. */
+	blob_data = blob->data;
+	for (i = 0; i < size; i++) {
+		blob_data[i].red = red[i];
+		blob_data[i].green = green[i];
+		blob_data[i].blue = blue[i];
+	}
+
+	state->acquire_ctx = ctx;
+	crtc_state = drm_atomic_get_crtc_state(state, crtc);
+	if (IS_ERR(crtc_state)) {
+		ret = PTR_ERR(crtc_state);
+		goto fail;
+	}
+
+	/* Set GAMMA_LUT and reset DEGAMMA_LUT and CTM */
+	replaced = drm_property_replace_blob(&crtc_state->degamma_lut,
+					     use_gamma_lut ? NULL : blob);
+	replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL);
+	replaced |= drm_property_replace_blob(&crtc_state->gamma_lut,
+					      use_gamma_lut ?
blob : NULL); + crtc_state->color_mgmt_changed |= replaced; + + ret = drm_atomic_commit(state); + +fail: + drm_atomic_state_put(state); + drm_property_blob_put(blob); + return ret; +} + +/** * drm_mode_gamma_set_ioctl - set the gamma table * @dev: DRM device * @data: ioctl data @@ -262,7 +369,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, if (!crtc) return -ENOENT; - if (crtc->funcs->gamma_set == NULL) + if (!drm_crtc_supports_legacy_gamma(crtc)) return -ENOSYS; /* memcpy into gamma store */ @@ -290,8 +397,8 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev, goto out; } - ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, - crtc->gamma_size, &ctx); + ret = drm_crtc_legacy_gamma_set(crtc, r_base, g_base, b_base, + crtc->gamma_size, &ctx); out: DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index f927976eca50..9c4f9947b194 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -38,6 +38,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_fourcc.h> +#include <drm/drm_managed.h> #include <drm/drm_modeset_lock.h> #include <drm/drm_atomic.h> #include <drm/drm_auth.h> @@ -67,7 +68,7 @@ * &drm_crtc_funcs.page_flip and &drm_crtc_funcs.cursor_set2, and other legacy * operations like &drm_crtc_funcs.gamma_set. For atomic drivers all these * features are controlled through &drm_property and - * &drm_mode_config_funcs.atomic_check and &drm_mode_config_funcs.atomic_check. + * &drm_mode_config_funcs.atomic_check. */ /** @@ -230,40 +231,22 @@ struct dma_fence *drm_crtc_create_fence(struct drm_crtc *crtc) * * Setting MODE_ID to 0 will release reserved resources for the CRTC. * SCALING_FILTER: - * Atomic property for setting the scaling filter for CRTC scaler + * Atomic property for setting the scaling filter for CRTC scaler * - * The value of this property can be one of the following: - * Default: - * Driver's default scaling filter - * Nearest Neighbor: - * Nearest Neighbor scaling filter + * The value of this property can be one of the following: * + * Default: + * Driver's default scaling filter + * Nearest Neighbor: + * Nearest Neighbor scaling filter */ -/** - * drm_crtc_init_with_planes - Initialise a new CRTC object with - * specified primary and cursor planes. - * @dev: DRM device - * @crtc: CRTC object to init - * @primary: Primary plane for CRTC - * @cursor: Cursor plane for CRTC - * @funcs: callbacks for the new CRTC - * @name: printf style format string for the CRTC name, or NULL for default name - * - * Inits a new object created as base part of a driver crtc object. Drivers - * should use this function instead of drm_crtc_init(), which is only provided - * for backwards compatibility with drivers which do not yet support universal - * planes). For really simple hardware which has only 1 plane look at - * drm_simple_display_pipe_init() instead. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, - struct drm_plane *primary, - struct drm_plane *cursor, - const struct drm_crtc_funcs *funcs, - const char *name, ...) 
+__printf(6, 0) +static int __drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, + struct drm_plane *primary, + struct drm_plane *cursor, + const struct drm_crtc_funcs *funcs, + const char *name, va_list ap) { struct drm_mode_config *config = &dev->mode_config; int ret; @@ -291,11 +274,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, return ret; if (name) { - va_list ap; - - va_start(ap, name); crtc->name = kvasprintf(GFP_KERNEL, name, ap); - va_end(ap); } else { crtc->name = kasprintf(GFP_KERNEL, "crtc-%d", drm_num_crtcs(dev)); @@ -339,8 +318,101 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, return 0; } + +/** + * drm_crtc_init_with_planes - Initialise a new CRTC object with + * specified primary and cursor planes. + * @dev: DRM device + * @crtc: CRTC object to init + * @primary: Primary plane for CRTC + * @cursor: Cursor plane for CRTC + * @funcs: callbacks for the new CRTC + * @name: printf style format string for the CRTC name, or NULL for default name + * + * Inits a new object created as base part of a driver crtc object. Drivers + * should use this function instead of drm_crtc_init(), which is only provided + * for backwards compatibility with drivers which do not yet support universal + * planes). For really simple hardware which has only 1 plane look at + * drm_simple_display_pipe_init() instead. + * The &drm_crtc_funcs.destroy hook should call drm_crtc_cleanup() and kfree() + * the crtc structure. The crtc structure should not be allocated with + * devm_kzalloc(). + * + * The @primary and @cursor planes are only relevant for legacy uAPI, see + * &drm_crtc.primary and &drm_crtc.cursor. + * + * Note: consider using drmm_crtc_alloc_with_planes() instead of + * drm_crtc_init_with_planes() to let the DRM managed resource infrastructure + * take care of cleanup and deallocation. + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc, + struct drm_plane *primary, + struct drm_plane *cursor, + const struct drm_crtc_funcs *funcs, + const char *name, ...) +{ + va_list ap; + int ret; + + WARN_ON(!funcs->destroy); + + va_start(ap, name); + ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs, + name, ap); + va_end(ap); + + return ret; +} EXPORT_SYMBOL(drm_crtc_init_with_planes); +static void drmm_crtc_alloc_with_planes_cleanup(struct drm_device *dev, + void *ptr) +{ + struct drm_crtc *crtc = ptr; + + drm_crtc_cleanup(crtc); +} + +void *__drmm_crtc_alloc_with_planes(struct drm_device *dev, + size_t size, size_t offset, + struct drm_plane *primary, + struct drm_plane *cursor, + const struct drm_crtc_funcs *funcs, + const char *name, ...) 
+{
+	void *container;
+	struct drm_crtc *crtc;
+	va_list ap;
+	int ret;
+
+	if (WARN_ON(!funcs || funcs->destroy))
+		return ERR_PTR(-EINVAL);
+
+	container = drmm_kzalloc(dev, size, GFP_KERNEL);
+	if (!container)
+		return ERR_PTR(-ENOMEM);
+
+	crtc = container + offset;
+
+	va_start(ap, name);
+	ret = __drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
+					  name, ap);
+	va_end(ap);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = drmm_add_action_or_reset(dev, drmm_crtc_alloc_with_planes_cleanup,
+				       crtc);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return container;
+}
+EXPORT_SYMBOL(__drmm_crtc_alloc_with_planes);
+
 /**
  * drm_crtc_cleanup - Clean up the core crtc usage
  * @crtc: CRTC to cleanup
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 5bd0934004e3..3c5b04abb66f 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -950,6 +950,38 @@ bool drm_dp_downstream_444_to_420_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE]
 EXPORT_SYMBOL(drm_dp_downstream_444_to_420_conversion);
 
 /**
+ * drm_dp_downstream_rgb_to_ycbcr_conversion() - determine downstream facing port
+ * RGB->YCbCr conversion capability
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: downstream facing port capabilities
+ * @color_spc: Colorspace for which conversion cap is sought
+ *
+ * Returns: whether the downstream facing port can convert RGB->YCbCr for a given
+ * colorspace.
+ */
+bool drm_dp_downstream_rgb_to_ycbcr_conversion(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+					       const u8 port_cap[4],
+					       u8 color_spc)
+{
+	if (!drm_dp_is_branch(dpcd))
+		return false;
+
+	if (dpcd[DP_DPCD_REV] < 0x13)
+		return false;
+
+	switch (port_cap[0] & DP_DS_PORT_TYPE_MASK) {
+	case DP_DS_PORT_TYPE_HDMI:
+		if ((dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) == 0)
+			return false;
+
+		return port_cap[3] & color_spc;
+	default:
+		return false;
+	}
+}
+EXPORT_SYMBOL(drm_dp_downstream_rgb_to_ycbcr_conversion);
+
+/**
  * drm_dp_downstream_mode() - return a mode for downstream facing port
  * @dev: DRM device
  * @dpcd: DisplayPort configuration data
@@ -2596,3 +2628,533 @@ void drm_dp_vsc_sdp_log(const char *level, struct device *dev,
 #undef DP_SDP_LOG
 }
 EXPORT_SYMBOL(drm_dp_vsc_sdp_log);
+
+/**
+ * drm_dp_get_pcon_max_frl_bw() - maximum FRL bandwidth supported by the PCON
+ * @dpcd: DisplayPort configuration data
+ * @port_cap: port capabilities
+ *
+ * Returns the maximum FRL bandwidth supported by the PCON in Gbps,
+ * or 0 if FRL is not supported.
+ */
+int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
+			       const u8 port_cap[4])
+{
+	int bw;
+	u8 buf;
+
+	buf = port_cap[2];
+	bw = buf & DP_PCON_MAX_FRL_BW;
+
+	switch (bw) {
+	case DP_PCON_MAX_9GBPS:
+		return 9;
+	case DP_PCON_MAX_18GBPS:
+		return 18;
+	case DP_PCON_MAX_24GBPS:
+		return 24;
+	case DP_PCON_MAX_32GBPS:
+		return 32;
+	case DP_PCON_MAX_40GBPS:
+		return 40;
+	case DP_PCON_MAX_48GBPS:
+		return 48;
+	case DP_PCON_MAX_0GBPS:
+	default:
+		return 0;
+	}
+}
+EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw);
+
+/**
+ * drm_dp_pcon_frl_prepare() - Prepare PCON for FRL.
+ * @aux: DisplayPort AUX channel
+ * @enable_frl_ready_hpd: Configure DP_PCON_ENABLE_HPD_READY.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd)
+{
+	int ret;
+	u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE |
+		 DP_PCON_ENABLE_LINK_FRL_MODE;
+
+	if (enable_frl_ready_hpd)
+		buf |= DP_PCON_ENABLE_HPD_READY;
+
+	ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_prepare);
+
+/**
+ * drm_dp_pcon_is_frl_ready() - Is PCON ready for FRL
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns true if the PCON is ready for FRL, else returns false.
+ */
+bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux)
+{
+	int ret;
+	u8 buf;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+	if (ret < 0)
+		return false;
+
+	if (buf & DP_PCON_FRL_READY)
+		return true;
+
+	return false;
+}
+EXPORT_SYMBOL(drm_dp_pcon_is_frl_ready);
+
+/**
+ * drm_dp_pcon_frl_configure_1() - Set HDMI Link Configuration, Step 1
+ * @aux: DisplayPort AUX channel
+ * @max_frl_gbps: maximum FRL bandwidth to be configured between PCON and HDMI sink
+ * @concurrent_mode: true if concurrent mode of operation is required,
+ * false otherwise.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
+				bool concurrent_mode)
+{
+	int ret;
+	u8 buf;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+	if (ret < 0)
+		return ret;
+
+	if (concurrent_mode)
+		buf |= DP_PCON_ENABLE_CONCURRENT_LINK;
+	else
+		buf &= ~DP_PCON_ENABLE_CONCURRENT_LINK;
+
+	switch (max_frl_gbps) {
+	case 9:
+		buf |= DP_PCON_ENABLE_MAX_BW_9GBPS;
+		break;
+	case 18:
+		buf |= DP_PCON_ENABLE_MAX_BW_18GBPS;
+		break;
+	case 24:
+		buf |= DP_PCON_ENABLE_MAX_BW_24GBPS;
+		break;
+	case 32:
+		buf |= DP_PCON_ENABLE_MAX_BW_32GBPS;
+		break;
+	case 40:
+		buf |= DP_PCON_ENABLE_MAX_BW_40GBPS;
+		break;
+	case 48:
+		buf |= DP_PCON_ENABLE_MAX_BW_48GBPS;
+		break;
+	case 0:
+		buf |= DP_PCON_ENABLE_MAX_BW_0GBPS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1);
+
+/**
+ * drm_dp_pcon_frl_configure_2() - Set HDMI Link Configuration, Step 2
+ * @aux: DisplayPort AUX channel
+ * @max_frl_mask: Max FRL bandwidths to be tried by the PCON with the HDMI sink
+ * @extended_train_mode: true for Extended Mode, false for Normal Mode.
+ * In Normal mode, the PCON tries each FRL bandwidth from the max_frl_mask starting
+ * from min, and stops when link training is successful. In Extended mode, all
+ * FRL bandwidths selected in the mask are trained by the PCON.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
+				bool extended_train_mode)
+{
+	int ret;
+	u8 buf = max_frl_mask;
+
+	if (extended_train_mode)
+		buf |= DP_PCON_FRL_LINK_TRAIN_EXTENDED;
+
+	ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2);
+
+/**
+ * drm_dp_pcon_reset_frl_config() - Reset HDMI Link configuration.
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, else returns negative error code.
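
Taken together, a source driver would drive these helpers roughly as in the
sketch below; "aux" is the port's AUX channel, the 9 Gbps bandwidth and the
DP_PCON_FRL_BW_MASK_9GBPS mask are example values from this series, and the
ready/HPD polling a real driver needs is elided:

	ret = drm_dp_pcon_frl_prepare(aux, false);
	if (ret < 0)
		return ret;
	if (!drm_dp_pcon_is_frl_ready(aux))
		return -EBUSY;
	ret = drm_dp_pcon_frl_configure_1(aux, 9, false);
	if (ret < 0)
		return ret;
	ret = drm_dp_pcon_frl_configure_2(aux, DP_PCON_FRL_BW_MASK_9GBPS, false);
	if (ret < 0)
		return ret;
	return drm_dp_pcon_frl_enable(aux);
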
+ */
+int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux)
+{
+	int ret;
+
+	ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config);
+
+/**
+ * drm_dp_pcon_frl_enable() - Enable HDMI link through FRL
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
+{
+	int ret;
+	u8 buf = 0;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf);
+	if (ret < 0)
+		return ret;
+	if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
+		DRM_DEBUG_KMS("PCON in Autonomous mode, can't enable FRL\n");
+		return -EINVAL;
+	}
+	buf |= DP_PCON_ENABLE_HDMI_LINK;
+	ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_frl_enable);
+
+/**
+ * drm_dp_pcon_hdmi_link_active() - check if the PCON HDMI link status is active.
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns true if the link is active, else returns false.
+ */
+bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux)
+{
+	u8 buf;
+	int ret;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf);
+	if (ret < 0)
+		return false;
+
+	return buf & DP_PCON_HDMI_TX_LINK_ACTIVE;
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_link_active);
+
+/**
+ * drm_dp_pcon_hdmi_link_mode() - get the PCON HDMI link mode
+ * @aux: DisplayPort AUX channel
+ * @frl_trained_mask: pointer to store bitmask of the trained bw configuration.
+ * Valid only if the MODE returned is FRL. For Normal Link training mode
+ * only 1 of the bits will be set, but in case of Extended mode, more than
+ * one bits can be set.
+ *
+ * Returns the link mode: TMDS or FRL on success, else returns negative error
+ * code.
+ */
+int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask)
+{
+	u8 buf;
+	int mode;
+	int ret;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf);
+	if (ret < 0)
+		return ret;
+
+	mode = buf & DP_PCON_HDMI_LINK_MODE;
+
+	if (frl_trained_mask && DP_PCON_HDMI_MODE_FRL == mode)
+		*frl_trained_mask = (buf & DP_PCON_HDMI_FRL_TRAINED_BW) >> 1;
+
+	return mode;
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_link_mode);
+
+/**
+ * drm_dp_pcon_hdmi_frl_link_error_count() - print the error count per lane
+ * during link failure between PCON and HDMI sink
+ * @aux: DisplayPort AUX channel
+ * @connector: DRM connector
+ */
+void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
+					   struct drm_connector *connector)
+{
+	u8 buf, error_count;
+	int i, num_error;
+	struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
+
+	for (i = 0; i < hdmi->max_lanes; i++) {
+		if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0)
+			return;
+
+		error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK;
+		switch (error_count) {
+		case DP_PCON_HDMI_ERROR_COUNT_HUNDRED_PLUS:
+			num_error = 100;
+			break;
+		case DP_PCON_HDMI_ERROR_COUNT_TEN_PLUS:
+			num_error = 10;
+			break;
+		case DP_PCON_HDMI_ERROR_COUNT_THREE_PLUS:
+			num_error = 3;
+			break;
+		default:
+			num_error = 0;
+		}
+
+		DRM_ERROR("More than %d errors since the last read for lane %d", num_error, i);
+	}
+}
+EXPORT_SYMBOL(drm_dp_pcon_hdmi_frl_link_error_count);
+
+/*
+ * drm_dp_pcon_enc_is_dsc_1_2 - Does the PCON encoder support DSC 1.2
+ * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder
+ *
+ * Returns true if the PCON encoder is DSC 1.2, else returns false.
+ */ +bool drm_dp_pcon_enc_is_dsc_1_2(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]) +{ + u8 buf; + u8 major_v, minor_v; + + buf = pcon_dsc_dpcd[DP_PCON_DSC_VERSION - DP_PCON_DSC_ENCODER]; + major_v = (buf & DP_PCON_DSC_MAJOR_MASK) >> DP_PCON_DSC_MAJOR_SHIFT; + minor_v = (buf & DP_PCON_DSC_MINOR_MASK) >> DP_PCON_DSC_MINOR_SHIFT; + + if (major_v == 1 && minor_v == 2) + return true; + + return false; +} +EXPORT_SYMBOL(drm_dp_pcon_enc_is_dsc_1_2); + +/* + * drm_dp_pcon_dsc_max_slices - Get max slices supported by PCON DSC Encoder + * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder + * + * Returns maximum no. of slices supported by the PCON DSC Encoder. + */ +int drm_dp_pcon_dsc_max_slices(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]) +{ + u8 slice_cap1, slice_cap2; + + slice_cap1 = pcon_dsc_dpcd[DP_PCON_DSC_SLICE_CAP_1 - DP_PCON_DSC_ENCODER]; + slice_cap2 = pcon_dsc_dpcd[DP_PCON_DSC_SLICE_CAP_2 - DP_PCON_DSC_ENCODER]; + + if (slice_cap2 & DP_PCON_DSC_24_PER_DSC_ENC) + return 24; + if (slice_cap2 & DP_PCON_DSC_20_PER_DSC_ENC) + return 20; + if (slice_cap2 & DP_PCON_DSC_16_PER_DSC_ENC) + return 16; + if (slice_cap1 & DP_PCON_DSC_12_PER_DSC_ENC) + return 12; + if (slice_cap1 & DP_PCON_DSC_10_PER_DSC_ENC) + return 10; + if (slice_cap1 & DP_PCON_DSC_8_PER_DSC_ENC) + return 8; + if (slice_cap1 & DP_PCON_DSC_6_PER_DSC_ENC) + return 6; + if (slice_cap1 & DP_PCON_DSC_4_PER_DSC_ENC) + return 4; + if (slice_cap1 & DP_PCON_DSC_2_PER_DSC_ENC) + return 2; + if (slice_cap1 & DP_PCON_DSC_1_PER_DSC_ENC) + return 1; + + return 0; +} +EXPORT_SYMBOL(drm_dp_pcon_dsc_max_slices); + +/* + * drm_dp_pcon_dsc_max_slice_width() - Get max slice width for Pcon DSC encoder + * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder + * + * Returns maximum width of the slices in pixel width i.e. no. of pixels x 320. + */ +int drm_dp_pcon_dsc_max_slice_width(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]) +{ + u8 buf; + + buf = pcon_dsc_dpcd[DP_PCON_DSC_MAX_SLICE_WIDTH - DP_PCON_DSC_ENCODER]; + + return buf * DP_DSC_SLICE_WIDTH_MULTIPLIER; +} +EXPORT_SYMBOL(drm_dp_pcon_dsc_max_slice_width); + +/* + * drm_dp_pcon_dsc_bpp_incr() - Get bits per pixel increment for PCON DSC encoder + * @pcon_dsc_dpcd: DSC capabilities of the PCON DSC Encoder + * + * Returns the bpp precision supported by the PCON encoder. 
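
For reference, a hedged sketch of how a caller might evaluate the PCON DSC
encoder limits using the helpers above; the DPCD read (starting at
DP_PCON_DSC_ENCODER) and its error handling are elided:

	u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE];
	int max_slices, max_slice_width;

	/* pcon_dsc_dpcd filled beforehand, e.g. via drm_dp_dpcd_read() */
	max_slices = drm_dp_pcon_dsc_max_slices(pcon_dsc_dpcd);
	max_slice_width = drm_dp_pcon_dsc_max_slice_width(pcon_dsc_dpcd);
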
+ */
+int drm_dp_pcon_dsc_bpp_incr(const u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE])
+{
+	u8 buf;
+
+	buf = pcon_dsc_dpcd[DP_PCON_DSC_BPP_INCR - DP_PCON_DSC_ENCODER];
+
+	switch (buf & DP_PCON_DSC_BPP_INCR_MASK) {
+	case DP_PCON_DSC_ONE_16TH_BPP:
+		return 16;
+	case DP_PCON_DSC_ONE_8TH_BPP:
+		return 8;
+	case DP_PCON_DSC_ONE_4TH_BPP:
+		return 4;
+	case DP_PCON_DSC_ONE_HALF_BPP:
+		return 2;
+	case DP_PCON_DSC_ONE_BPP:
+		return 1;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_dsc_bpp_incr);
+
+static
+int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config)
+{
+	u8 buf;
+	int ret;
+
+	ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf);
+	if (ret < 0)
+		return ret;
+
+	buf |= DP_PCON_ENABLE_DSC_ENCODER;
+
+	if (pps_buf_config <= DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER) {
+		buf &= ~DP_PCON_ENCODER_PPS_OVERRIDE_MASK;
+		buf |= pps_buf_config << 2;
+	}
+
+	ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/**
+ * drm_dp_pcon_pps_default() - Let PCON fill the default pps parameters
+ * for DSC1.2 between PCON & HDMI2.1 sink
+ * @aux: DisplayPort AUX channel
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_default(struct drm_dp_aux *aux)
+{
+	int ret;
+
+	ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_default);
+
+/**
+ * drm_dp_pcon_pps_override_buf() - Configure PPS encoder override buffer for
+ * HDMI sink
+ * @aux: DisplayPort AUX channel
+ * @pps_buf: 128 bytes to be written into PPS buffer for HDMI sink by PCON.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128])
+{
+	int ret;
+
+	ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, pps_buf, 128);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf);
+
+/*
+ * drm_dp_pcon_pps_override_param() - Write PPS parameters to DSC encoder
+ * override registers
+ * @aux: DisplayPort AUX channel
+ * @pps_param: 3 parameters (2 bytes each): slice height, slice width, and
+ * bits_per_pixel, written in that order.
+ *
+ * Returns 0 on success, else returns negative error code.
+ */
+int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6])
+{
+	int ret;
+
+	ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2);
+	if (ret < 0)
+		return ret;
+	ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2);
+	if (ret < 0)
+		return ret;
+	ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2);
+	if (ret < 0)
+		return ret;
+
+	ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_pcon_pps_override_param);
+
+/*
+ * drm_dp_pcon_convert_rgb_to_ycbcr() - Configure the PCON to convert RGB to YCbCr
+ * @aux: DisplayPort AUX channel
+ * @color_spc: Color-space/s for which conversion is to be enabled, 0 for disable.
+ *
+ * Returns 0 on success, else returns negative error code.
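
A usage sketch tying this to the capability helper added earlier in this
patch; the BT.709 cap and enable bits are assumed to be the DP_DS_HDMI_* and
DP_CONVERSION_* defines from this series:

	if (drm_dp_downstream_rgb_to_ycbcr_conversion(dpcd, port_cap,
						      DP_DS_HDMI_BT709_RGB_YCBCR_CONV))
		drm_dp_pcon_convert_rgb_to_ycbcr(aux, DP_CONVERSION_BT709_RGB_YCBCR_ENABLE);
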
+ */ +int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc) +{ + int ret; + u8 buf; + + ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf); + if (ret < 0) + return ret; + + if (color_spc & DP_CONVERSION_RGB_YCBCR_MASK) + buf |= (color_spc & DP_CONVERSION_RGB_YCBCR_MASK); + else + buf &= ~DP_CONVERSION_RGB_YCBCR_MASK; + + ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf); + if (ret < 0) + return ret; + + return 0; +} +EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 0401b2f47500..18b15a4aee2d 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2751,7 +2751,7 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) drm_dp_mst_topology_put_mstb(mstb); mutex_unlock(&mgr->probe_lock); - if (ret) + if (ret > 0) drm_kms_helper_hotplug_event(dev); } diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 734303802bc3..20d22e41d7ce 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -469,6 +469,9 @@ void drm_dev_unplug(struct drm_device *dev) synchronize_srcu(&drm_unplug_srcu); drm_dev_unregister(dev); + + /* Clear all CPU mappings pointing to this device */ + unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1); } EXPORT_SYMBOL(drm_dev_unplug); @@ -589,11 +592,7 @@ static int drm_dev_init(struct drm_device *dev, kref_init(&dev->ref); dev->dev = get_device(parent); -#ifdef CONFIG_DRM_LEGACY - dev->driver = (struct drm_driver *)driver; -#else dev->driver = driver; -#endif INIT_LIST_HEAD(&dev->managed.resources); spin_lock_init(&dev->managed.lock); @@ -675,11 +674,8 @@ static int devm_drm_dev_init(struct device *parent, if (ret) return ret; - ret = devm_add_action(parent, devm_drm_dev_init_release, dev); - if (ret) - devm_drm_dev_init_release(dev); - - return ret; + return devm_add_action_or_reset(parent, + devm_drm_dev_init_release, dev); } void *__devm_drm_dev_alloc(struct device *parent, @@ -897,8 +893,6 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags) if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_modeset_register_all(dev); - ret = 0; - DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n", driver->name, driver->major, driver->minor, driver->patchlevel, driver->date, diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c index 4a475d9696ff..ff602f7ec65b 100644 --- a/drivers/gpu/drm/drm_dsc.c +++ b/drivers/gpu/drm/drm_dsc.c @@ -50,6 +50,33 @@ void drm_dsc_dp_pps_header_init(struct dp_sdp_header *pps_header) EXPORT_SYMBOL(drm_dsc_dp_pps_header_init); /** + * drm_dsc_dp_rc_buffer_size - get rc buffer size in bytes + * @rc_buffer_block_size: block size code, according to DPCD offset 62h + * @rc_buffer_size: number of blocks - 1, according to DPCD offset 63h + * + * return: + * buffer size in bytes, or 0 on invalid input + */ +int drm_dsc_dp_rc_buffer_size(u8 rc_buffer_block_size, u8 rc_buffer_size) +{ + int size = 1024 * (rc_buffer_size + 1); + + switch (rc_buffer_block_size) { + case DP_DSC_RC_BUF_BLK_SIZE_1: + return 1 * size; + case DP_DSC_RC_BUF_BLK_SIZE_4: + return 4 * size; + case DP_DSC_RC_BUF_BLK_SIZE_16: + return 16 * size; + case DP_DSC_RC_BUF_BLK_SIZE_64: + return 64 * size; + default: + return 0; + } +} +EXPORT_SYMBOL(drm_dsc_dp_rc_buffer_size); + +/** * drm_dsc_pps_payload_pack() - Populates the DSC PPS * * @pps_payload: @@ -186,8 +213,7 @@ void drm_dsc_pps_payload_pack(struct 
drm_dsc_picture_parameter_set *pps_payload, pps_payload->flatness_max_qp = dsc_cfg->flatness_max_qp; /* PPS 38, 39 */ - pps_payload->rc_model_size = - cpu_to_be16(DSC_RC_MODEL_SIZE_CONST); + pps_payload->rc_model_size = cpu_to_be16(dsc_cfg->rc_model_size); /* PPS 40 */ pps_payload->rc_edge_factor = DSC_RC_EDGE_FACTOR_CONST; diff --git a/drivers/gpu/drm/drm_dumb_buffers.c b/drivers/gpu/drm/drm_dumb_buffers.c index d18a740fe0f1..ad17fa21cebb 100644 --- a/drivers/gpu/drm/drm_dumb_buffers.c +++ b/drivers/gpu/drm/drm_dumb_buffers.c @@ -29,6 +29,7 @@ #include <drm/drm_mode.h> #include "drm_crtc_internal.h" +#include "drm_internal.h" /** * DOC: overview @@ -46,9 +47,10 @@ * KMS frame buffers. * * To support dumb objects drivers must implement the &drm_driver.dumb_create - * operation. &drm_driver.dumb_destroy defaults to drm_gem_dumb_destroy() if - * not set and &drm_driver.dumb_map_offset defaults to - * drm_gem_dumb_map_offset(). See the callbacks for further details. + * and &drm_driver.dumb_map_offset operations (the latter defaults to + * drm_gem_dumb_map_offset() if not set). Drivers that don't use GEM handles + * additionally need to implement the &drm_driver.dumb_destroy operation. See + * the callbacks for further details. * * Note that dumb objects may not be used for gpu acceleration, as has been * attempted on some ARM embedded platforms. Such drivers really must have diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 74f5a3197214..c2bbe7bee7b6 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -32,6 +32,7 @@ #include <linux/i2c.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/pci.h> #include <linux/slab.h> #include <linux/vga_switcheroo.h> @@ -2075,9 +2076,13 @@ EXPORT_SYMBOL(drm_get_edid); struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, struct i2c_adapter *adapter) { - struct pci_dev *pdev = connector->dev->pdev; + struct drm_device *dev = connector->dev; + struct pci_dev *pdev = to_pci_dev(dev->dev); struct edid *edid; + if (drm_WARN_ON_ONCE(dev, !dev_is_pci(dev->dev))) + return NULL; + vga_switcheroo_lock_ddc(pdev); edid = drm_get_edid(connector, adapter); vga_switcheroo_unlock_ddc(pdev); @@ -3102,6 +3107,8 @@ static int drm_cvt_modes(struct drm_connector *connector, height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 4) + 1) * 2; switch (cvt->code[1] & 0x0c) { + /* default - because compiler doesn't see that we've enumerated all cases */ + default: case 0x00: width = height * 4 / 3; break; @@ -3114,8 +3121,6 @@ static int drm_cvt_modes(struct drm_connector *connector, case 0x0c: width = height * 15 / 9; break; - default: - unreachable(); } for (j = 1; j < 5; j++) { @@ -4851,6 +4856,41 @@ static void drm_parse_vcdb(struct drm_connector *connector, const u8 *db) info->rgb_quant_range_selectable = true; } +static +void drm_get_max_frl_rate(int max_frl_rate, u8 *max_lanes, u8 *max_rate_per_lane) +{ + switch (max_frl_rate) { + case 1: + *max_lanes = 3; + *max_rate_per_lane = 3; + break; + case 2: + *max_lanes = 3; + *max_rate_per_lane = 6; + break; + case 3: + *max_lanes = 4; + *max_rate_per_lane = 6; + break; + case 4: + *max_lanes = 4; + *max_rate_per_lane = 8; + break; + case 5: + *max_lanes = 4; + *max_rate_per_lane = 10; + break; + case 6: + *max_lanes = 4; + *max_rate_per_lane = 12; + break; + case 0: + default: + *max_lanes = 0; + *max_rate_per_lane = 0; + } +} + static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector, const u8 *db) { @@ -4904,6 +4944,74 @@ static 
void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector, } } + if (hf_vsdb[7]) { + u8 max_frl_rate; + u8 dsc_max_frl_rate; + u8 dsc_max_slices; + struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap; + + DRM_DEBUG_KMS("hdmi_21 sink detected. parsing edid\n"); + max_frl_rate = (hf_vsdb[7] & DRM_EDID_MAX_FRL_RATE_MASK) >> 4; + drm_get_max_frl_rate(max_frl_rate, &hdmi->max_lanes, + &hdmi->max_frl_rate_per_lane); + hdmi_dsc->v_1p2 = hf_vsdb[11] & DRM_EDID_DSC_1P2; + + if (hdmi_dsc->v_1p2) { + hdmi_dsc->native_420 = hf_vsdb[11] & DRM_EDID_DSC_NATIVE_420; + hdmi_dsc->all_bpp = hf_vsdb[11] & DRM_EDID_DSC_ALL_BPP; + + if (hf_vsdb[11] & DRM_EDID_DSC_16BPC) + hdmi_dsc->bpc_supported = 16; + else if (hf_vsdb[11] & DRM_EDID_DSC_12BPC) + hdmi_dsc->bpc_supported = 12; + else if (hf_vsdb[11] & DRM_EDID_DSC_10BPC) + hdmi_dsc->bpc_supported = 10; + else + hdmi_dsc->bpc_supported = 0; + + dsc_max_frl_rate = (hf_vsdb[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4; + drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes, + &hdmi_dsc->max_frl_rate_per_lane); + hdmi_dsc->total_chunk_kbytes = hf_vsdb[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES; + + dsc_max_slices = hf_vsdb[12] & DRM_EDID_DSC_MAX_SLICES; + switch (dsc_max_slices) { + case 1: + hdmi_dsc->max_slices = 1; + hdmi_dsc->clk_per_slice = 340; + break; + case 2: + hdmi_dsc->max_slices = 2; + hdmi_dsc->clk_per_slice = 340; + break; + case 3: + hdmi_dsc->max_slices = 4; + hdmi_dsc->clk_per_slice = 340; + break; + case 4: + hdmi_dsc->max_slices = 8; + hdmi_dsc->clk_per_slice = 340; + break; + case 5: + hdmi_dsc->max_slices = 8; + hdmi_dsc->clk_per_slice = 400; + break; + case 6: + hdmi_dsc->max_slices = 12; + hdmi_dsc->clk_per_slice = 400; + break; + case 7: + hdmi_dsc->max_slices = 16; + hdmi_dsc->clk_per_slice = 400; + break; + case 0: + default: + hdmi_dsc->max_slices = 0; + hdmi_dsc->clk_per_slice = 0; + } + } + } + drm_parse_ycbcr420_deep_color_info(connector, hf_vsdb); } diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c index e555281f43d4..72e982323a5e 100644 --- a/drivers/gpu/drm/drm_encoder.c +++ b/drivers/gpu/drm/drm_encoder.c @@ -26,6 +26,7 @@ #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_encoder.h> +#include <drm/drm_managed.h> #include "drm_crtc_internal.h" @@ -72,7 +73,7 @@ int drm_encoder_register_all(struct drm_device *dev) int ret = 0; drm_for_each_encoder(encoder, dev) { - if (encoder->funcs->late_register) + if (encoder->funcs && encoder->funcs->late_register) ret = encoder->funcs->late_register(encoder); if (ret) return ret; @@ -86,30 +87,16 @@ void drm_encoder_unregister_all(struct drm_device *dev) struct drm_encoder *encoder; drm_for_each_encoder(encoder, dev) { - if (encoder->funcs->early_unregister) + if (encoder->funcs && encoder->funcs->early_unregister) encoder->funcs->early_unregister(encoder); } } -/** - * drm_encoder_init - Init a preallocated encoder - * @dev: drm device - * @encoder: the encoder to init - * @funcs: callbacks for this encoder - * @encoder_type: user visible type of the encoder - * @name: printf style format string for the encoder name, or NULL for default name - * - * Initialises a preallocated encoder. Encoder should be subclassed as part of - * driver encoder objects. At driver unload time drm_encoder_cleanup() should be - * called from the driver's &drm_encoder_funcs.destroy hook. - * - * Returns: - * Zero on success, error code on failure. 
- */ -int drm_encoder_init(struct drm_device *dev, - struct drm_encoder *encoder, - const struct drm_encoder_funcs *funcs, - int encoder_type, const char *name, ...) +__printf(5, 0) +static int __drm_encoder_init(struct drm_device *dev, + struct drm_encoder *encoder, + const struct drm_encoder_funcs *funcs, + int encoder_type, const char *name, va_list ap) { int ret; @@ -125,11 +112,7 @@ int drm_encoder_init(struct drm_device *dev, encoder->encoder_type = encoder_type; encoder->funcs = funcs; if (name) { - va_list ap; - - va_start(ap, name); encoder->name = kvasprintf(GFP_KERNEL, name, ap); - va_end(ap); } else { encoder->name = kasprintf(GFP_KERNEL, "%s-%d", drm_encoder_enum_list[encoder_type].name, @@ -150,6 +133,44 @@ out_put: return ret; } + +/** + * drm_encoder_init - Init a preallocated encoder + * @dev: drm device + * @encoder: the encoder to init + * @funcs: callbacks for this encoder + * @encoder_type: user visible type of the encoder + * @name: printf style format string for the encoder name, or NULL for default name + * + * Initializes a preallocated encoder. Encoder should be subclassed as part of + * driver encoder objects. At driver unload time the driver's + * &drm_encoder_funcs.destroy hook should call drm_encoder_cleanup() and kfree() + * the encoder structure. The encoder structure should not be allocated with + * devm_kzalloc(). + * + * Note: consider using drmm_encoder_alloc() instead of drm_encoder_init() to + * let the DRM managed resource infrastructure take care of cleanup and + * deallocation. + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_encoder_init(struct drm_device *dev, + struct drm_encoder *encoder, + const struct drm_encoder_funcs *funcs, + int encoder_type, const char *name, ...) +{ + va_list ap; + int ret; + + WARN_ON(!funcs->destroy); + + va_start(ap, name); + ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap); + va_end(ap); + + return ret; +} EXPORT_SYMBOL(drm_encoder_init); /** @@ -181,6 +202,48 @@ void drm_encoder_cleanup(struct drm_encoder *encoder) } EXPORT_SYMBOL(drm_encoder_cleanup); +static void drmm_encoder_alloc_release(struct drm_device *dev, void *ptr) +{ + struct drm_encoder *encoder = ptr; + + if (WARN_ON(!encoder->dev)) + return; + + drm_encoder_cleanup(encoder); +} + +void *__drmm_encoder_alloc(struct drm_device *dev, size_t size, size_t offset, + const struct drm_encoder_funcs *funcs, + int encoder_type, const char *name, ...) 
+{
+	void *container;
+	struct drm_encoder *encoder;
+	va_list ap;
+	int ret;
+
+	if (WARN_ON(funcs && funcs->destroy))
+		return ERR_PTR(-EINVAL);
+
+	container = drmm_kzalloc(dev, size, GFP_KERNEL);
+	if (!container)
+		return ERR_PTR(-ENOMEM);
+
+	encoder = container + offset;
+
+	va_start(ap, name);
+	ret = __drm_encoder_init(dev, encoder, funcs, encoder_type, name, ap);
+	va_end(ap);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = drmm_add_action_or_reset(dev, drmm_encoder_alloc_release, encoder);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return container;
+}
+EXPORT_SYMBOL(__drmm_encoder_alloc);
+
 static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
 {
 	struct drm_connector *connector;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 25edf670867c..a44c3a438059 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -371,9 +371,9 @@ static void drm_fb_helper_resume_worker(struct work_struct *work)
 	console_unlock();
 }
 
-static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
-					  struct drm_clip_rect *clip,
-					  struct dma_buf_map *dst)
+static void drm_fb_helper_damage_blit_real(struct drm_fb_helper *fb_helper,
+					   struct drm_clip_rect *clip,
+					   struct dma_buf_map *dst)
 {
 	struct drm_framebuffer *fb = fb_helper->fb;
 	unsigned int cpp = fb->format->cpp[0];
@@ -391,40 +391,86 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
 	}
 }
 
-static void drm_fb_helper_dirty_work(struct work_struct *work)
+static int drm_fb_helper_damage_blit(struct drm_fb_helper *fb_helper,
+				     struct drm_clip_rect *clip)
+{
+	struct drm_client_buffer *buffer = fb_helper->buffer;
+	struct dma_buf_map map, dst;
+	int ret;
+
+	/*
+	 * We have to pin the client buffer to its current location while
+	 * flushing the shadow buffer. In the general case, concurrent
+	 * modesetting operations could try to move the buffer and would
+	 * fail. The modeset has to be serialized by acquiring the reservation
+	 * object of the underlying BO here.
+	 *
+	 * For fbdev emulation, we only have to protect against fbdev modeset
+	 * operations. Nothing else will involve the client buffer's BO. So it
+	 * is sufficient to acquire struct drm_fb_helper.lock here.
+ */ + mutex_lock(&fb_helper->lock); + + ret = drm_client_buffer_vmap(buffer, &map); + if (ret) + goto out; + + dst = map; + drm_fb_helper_damage_blit_real(fb_helper, clip, &dst); + + drm_client_buffer_vunmap(buffer); + +out: + mutex_unlock(&fb_helper->lock); + + return ret; +} + +static void drm_fb_helper_damage_work(struct work_struct *work) { struct drm_fb_helper *helper = container_of(work, struct drm_fb_helper, - dirty_work); - struct drm_clip_rect *clip = &helper->dirty_clip; + damage_work); + struct drm_device *dev = helper->dev; + struct drm_clip_rect *clip = &helper->damage_clip; struct drm_clip_rect clip_copy; unsigned long flags; - struct dma_buf_map map; int ret; - spin_lock_irqsave(&helper->dirty_lock, flags); + spin_lock_irqsave(&helper->damage_lock, flags); clip_copy = *clip; clip->x1 = clip->y1 = ~0; clip->x2 = clip->y2 = 0; - spin_unlock_irqrestore(&helper->dirty_lock, flags); + spin_unlock_irqrestore(&helper->damage_lock, flags); - /* call dirty callback only when it has been really touched */ - if (clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2) { - - /* Generic fbdev uses a shadow buffer */ - if (helper->buffer) { - ret = drm_client_buffer_vmap(helper->buffer, &map); - if (ret) - return; - drm_fb_helper_dirty_blit_real(helper, &clip_copy, &map); - } + /* Call damage handlers only if necessary */ + if (!(clip_copy.x1 < clip_copy.x2 && clip_copy.y1 < clip_copy.y2)) + return; - if (helper->fb->funcs->dirty) - helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, - &clip_copy, 1); + if (helper->buffer) { + ret = drm_fb_helper_damage_blit(helper, &clip_copy); + if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret)) + goto err; + } - if (helper->buffer) - drm_client_buffer_vunmap(helper->buffer); + if (helper->fb->funcs->dirty) { + ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, &clip_copy, 1); + if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret)) + goto err; } + + return; + +err: + /* + * Restore damage clip rectangle on errors. The next run + * of the damage worker will perform the update. 
+ */ + spin_lock_irqsave(&helper->damage_lock, flags); + clip->x1 = min_t(u32, clip->x1, clip_copy.x1); + clip->y1 = min_t(u32, clip->y1, clip_copy.y1); + clip->x2 = max_t(u32, clip->x2, clip_copy.x2); + clip->y2 = max_t(u32, clip->y2, clip_copy.y2); + spin_unlock_irqrestore(&helper->damage_lock, flags); } /** @@ -440,10 +486,10 @@ void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper, const struct drm_fb_helper_funcs *funcs) { INIT_LIST_HEAD(&helper->kernel_fb_list); - spin_lock_init(&helper->dirty_lock); + spin_lock_init(&helper->damage_lock); INIT_WORK(&helper->resume_work, drm_fb_helper_resume_worker); - INIT_WORK(&helper->dirty_work, drm_fb_helper_dirty_work); - helper->dirty_clip.x1 = helper->dirty_clip.y1 = ~0; + INIT_WORK(&helper->damage_work, drm_fb_helper_damage_work); + helper->damage_clip.x1 = helper->damage_clip.y1 = ~0; mutex_init(&helper->lock); helper->funcs = funcs; helper->dev = dev; @@ -579,7 +625,7 @@ void drm_fb_helper_fini(struct drm_fb_helper *fb_helper) return; cancel_work_sync(&fb_helper->resume_work); - cancel_work_sync(&fb_helper->dirty_work); + cancel_work_sync(&fb_helper->damage_work); info = fb_helper->fbdev; if (info) { @@ -614,30 +660,30 @@ static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper) fb->funcs->dirty; } -static void drm_fb_helper_dirty(struct fb_info *info, u32 x, u32 y, - u32 width, u32 height) +static void drm_fb_helper_damage(struct fb_info *info, u32 x, u32 y, + u32 width, u32 height) { struct drm_fb_helper *helper = info->par; - struct drm_clip_rect *clip = &helper->dirty_clip; + struct drm_clip_rect *clip = &helper->damage_clip; unsigned long flags; if (!drm_fbdev_use_shadow_fb(helper)) return; - spin_lock_irqsave(&helper->dirty_lock, flags); + spin_lock_irqsave(&helper->damage_lock, flags); clip->x1 = min_t(u32, clip->x1, x); clip->y1 = min_t(u32, clip->y1, y); clip->x2 = max_t(u32, clip->x2, x + width); clip->y2 = max_t(u32, clip->y2, y + height); - spin_unlock_irqrestore(&helper->dirty_lock, flags); + spin_unlock_irqrestore(&helper->damage_lock, flags); - schedule_work(&helper->dirty_work); + schedule_work(&helper->damage_work); } /** * drm_fb_helper_deferred_io() - fbdev deferred_io callback function * @info: fb_info struct pointer - * @pagelist: list of dirty mmap framebuffer pages + * @pagelist: list of mmap framebuffer pages that have to be flushed * * This function is used as the &fb_deferred_io.deferred_io * callback function for flushing the fbdev mmap writes. 
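
For context, the deferred-I/O plumbing that invokes this callback is set up
by the generic fbdev code along these lines; the 50 ms flush delay is only an
example value:

	static struct fb_deferred_io drm_fbdev_defio = {
		.delay		= HZ / 20,
		.deferred_io	= drm_fb_helper_deferred_io,
	};

	fbi->fbdefio = &drm_fbdev_defio;
	fb_deferred_io_init(fbi);
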
@@ -662,7 +708,7 @@ void drm_fb_helper_deferred_io(struct fb_info *info, y1 = min / info->fix.line_length; y2 = min_t(u32, DIV_ROUND_UP(max, info->fix.line_length), info->var.yres); - drm_fb_helper_dirty(info, 0, y1, info->var.xres, y2 - y1); + drm_fb_helper_damage(info, 0, y1, info->var.xres, y2 - y1); } } EXPORT_SYMBOL(drm_fb_helper_deferred_io); @@ -699,8 +745,7 @@ ssize_t drm_fb_helper_sys_write(struct fb_info *info, const char __user *buf, ret = fb_sys_write(info, buf, count, ppos); if (ret > 0) - drm_fb_helper_dirty(info, 0, 0, info->var.xres, - info->var.yres); + drm_fb_helper_damage(info, 0, 0, info->var.xres, info->var.yres); return ret; } @@ -717,8 +762,7 @@ void drm_fb_helper_sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { sys_fillrect(info, rect); - drm_fb_helper_dirty(info, rect->dx, rect->dy, - rect->width, rect->height); + drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); } EXPORT_SYMBOL(drm_fb_helper_sys_fillrect); @@ -733,8 +777,7 @@ void drm_fb_helper_sys_copyarea(struct fb_info *info, const struct fb_copyarea *area) { sys_copyarea(info, area); - drm_fb_helper_dirty(info, area->dx, area->dy, - area->width, area->height); + drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); } EXPORT_SYMBOL(drm_fb_helper_sys_copyarea); @@ -749,8 +792,7 @@ void drm_fb_helper_sys_imageblit(struct fb_info *info, const struct fb_image *image) { sys_imageblit(info, image); - drm_fb_helper_dirty(info, image->dx, image->dy, - image->width, image->height); + drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); } EXPORT_SYMBOL(drm_fb_helper_sys_imageblit); @@ -765,8 +807,7 @@ void drm_fb_helper_cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { cfb_fillrect(info, rect); - drm_fb_helper_dirty(info, rect->dx, rect->dy, - rect->width, rect->height); + drm_fb_helper_damage(info, rect->dx, rect->dy, rect->width, rect->height); } EXPORT_SYMBOL(drm_fb_helper_cfb_fillrect); @@ -781,8 +822,7 @@ void drm_fb_helper_cfb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { cfb_copyarea(info, area); - drm_fb_helper_dirty(info, area->dx, area->dy, - area->width, area->height); + drm_fb_helper_damage(info, area->dx, area->dy, area->width, area->height); } EXPORT_SYMBOL(drm_fb_helper_cfb_copyarea); @@ -797,8 +837,7 @@ void drm_fb_helper_cfb_imageblit(struct fb_info *info, const struct fb_image *image) { cfb_imageblit(info, image); - drm_fb_helper_dirty(info, image->dx, image->dy, - image->width, image->height); + drm_fb_helper_damage(info, image->dx, image->dy, image->width, image->height); } EXPORT_SYMBOL(drm_fb_helper_cfb_imageblit); @@ -907,11 +946,15 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info) drm_modeset_lock_all(fb_helper->dev); drm_client_for_each_modeset(modeset, &fb_helper->client) { crtc = modeset->crtc; - if (!crtc->funcs->gamma_set || !crtc->gamma_size) - return -EINVAL; + if (!crtc->funcs->gamma_set || !crtc->gamma_size) { + ret = -EINVAL; + goto out; + } - if (cmap->start + cmap->len > crtc->gamma_size) - return -EINVAL; + if (cmap->start + cmap->len > crtc->gamma_size) { + ret = -EINVAL; + goto out; + } r = crtc->gamma_store; g = r + crtc->gamma_size; @@ -924,8 +967,9 @@ static int setcmap_legacy(struct fb_cmap *cmap, struct fb_info *info) ret = crtc->funcs->gamma_set(crtc, r, g, b, crtc->gamma_size, NULL); if (ret) - return ret; + goto out; } +out: drm_modeset_unlock_all(fb_helper->dev); return ret; @@ -1015,6 +1059,11 @@ retry: goto out_state; } + 
/* + * FIXME: This always uses gamma_lut. Some HW have only + * degamma_lut, in which case we should reset gamma_lut and set + * degamma_lut. See drm_crtc_legacy_gamma_set(). + */ replaced = drm_property_replace_blob(&crtc_state->degamma_lut, NULL); replaced |= drm_property_replace_blob(&crtc_state->ctm, NULL); @@ -1988,14 +2037,19 @@ static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper) if (!fb_helper->dev) return; - if (fbi && fbi->fbdefio) { - fb_deferred_io_cleanup(fbi); - shadow = fbi->screen_buffer; + if (fbi) { + if (fbi->fbdefio) + fb_deferred_io_cleanup(fbi); + if (drm_fbdev_use_shadow_fb(fb_helper)) + shadow = fbi->screen_buffer; } drm_fb_helper_fini(fb_helper); - vfree(shadow); + if (shadow) + vfree(shadow); + else + drm_client_buffer_vunmap(fb_helper->buffer); drm_client_framebuffer_delete(fb_helper->buffer); } @@ -2189,6 +2243,9 @@ static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf, if (ret > 0) *ppos += ret; + if (ret > 0) + drm_fb_helper_damage(info, 0, 0, info->var.xres_virtual, info->var.yres_virtual); + return ret ? ret : err; } @@ -2439,6 +2496,11 @@ void drm_fbdev_generic_setup(struct drm_device *dev, return; } + /* + * FIXME: This mixes up depth with bpp, which results in a glorious + * mess, resulting in some drivers picking wrong fbdev defaults and + * others wrong preferred_depth defaults. + */ if (!preferred_bpp) preferred_bpp = dev->mode_config.preferred_depth; if (!preferred_bpp) diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c index b50380fa80ce..6b116bfd747c 100644 --- a/drivers/gpu/drm/drm_file.c +++ b/drivers/gpu/drm/drm_file.c @@ -113,8 +113,7 @@ bool drm_dev_needs_global_mutex(struct drm_device *dev) * The memory mapping implementation will vary depending on how the driver * manages memory. Legacy drivers will use the deprecated drm_legacy_mmap() * function, modern drivers should use one of the provided memory-manager - * specific implementations. For GEM-based drivers this is drm_gem_mmap(), and - * for drivers which use the CMA GEM helpers it's drm_gem_cma_mmap(). + * specific implementations. For GEM-based drivers this is drm_gem_mmap(). * * No other file operations are supported by the DRM userspace API. Overall the * following is an example &file_operations structure:: @@ -240,9 +239,6 @@ static void drm_events_release(struct drm_file *file_priv) * before calling this. * * If NULL is passed, this is a no-op. - * - * RETURNS: - * 0 on success, or error code on failure. */ void drm_file_free(struct drm_file *file) { @@ -371,6 +367,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) list_add(&priv->lhead, &dev->filelist); mutex_unlock(&dev->filelist_mutex); +#ifdef CONFIG_DRM_LEGACY #ifdef __alpha__ /* * Default the hose @@ -391,6 +388,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor) } } #endif +#endif return 0; } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 92f89cee213e..c2ce78c4edc3 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -335,22 +335,12 @@ out: } EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset); -/** - * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers - * @file: drm file-private structure to remove the dumb handle from - * @dev: corresponding drm_device - * @handle: the dumb handle to remove - * - * This implements the &drm_driver.dumb_destroy kms driver callback for drivers - * which use gem to manage their backing storage. 
- */ int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, - uint32_t handle) + u32 handle) { return drm_gem_handle_delete(file, handle); } -EXPORT_SYMBOL(drm_gem_dumb_destroy); /** * drm_gem_handle_create_tail - internal functions to create a handle @@ -1078,20 +1068,17 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, drm_gem_object_get(obj); vma->vm_private_data = obj; + vma->vm_ops = obj->funcs->vm_ops; if (obj->funcs->mmap) { ret = obj->funcs->mmap(obj, vma); - if (ret) { - drm_gem_object_put(obj); - return ret; - } + if (ret) + goto err_drm_gem_object_put; WARN_ON(!(vma->vm_flags & VM_DONTEXPAND)); } else { - if (obj->funcs->vm_ops) - vma->vm_ops = obj->funcs->vm_ops; - else { - drm_gem_object_put(obj); - return -EINVAL; + if (!vma->vm_ops) { + ret = -EINVAL; + goto err_drm_gem_object_put; } vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; @@ -1100,6 +1087,10 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, } return 0; + +err_drm_gem_object_put: + drm_gem_object_put(obj); + return ret; } EXPORT_SYMBOL(drm_gem_mmap_obj); diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c index 4d5c1d86b022..7942cf05cd93 100644 --- a/drivers/gpu/drm/drm_gem_cma_helper.c +++ b/drivers/gpu/drm/drm_gem_cma_helper.c @@ -36,8 +36,9 @@ static const struct drm_gem_object_funcs drm_gem_cma_default_funcs = { .free = drm_gem_cma_free_object, .print_info = drm_gem_cma_print_info, - .get_sg_table = drm_gem_cma_prime_get_sg_table, - .vmap = drm_gem_cma_prime_vmap, + .get_sg_table = drm_gem_cma_get_sg_table, + .vmap = drm_gem_cma_vmap, + .mmap = drm_gem_cma_mmap, .vm_ops = &drm_gem_cma_vm_ops, }; @@ -277,62 +278,6 @@ const struct vm_operations_struct drm_gem_cma_vm_ops = { }; EXPORT_SYMBOL_GPL(drm_gem_cma_vm_ops); -static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj, - struct vm_area_struct *vma) -{ - int ret; - - /* - * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the - * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map - * the whole buffer. - */ - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_pgoff = 0; - - ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr, - cma_obj->paddr, vma->vm_end - vma->vm_start); - if (ret) - drm_gem_vm_close(vma); - - return ret; -} - -/** - * drm_gem_cma_mmap - memory-map a CMA GEM object - * @filp: file object - * @vma: VMA for the area to be mapped - * - * This function implements an augmented version of the GEM DRM file mmap - * operation for CMA objects: In addition to the usual GEM VMA setup it - * immediately faults in the entire object instead of using on-demaind - * faulting. Drivers which employ the CMA helpers should use this function - * as their ->mmap() handler in the DRM device file's file_operations - * structure. - * - * Instead of directly referencing this function, drivers should use the - * DEFINE_DRM_GEM_CMA_FOPS().macro. - * - * Returns: - * 0 on success or a negative error code on failure. 
- */ -int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_gem_cma_object *cma_obj; - struct drm_gem_object *gem_obj; - int ret; - - ret = drm_gem_mmap(filp, vma); - if (ret) - return ret; - - gem_obj = vma->vm_private_data; - cma_obj = to_drm_gem_cma_obj(gem_obj); - - return drm_gem_cma_mmap_obj(cma_obj, vma); -} -EXPORT_SYMBOL_GPL(drm_gem_cma_mmap); - #ifndef CONFIG_MMU /** * drm_gem_cma_get_unmapped_area - propose address for mapping in noMMU cases @@ -424,18 +369,18 @@ void drm_gem_cma_print_info(struct drm_printer *p, unsigned int indent, EXPORT_SYMBOL(drm_gem_cma_print_info); /** - * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned + * drm_gem_cma_get_sg_table - provide a scatter/gather table of pinned * pages for a CMA GEM object * @obj: GEM object * - * This function exports a scatter/gather table suitable for PRIME usage by + * This function exports a scatter/gather table by * calling the standard DMA mapping API. Drivers using the CMA helpers should * set this as their &drm_gem_object_funcs.get_sg_table callback. * * Returns: * A pointer to the scatter/gather table of pinned pages or NULL on failure. */ -struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj) +struct sg_table *drm_gem_cma_get_sg_table(struct drm_gem_object *obj) { struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); struct sg_table *sgt; @@ -456,7 +401,7 @@ out: kfree(sgt); return ERR_PTR(ret); } -EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table); +EXPORT_SYMBOL_GPL(drm_gem_cma_get_sg_table); /** * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another @@ -501,40 +446,13 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev, EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table); /** - * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object - * @obj: GEM object - * @vma: VMA for the area to be mapped - * - * This function maps a buffer imported via DRM PRIME into a userspace - * process's address space. Drivers that use the CMA helpers should set this - * as their &drm_driver.gem_prime_mmap callback. - * - * Returns: - * 0 on success or a negative error code on failure. - */ -int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma) -{ - struct drm_gem_cma_object *cma_obj; - int ret; - - ret = drm_gem_mmap_obj(obj, obj->size, vma); - if (ret < 0) - return ret; - - cma_obj = to_drm_gem_cma_obj(obj); - return drm_gem_cma_mmap_obj(cma_obj, vma); -} -EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap); - -/** - * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual + * drm_gem_cma_vmap - map a CMA GEM object into the kernel's virtual * address space * @obj: GEM object * @map: Returns the kernel virtual address of the CMA GEM object's backing * store. * - * This function maps a buffer exported via DRM PRIME into the kernel's + * This function maps a buffer into the kernel's * virtual address space. Since the CMA buffers are already mapped into the * kernel virtual address space this simply returns the cached virtual * address. Drivers using the CMA helpers should set this as their DRM @@ -543,7 +461,7 @@ EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap); * Returns: * 0 on success, or a negative error code otherwise. 
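With the prime_ prefix gone, the CMA library now exposes a single set of GEM object functions for all paths, as the drm_gem_cma_default_funcs table at the top of this file shows. A driver supplying its own funcs table on top of the CMA helpers would wire up the same callbacks; the sketch below simply mirrors that table and is not additional code from the patch:

static const struct drm_gem_object_funcs example_cma_funcs = {
	.free		= drm_gem_cma_free_object,
	.print_info	= drm_gem_cma_print_info,
	.get_sg_table	= drm_gem_cma_get_sg_table,
	.vmap		= drm_gem_cma_vmap,
	.mmap		= drm_gem_cma_mmap,
	/* vm_ops is installed by drm_gem_mmap_obj() before ->mmap runs. */
	.vm_ops		= &drm_gem_cma_vm_ops,
};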
*/ -int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) +int drm_gem_cma_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) { struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); @@ -551,7 +469,44 @@ int drm_gem_cma_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) return 0; } -EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap); +EXPORT_SYMBOL_GPL(drm_gem_cma_vmap); + +/** + * drm_gem_cma_mmap - memory-map an exported CMA GEM object + * @obj: GEM object + * @vma: VMA for the area to be mapped + * + * This function maps a buffer into a userspace process's address space. + * In addition to the usual GEM VMA setup it immediately faults in the entire + * object instead of using on-demand faulting. Drivers that use the CMA + * helpers should set this as their &drm_gem_object_funcs.mmap callback. + * + * Returns: + * 0 on success or a negative error code on failure. + */ +int drm_gem_cma_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + struct drm_gem_cma_object *cma_obj; + int ret; + + /* + * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the + * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map + * the whole buffer. + */ + vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); + vma->vm_flags &= ~VM_PFNMAP; + + cma_obj = to_drm_gem_cma_obj(obj); + + ret = dma_mmap_wc(cma_obj->base.dev->dev, vma, cma_obj->vaddr, + cma_obj->paddr, vma->vm_end - vma->vm_start); + if (ret) + drm_gem_vm_close(vma); + + return ret; +} +EXPORT_SYMBOL_GPL(drm_gem_cma_mmap); /** * drm_gem_cma_prime_import_sg_table_vmap - PRIME import another driver's diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 499189c48f0b..9825c378dfa6 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -51,13 +51,17 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) if (!obj) return ERR_PTR(-ENOMEM); + shmem = to_drm_gem_shmem_obj(obj); + if (!obj->funcs) obj->funcs = &drm_gem_shmem_funcs; - if (private) + if (private) { drm_gem_private_object_init(dev, obj, size); - else + shmem->map_wc = false; /* dma-buf mappings always use writecombine */ + } else { ret = drm_gem_object_init(dev, obj, size); + } if (ret) goto err_free; @@ -65,7 +69,6 @@ __drm_gem_shmem_create(struct drm_device *dev, size_t size, bool private) if (ret) goto err_release; - shmem = to_drm_gem_shmem_obj(obj); mutex_init(&shmem->pages_lock); mutex_init(&shmem->vmap_lock); INIT_LIST_HEAD(&shmem->madv_list); @@ -284,7 +287,7 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, struct if (ret) goto err_zero_use; - if (!shmem->map_cached) + if (shmem->map_wc) prot = pgprot_writecombine(prot); shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, prot); @@ -477,33 +480,6 @@ bool drm_gem_shmem_purge(struct drm_gem_object *obj) EXPORT_SYMBOL(drm_gem_shmem_purge); /** - * drm_gem_shmem_create_object_cached - Create a shmem buffer object with - * cached mappings - * @dev: DRM device - * @size: Size of the object to allocate - * - * By default, shmem buffer objects use writecombine mappings. This - * function implements struct drm_driver.gem_create_object for shmem - * buffer objects with cached mappings. - * - * Returns: - * A struct drm_gem_shmem_object * on success or NULL negative on failure.
- */ -struct drm_gem_object * -drm_gem_shmem_create_object_cached(struct drm_device *dev, size_t size) -{ - struct drm_gem_shmem_object *shmem; - - shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); - if (!shmem) - return NULL; - shmem->map_cached = true; - - return &shmem->base; -} -EXPORT_SYMBOL(drm_gem_shmem_create_object_cached); - -/** * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object * @file: DRM file structure to create the dumb buffer for * @dev: DRM device @@ -626,7 +602,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - if (!shmem->map_cached) + if (shmem->map_wc) vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); vma->vm_ops = &drm_gem_shmem_vm_ops; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 81d386b5b92a..fad2249ee67b 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -191,6 +191,9 @@ void drm_gem_unpin(struct drm_gem_object *obj); int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map); +int drm_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, + u32 handle); + /* drm_debugfs.c drm_debugfs_crc.c */ #if defined(CONFIG_DEBUG_FS) int drm_debugfs_init(struct drm_minor *minor, int minor_id, diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 09d6e9e2e075..c3bd664ea733 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -122,7 +122,7 @@ int drm_irq_install(struct drm_device *dev, int irq) dev->driver->irq_preinstall(dev); /* PCI devices require shared interrupts. */ - if (dev->pdev) + if (dev_is_pci(dev->dev)) sh_flags = IRQF_SHARED; ret = request_irq(irq, dev->driver->irq_handler, @@ -140,7 +140,7 @@ int drm_irq_install(struct drm_device *dev, int irq) if (ret < 0) { dev->irq_enabled = false; if (drm_core_check_feature(dev, DRIVER_LEGACY)) - vga_client_register(dev->pdev, NULL, NULL, NULL); + vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL); free_irq(irq, dev); } else { dev->irq = irq; @@ -203,7 +203,7 @@ int drm_irq_uninstall(struct drm_device *dev) DRM_DEBUG("irq=%d\n", dev->irq); if (drm_core_check_feature(dev, DRIVER_LEGACY)) - vga_client_register(dev->pdev, NULL, NULL, NULL); + vga_client_register(to_pci_dev(dev->dev), NULL, NULL, NULL); if (dev->driver->irq_uninstall) dev->driver->irq_uninstall(dev); @@ -214,12 +214,45 @@ int drm_irq_uninstall(struct drm_device *dev) } EXPORT_SYMBOL(drm_irq_uninstall); +static void devm_drm_irq_uninstall(void *data) +{ + drm_irq_uninstall(data); +} + +/** + * devm_drm_irq_install - install IRQ handler + * @dev: DRM device + * @irq: IRQ number to install the handler for + * + * devm_drm_irq_install() is a device-managed variant of drm_irq_install(). + * + * If the driver uses devm_drm_irq_install(), there is no need + * to call drm_irq_uninstall() when the DRM module gets unloaded, + * as this is done automatically on driver detach. + * + * Returns: + * Zero on success or a negative error code on failure.
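Usage follows the usual devm pattern. A hedged sketch with a hypothetical driver function; only devm_drm_irq_install() itself comes from this patch:

static int example_enable_irq(struct drm_device *drm, struct pci_dev *pdev)
{
	/* The handler is uninstalled automatically when the device is
	 * unbound, so error and remove paths need no drm_irq_uninstall(). */
	return devm_drm_irq_install(drm, pdev->irq);
}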
+ */ +int devm_drm_irq_install(struct drm_device *dev, int irq) +{ + int ret; + + ret = drm_irq_install(dev, irq); + if (ret) + return ret; + + return devm_add_action_or_reset(dev->dev, + devm_drm_irq_uninstall, dev); +} +EXPORT_SYMBOL(devm_drm_irq_install); + #if IS_ENABLED(CONFIG_DRM_LEGACY) int drm_legacy_irq_control(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_control *ctl = data; int ret = 0, irq; + struct pci_dev *pdev; /* if we haven't irq we fallback for compatibility reasons - * this used to be a separate function in drm_dma.h @@ -230,12 +263,13 @@ int drm_legacy_irq_control(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_LEGACY)) return 0; /* UMS was only ever supported on pci devices. */ - if (WARN_ON(!dev->pdev)) + if (WARN_ON(!dev_is_pci(dev->dev))) return -EINVAL; switch (ctl->func) { case DRM_INST_HANDLER: - irq = dev->pdev->irq; + pdev = to_pci_dev(dev->dev); + irq = pdev->irq; if (dev->if_version < DRM_IF_VERSION(1, 2) && ctl->irq != irq) diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h index 1be3ea320474..f71358f9eac9 100644 --- a/drivers/gpu/drm/drm_legacy.h +++ b/drivers/gpu/drm/drm_legacy.h @@ -127,7 +127,7 @@ static inline void drm_legacy_master_rmmaps(struct drm_device *dev, static inline void drm_legacy_rmmaps(struct drm_device *dev) {} #endif -#if IS_ENABLED(CONFIG_DRM_VM) && IS_ENABLED(CONFIG_DRM_LEGACY) +#if IS_ENABLED(CONFIG_DRM_LEGACY) void drm_legacy_vma_flush(struct drm_device *d); #else static inline void drm_legacy_vma_flush(struct drm_device *d) diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index fbea69d6f909..e4f20a2eb6e7 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -37,7 +37,6 @@ #include <linux/highmem.h> #include <linux/pci.h> #include <linux/vmalloc.h> -#include <xen/xen.h> #include <drm/drm_agpsupport.h> #include <drm/drm_cache.h> @@ -100,24 +99,6 @@ static void *agp_remap(unsigned long offset, unsigned long size, return addr; } -/** Wrapper around agp_free_memory() */ -void drm_free_agp(struct agp_memory *handle, int pages) -{ - agp_free_memory(handle); -} - -/** Wrapper around agp_bind_memory() */ -int drm_bind_agp(struct agp_memory *handle, unsigned int start) -{ - return agp_bind_memory(handle, start); -} - -/** Wrapper around agp_unbind_memory() */ -int drm_unbind_agp(struct agp_memory *handle) -{ - return agp_unbind_memory(handle); -} - #else /* CONFIG_AGP */ static inline void *agp_remap(unsigned long offset, unsigned long size, struct drm_device *dev) @@ -156,35 +137,3 @@ void drm_legacy_ioremapfree(struct drm_local_map *map, struct drm_device *dev) iounmap(map->handle); } EXPORT_SYMBOL(drm_legacy_ioremapfree); - -bool drm_need_swiotlb(int dma_bits) -{ - struct resource *tmp; - resource_size_t max_iomem = 0; - - /* - * Xen paravirtual hosts require swiotlb regardless of requested dma - * transfer size. - * - * NOTE: Really, what it requires is use of the dma_alloc_coherent - * allocator used in ttm_dma_populate() instead of - * ttm_populate_and_map_pages(), which bounce buffers so much in - * Xen it leads to swiotlb buffer exhaustion. - */ - if (xen_pv_domain()) - return true; - - /* - * Enforce dma_alloc_coherent when memory encryption is active as well - * for the same reasons as for Xen paravirtual hosts. 
- */ - if (mem_encrypt_active()) - return true; - - for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling) { - max_iomem = max(max_iomem, tmp->end); - } - - return max_iomem > ((u64)1 << dma_bits); -} -EXPORT_SYMBOL(drm_need_swiotlb); diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index f1affc1bb679..37b4b9f0e468 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -195,7 +195,7 @@ void drm_mode_config_reset(struct drm_device *dev) crtc->funcs->reset(crtc); drm_for_each_encoder(encoder, dev) - if (encoder->funcs->reset) + if (encoder->funcs && encoder->funcs->reset) encoder->funcs->reset(encoder); drm_connector_list_iter_begin(dev, &conn_iter); @@ -625,6 +625,10 @@ static void validate_encoder_possible_crtcs(struct drm_encoder *encoder) void drm_mode_config_validate(struct drm_device *dev) { struct drm_encoder *encoder; + struct drm_crtc *crtc; + struct drm_plane *plane; + u32 primary_with_crtc = 0, cursor_with_crtc = 0; + unsigned int num_primary = 0; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return; @@ -636,4 +640,49 @@ void drm_mode_config_validate(struct drm_device *dev) validate_encoder_possible_clones(encoder); validate_encoder_possible_crtcs(encoder); } + + drm_for_each_crtc(crtc, dev) { + WARN(!crtc->primary, "Missing primary plane on [CRTC:%d:%s]\n", + crtc->base.id, crtc->name); + + WARN(crtc->cursor && crtc->funcs->cursor_set, + "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set func", + crtc->base.id, crtc->name); + WARN(crtc->cursor && crtc->funcs->cursor_set2, + "[CRTC:%d:%s] must not have both a cursor plane and a cursor_set2 func", + crtc->base.id, crtc->name); + WARN(crtc->cursor && crtc->funcs->cursor_move, + "[CRTC:%d:%s] must not have both a cursor plane and a cursor_move func", + crtc->base.id, crtc->name); + + if (crtc->primary) { + WARN(!(crtc->primary->possible_crtcs & drm_crtc_mask(crtc)), + "Bogus primary plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n", + crtc->primary->base.id, crtc->primary->name, + crtc->base.id, crtc->name); + WARN(primary_with_crtc & drm_plane_mask(crtc->primary), + "Primary plane [PLANE:%d:%s] used for multiple CRTCs", + crtc->primary->base.id, crtc->primary->name); + primary_with_crtc |= drm_plane_mask(crtc->primary); + } + if (crtc->cursor) { + WARN(!(crtc->cursor->possible_crtcs & drm_crtc_mask(crtc)), + "Bogus cursor plane possible_crtcs: [PLANE:%d:%s] must be compatible with [CRTC:%d:%s]\n", + crtc->cursor->base.id, crtc->cursor->name, + crtc->base.id, crtc->name); + WARN(cursor_with_crtc & drm_plane_mask(crtc->cursor), + "Cursor plane [PLANE:%d:%s] used for multiple CRTCs", + crtc->cursor->base.id, crtc->cursor->name); + cursor_with_crtc |= drm_plane_mask(crtc->cursor); + } + } + + drm_for_each_plane(plane, dev) { + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + num_primary++; + } + + WARN(num_primary != dev->mode_config.num_crtc, + "Must have as many primary planes as there are CRTCs, but have %u primary planes and %u CRTCs", + num_primary, dev->mode_config.num_crtc); } diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c index 33fb2f05ce66..1ac67d4505e0 100644 --- a/drivers/gpu/drm/drm_modes.c +++ b/drivers/gpu/drm/drm_modes.c @@ -762,7 +762,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode) if (mode->htotal == 0 || mode->vtotal == 0) return 0; - num = mode->clock * 1000; + num = mode->clock; den = mode->htotal * mode->vtotal; if (mode->flags & DRM_MODE_FLAG_INTERLACE) @@ -772,7 
+772,7 @@ int drm_mode_vrefresh(const struct drm_display_mode *mode) if (mode->vscan > 1) den *= mode->vscan; - return DIV_ROUND_CLOSEST(num, den); + return DIV_ROUND_CLOSEST_ULL(mul_u32_u32(num, 1000), den); } EXPORT_SYMBOL(drm_mode_vrefresh); diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c index 6dba4b8ce4fe..2294a1580d35 100644 --- a/drivers/gpu/drm/drm_pci.c +++ b/drivers/gpu/drm/drm_pci.c @@ -24,6 +24,8 @@ #include <linux/dma-mapping.h> #include <linux/export.h> +#include <linux/list.h> +#include <linux/mutex.h> #include <linux/pci.h> #include <linux/slab.h> @@ -36,6 +38,9 @@ #include "drm_legacy.h" #ifdef CONFIG_DRM_LEGACY +/* List of devices hanging off drivers with stealth attach. */ +static LIST_HEAD(legacy_dev_list); +static DEFINE_MUTEX(legacy_dev_list_lock); /** * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA. @@ -65,7 +70,7 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali return NULL; dmah->size = size; - dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, + dmah->vaddr = dma_alloc_coherent(dev->dev, size, &dmah->busaddr, GFP_KERNEL); @@ -88,7 +93,7 @@ EXPORT_SYMBOL(drm_pci_alloc); */ void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) { - dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, + dma_free_coherent(dev->dev, dmah->size, dmah->vaddr, dmah->busaddr); kfree(dmah); } @@ -107,16 +112,18 @@ static int drm_get_pci_domain(struct drm_device *dev) return 0; #endif /* __alpha__ */ - return pci_domain_nr(dev->pdev->bus); + return pci_domain_nr(to_pci_dev(dev->dev)->bus); } int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) { + struct pci_dev *pdev = to_pci_dev(dev->dev); + master->unique = kasprintf(GFP_KERNEL, "pci:%04x:%02x:%02x.%d", drm_get_pci_domain(dev), - dev->pdev->bus->number, - PCI_SLOT(dev->pdev->devfn), - PCI_FUNC(dev->pdev->devfn)); + pdev->bus->number, + PCI_SLOT(pdev->devfn), + PCI_FUNC(pdev->devfn)); if (!master->unique) return -ENOMEM; @@ -126,12 +133,14 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master) static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) { + struct pci_dev *pdev = to_pci_dev(dev->dev); + if ((p->busnum >> 8) != drm_get_pci_domain(dev) || - (p->busnum & 0xff) != dev->pdev->bus->number || - p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn)) + (p->busnum & 0xff) != pdev->bus->number || + p->devnum != PCI_SLOT(pdev->devfn) || p->funcnum != PCI_FUNC(pdev->devfn)) return -EINVAL; - p->irq = dev->pdev->irq; + p->irq = pdev->irq; DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, p->irq); @@ -159,7 +168,7 @@ int drm_legacy_irq_by_busid(struct drm_device *dev, void *data, return -EOPNOTSUPP; /* UMS was only ever support on PCI devices. 
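These hunks show the recurring idiom of the series for retiring drm_device.pdev: keep only the generic struct device and derive the PCI device on demand. A condensed illustration, with a hypothetical helper name:

static int example_pci_irq(struct drm_device *dev)
{
	struct pci_dev *pdev;

	/* dev->dev may belong to a platform device, so check first. */
	if (!dev_is_pci(dev->dev))
		return -ENODEV;

	pdev = to_pci_dev(dev->dev);
	return pdev->irq;
}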
*/ - if (WARN_ON(!dev->pdev)) + if (WARN_ON(!dev_is_pci(dev->dev))) return -EINVAL; if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) @@ -183,7 +192,7 @@ void drm_pci_agp_destroy(struct drm_device *dev) static void drm_pci_agp_init(struct drm_device *dev) { if (drm_core_check_feature(dev, DRIVER_USE_AGP)) { - if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) + if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP)) dev->agp = drm_agp_init(dev); if (dev->agp) { dev->agp->agp_mtrr = arch_phys_wc_add( @@ -196,7 +205,7 @@ static void drm_pci_agp_init(struct drm_device *dev) static int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent, - struct drm_driver *driver) + const struct drm_driver *driver) { struct drm_device *dev; int ret; @@ -225,10 +234,11 @@ static int drm_get_pci_dev(struct pci_dev *pdev, if (ret) goto err_agp; - /* No locking needed since shadow-attach is single-threaded since it may - * only be called from the per-driver module init hook. */ - if (drm_core_check_feature(dev, DRIVER_LEGACY)) - list_add_tail(&dev->legacy_dev_list, &driver->legacy_dev_list); + if (drm_core_check_feature(dev, DRIVER_LEGACY)) { + mutex_lock(&legacy_dev_list_lock); + list_add_tail(&dev->legacy_dev_list, &legacy_dev_list); + mutex_unlock(&legacy_dev_list_lock); + } return 0; @@ -249,7 +259,8 @@ err_free: * * Return: 0 on success or a negative error code on failure. */ -int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) +int drm_legacy_pci_init(const struct drm_driver *driver, + struct pci_driver *pdriver) { struct pci_dev *pdev = NULL; const struct pci_device_id *pid; @@ -261,7 +272,6 @@ int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver) return -EINVAL; /* If not using KMS, fall back to stealth mode manual scanning. */ - INIT_LIST_HEAD(&driver->legacy_dev_list); for (i = 0; pdriver->id_table[i].vendor != 0; i++) { pid = &pdriver->id_table[i]; @@ -295,7 +305,8 @@ EXPORT_SYMBOL(drm_legacy_pci_init); * Unregister a DRM driver shadow-attached through drm_legacy_pci_init(). This * is deprecated and only used by dri1 drivers. */ -void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) +void drm_legacy_pci_exit(const struct drm_driver *driver, + struct pci_driver *pdriver) { struct drm_device *dev, *tmp; @@ -304,11 +315,15 @@ void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver) if (!(driver->driver_features & DRIVER_LEGACY)) { WARN_ON(1); } else { - list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list, + mutex_lock(&legacy_dev_list_lock); + list_for_each_entry_safe(dev, tmp, &legacy_dev_list, legacy_dev_list) { - list_del(&dev->legacy_dev_list); - drm_put_dev(dev); + if (dev->driver == driver) { + list_del(&dev->legacy_dev_list); + drm_put_dev(dev); + } } + mutex_unlock(&legacy_dev_list_lock); } DRM_INFO("Module unloaded\n"); } diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index e6231947f987..bf6e525bb116 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -30,6 +30,7 @@ #include <drm/drm_file.h> #include <drm/drm_crtc.h> #include <drm/drm_fourcc.h> +#include <drm/drm_managed.h> #include <drm/drm_vblank.h> #include "drm_crtc_internal.h" @@ -40,7 +41,7 @@ * A plane represents an image source that can be blended with or overlayed on * top of a CRTC during the scanout process. Planes take their input data from a * &drm_framebuffer object. 
The plane itself specifies the cropping and scaling - * of that image, and where it is placed on the visible are of a display + * of that image, and where it is placed on the visible area of a display * pipeline, represented by &drm_crtc. A plane can also have additional * properties that specify how the pixels are positioned and blended, like * rotation or Z-position. All these properties are stored in &drm_plane_state. @@ -49,14 +50,34 @@ * &struct drm_plane (possibly as part of a larger structure) and registers it * with a call to drm_universal_plane_init(). * - * Cursor and overlay planes are optional. All drivers should provide one - * primary plane per CRTC to avoid surprising userspace too much. See enum - * drm_plane_type for a more in-depth discussion of these special uapi-relevant - * plane types. Special planes are associated with their CRTC by calling - * drm_crtc_init_with_planes(). - * * The type of a plane is exposed in the immutable "type" enumeration property, - * which has one of the following values: "Overlay", "Primary", "Cursor". + * which has one of the following values: "Overlay", "Primary", "Cursor" (see + * enum drm_plane_type). A plane can be compatible with multiple CRTCs, see + * &drm_plane.possible_crtcs. + * + * Each CRTC must have a unique primary plane userspace can attach to enable + * the CRTC. In other words, userspace must be able to attach a different + * primary plane to each CRTC at the same time. Primary planes can still be + * compatible with multiple CRTCs. There must be exactly as many primary planes + * as there are CRTCs. + * + * Legacy uAPI doesn't expose the primary and cursor planes directly. DRM core + * relies on the driver to set the primary and optionally the cursor plane used + * for legacy IOCTLs. This is done by calling drm_crtc_init_with_planes(). All + * drivers must provide one primary plane per CRTC to avoid surprising legacy + * userspace too much. + */ + +/** + * DOC: standard plane properties + * + * DRM planes have a few standardized properties: + * + * IN_FORMATS: + * Blob property which contains the set of buffer format and modifier + * pairs supported by this plane. The blob is a struct + * drm_format_modifier_blob. Without this property the plane doesn't + * support buffers with modifiers. Userspace cannot change this property. */ static unsigned int drm_num_planes(struct drm_device *dev) @@ -152,31 +173,16 @@ done: return 0; } -/** - * drm_universal_plane_init - Initialize a new universal plane object - * @dev: DRM device - * @plane: plane object to init - * @possible_crtcs: bitmask of possible CRTCs - * @funcs: callbacks for the new plane - * @formats: array of supported formats (DRM_FORMAT\_\*) - * @format_count: number of elements in @formats - * @format_modifiers: array of struct drm_format modifiers terminated by - * DRM_FORMAT_MOD_INVALID - * @type: type of plane (overlay, primary, cursor) - * @name: printf style format string for the plane name, or NULL for default name - * - * Initializes a plane object of type @type. - * - * Returns: - * Zero on success, error code on failure. - */ -int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, - uint32_t possible_crtcs, - const struct drm_plane_funcs *funcs, - const uint32_t *formats, unsigned int format_count, - const uint64_t *format_modifiers, - enum drm_plane_type type, - const char *name, ...) 
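The one-primary-plane-per-CRTC rule documented above typically takes this shape in driver init code. A hedged sketch with hypothetical example_* names; drm_crtc_init_with_planes() is the association step the text refers to:

static int example_init_pipe(struct drm_device *drm, struct drm_crtc *crtc,
			     unsigned int pipe)
{
	struct drm_plane *primary;

	/* One primary plane per pipe, compatible only with that pipe's CRTC. */
	primary = example_create_primary_plane(drm, BIT(pipe));
	if (IS_ERR(primary))
		return PTR_ERR(primary);

	return drm_crtc_init_with_planes(drm, crtc, primary,
					 NULL /* no cursor plane */,
					 &example_crtc_funcs, NULL);
}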
+__printf(9, 0) +static int __drm_universal_plane_init(struct drm_device *dev, + struct drm_plane *plane, + uint32_t possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, + unsigned int format_count, + const uint64_t *format_modifiers, + enum drm_plane_type type, + const char *name, va_list ap) { struct drm_mode_config *config = &dev->mode_config; unsigned int format_modifier_count = 0; @@ -237,11 +243,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, } if (name) { - va_list ap; - - va_start(ap, name); plane->name = kvasprintf(GFP_KERNEL, name, ap); - va_end(ap); } else { plane->name = kasprintf(GFP_KERNEL, "plane-%d", drm_num_planes(dev)); @@ -286,8 +288,102 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, return 0; } + +/** + * drm_universal_plane_init - Initialize a new universal plane object + * @dev: DRM device + * @plane: plane object to init + * @possible_crtcs: bitmask of possible CRTCs + * @funcs: callbacks for the new plane + * @formats: array of supported formats (DRM_FORMAT\_\*) + * @format_count: number of elements in @formats + * @format_modifiers: array of struct drm_format modifiers terminated by + * DRM_FORMAT_MOD_INVALID + * @type: type of plane (overlay, primary, cursor) + * @name: printf style format string for the plane name, or NULL for default name + * + * Initializes a plane object of type @type. The &drm_plane_funcs.destroy hook + * should call drm_plane_cleanup() and kfree() the plane structure. The plane + * structure should not be allocated with devm_kzalloc(). + * + * Note: consider using drmm_universal_plane_alloc() instead of + * drm_universal_plane_init() to let the DRM managed resource infrastructure + * take care of cleanup and deallocation. + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane, + uint32_t possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + const uint64_t *format_modifiers, + enum drm_plane_type type, + const char *name, ...) +{ + va_list ap; + int ret; + + WARN_ON(!funcs->destroy); + + va_start(ap, name); + ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs, + formats, format_count, format_modifiers, + type, name, ap); + va_end(ap); + return ret; +} EXPORT_SYMBOL(drm_universal_plane_init); +static void drmm_universal_plane_alloc_release(struct drm_device *dev, void *ptr) +{ + struct drm_plane *plane = ptr; + + if (WARN_ON(!plane->dev)) + return; + + drm_plane_cleanup(plane); +} + +void *__drmm_universal_plane_alloc(struct drm_device *dev, size_t size, + size_t offset, uint32_t possible_crtcs, + const struct drm_plane_funcs *funcs, + const uint32_t *formats, unsigned int format_count, + const uint64_t *format_modifiers, + enum drm_plane_type type, + const char *name, ...) 
+{ + void *container; + struct drm_plane *plane; + va_list ap; + int ret; + + if (WARN_ON(!funcs || funcs->destroy)) + return ERR_PTR(-EINVAL); + + container = drmm_kzalloc(dev, size, GFP_KERNEL); + if (!container) + return ERR_PTR(-ENOMEM); + + plane = container + offset; + + va_start(ap, name); + ret = __drm_universal_plane_init(dev, plane, possible_crtcs, funcs, + formats, format_count, format_modifiers, + type, name, ap); + va_end(ap); + if (ret) + return ERR_PTR(ret); + + ret = drmm_add_action_or_reset(dev, drmm_universal_plane_alloc_release, + plane); + if (ret) + return ERR_PTR(ret); + + return container; +} +EXPORT_SYMBOL(__drmm_universal_plane_alloc); + int drm_plane_register_all(struct drm_device *dev) { unsigned int num_planes = 0; diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 7db55fce35d8..2a54f86856af 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -717,6 +717,8 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) vma->vm_pgoff += drm_vma_node_start(&obj->vma_node); if (obj->funcs && obj->funcs->mmap) { + vma->vm_ops = obj->funcs->vm_ops; + ret = obj->funcs->mmap(obj, vma); if (ret) return ret; @@ -978,44 +980,58 @@ struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev, EXPORT_SYMBOL(drm_gem_prime_import); /** - * drm_prime_sg_to_page_addr_arrays - convert an sg table into a page array + * drm_prime_sg_to_page_array - convert an sg table into a page array + * @sgt: scatter-gather table to convert + * @pages: array of page pointers to store the pages in + * @max_entries: size of the passed-in array + * + * Exports an sg table into an array of pages. + * + * This function is deprecated and its use is strongly discouraged. + * The page array is only useful for page faults and those can corrupt fields + * in the struct page if they are not handled by the exporting driver. + */ +int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt, + struct page **pages, + int max_entries) +{ + struct sg_page_iter page_iter; + struct page **p = pages; + + for_each_sgtable_page(sgt, &page_iter, 0) { + if (WARN_ON(p - pages >= max_entries)) + return -1; + *p++ = sg_page_iter_page(&page_iter); + } + return 0; +} +EXPORT_SYMBOL(drm_prime_sg_to_page_array); + +/** + * drm_prime_sg_to_dma_addr_array - convert an sg table into a dma addr array * @sgt: scatter-gather table to convert - * @pages: optional array of page pointers to store the page array in - * @addrs: optional array to store the dma bus address of each page + * @addrs: array to store the dma bus address of each page * @max_entries: size of both the passed-in arrays * - * Exports an sg table into an array of pages and addresses. This is currently - * required by the TTM driver in order to do correct fault handling. + * Exports an sg table into an array of addresses. * - * Drivers can use this in their &drm_driver.gem_prime_import_sg_table + * Drivers should use this in their &drm_driver.gem_prime_import_sg_table + * implementation.
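A typical &drm_driver.gem_prime_import_sg_table implementation following this advice sizes one dma_addr_t per page and hands the array to the new helper. This is a sketch under that per-page assumption; the example_* names are hypothetical:

static struct drm_gem_object *
example_gem_prime_import_sg_table(struct drm_device *dev,
				  struct dma_buf_attachment *attach,
				  struct sg_table *sgt)
{
	unsigned int npages = attach->dmabuf->size >> PAGE_SHIFT;
	dma_addr_t *addrs;

	addrs = kvmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return ERR_PTR(-ENOMEM);

	if (drm_prime_sg_to_dma_addr_array(sgt, addrs, npages)) {
		kvfree(addrs);
		return ERR_PTR(-EINVAL);
	}

	/* example_gem_create() is a stand-in for the driver's object setup. */
	return example_gem_create(dev, addrs, npages);
}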
*/ -int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages, - dma_addr_t *addrs, int max_entries) +int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs, + int max_entries) { struct sg_dma_page_iter dma_iter; - struct sg_page_iter page_iter; - struct page **p = pages; dma_addr_t *a = addrs; - if (pages) { - for_each_sgtable_page(sgt, &page_iter, 0) { - if (WARN_ON(p - pages >= max_entries)) - return -1; - *p++ = sg_page_iter_page(&page_iter); - } + for_each_sgtable_dma_page(sgt, &dma_iter, 0) { + if (WARN_ON(a - addrs >= max_entries)) + return -1; + *a++ = sg_page_iter_dma_address(&dma_iter); } - if (addrs) { - for_each_sgtable_dma_page(sgt, &dma_iter, 0) { - if (WARN_ON(a - addrs >= max_entries)) - return -1; - *a++ = sg_page_iter_dma_address(&dma_iter); - } - } - return 0; } -EXPORT_SYMBOL(drm_prime_sg_to_page_addr_arrays); +EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array); /** * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index d6017726cc2a..ad59a51eab6d 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -515,7 +515,8 @@ retry: if (count == 0 && connector->status == connector_status_connected) count = drm_add_override_edid_modes(connector); - if (count == 0 && connector->status == connector_status_connected) + if (count == 0 && (connector->status == connector_status_connected || + connector->status == connector_status_unknown)) count = drm_add_modes_noedid(connector, 1024, 768); count += drm_helper_probe_add_cmdline_mode(connector); if (count == 0) diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index 743e57c1b44f..6ce8f5cd1eb5 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -9,6 +9,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> +#include <drm/drm_managed.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> @@ -55,8 +56,9 @@ static const struct drm_encoder_funcs drm_simple_encoder_funcs_cleanup = { * stored in the device structure. Free the encoder's memory as part of * the device release function. * - * FIXME: Later improvements to DRM's resource management may allow for - * an automated kfree() of the encoder's memory. + * Note: consider using drmm_simple_encoder_alloc() instead of + * drm_simple_encoder_init() to let the DRM managed resource infrastructure + * take care of cleanup and deallocation. * * Returns: * Zero on success, error code on failure. 
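The matching drmm_simple_encoder_alloc() macro, added to drm_simple_kms_helper.h in the same series and assumed here rather than shown in this hunk, lets a driver embed the encoder and drop manual cleanup entirely. Hypothetical usage:

struct example_output {
	struct drm_encoder encoder;
	/* other per-output state */
};

static int example_create_output(struct drm_device *drm)
{
	struct example_output *out;

	out = drmm_simple_encoder_alloc(drm, struct example_output, encoder,
					DRM_MODE_ENCODER_NONE);
	if (IS_ERR(out))
		return PTR_ERR(out);

	/* No kfree() or cleanup: the allocation is tied to the DRM device. */
	return 0;
}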
@@ -71,6 +73,14 @@ int drm_simple_encoder_init(struct drm_device *dev, } EXPORT_SYMBOL(drm_simple_encoder_init); +void *__drmm_simple_encoder_alloc(struct drm_device *dev, size_t size, + size_t offset, int encoder_type) +{ + return __drmm_encoder_alloc(dev, size, offset, NULL, encoder_type, + NULL); +} +EXPORT_SYMBOL(__drmm_simple_encoder_alloc); + static enum drm_mode_status drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index d30e2f2b8f3c..30912d8f82a5 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -74,7 +74,7 @@ * |↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓↓| updates the * | | frame as it * | | travels down - * | | ("sacn out") + * | | ("scan out") * | Old frame | * | | * | | diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 6d5a03b32238..9b3b989d7cad 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -278,7 +278,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) case _DRM_SCATTER_GATHER: break; case _DRM_CONSISTENT: - dma_free_coherent(&dev->pdev->dev, + dma_free_coherent(dev->dev, map->size, map->handle, map->offset); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c index bbd235473645..6d38c5c17f23 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c @@ -145,10 +145,8 @@ static int etnaviv_gem_mmap_obj(struct etnaviv_gem_object *etnaviv_obj, * address_space (so unmap_mapping_range does what we want, * in particular in the case of mmap'd dmabufs) */ - fput(vma->vm_file); - get_file(etnaviv_obj->base.filp); vma->vm_pgoff = 0; - vma->vm_file = etnaviv_obj->base.filp; + vma_set_file(vma, etnaviv_obj->base.filp); vma->vm_page_prot = vm_page_prot; } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c index d9bd83203a15..b390dd4d60b7 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_prime.c @@ -135,8 +135,7 @@ struct drm_gem_object *etnaviv_gem_prime_import_sg_table(struct drm_device *dev, goto fail; } - ret = drm_prime_sg_to_page_addr_arrays(sgt, etnaviv_obj->pages, - NULL, npages); + ret = drm_prime_sg_to_page_array(sgt, etnaviv_obj->pages, npages); if (ret) goto fail; diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index 6417f374b923..951d5f708e92 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_EXYNOS tristate "DRM Support for Samsung SoC Exynos Series" - depends on OF && DRM && (ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST) + depends on OF && DRM && COMMON_CLK + depends on ARCH_S3C64XX || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_MULTIPLATFORM || COMPILE_TEST depends on MMU select DRM_KMS_HELPER select VIDEOMODE_HELPERS diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c index e75293e4a52f..19e055dbd4c2 100644 --- a/drivers/gpu/drm/gma500/cdv_device.c +++ b/drivers/gpu/drm/gma500/cdv_device.c @@ -95,13 +95,14 @@ static u32 cdv_get_max_backlight(struct drm_device *dev) static int cdv_get_brightness(struct backlight_device *bd) { struct drm_device *dev = bl_get_data(bd); + struct pci_dev *pdev = to_pci_dev(dev->dev); u32 val = REG_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; if (cdv_backlight_combination_mode(dev)) { u8 
lbpc; val &= ~1; - pci_read_config_byte(dev->pdev, 0xF4, &lbpc); + pci_read_config_byte(pdev, 0xF4, &lbpc); val *= lbpc; } return (val * 100)/cdv_get_max_backlight(dev); @@ -111,6 +112,7 @@ static int cdv_get_brightness(struct backlight_device *bd) static int cdv_set_brightness(struct backlight_device *bd) { struct drm_device *dev = bl_get_data(bd); + struct pci_dev *pdev = to_pci_dev(dev->dev); int level = bd->props.brightness; u32 blc_pwm_ctl; @@ -128,7 +130,7 @@ static int cdv_set_brightness(struct backlight_device *bd) lbpc = level * 0xfe / max + 1; level /= lbpc; - pci_write_config_byte(dev->pdev, 0xF4, lbpc); + pci_write_config_byte(pdev, 0xF4, lbpc); } blc_pwm_ctl = REG_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; @@ -205,8 +207,9 @@ static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset, static void cdv_init_pm(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); u32 pwr_cnt; - int domain = pci_domain_nr(dev->pdev->bus); + int domain = pci_domain_nr(pdev->bus); int i; dev_priv->apm_base = CDV_MSG_READ32(domain, PSB_PUNIT_PORT, @@ -234,6 +237,8 @@ static void cdv_init_pm(struct drm_device *dev) static void cdv_errata(struct drm_device *dev) { + struct pci_dev *pdev = to_pci_dev(dev->dev); + /* Disable bonus launch. * CPU and GPU competes for memory and display misses updates and * flickers. Worst with dual core, dual displays. * Bonus Launch to work around the issue, by degrading * performance. */ - CDV_MSG_WRITE32(pci_domain_nr(dev->pdev->bus), 3, 0x30, 0x08027108); + CDV_MSG_WRITE32(pci_domain_nr(pdev->bus), 3, 0x30, 0x08027108); } /** @@ -255,12 +260,13 @@ static void cdv_errata(struct drm_device *dev) static int cdv_save_display_registers(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); struct psb_save_area *regs = &dev_priv->regs; struct drm_connector *connector; dev_dbg(dev->dev, "Saving GPU registers.\n"); - pci_read_config_byte(dev->pdev, 0xF4, &regs->cdv.saveLBB); + pci_read_config_byte(pdev, 0xF4, &regs->cdv.saveLBB); regs->cdv.saveDSPCLK_GATE_D = REG_READ(DSPCLK_GATE_D); regs->cdv.saveRAMCLK_GATE_D = REG_READ(RAMCLK_GATE_D); @@ -309,11 +315,12 @@ static int cdv_save_display_registers(struct drm_device *dev) static int cdv_restore_display_registers(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); struct psb_save_area *regs = &dev_priv->regs; struct drm_connector *connector; u32 temp; - pci_write_config_byte(dev->pdev, 0xF4, regs->cdv.saveLBB); + pci_write_config_byte(pdev, 0xF4, regs->cdv.saveLBB); REG_WRITE(DSPCLK_GATE_D, regs->cdv.saveDSPCLK_GATE_D); REG_WRITE(RAMCLK_GATE_D, regs->cdv.saveRAMCLK_GATE_D); @@ -421,16 +428,16 @@ static int cdv_power_up(struct drm_device *dev) static void cdv_hotplug_work_func(struct work_struct *work) { struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private, - hotplug_work); + hotplug_work); struct drm_device *dev = dev_priv->dev; /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(dev); -} +} /* The core driver has received a hotplug IRQ.
We are in IRQ context so extract the needed information and kick off queued processing */ - + static int cdv_hotplug_event(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; @@ -449,7 +456,7 @@ static void cdv_hotplug_enable(struct drm_device *dev, bool on) } else { REG_WRITE(PORT_HOTPLUG_EN, 0); REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT)); - } + } } static const char *force_audio_names[] = { @@ -568,9 +575,10 @@ static const struct psb_offset cdv_regmap[2] = { static int cdv_chip_setup(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func); - if (pci_enable_msi(dev->pdev)) + if (pci_enable_msi(pdev)) dev_warn(dev->dev, "Enabling MSI failed!\n"); dev_priv->regmap = cdv_regmap; gma_get_core_freq(dev); diff --git a/drivers/gpu/drm/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c index 88535f5aacc5..c48c9d322dfb 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_crt.c +++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c @@ -127,7 +127,7 @@ static void cdv_intel_crt_mode_set(struct drm_encoder *encoder, } -/** +/* * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence. * * \return true if CRT is connected. @@ -278,8 +278,7 @@ void cdv_intel_crt_init(struct drm_device *dev, gma_encoder->ddc_bus = psb_intel_i2c_create(dev, i2c_reg, "CRTDDC_A"); if (!gma_encoder->ddc_bus) { - dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " - "failed.\n"); + dev_printk(KERN_ERR, dev->dev, "DDC bus registration failed.\n"); goto failed_ddc; } diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index 686385a66167..5d3302249779 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -551,7 +551,7 @@ void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc) } } -/** +/* * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c index bfd9a15d63b1..6d3ada39ff86 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_dp.c +++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c @@ -306,7 +306,7 @@ static uint32_t dp_vswing_premph_table[] = { }; /** * is_edp - is the given port attached to an eDP panel (either CPU or PCH) - * @intel_dp: DP struct + * @encoder: GMA encoder struct * * If a CPU or PCH DP output is attached to an eDP panel, this function * will return true, and false otherwise. @@ -1687,7 +1687,7 @@ static enum drm_connector_status cdv_dp_detect(struct gma_encoder *encoder) return status; } -/** +/* * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. * * \return true if DP port is connected. diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c index eaaf4efec217..5bff7d9e3aa6 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c +++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c @@ -74,7 +74,7 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev) return retval; } -/** +/* * Sets the backlight level. * * level backlight level, from 0 to cdv_intel_lvds_get_max_backlight(). @@ -99,7 +99,7 @@ static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level) } } -/** +/* * Sets the power state for the panel. 
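The repeated change of /** to /* in these gma500 files is deliberate: a /** opener marks a comment as kernel-doc, which the scripts/kernel-doc tooling parses and warns about when the function's parameters are not documented. The distinction in miniature, with a hypothetical function name:

/**
 * example_set_power - kernel-doc: parsed, so every parameter needs a line
 * @dev: DRM device
 * @on: true to power the panel up
 */

/* Plain comment: free-form text, never parsed, no @parameter lines needed. */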
*/ static void cdv_intel_lvds_set_power(struct drm_device *dev, @@ -291,7 +291,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder, REG_WRITE(PFIT_CONTROL, pfit_control); } -/** +/* * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. */ static int cdv_intel_lvds_get_modes(struct drm_connector *connector) @@ -471,6 +471,7 @@ static bool lvds_is_present_in_vbt(struct drm_device *dev, /** * cdv_intel_lvds_init - setup LVDS connectors on this device * @dev: drm device + * @mode_dev: PSB mode device * * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). @@ -554,7 +555,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, "LVDSBLC_B"); if (!gma_encoder->i2c_bus) { dev_printk(KERN_ERR, - &dev->pdev->dev, "I2C bus registration failed.\n"); + dev->dev, "I2C bus registration failed.\n"); goto failed_blc_i2c; } gma_encoder->i2c_bus->slave_addr = 0x2C; @@ -575,7 +576,7 @@ void cdv_intel_lvds_init(struct drm_device *dev, GPIOC, "LVDSDDC_C"); if (!gma_encoder->ddc_bus) { - dev_printk(KERN_ERR, &dev->pdev->dev, + dev_printk(KERN_ERR, dev->dev, "DDC bus registration " "failed.\n"); goto failed_ddc; } diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c index fc4fda1d258b..ebe9dccf2d83 100644 --- a/drivers/gpu/drm/gma500/framebuffer.c +++ b/drivers/gpu/drm/gma500/framebuffer.c @@ -159,7 +159,7 @@ static const struct fb_ops psbfb_unaccel_ops = { * @dev: our DRM device * @fb: framebuffer to set up * @mode_cmd: mode description - * @gt: backing object + * @obj: backing object * * Configure and fill in the boilerplate for our frame buffer. Return * 0 on success or an error code if we fail. @@ -197,7 +197,7 @@ static int psb_framebuffer_init(struct drm_device *dev, * psb_framebuffer_create - create a framebuffer backed by gt * @dev: our DRM device * @mode_cmd: the description of the requested mode - * @gt: the backing object + * @obj: the backing object * * Create a framebuffer object backed by the gt, and fill in the * boilerplate required @@ -252,7 +252,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size) /** * psbfb_create - create a framebuffer - * @fbdev: the framebuffer device + * @fb_helper: the framebuffer helper * @sizes: specification of the layout * * Create a framebuffer to the specifications provided @@ -262,6 +262,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper, { struct drm_device *dev = fb_helper->dev; struct drm_psb_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); struct fb_info *info; struct drm_framebuffer *fb; struct drm_mode_fb_cmd2 mode_cmd; @@ -325,8 +326,8 @@ static int psbfb_create(struct drm_fb_helper *fb_helper, drm_fb_helper_fill_info(info, fb_helper, sizes); - info->fix.mmio_start = pci_resource_start(dev->pdev, 0); - info->fix.mmio_len = pci_resource_len(dev->pdev, 0); + info->fix.mmio_start = pci_resource_start(pdev, 0); + info->fix.mmio_len = pci_resource_len(pdev, 0); /* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */ @@ -529,6 +530,7 @@ void psb_modeset_init(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev; + struct pci_dev *pdev = to_pci_dev(dev->dev); int i; drm_mode_config_init(dev); @@ -540,8 +542,7 @@ void psb_modeset_init(struct drm_device *dev) /* set memory base */ /* Oaktrail and Poulsbo should use BAR 2*/ - 
pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *) - &(dev->mode_config.fb_base)); + pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev->mode_config.fb_base)); /* num pipes is 2 for PSB but 1 for Mrst */ for (i = 0; i < dev_priv->num_pipe; i++) diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c index db827e591403..fbf420051ef5 100644 --- a/drivers/gpu/drm/gma500/gem.c +++ b/drivers/gpu/drm/gma500/gem.c @@ -16,6 +16,7 @@ #include <drm/drm.h> #include <drm/drm_vma_manager.h> +#include "gem.h" #include "psb_drv.h" static vm_fault_t psb_gem_fault(struct vm_fault *vmf); @@ -49,6 +50,8 @@ const struct drm_gem_object_funcs psb_gem_object_funcs = { * @dev: our device * @size: the size requested * @handlep: returned handle (opaque number) + * @stolen: unused + * @align: unused * * Create a GEM object, fill in the boilerplate and attach a handle to * it so that userspace can speak about it. This does the core work @@ -97,7 +100,7 @@ int psb_gem_create(struct drm_file *file, struct drm_device *dev, u64 size, /** * psb_gem_dumb_create - create a dumb buffer - * @drm_file: our client file + * @file: our client file * @dev: our device * @args: the requested arguments copied from userspace * @@ -116,7 +119,6 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev, /** * psb_gem_fault - pagefault handler for GEM objects - * @vma: the VMA of the GEM object * @vmf: fault detail * * Invoked when a fault occurs on an mmap of a GEM managed area. GEM diff --git a/drivers/gpu/drm/gma500/gem.h b/drivers/gpu/drm/gma500/gem.h index 3741a711b9fd..bae6454ead29 100644 --- a/drivers/gpu/drm/gma500/gem.h +++ b/drivers/gpu/drm/gma500/gem.h @@ -8,6 +8,8 @@ #ifndef _GEM_H #define _GEM_H +struct drm_device; + extern const struct drm_gem_object_funcs psb_gem_object_funcs; extern int psb_gem_create(struct drm_file *file, struct drm_device *dev, diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c index 869f30392566..4c91e86f4b14 100644 --- a/drivers/gpu/drm/gma500/gma_device.c +++ b/drivers/gpu/drm/gma500/gma_device.c @@ -6,12 +6,14 @@ **************************************************************************/ #include "psb_drv.h" +#include "gma_device.h" void gma_get_core_freq(struct drm_device *dev) { uint32_t clock; + struct pci_dev *pdev = to_pci_dev(dev->dev); struct pci_dev *pci_root = - pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus), + pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), 0, 0); struct drm_psb_private *dev_priv = dev->dev_private; diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c index 3df6d6e850f5..b03f7b8241f2 100644 --- a/drivers/gpu/drm/gma500/gma_display.c +++ b/drivers/gpu/drm/gma500/gma_display.c @@ -20,7 +20,7 @@ #include "psb_intel_drv.h" #include "psb_intel_reg.h" -/** +/* * Returns whether any output on the specified pipe is of the specified type */ bool gma_pipe_has_type(struct drm_crtc *crtc, int type) @@ -180,7 +180,7 @@ int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, return 0; } -/** +/* * Sets the power management mode of the pipe and plane. 
* * This code should probably grow support for turning the cursor off and back @@ -559,14 +559,14 @@ int gma_crtc_set_config(struct drm_mode_set *set, if (!dev_priv->rpm_enabled) return drm_crtc_helper_set_config(set, ctx); - pm_runtime_forbid(&dev->pdev->dev); + pm_runtime_forbid(dev->dev); ret = drm_crtc_helper_set_config(set, ctx); - pm_runtime_allow(&dev->pdev->dev); + pm_runtime_allow(dev->dev); return ret; } -/** +/* * Save HW states of given crtc */ void gma_crtc_save(struct drm_crtc *crtc) @@ -609,7 +609,7 @@ void gma_crtc_save(struct drm_crtc *crtc) crtc_state->savePalette[i] = REG_READ(palette_reg + (i << 2)); } -/** +/* * Restore HW states of given crtc */ void gma_crtc_restore(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c index d246b1f70366..e884750bc123 100644 --- a/drivers/gpu/drm/gma500/gtt.c +++ b/drivers/gpu/drm/gma500/gtt.c @@ -340,13 +340,14 @@ static void psb_gtt_alloc(struct drm_device *dev) void psb_gtt_takedown(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); if (dev_priv->gtt_map) { iounmap(dev_priv->gtt_map); dev_priv->gtt_map = NULL; } if (dev_priv->gtt_initialized) { - pci_write_config_word(dev->pdev, PSB_GMCH_CTRL, + pci_write_config_word(pdev, PSB_GMCH_CTRL, dev_priv->gmch_ctrl); PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL); (void) PSB_RVDC32(PSB_PGETBL_CTL); @@ -358,6 +359,7 @@ void psb_gtt_takedown(struct drm_device *dev) int psb_gtt_init(struct drm_device *dev, int resume) { struct drm_psb_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); unsigned gtt_pages; unsigned long stolen_size, vram_stolen_size; unsigned i, num_pages; @@ -376,8 +378,8 @@ int psb_gtt_init(struct drm_device *dev, int resume) pg = &dev_priv->gtt; /* Enable the GTT */ - pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl); - pci_write_config_word(dev->pdev, PSB_GMCH_CTRL, + pci_read_config_word(pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl); + pci_write_config_word(pdev, PSB_GMCH_CTRL, dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED); dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL); @@ -397,8 +399,8 @@ int psb_gtt_init(struct drm_device *dev, int resume) */ pg->mmu_gatt_start = 0xE0000000; - pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE); - gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE) + pg->gtt_start = pci_resource_start(pdev, PSB_GTT_RESOURCE); + gtt_pages = pci_resource_len(pdev, PSB_GTT_RESOURCE) >> PAGE_SHIFT; /* CDV doesn't report this. 
In which case the system has 64 gtt pages */ if (pg->gtt_start == 0 || gtt_pages == 0) { @@ -407,10 +409,10 @@ int psb_gtt_init(struct drm_device *dev, int resume) pg->gtt_start = dev_priv->pge_ctl; } - pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE); - pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE) + pg->gatt_start = pci_resource_start(pdev, PSB_GATT_RESOURCE); + pg->gatt_pages = pci_resource_len(pdev, PSB_GATT_RESOURCE) >> PAGE_SHIFT; - dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE]; + dev_priv->gtt_mem = &pdev->resource[PSB_GATT_RESOURCE]; if (pg->gatt_pages == 0 || pg->gatt_start == 0) { static struct resource fudge; /* Preferably peppermint */ @@ -431,7 +433,7 @@ int psb_gtt_init(struct drm_device *dev, int resume) dev_priv->gtt_mem = &fudge; } - pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base); + pci_read_config_dword(pdev, PSB_BSM, &dev_priv->stolen_base); vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base - PAGE_SIZE; diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c index 8ad6337eeba3..d838369f0119 100644 --- a/drivers/gpu/drm/gma500/intel_bios.c +++ b/drivers/gpu/drm/gma500/intel_bios.c @@ -50,7 +50,7 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb) uint8_t panel_type; edp = find_section(bdb, BDB_EDP); - + dev_priv->edp.bpp = 18; if (!edp) { if (dev_priv->edp.support) { @@ -80,7 +80,7 @@ parse_edp(struct drm_psb_private *dev_priv, struct bdb_header *bdb) dev_priv->edp.pps = *edp_pps; DRM_DEBUG_KMS("EDP timing in vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", - dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8, + dev_priv->edp.pps.t1_t3, dev_priv->edp.pps.t8, dev_priv->edp.pps.t9, dev_priv->edp.pps.t10, dev_priv->edp.pps.t11_t12); @@ -516,7 +516,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv, int psb_intel_init_bios(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; - struct pci_dev *pdev = dev->pdev; + struct pci_dev *pdev = to_pci_dev(dev->dev); struct vbt_header *vbt = NULL; struct bdb_header *bdb = NULL; u8 __iomem *bios = NULL; @@ -574,7 +574,7 @@ int psb_intel_init_bios(struct drm_device *dev) return 0; } -/** +/* * Destroy and free VBT data */ void psb_intel_destroy_bios(struct drm_device *dev) diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c index a083fbfe35b8..370bd6451bd9 100644 --- a/drivers/gpu/drm/gma500/intel_gmbus.c +++ b/drivers/gpu/drm/gma500/intel_gmbus.c @@ -196,7 +196,7 @@ intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin) "gma500 GPIO%c", "?BACDE?F"[pin]); gpio->adapter.owner = THIS_MODULE; gpio->adapter.algo_data = &gpio->algo; - gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; + gpio->adapter.dev.parent = dev_priv->dev->dev; gpio->algo.setsda = set_data; gpio->algo.setscl = set_clock; gpio->algo.getsda = get_data; @@ -417,7 +417,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev) "gma500 gmbus %s", names[i]); - bus->adapter.dev.parent = &dev->pdev->dev; + bus->adapter.dev.parent = dev->dev; bus->adapter.algo_data = dev_priv; bus->adapter.algo = &gmbus_algorithm; diff --git a/drivers/gpu/drm/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c index de8810188190..5e1b4d70c317 100644 --- a/drivers/gpu/drm/gma500/intel_i2c.c +++ b/drivers/gpu/drm/gma500/intel_i2c.c @@ -85,7 +85,6 @@ static void set_data(void *data, int state_high) /** * psb_intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg * @dev: DRM 
device - * @output: driver specific output device * @reg: GPIO reg to use * @name: name for this bus * * @@ -117,7 +116,7 @@ struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev, snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name); chan->adapter.owner = THIS_MODULE; chan->adapter.algo_data = &chan->algo; - chan->adapter.dev.parent = &dev->pdev->dev; + chan->adapter.dev.parent = dev->dev; chan->algo.setsda = set_data; chan->algo.setscl = set_clock; chan->algo.getsda = get_data; @@ -145,7 +144,7 @@ out_free: /** * psb_intel_i2c_destroy - unregister and free i2c bus resources - * @output: channel to free + * @chan: channel to free * * Unregister the adapter from the i2c layer, then free the structure. */ diff --git a/drivers/gpu/drm/gma500/mdfld_device.c b/drivers/gpu/drm/gma500/mdfld_device.c index b83d59b21de5..684d6cf9856f 100644 --- a/drivers/gpu/drm/gma500/mdfld_device.c +++ b/drivers/gpu/drm/gma500/mdfld_device.c @@ -523,7 +523,9 @@ static struct gpiod_lookup_table mdfld_dsi_pipe_gpio_table = { static int mdfld_chip_setup(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; - if (pci_enable_msi(dev->pdev)) + struct pci_dev *pdev = to_pci_dev(dev->dev); + + if (pci_enable_msi(pdev)) dev_warn(dev->dev, "Enabling MSI failed!\n"); dev_priv->regmap = mdfld_regmap; diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c index ae1223f631a7..4c5a2f7348c5 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_dpi.c @@ -138,7 +138,7 @@ static void dsi_set_pipe_plane_enable_state(struct drm_device *dev, REG_WRITE(pipeconf_reg, BIT(31)); if (REG_BIT_WAIT(pipeconf_reg, 1, 30)) - dev_err(&dev->pdev->dev, "%s: Pipe enable timeout\n", + dev_err(dev->dev, "%s: Pipe enable timeout\n", __func__); /*Set up display plane */ @@ -165,11 +165,11 @@ static void dsi_set_pipe_plane_enable_state(struct drm_device *dev, REG_FLD_MOD(pipeconf_reg, 0, 31, 31); if (REG_BIT_WAIT(pipeconf_reg, 0, 30)) - dev_err(&dev->pdev->dev, "%s: Pipe disable timeout\n", + dev_err(dev->dev, "%s: Pipe disable timeout\n", __func__); if (REG_BIT_WAIT(MIPI_GEN_FIFO_STAT_REG(pipe), 1, 28)) - dev_err(&dev->pdev->dev, "%s: FIFO not empty\n", + dev_err(dev->dev, "%s: FIFO not empty\n", __func__); } } @@ -867,7 +867,7 @@ void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder, REG_WRITE(MRST_DPLL_A, 0x80800000); if (REG_BIT_WAIT(pipeconf_reg, 1, 29)) - dev_err(&dev->pdev->dev, "%s: DSI PLL lock timeout\n", + dev_err(dev->dev, "%s: DSI PLL lock timeout\n", __func__); REG_WRITE(MIPI_DPHY_PARAM_REG(pipe), 0x2A0c6008); diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c index 4aab76613bd9..24105f45c1c4 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -63,7 +63,7 @@ static int __init parse_LABC_control(char *arg) early_param("LABC", parse_LABC_control); #endif -/** +/* * Check and see if the generic control or data buffer is empty and ready. */ void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg, @@ -85,7 +85,7 @@ void mdfld_dsi_gen_fifo_ready(struct drm_device *dev, u32 gen_fifo_stat_reg, gen_fifo_stat_reg); } -/** +/* * Manage the DSI MIPI keyboard and display brightness. * FIXME: this is exported to OSPM code. should work out a specific * display interface to OSPM.
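
The gma500 hunks above and below all apply the same mechanical conversion: struct drm_device no longer carries a pdev pointer, so call sites that still need a PCI-specific API first recover the PCI device from the generic device pointer. A minimal sketch of the idiom, using a hypothetical psb_example_mmio_start() helper that is not part of the patch:

#include <linux/pci.h>
#include <drm/drm_device.h>

/* Illustration only: for a PCI-bound DRM device, dev->dev is &pdev->dev,
 * so to_pci_dev() recovers the struct pci_dev via container_of(). */
static resource_size_t psb_example_mmio_start(struct drm_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	return pci_resource_start(pdev, 0);	/* BAR 0, arbitrary for the sketch */
}

Device-generic helpers such as dev_err() and dev_warn(), by contrast, take dev->dev directly, which is why most hunks simply drop the pdev dereference rather than converting it.
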
diff --git a/drivers/gpu/drm/gma500/mdfld_intel_display.c b/drivers/gpu/drm/gma500/mdfld_intel_display.c index aae2d358364c..462aba8f7528 100644 --- a/drivers/gpu/drm/gma500/mdfld_intel_display.c +++ b/drivers/gpu/drm/gma500/mdfld_intel_display.c @@ -95,7 +95,7 @@ void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe) } } -/** +/* * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ @@ -263,7 +263,7 @@ void mdfld_disable_crtc(struct drm_device *dev, int pipe) } -/** +/* * Sets the power management mode of the pipe and plane. * * This code should probably grow support for turning the cursor off and back @@ -599,7 +599,7 @@ static void mdfld_clock(int refclk, struct mrst_clock_t *clock) clock->dot = (refclk * clock->m) / clock->p1; } -/** +/* * Returns a set of divisors for the desired target clock with the given refclk, * or FALSE. Divisor values are the actual divisors for */ diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c index 8ab44fec4bfa..68e787924ed0 100644 --- a/drivers/gpu/drm/gma500/mid_bios.c +++ b/drivers/gpu/drm/gma500/mid_bios.c @@ -19,8 +19,9 @@ static void mid_get_fuse_settings(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); struct pci_dev *pci_root = - pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus), + pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), 0, 0); uint32_t fuse_value = 0; uint32_t fuse_value_tmp = 0; @@ -93,7 +94,8 @@ static void mid_get_fuse_settings(struct drm_device *dev) static void mid_get_pci_revID(struct drm_psb_private *dev_priv) { uint32_t platform_rev_id = 0; - int domain = pci_domain_nr(dev_priv->dev->pdev->bus); + struct pci_dev *pdev = to_pci_dev(dev_priv->dev->dev); + int domain = pci_domain_nr(pdev->bus); struct pci_dev *pci_gfx_root = pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(2, 0)); @@ -269,11 +271,12 @@ out: static void mid_get_vbt_data(struct drm_psb_private *dev_priv) { struct drm_device *dev = dev_priv->dev; + struct pci_dev *pdev = to_pci_dev(dev->dev); u32 addr; u8 __iomem *vbt_virtual; struct mid_vbt_header vbt_header; struct pci_dev *pci_gfx_root = - pci_get_domain_bus_and_slot(pci_domain_nr(dev->pdev->bus), + pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus), 0, PCI_DEVFN(2, 0)); int ret = -1; diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c index 505044c9a673..13aff19aae9b 100644 --- a/drivers/gpu/drm/gma500/mmu.c +++ b/drivers/gpu/drm/gma500/mmu.c @@ -313,8 +313,8 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd) return pt; } -struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, - unsigned long addr) +static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd, + unsigned long addr) { uint32_t index = psb_mmu_pd_index(addr); struct psb_mmu_pt *pt; @@ -416,15 +416,6 @@ struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver) return pd; } -/* Returns the physical address of the PD shared by sgx/msvdx */ -uint32_t psb_get_default_pd_addr(struct psb_mmu_driver *driver) -{ - struct psb_mmu_pd *pd; - - pd = psb_mmu_get_default_pd(driver); - return page_to_pfn(pd->p) << PAGE_SHIFT; -} - void psb_mmu_driver_takedown(struct psb_mmu_driver *driver) { struct drm_device *dev = driver->dev; @@ -690,7 +681,7 @@ out: if (pd->hw_context != -1) psb_mmu_flush(pd->driver); - return 0; + return ret; } int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page 
**pages, diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index 900e5499249d..129f87971002 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -174,7 +174,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit, return min_error == 0; } -/** +/* * Returns a set of divisors for the desired target clock with the given refclk, * or FALSE. Divisor values are the actual divisors for */ @@ -205,7 +205,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit, return err != target; } -/** +/* * Sets the power management mode of the pipe and plane. * * This code should probably grow support for turning the cursor off and back @@ -337,7 +337,7 @@ static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode) gma_power_end(dev); } -/** +/* * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c index 8754290b0e23..08cd5f73c868 100644 --- a/drivers/gpu/drm/gma500/oaktrail_device.c +++ b/drivers/gpu/drm/gma500/oaktrail_device.c @@ -504,9 +504,10 @@ static const struct psb_offset oaktrail_regmap[2] = { static int oaktrail_chip_setup(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); int ret; - - if (pci_enable_msi(dev->pdev)) + + if (pci_enable_msi(pdev)) dev_warn(dev->dev, "Enabling MSI failed!\n"); dev_priv->regmap = oaktrail_regmap; diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c index e28107061148..fc9a34ed58bd 100644 --- a/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c +++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c @@ -279,11 +279,8 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev) hdmi_dev = pci_get_drvdata(dev); i2c_dev = kzalloc(sizeof(struct hdmi_i2c_dev), GFP_KERNEL); - if (i2c_dev == NULL) { - DRM_ERROR("Can't allocate interface\n"); - ret = -ENOMEM; - goto exit; - } + if (!i2c_dev) + return -ENOMEM; i2c_dev->adap = &oaktrail_hdmi_i2c_adapter; i2c_dev->status = I2C_STAT_INIT; @@ -300,16 +297,23 @@ int oaktrail_hdmi_i2c_init(struct pci_dev *dev) oaktrail_hdmi_i2c_adapter.name, hdmi_dev); if (ret) { DRM_ERROR("Failed to request IRQ for I2C controller\n"); - goto err; + goto free_dev; } /* Adapter registration */ ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter); - return ret; + if (ret) { + DRM_ERROR("Failed to add I2C adapter\n"); + goto free_irq; + } -err: + return 0; + +free_irq: + free_irq(dev->irq, hdmi_dev); +free_dev: kfree(i2c_dev); -exit: + return ret; } diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c index 2828360153d1..432bdcc57ac9 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c @@ -29,7 +29,7 @@ #define MRST_BLC_MAX_PWM_REG_FREQ 0xFFFF #define BRIGHTNESS_MAX_LEVEL 100 -/** +/* * Sets the power state for the panel. 
*/ static void oaktrail_lvds_set_power(struct drm_device *dev, @@ -60,7 +60,7 @@ static void oaktrail_lvds_set_power(struct drm_device *dev, pp_status = REG_READ(PP_STATUS); } while (pp_status & PP_ON); dev_priv->is_lvds_on = false; - pm_request_idle(&dev->pdev->dev); + pm_request_idle(dev->dev); } gma_power_end(dev); } @@ -282,6 +282,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev, /** * oaktrail_lvds_init - setup LVDS connectors on this device * @dev: drm device + * @mode_dev: PSB mode device * * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c index baaf8212e01d..1d2dd6ea1c71 100644 --- a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c +++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c @@ -66,12 +66,12 @@ static int get_clock(void *data) { struct psb_intel_i2c_chan *chan = data; - u32 val, tmp; + u32 val; val = LPC_READ_REG(chan, RGIO); val |= GPIO_CLOCK; LPC_WRITE_REG(chan, RGIO, val); - tmp = LPC_READ_REG(chan, RGLVL); + LPC_READ_REG(chan, RGLVL); val = (LPC_READ_REG(chan, RGLVL) & GPIO_CLOCK) ? 1 : 0; return val; @@ -80,12 +80,12 @@ static int get_clock(void *data) static int get_data(void *data) { struct psb_intel_i2c_chan *chan = data; - u32 val, tmp; + u32 val; val = LPC_READ_REG(chan, RGIO); val |= GPIO_DATA; LPC_WRITE_REG(chan, RGIO, val); - tmp = LPC_READ_REG(chan, RGLVL); + LPC_READ_REG(chan, RGLVL); val = (LPC_READ_REG(chan, RGLVL) & GPIO_DATA) ? 1 : 0; return val; @@ -145,7 +145,7 @@ void oaktrail_lvds_i2c_init(struct drm_encoder *encoder) strncpy(chan->adapter.name, "gma500 LPC", I2C_NAME_SIZE - 1); chan->adapter.owner = THIS_MODULE; chan->adapter.algo_data = &chan->algo; - chan->adapter.dev.parent = &dev->pdev->dev; + chan->adapter.dev.parent = dev->dev; chan->algo.setsda = set_data; chan->algo.setscl = set_clock; chan->algo.getsda = get_data; diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c index eab6d889bde9..a1ffc6a1c255 100644 --- a/drivers/gpu/drm/gma500/opregion.c +++ b/drivers/gpu/drm/gma500/opregion.c @@ -305,12 +305,13 @@ void psb_intel_opregion_fini(struct drm_device *dev) int psb_intel_opregion_setup(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); struct psb_intel_opregion *opregion = &dev_priv->opregion; u32 opregion_phy, mboxes; void __iomem *base; int err = 0; - pci_read_config_dword(dev->pdev, PCI_ASLS, &opregion_phy); + pci_read_config_dword(pdev, PCI_ASLS, &opregion_phy); if (opregion_phy == 0) { DRM_DEBUG_DRIVER("ACPI Opregion not supported\n"); return -ENOTSUPP; diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c index bea8578846d1..56ef88237ef6 100644 --- a/drivers/gpu/drm/gma500/power.c +++ b/drivers/gpu/drm/gma500/power.c @@ -70,8 +70,8 @@ void gma_power_init(struct drm_device *dev) */ void gma_power_uninit(struct drm_device *dev) { - pm_runtime_disable(&dev->pdev->dev); - pm_runtime_set_suspended(&dev->pdev->dev); + pm_runtime_disable(dev->dev); + pm_runtime_set_suspended(dev->dev); } /** @@ -93,6 +93,7 @@ static void gma_suspend_display(struct drm_device *dev) /** * gma_resume_display - resume display side logic + * @pdev: PCI device * * Resume the display hardware restoring state and enabling * as necessary. 
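
The power.c conversions lean on the same identity from the opposite direction: the runtime-PM API only ever needed a struct device, and for this driver dev->dev and &pdev->dev name the same object, so the pdev detour can go away entirely. A sketch of the equivalence (illustrative function, not part of the patch):

#include <linux/bug.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <drm/drm_device.h>

/* Illustration only: both spellings resolve to the identical struct device. */
static void example_runtime_get(struct drm_device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);

	WARN_ON(dev->dev != &pdev->dev);	/* never fires for a PCI-bound device */
	pm_runtime_get(dev->dev);		/* was: pm_runtime_get(&pdev->dev) */
}
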
@@ -146,7 +147,7 @@ static void gma_suspend_pci(struct pci_dev *pdev) /** * gma_resume_pci - resume helper - * @dev: our PCI device + * @pdev: our PCI device * * Perform the resume processing on our PCI device state - rewrite * register state and re-enable the PCI device @@ -178,8 +179,7 @@ static bool gma_resume_pci(struct pci_dev *pdev) /** * gma_power_suspend - bus callback for suspend - * @pdev: our PCI device - * @state: suspend type + * @_dev: our device * * Called back by the PCI layer during a suspend of the system. We * perform the necessary shut down steps and save enough state that @@ -208,7 +208,7 @@ int gma_power_suspend(struct device *_dev) /** * gma_power_resume - resume power - * @pdev: PCI device + * @_dev: our device * * Resume the PCI side of the graphics and then the displays */ @@ -249,6 +249,7 @@ bool gma_power_is_on(struct drm_device *dev) bool gma_power_begin(struct drm_device *dev, bool force_on) { struct drm_psb_private *dev_priv = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); int ret; unsigned long flags; @@ -256,7 +257,7 @@ bool gma_power_begin(struct drm_device *dev, bool force_on) /* Power already on ? */ if (dev_priv->display_power) { dev_priv->display_count++; - pm_runtime_get(&dev->pdev->dev); + pm_runtime_get(dev->dev); spin_unlock_irqrestore(&power_ctrl_lock, flags); return true; } @@ -264,11 +265,11 @@ bool gma_power_begin(struct drm_device *dev, bool force_on) goto out_false; /* Ok power up needed */ - ret = gma_resume_pci(dev->pdev); + ret = gma_resume_pci(pdev); if (ret == 0) { psb_irq_preinstall(dev); psb_irq_postinstall(dev); - pm_runtime_get(&dev->pdev->dev); + pm_runtime_get(dev->dev); dev_priv->display_count++; spin_unlock_irqrestore(&power_ctrl_lock, flags); return true; @@ -293,7 +294,7 @@ void gma_power_end(struct drm_device *dev) dev_priv->display_count--; WARN_ON(dev_priv->display_count < 0); spin_unlock_irqrestore(&power_ctrl_lock, flags); - pm_runtime_put(&dev->pdev->dev); + pm_runtime_put(dev->dev); } int psb_runtime_suspend(struct device *dev) diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index cc2d59e8471d..a19f60d40394 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -208,6 +208,7 @@ static void psb_driver_unload(struct drm_device *dev) static int psb_driver_load(struct drm_device *dev, unsigned long flags) { + struct pci_dev *pdev = to_pci_dev(dev->dev); struct drm_psb_private *dev_priv; unsigned long resource_start, resource_len; unsigned long irqflags; @@ -227,11 +228,11 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) pg = &dev_priv->gtt; - pci_set_master(dev->pdev); + pci_set_master(pdev); dev_priv->num_pipe = dev_priv->ops->pipes; - resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE); + resource_start = pci_resource_start(pdev, PSB_MMIO_RESOURCE); dev_priv->vdc_reg = ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE); @@ -244,7 +245,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) goto out_err; if (IS_MRST(dev)) { - int domain = pci_domain_nr(dev->pdev->bus); + int domain = pci_domain_nr(pdev->bus); dev_priv->aux_pdev = pci_get_domain_bus_and_slot(domain, 0, @@ -312,6 +313,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) if (ret) goto out_err; + ret = -ENOMEM; + dev_priv->mmu = psb_mmu_driver_init(dev, 1, 0, 0); if (!dev_priv->mmu) goto out_err; @@ -359,7 +362,7 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) 
PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R); spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags); - drm_irq_install(dev, dev->pdev->irq); + drm_irq_install(dev, pdev->irq); dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ @@ -385,8 +388,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags) psb_intel_opregion_enable_asle(dev); #if 0 /* Enable runtime pm at last */ - pm_runtime_enable(&dev->pdev->dev); - pm_runtime_set_active(&dev->pdev->dev); + pm_runtime_enable(dev->dev); + pm_runtime_set_active(dev->dev); #endif /* Intel drm driver load is done, continue doing pvr load */ return 0; @@ -415,7 +418,7 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd, if (runtime_allowed == 1 && dev_priv->is_lvds_on) { runtime_allowed++; - pm_runtime_allow(&dev->pdev->dev); + pm_runtime_allow(dev->dev); dev_priv->rpm_enabled = 1; } return drm_ioctl(filp, cmd, arg); @@ -437,7 +440,6 @@ static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_pci_disable_device; } - dev->pdev = pdev; pci_set_drvdata(pdev, dev); ret = psb_driver_load(dev, ent->driver_data); diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h index 5b7f7a312d53..d303f8271f7e 100644 --- a/drivers/gpu/drm/gma500/psb_drv.h +++ b/drivers/gpu/drm/gma500/psb_drv.h @@ -43,10 +43,10 @@ enum { CHIP_MFLD_0130 = 3, /* Medfield */ }; -#define IS_PSB(dev) (((dev)->pdev->device & 0xfffe) == 0x8108) -#define IS_MRST(dev) (((dev)->pdev->device & 0xfff0) == 0x4100) -#define IS_MFLD(dev) (((dev)->pdev->device & 0xfff8) == 0x0130) -#define IS_CDV(dev) (((dev)->pdev->device & 0xfff0) == 0x0be0) +#define IS_PSB(drm) ((to_pci_dev((drm)->dev)->device & 0xfffe) == 0x8108) +#define IS_MRST(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x4100) +#define IS_MFLD(drm) ((to_pci_dev((drm)->dev)->device & 0xfff8) == 0x0130) +#define IS_CDV(drm) ((to_pci_dev((drm)->dev)->device & 0xfff0) == 0x0be0) /* Hardware offsets */ #define PSB_VDC_OFFSET 0x00000000 diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c index 531c5485be17..9c3cb1b80bbd 100644 --- a/drivers/gpu/drm/gma500/psb_intel_display.c +++ b/drivers/gpu/drm/gma500/psb_intel_display.c @@ -71,7 +71,7 @@ static void psb_intel_clock(int refclk, struct gma_clock_t *clock) clock->dot = clock->vco / clock->p; } -/** +/* * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c index 063c66bb946d..ace95d4bdb6f 100644 --- a/drivers/gpu/drm/gma500/psb_intel_lvds.c +++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c @@ -216,7 +216,7 @@ static void psb_intel_lvds_set_power(struct drm_device *dev, bool on) dev_err(dev->dev, "set power, chip off!\n"); return; } - + if (on) { REG_WRITE(PP_CONTROL, REG_READ(PP_CONTROL) | POWER_TARGET_ON); @@ -626,6 +626,7 @@ const struct drm_connector_funcs psb_intel_lvds_connector_funcs = { /** * psb_intel_lvds_init - setup LVDS connectors on this device * @dev: drm device + * @mode_dev: mode device * * Create the connector, register the LVDS DDC bus, and try to figure out what * modes we can display on the LVDS panel (if present). 
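
A side note on the dev_printk() call sites in the next hunks: the patch only swaps the device argument and keeps the historical dev_printk(KERN_ERR, ...) spelling; dev_err() is the conventional shorthand for the same output, as this sketch (not a change the patch makes) shows:

#include <linux/device.h>
#include <drm/drm_device.h>

/* Illustration only: the two calls below emit the same message. */
static void example_report_failure(struct drm_device *dev)
{
	dev_printk(KERN_ERR, dev->dev, "I2C bus registration failed.\n");
	dev_err(dev->dev, "I2C bus registration failed.\n");
}
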
@@ -700,7 +701,7 @@ void psb_intel_lvds_init(struct drm_device *dev, lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B"); if (!lvds_priv->i2c_bus) { dev_printk(KERN_ERR, - &dev->pdev->dev, "I2C bus registration failed.\n"); + dev->dev, "I2C bus registration failed.\n"); goto failed_blc_i2c; } lvds_priv->i2c_bus->slave_addr = 0x2C; @@ -719,7 +720,7 @@ void psb_intel_lvds_init(struct drm_device *dev, /* Set up the DDC bus. */ lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C"); if (!lvds_priv->ddc_bus) { - dev_printk(KERN_ERR, &dev->pdev->dev, + dev_printk(KERN_ERR, dev->dev, "DDC bus registration " "failed.\n"); goto failed_ddc; } diff --git a/drivers/gpu/drm/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c index 88653a40aeb5..60306780e16c 100644 --- a/drivers/gpu/drm/gma500/psb_intel_modes.c +++ b/drivers/gpu/drm/gma500/psb_intel_modes.c @@ -11,7 +11,7 @@ /** * psb_intel_ddc_probe - * + * @adapter: Associated I2C adaptor */ bool psb_intel_ddc_probe(struct i2c_adapter *adapter) { @@ -43,6 +43,7 @@ bool psb_intel_ddc_probe(struct i2c_adapter *adapter) /** * psb_intel_ddc_get_modes - get modelist from monitor * @connector: DRM connector device to use + * @adapter: Associated I2C adaptor * * Fetch the EDID information from @connector using the DDC bus. */ diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c index 907f966d6f22..355da2856389 100644 --- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c +++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c @@ -221,7 +221,7 @@ static bool psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo, struct psb_intel_sdvo_connector *psb_intel_sdvo_connector); -/** +/* * Writes the SDVOB or SDVOC with the given value, but always writes both * SDVOB and SDVOC to work around apparent hardware issues (according to * comments in the BIOS). @@ -588,7 +588,7 @@ static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdv &targets, sizeof(targets)); } -/** +/* * Return whether each input is trained. * * This function is making an assumption about the layout of the response, @@ -1818,7 +1818,7 @@ psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo) #endif } -/** +/* * Choose the appropriate DDC bus for control bus switch command for this * SDVO output based on the controlled output. * @@ -2406,7 +2406,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo, sdvo->ddc.owner = THIS_MODULE; sdvo->ddc.class = I2C_CLASS_DDC; snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy"); - sdvo->ddc.dev.parent = &dev->pdev->dev; + sdvo->ddc.dev.parent = dev->dev; sdvo->ddc.algo_data = sdvo; sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy; diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c index 15eb3770d817..5cceea9be534 100644 --- a/drivers/gpu/drm/gma500/psb_irq.c +++ b/drivers/gpu/drm/gma500/psb_irq.c @@ -126,9 +126,8 @@ static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe) } } -/** +/* * Display controller interrupt handler for pipe event. 
- * */ static void mid_pipe_event_handler(struct drm_device *dev, int pipe) { @@ -347,6 +346,7 @@ int psb_irq_postinstall(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; unsigned long irqflags; + unsigned int i; spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); @@ -359,20 +359,12 @@ int psb_irq_postinstall(struct drm_device *dev) PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R); PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); - if (dev->vblank[0].enabled) - psb_enable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); - else - psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); - - if (dev->vblank[1].enabled) - psb_enable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); - else - psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); - - if (dev->vblank[2].enabled) - psb_enable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); - else - psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); + for (i = 0; i < dev->num_crtcs; ++i) { + if (dev->vblank[i].enabled) + psb_enable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); + else + psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); + } if (dev_priv->ops->hotplug_enable) dev_priv->ops->hotplug_enable(dev, true); @@ -385,6 +377,7 @@ void psb_irq_uninstall(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; unsigned long irqflags; + unsigned int i; spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags); @@ -393,14 +386,10 @@ void psb_irq_uninstall(struct drm_device *dev) PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM); - if (dev->vblank[0].enabled) - psb_disable_pipestat(dev_priv, 0, PIPE_VBLANK_INTERRUPT_ENABLE); - - if (dev->vblank[1].enabled) - psb_disable_pipestat(dev_priv, 1, PIPE_VBLANK_INTERRUPT_ENABLE); - - if (dev->vblank[2].enabled) - psb_disable_pipestat(dev_priv, 2, PIPE_VBLANK_INTERRUPT_ENABLE); + for (i = 0; i < dev->num_crtcs; ++i) { + if (dev->vblank[i].enabled) + psb_disable_pipestat(dev_priv, i, PIPE_VBLANK_INTERRUPT_ENABLE); + } dev_priv->vdc_irq_mask &= _PSB_IRQ_SGX_FLAG | _PSB_IRQ_MSVDX_FLAG | diff --git a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c index e5bdd99ad453..99d2ffc2fed9 100644 --- a/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c +++ b/drivers/gpu/drm/gma500/tc35876x-dsi-lvds.c @@ -454,13 +454,13 @@ static void tc35876x_brightness_init(struct drm_device *dev) ret = intel_scu_ipc_ioread8(GPIOPWMCTRL, &pwmctrl); if (ret || pwmctrl != 0x01) { if (ret) - dev_err(&dev->pdev->dev, "GPIOPWMCTRL read failed\n"); + dev_err(dev->dev, "GPIOPWMCTRL read failed\n"); else - dev_warn(&dev->pdev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl); + dev_warn(dev->dev, "GPIOPWMCTRL was not set to system clock (pwmctrl = 0x%02x)\n", pwmctrl); ret = intel_scu_ipc_iowrite8(GPIOPWMCTRL, 0x01); if (ret) - dev_err(&dev->pdev->dev, "GPIOPWMCTRL set failed\n"); + dev_err(dev->dev, "GPIOPWMCTRL set failed\n"); } clkdiv = calc_clkdiv(SYSTEMCLK, PWM_FREQUENCY); @@ -470,9 +470,9 @@ static void tc35876x_brightness_init(struct drm_device *dev) ret = intel_scu_ipc_iowrite8(PWM0CLKDIV0, clkdiv & 0xff); if (ret) - dev_err(&dev->pdev->dev, "PWM0CLKDIV set failed\n"); + dev_err(dev->dev, "PWM0CLKDIV set failed\n"); else - dev_dbg(&dev->pdev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n", + dev_dbg(dev->dev, "PWM0CLKDIV set to 0x%04x (%d Hz)\n", clkdiv, PWM_FREQUENCY); } @@ -575,7 +575,7 @@ static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev) { struct 
drm_display_mode *mode; - dev_dbg(&dev->pdev->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); mode = kzalloc(sizeof(*mode), GFP_KERNEL); if (!mode) @@ -592,15 +592,15 @@ static struct drm_display_mode *tc35876x_get_config_mode(struct drm_device *dev) mode->vtotal = 838; mode->clock = 33324 << 1; - dev_info(&dev->pdev->dev, "hdisplay(w) = %d\n", mode->hdisplay); - dev_info(&dev->pdev->dev, "vdisplay(h) = %d\n", mode->vdisplay); - dev_info(&dev->pdev->dev, "HSS = %d\n", mode->hsync_start); - dev_info(&dev->pdev->dev, "HSE = %d\n", mode->hsync_end); - dev_info(&dev->pdev->dev, "htotal = %d\n", mode->htotal); - dev_info(&dev->pdev->dev, "VSS = %d\n", mode->vsync_start); - dev_info(&dev->pdev->dev, "VSE = %d\n", mode->vsync_end); - dev_info(&dev->pdev->dev, "vtotal = %d\n", mode->vtotal); - dev_info(&dev->pdev->dev, "clock = %d\n", mode->clock); + dev_info(dev->dev, "hdisplay(w) = %d\n", mode->hdisplay); + dev_info(dev->dev, "vdisplay(h) = %d\n", mode->vdisplay); + dev_info(dev->dev, "HSS = %d\n", mode->hsync_start); + dev_info(dev->dev, "HSE = %d\n", mode->hsync_end); + dev_info(dev->dev, "htotal = %d\n", mode->htotal); + dev_info(dev->dev, "VSS = %d\n", mode->vsync_start); + dev_info(dev->dev, "VSE = %d\n", mode->vsync_end); + dev_info(dev->dev, "vtotal = %d\n", mode->vtotal); + dev_info(dev->dev, "clock = %d\n", mode->clock); drm_mode_set_name(mode); drm_mode_set_crtcinfo(mode, 0); @@ -775,19 +775,19 @@ void tc35876x_init(struct drm_device *dev) { int r; - dev_dbg(&dev->pdev->dev, "%s\n", __func__); + dev_dbg(dev->dev, "%s\n", __func__); cmi_lcd_hack_create_device(); r = i2c_add_driver(&cmi_lcd_i2c_driver); if (r < 0) - dev_err(&dev->pdev->dev, + dev_err(dev->dev, "%s: i2c_add_driver() for %s failed (%d)\n", __func__, cmi_lcd_i2c_driver.driver.name, r); r = i2c_add_driver(&tc35876x_bridge_i2c_driver); if (r < 0) - dev_err(&dev->pdev->dev, + dev_err(dev->dev, "%s: i2c_add_driver() for %s failed (%d)\n", __func__, tc35876x_bridge_i2c_driver.driver.name, r); diff --git a/drivers/gpu/drm/hisilicon/hibmc/Makefile b/drivers/gpu/drm/hisilicon/hibmc/Makefile index 684ef794eb7c..d25c75e60d3d 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Makefile +++ b/drivers/gpu/drm/hisilicon/hibmc/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_ttm.o hibmc_drm_i2c.o +hibmc-drm-y := hibmc_drm_drv.o hibmc_drm_de.o hibmc_drm_vdac.o hibmc_drm_i2c.o obj-$(CONFIG_DRM_HISI_HIBMC) += hibmc-drm.o diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index ea962acfeae0..096eea985b6f 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -499,7 +499,7 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = { int hibmc_de_init(struct hibmc_drm_private *priv) { - struct drm_device *dev = priv->dev; + struct drm_device *dev = &priv->dev; struct drm_crtc *crtc = &priv->crtc; struct drm_plane *plane = &priv->primary_plane; int ret; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index d845657fd99c..abd6250d5a14 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -16,6 +16,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_vram_helper.h> #include <drm/drm_irq.h> #include <drm/drm_managed.h> @@ 
-43,6 +44,12 @@ static irqreturn_t hibmc_drm_interrupt(int irq, void *arg) return IRQ_HANDLED; } +static int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args); +} + static const struct drm_driver hibmc_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .fops = &hibmc_fops, @@ -77,47 +84,48 @@ static const struct dev_pm_ops hibmc_pm_ops = { hibmc_pm_resume) }; +static const struct drm_mode_config_funcs hibmc_mode_funcs = { + .mode_valid = drm_vram_helper_mode_valid, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, + .fb_create = drm_gem_fb_create, +}; + static int hibmc_kms_init(struct hibmc_drm_private *priv) { + struct drm_device *dev = &priv->dev; int ret; - drm_mode_config_init(priv->dev); - priv->mode_config_initialized = true; + ret = drmm_mode_config_init(dev); + if (ret) + return ret; - priv->dev->mode_config.min_width = 0; - priv->dev->mode_config.min_height = 0; - priv->dev->mode_config.max_width = 1920; - priv->dev->mode_config.max_height = 1200; + dev->mode_config.min_width = 0; + dev->mode_config.min_height = 0; + dev->mode_config.max_width = 1920; + dev->mode_config.max_height = 1200; - priv->dev->mode_config.fb_base = priv->fb_base; - priv->dev->mode_config.preferred_depth = 32; - priv->dev->mode_config.prefer_shadow = 1; + dev->mode_config.fb_base = priv->fb_base; + dev->mode_config.preferred_depth = 32; + dev->mode_config.prefer_shadow = 1; - priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs; + dev->mode_config.funcs = (void *)&hibmc_mode_funcs; ret = hibmc_de_init(priv); if (ret) { - drm_err(priv->dev, "failed to init de: %d\n", ret); + drm_err(dev, "failed to init de: %d\n", ret); return ret; } ret = hibmc_vdac_init(priv); if (ret) { - drm_err(priv->dev, "failed to init vdac: %d\n", ret); + drm_err(dev, "failed to init vdac: %d\n", ret); return ret; } return 0; } -static void hibmc_kms_fini(struct hibmc_drm_private *priv) -{ - if (priv->mode_config_initialized) { - drm_mode_config_cleanup(priv->dev); - priv->mode_config_initialized = false; - } -} - /* * It can operate in one of three modes: 0, 1 or Sleep. 
*/ @@ -202,8 +210,8 @@ static void hibmc_hw_config(struct hibmc_drm_private *priv) static int hibmc_hw_map(struct hibmc_drm_private *priv) { - struct drm_device *dev = priv->dev; - struct pci_dev *pdev = dev->pdev; + struct drm_device *dev = &priv->dev; + struct pci_dev *pdev = to_pci_dev(dev->dev); resource_size_t addr, size, ioaddr, iosize; ioaddr = pci_resource_start(pdev, 1); @@ -242,40 +250,31 @@ static int hibmc_hw_init(struct hibmc_drm_private *priv) static int hibmc_unload(struct drm_device *dev) { - struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); - drm_atomic_helper_shutdown(dev); if (dev->irq_enabled) drm_irq_uninstall(dev); - pci_disable_msi(dev->pdev); - hibmc_kms_fini(priv); - hibmc_mm_fini(priv); - dev->dev_private = NULL; + pci_disable_msi(to_pci_dev(dev->dev)); + return 0; } static int hibmc_load(struct drm_device *dev) { - struct hibmc_drm_private *priv; + struct pci_dev *pdev = to_pci_dev(dev->dev); + struct hibmc_drm_private *priv = to_hibmc_drm_private(dev); int ret; - priv = drmm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); - if (!priv) { - drm_err(dev, "no memory to allocate for hibmc_drm_private\n"); - return -ENOMEM; - } - dev->dev_private = priv; - priv->dev = dev; - ret = hibmc_hw_init(priv); if (ret) goto err; - ret = hibmc_mm_init(priv); - if (ret) + ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), priv->fb_size); + if (ret) { + drm_err(dev, "Error initializing VRAM MM; %d\n", ret); goto err; + } ret = hibmc_kms_init(priv); if (ret) @@ -287,11 +286,11 @@ static int hibmc_load(struct drm_device *dev) goto err; } - ret = pci_enable_msi(dev->pdev); + ret = pci_enable_msi(pdev); if (ret) { drm_warn(dev, "enabling MSI failed: %d\n", ret); } else { - ret = drm_irq_install(dev, dev->pdev->irq); + ret = drm_irq_install(dev, pdev->irq); if (ret) drm_warn(dev, "install irq failed: %d\n", ret); } @@ -310,6 +309,7 @@ err: static int hibmc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { + struct hibmc_drm_private *priv; struct drm_device *dev; int ret; @@ -318,25 +318,26 @@ static int hibmc_pci_probe(struct pci_dev *pdev, if (ret) return ret; - dev = drm_dev_alloc(&hibmc_driver, &pdev->dev); - if (IS_ERR(dev)) { + priv = devm_drm_dev_alloc(&pdev->dev, &hibmc_driver, + struct hibmc_drm_private, dev); + if (IS_ERR(priv)) { DRM_ERROR("failed to allocate drm_device\n"); - return PTR_ERR(dev); + return PTR_ERR(priv); } - dev->pdev = pdev; + dev = &priv->dev; pci_set_drvdata(pdev, dev); - ret = pci_enable_device(pdev); + ret = pcim_enable_device(pdev); if (ret) { drm_err(dev, "failed to enable pci device: %d\n", ret); - goto err_free; + goto err_return; } ret = hibmc_load(dev); if (ret) { drm_err(dev, "failed to load hibmc: %d\n", ret); - goto err_disable; + goto err_return; } ret = drm_dev_register(dev, 0); @@ -352,11 +353,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev, err_unload: hibmc_unload(dev); -err_disable: - pci_disable_device(pdev); -err_free: - drm_dev_put(dev); - +err_return: return ret; } @@ -366,7 +363,6 @@ static void hibmc_pci_remove(struct pci_dev *pdev) drm_dev_unregister(dev); hibmc_unload(dev); - drm_dev_put(dev); } static const struct pci_device_id hibmc_pci_table[] = { diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h index f310a83d9c48..7d263f4d7078 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.h @@ -37,12 +37,11 @@ struct hibmc_drm_private { resource_size_t fb_size; /* drm */ - struct 
drm_device *dev; + struct drm_device dev; struct drm_plane primary_plane; struct drm_crtc crtc; struct drm_encoder encoder; struct hibmc_connector connector; - bool mode_config_initialized; }; static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *connector) @@ -52,7 +51,7 @@ static inline struct hibmc_connector *to_hibmc_connector(struct drm_connector *c static inline struct hibmc_drm_private *to_hibmc_drm_private(struct drm_device *dev) { - return dev->dev_private; + return container_of(dev, struct hibmc_drm_private, dev); } void hibmc_set_power_mode(struct hibmc_drm_private *priv, @@ -64,11 +63,6 @@ int hibmc_de_init(struct hibmc_drm_private *priv); int hibmc_vdac_init(struct hibmc_drm_private *priv); int hibmc_mm_init(struct hibmc_drm_private *hibmc); -void hibmc_mm_fini(struct hibmc_drm_private *hibmc); -int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, - struct drm_mode_create_dumb *args); int hibmc_ddc_create(struct drm_device *drm_dev, struct hibmc_connector *connector); -extern const struct drm_mode_config_funcs hibmc_mode_funcs; - #endif diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c index 86d712090d87..410bd019bb35 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_i2c.c @@ -83,7 +83,7 @@ int hibmc_ddc_create(struct drm_device *drm_dev, connector->adapter.owner = THIS_MODULE; connector->adapter.class = I2C_CLASS_DDC; snprintf(connector->adapter.name, I2C_NAME_SIZE, "HIS i2c bit bus"); - connector->adapter.dev.parent = &drm_dev->pdev->dev; + connector->adapter.dev.parent = drm_dev->dev; i2c_set_adapdata(&connector->adapter, connector); connector->adapter.algo_data = &connector->bit_data; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c index 74e26c27d878..c228091fb0e6 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c @@ -14,6 +14,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_print.h> +#include <drm/drm_simple_kms_helper.h> #include "hibmc_drm_drv.h" #include "hibmc_drm_regs.h" @@ -42,12 +43,6 @@ out: return count; } -static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - return MODE_OK; -} - static void hibmc_connector_destroy(struct drm_connector *connector) { struct hibmc_connector *hibmc_connector = to_hibmc_connector(connector); @@ -59,7 +54,6 @@ static void hibmc_connector_destroy(struct drm_connector *connector) static const struct drm_connector_helper_funcs hibmc_connector_helper_funcs = { .get_modes = hibmc_connector_get_modes, - .mode_valid = hibmc_connector_mode_valid, }; static const struct drm_connector_funcs hibmc_connector_funcs = { @@ -90,15 +84,12 @@ static const struct drm_encoder_helper_funcs hibmc_encoder_helper_funcs = { .mode_set = hibmc_encoder_mode_set, }; -static const struct drm_encoder_funcs hibmc_encoder_funcs = { - .destroy = drm_encoder_cleanup, -}; - int hibmc_vdac_init(struct hibmc_drm_private *priv) { - struct drm_device *dev = priv->dev; + struct drm_device *dev = &priv->dev; struct hibmc_connector *hibmc_connector = &priv->connector; struct drm_encoder *encoder = &priv->encoder; + struct drm_crtc *crtc = &priv->crtc; struct drm_connector *connector = &hibmc_connector->base; int ret; @@ -108,9 +99,8 @@ int hibmc_vdac_init(struct 
hibmc_drm_private *priv) return ret; } - encoder->possible_crtcs = 0x1; - ret = drm_encoder_init(dev, encoder, &hibmc_encoder_funcs, - DRM_MODE_ENCODER_DAC, NULL); + encoder->possible_crtcs = drm_crtc_mask(crtc); + ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC); if (ret) { drm_err(dev, "failed to init encoder: %d\n", ret); return ret; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c deleted file mode 100644 index 602ece11bb4a..000000000000 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c +++ /dev/null @@ -1,61 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* Hisilicon Hibmc SoC drm driver - * - * Based on the bochs drm driver. - * - * Copyright (c) 2016 Huawei Limited. - * - * Author: - * Rongrong Zou <[email protected]> - * Rongrong Zou <[email protected]> - * Jianhua Li <[email protected]> - */ - -#include <linux/pci.h> - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_gem.h> -#include <drm/drm_gem_framebuffer_helper.h> -#include <drm/drm_gem_vram_helper.h> -#include <drm/drm_print.h> - -#include "hibmc_drm_drv.h" - -int hibmc_mm_init(struct hibmc_drm_private *hibmc) -{ - struct drm_vram_mm *vmm; - int ret; - struct drm_device *dev = hibmc->dev; - - vmm = drm_vram_helper_alloc_mm(dev, - pci_resource_start(dev->pdev, 0), - hibmc->fb_size); - if (IS_ERR(vmm)) { - ret = PTR_ERR(vmm); - drm_err(dev, "Error initializing VRAM MM; %d\n", ret); - return ret; - } - - return 0; -} - -void hibmc_mm_fini(struct hibmc_drm_private *hibmc) -{ - if (!hibmc->dev->vram_mm) - return; - - drm_vram_helper_release_mm(hibmc->dev); -} - -int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev, - struct drm_mode_create_dumb *args) -{ - return drm_gem_vram_fill_create_dumb(file, dev, 0, 128, args); -} - -const struct drm_mode_config_funcs hibmc_mode_funcs = { - .mode_valid = drm_vram_helper_mode_valid, - .atomic_check = drm_atomic_helper_check, - .atomic_commit = drm_atomic_helper_commit, - .fb_create = drm_gem_fb_create, -}; diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index e5574e506a5c..d6ac946d0407 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -38,6 +38,7 @@ i915-y += i915_drv.o \ i915_config.o \ i915_irq.o \ i915_getparam.o \ + i915_mitigations.o \ i915_params.o \ i915_pci.o \ i915_scatterlist.o \ @@ -58,6 +59,7 @@ i915-y += i915_drv.o \ # core library code i915-y += \ + dma_resv_utils.o \ i915_memcpy.o \ i915_mm.o \ i915_sw_fence.o \ @@ -82,6 +84,7 @@ gt-y += \ gt/gen6_engine_cs.o \ gt/gen6_ppgtt.o \ gt/gen7_renderclear.o \ + gt/gen8_engine_cs.o \ gt/gen8_ppgtt.o \ gt/intel_breadcrumbs.o \ gt/intel_context.o \ @@ -91,6 +94,7 @@ gt-y += \ gt/intel_engine_heartbeat.o \ gt/intel_engine_pm.o \ gt/intel_engine_user.o \ + gt/intel_execlists_submission.o \ gt/intel_ggtt.o \ gt/intel_ggtt_fencing.o \ gt/intel_gt.o \ @@ -106,6 +110,7 @@ gt-y += \ gt/intel_mocs.o \ gt/intel_ppgtt.o \ gt/intel_rc6.o \ + gt/intel_region_lmem.o \ gt/intel_renderstate.o \ gt/intel_reset.o \ gt/intel_ring.o \ @@ -166,7 +171,6 @@ i915-y += \ i915_scheduler.o \ i915_trace_points.o \ i915_vma.o \ - intel_region_lmem.o \ intel_wopcm.o # general-purpose microcontroller (GuC) support @@ -197,6 +201,7 @@ i915-y += \ display/intel_combo_phy.o \ display/intel_connector.o \ display/intel_csr.o \ + display/intel_cursor.o \ display/intel_display.o \ display/intel_display_power.o \ display/intel_dpio_phy.o \ @@ -214,7 +219,8 @@ i915-y += \ display/intel_quirks.o \ display/intel_sprite.o 
\ display/intel_tc.o \ - display/intel_vga.o + display/intel_vga.o \ + display/i9xx_plane.o i915-$(CONFIG_ACPI) += \ display/intel_acpi.o \ display/intel_opregion.o @@ -283,15 +289,7 @@ obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o # exclude some broken headers from the test coverage no-header-test := \ - display/intel_vbt_defs.h \ - gvt/execlist.h \ - gvt/fb_decoder.h \ - gvt/gtt.h \ - gvt/gvt.h \ - gvt/interrupt.h \ - gvt/mmio_context.h \ - gvt/mpt.h \ - gvt/scheduler.h + display/intel_vbt_defs.h extra-$(CONFIG_DRM_I915_WERROR) += \ $(patsubst %.h,%.hdrtest, $(filter-out $(no-header-test), \ diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c new file mode 100644 index 000000000000..b78985c855a5 --- /dev/null +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -0,0 +1,704 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ +#include <linux/kernel.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_plane_helper.h> + +#include "intel_atomic.h" +#include "intel_atomic_plane.h" +#include "intel_display_types.h" +#include "intel_sprite.h" +#include "i9xx_plane.h" + +/* Primary plane formats for gen <= 3 */ +static const u32 i8xx_primary_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, +}; + +/* Primary plane formats for ivb (no fp16 due to hw issue) */ +static const u32 ivb_primary_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, +}; + +/* Primary plane formats for gen >= 4, except ivb */ +static const u32 i965_primary_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XBGR16161616F, +}; + +/* Primary plane formats for vlv/chv */ +static const u32 vlv_primary_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_XBGR16161616F, +}; + +static const u64 i9xx_format_modifiers[] = { + I915_FORMAT_MOD_X_TILED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) +{ + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + break; + default: + return false; + } + + switch (format) { + case DRM_FORMAT_C8: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XRGB8888: + return modifier == DRM_FORMAT_MOD_LINEAR || + modifier == I915_FORMAT_MOD_X_TILED; + default: + return false; + } +} + +static bool i965_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) +{ + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + break; + default: + return false; + } + + switch (format) { + case DRM_FORMAT_C8: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_XBGR16161616F: + return modifier == DRM_FORMAT_MOD_LINEAR || + modifier == I915_FORMAT_MOD_X_TILED; + default: + return false; + } +} + +static bool 
i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, + enum i9xx_plane_id i9xx_plane) +{ + if (!HAS_FBC(dev_priv)) + return false; + + if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + return i9xx_plane == PLANE_A; /* tied to pipe A */ + else if (IS_IVYBRIDGE(dev_priv)) + return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B || + i9xx_plane == PLANE_C; + else if (INTEL_GEN(dev_priv) >= 4) + return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B; + else + return i9xx_plane == PLANE_A; +} + +static bool i9xx_plane_has_windowing(struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + + if (IS_CHERRYVIEW(dev_priv)) + return i9xx_plane == PLANE_B; + else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) + return false; + else if (IS_GEN(dev_priv, 4)) + return i9xx_plane == PLANE_C; + else + return i9xx_plane == PLANE_B || + i9xx_plane == PLANE_C; +} + +static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int rotation = plane_state->hw.rotation; + u32 dspcntr; + + dspcntr = DISPLAY_PLANE_ENABLE; + + if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) || + IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) + dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; + + switch (fb->format->format) { + case DRM_FORMAT_C8: + dspcntr |= DISPPLANE_8BPP; + break; + case DRM_FORMAT_XRGB1555: + dspcntr |= DISPPLANE_BGRX555; + break; + case DRM_FORMAT_ARGB1555: + dspcntr |= DISPPLANE_BGRA555; + break; + case DRM_FORMAT_RGB565: + dspcntr |= DISPPLANE_BGRX565; + break; + case DRM_FORMAT_XRGB8888: + dspcntr |= DISPPLANE_BGRX888; + break; + case DRM_FORMAT_XBGR8888: + dspcntr |= DISPPLANE_RGBX888; + break; + case DRM_FORMAT_ARGB8888: + dspcntr |= DISPPLANE_BGRA888; + break; + case DRM_FORMAT_ABGR8888: + dspcntr |= DISPPLANE_RGBA888; + break; + case DRM_FORMAT_XRGB2101010: + dspcntr |= DISPPLANE_BGRX101010; + break; + case DRM_FORMAT_XBGR2101010: + dspcntr |= DISPPLANE_RGBX101010; + break; + case DRM_FORMAT_ARGB2101010: + dspcntr |= DISPPLANE_BGRA101010; + break; + case DRM_FORMAT_ABGR2101010: + dspcntr |= DISPPLANE_RGBA101010; + break; + case DRM_FORMAT_XBGR16161616F: + dspcntr |= DISPPLANE_RGBX161616; + break; + default: + MISSING_CASE(fb->format->format); + return 0; + } + + if (INTEL_GEN(dev_priv) >= 4 && + fb->modifier == I915_FORMAT_MOD_X_TILED) + dspcntr |= DISPPLANE_TILED; + + if (rotation & DRM_MODE_ROTATE_180) + dspcntr |= DISPPLANE_ROTATE_180; + + if (rotation & DRM_MODE_REFLECT_X) + dspcntr |= DISPPLANE_MIRROR; + + return dspcntr; +} + +int i9xx_check_plane_surface(struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + int src_x, src_y, src_w; + u32 offset; + int ret; + + ret = intel_plane_compute_gtt(plane_state); + if (ret) + return ret; + + if (!plane_state->uapi.visible) + return 0; + + src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + src_x = plane_state->uapi.src.x1 >> 16; + src_y = plane_state->uapi.src.y1 >> 16; + + /* Undocumented hardware limit on i965/g4x/vlv/chv */ + if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048) + return -EINVAL; + + intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); + + if (INTEL_GEN(dev_priv) >= 4) + offset = 
intel_plane_compute_aligned_offset(&src_x, &src_y, + plane_state, 0); + else + offset = 0; + + /* + * Put the final coordinates back so that the src + * coordinate checks will see the right values. + */ + drm_rect_translate_to(&plane_state->uapi.src, + src_x << 16, src_y << 16); + + /* HSW/BDW do this automagically in hardware */ + if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { + unsigned int rotation = plane_state->hw.rotation; + int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; + + if (rotation & DRM_MODE_ROTATE_180) { + src_x += src_w - 1; + src_y += src_h - 1; + } else if (rotation & DRM_MODE_REFLECT_X) { + src_x += src_w - 1; + } + } + + plane_state->color_plane[0].offset = offset; + plane_state->color_plane[0].x = src_x; + plane_state->color_plane[0].y = src_y; + + return 0; +} + +static int +i9xx_plane_check(struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + int ret; + + ret = chv_plane_check_rotation(plane_state); + if (ret) + return ret; + + ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + i9xx_plane_has_windowing(plane)); + if (ret) + return ret; + + ret = i9xx_check_plane_surface(plane_state); + if (ret) + return ret; + + if (!plane_state->uapi.visible) + return 0; + + ret = intel_plane_check_src_coordinates(plane_state); + if (ret) + return ret; + + plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); + + return 0; +} + +static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 dspcntr = 0; + + if (crtc_state->gamma_enable) + dspcntr |= DISPPLANE_GAMMA_ENABLE; + + if (crtc_state->csc_enable) + dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; + + if (INTEL_GEN(dev_priv) < 5) + dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); + + return dspcntr; +} + +static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + unsigned int cpp = fb->format->cpp[0]; + + /* + * g4x bspec says 64bpp pixel rate can't exceed 80% + * of cdclk when the sprite plane is enabled on the + * same pipe. ilk/snb bspec says 64bpp pixel rate is + * never allowed to exceed 80% of cdclk. Let's just go + * with the ilk/snb limit always. + */ + if (cpp == 8) { + *num = 10; + *den = 8; + } else { + *num = 1; + *den = 1; + } +} + +static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int pixel_rate; + unsigned int num, den; + + /* + * Note that crtc_state->pixel_rate accounts for both + * horizontal and vertical panel fitter downscaling factors. + * Pre-HSW bspec tells us to only consider the horizontal + * downscaling factor here. We ignore that and just consider + * both for simplicity. 
+ */ + pixel_rate = crtc_state->pixel_rate; + + i9xx_plane_ratio(crtc_state, plane_state, &num, &den); + + /* two pixels per clock with double wide pipe */ + if (crtc_state->double_wide) + den *= 2; + + return DIV_ROUND_UP(pixel_rate * num, den); +} + +static void i9xx_update_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + u32 linear_offset; + int x = plane_state->color_plane[0].x; + int y = plane_state->color_plane[0].y; + int crtc_x = plane_state->uapi.dst.x1; + int crtc_y = plane_state->uapi.dst.y1; + int crtc_w = drm_rect_width(&plane_state->uapi.dst); + int crtc_h = drm_rect_height(&plane_state->uapi.dst); + unsigned long irqflags; + u32 dspaddr_offset; + u32 dspcntr; + + dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); + + linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); + + if (INTEL_GEN(dev_priv) >= 4) + dspaddr_offset = plane_state->color_plane[0].offset; + else + dspaddr_offset = linear_offset; + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane), + plane_state->color_plane[0].stride); + + if (INTEL_GEN(dev_priv) < 4) { + /* + * PLANE_A doesn't actually have a full window + * generator but let's assume we still need to + * program whatever is there. + */ + intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane), + (crtc_y << 16) | crtc_x); + intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), + ((crtc_h - 1) << 16) | (crtc_w - 1)); + } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { + intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), + (crtc_y << 16) | crtc_x); + intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), + ((crtc_h - 1) << 16) | (crtc_w - 1)); + intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0); + } + + if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { + intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane), + (y << 16) | x); + } else if (INTEL_GEN(dev_priv) >= 4) { + intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane), + linear_offset); + intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane), + (y << 16) | x); + } + + /* + * The control register self-arms if the plane was previously + * disabled. Try to make the plane enable atomic by writing + * the control register just before the surface register. + */ + intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); + if (INTEL_GEN(dev_priv) >= 4) + intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), + intel_plane_ggtt_offset(plane_state) + dspaddr_offset); + else + intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), + intel_plane_ggtt_offset(plane_state) + dspaddr_offset); + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void i9xx_disable_plane(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; + unsigned long irqflags; + u32 dspcntr; + + /* + * DSPCNTR pipe gamma enable on g4x+ and pipe csc + * enable on ilk+ affect the pipe bottom color as + * well, so we must configure them even if the plane + * is disabled. + * + * On pre-g4x there is no way to gamma correct the + * pipe bottom color but we'll keep on doing this + * anyway so that the crtc state readout works correctly. 
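+	 *
+	 * (Hence the crtc-derived bits are still computed and written
+	 * to DSPCNTR below even though DISPLAY_PLANE_ENABLE stays
+	 * cleared.)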
+	 */
+	dspcntr = i9xx_plane_ctl_crtc(crtc_state);
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
+	if (INTEL_GEN(dev_priv) >= 4)
+		intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
+	else
+		intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
+
+	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+}
+
+static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
+				    enum pipe *pipe)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum intel_display_power_domain power_domain;
+	enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
+	intel_wakeref_t wakeref;
+	bool ret;
+	u32 val;
+
+	/*
+	 * Not 100% correct for planes that can move between pipes,
+	 * but that's only the case for gen2-4 which don't have any
+	 * display power wells.
+	 */
+	power_domain = POWER_DOMAIN_PIPE(plane->pipe);
+	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
+	if (!wakeref)
+		return false;
+
+	val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
+
+	ret = val & DISPLAY_PLANE_ENABLE;
+
+	if (INTEL_GEN(dev_priv) >= 5)
+		*pipe = plane->pipe;
+	else
+		*pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
+			DISPPLANE_SEL_PIPE_SHIFT;
+
+	intel_display_power_put(dev_priv, power_domain, wakeref);
+
+	return ret;
+}
+
+unsigned int
+i9xx_plane_max_stride(struct intel_plane *plane,
+		      u32 pixel_format, u64 modifier,
+		      unsigned int rotation)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+
+	if (!HAS_GMCH(dev_priv)) {
+		return 32*1024;
+	} else if (INTEL_GEN(dev_priv) >= 4) {
+		if (modifier == I915_FORMAT_MOD_X_TILED)
+			return 16*1024;
+		else
+			return 32*1024;
+	} else if (INTEL_GEN(dev_priv) >= 3) {
+		if (modifier == I915_FORMAT_MOD_X_TILED)
+			return 8*1024;
+		else
+			return 16*1024;
+	} else {
+		if (plane->i9xx_plane == PLANE_C)
+			return 4*1024;
+		else
+			return 8*1024;
+	}
+}
+
+static const struct drm_plane_funcs i965_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = intel_plane_destroy,
+	.atomic_duplicate_state = intel_plane_duplicate_state,
+	.atomic_destroy_state = intel_plane_destroy_state,
+	.format_mod_supported = i965_plane_format_mod_supported,
+};
+
+static const struct drm_plane_funcs i8xx_plane_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = intel_plane_destroy,
+	.atomic_duplicate_state = intel_plane_duplicate_state,
+	.atomic_destroy_state = intel_plane_destroy_state,
+	.format_mod_supported = i8xx_plane_format_mod_supported,
+};
+
+struct intel_plane *
+intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+	struct intel_plane *plane;
+	const struct drm_plane_funcs *plane_funcs;
+	unsigned int supported_rotations;
+	const u32 *formats;
+	int num_formats;
+	int ret, zpos;
+
+	if (INTEL_GEN(dev_priv) >= 9)
+		return skl_universal_plane_create(dev_priv, pipe,
+						  PLANE_PRIMARY);
+
+	plane = intel_plane_alloc();
+	if (IS_ERR(plane))
+		return plane;
+
+	plane->pipe = pipe;
+	/*
+	 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
+	 * port are hooked to pipe B. Hence we want plane A feeding pipe B.
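+	 *
+	 * Concretely, the !pipe trick below gives pipe A (0) plane B
+	 * and pipe B (1) plane A on such two-pipe parts, so the
+	 * FBC-capable plane ends up feeding the LVDS pipe.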
+ */ + if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 && + INTEL_NUM_PIPES(dev_priv) == 2) + plane->i9xx_plane = (enum i9xx_plane_id) !pipe; + else + plane->i9xx_plane = (enum i9xx_plane_id) pipe; + plane->id = PLANE_PRIMARY; + plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); + + plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); + if (plane->has_fbc) { + struct intel_fbc *fbc = &dev_priv->fbc; + + fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; + } + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { + formats = vlv_primary_formats; + num_formats = ARRAY_SIZE(vlv_primary_formats); + } else if (INTEL_GEN(dev_priv) >= 4) { + /* + * WaFP16GammaEnabling:ivb + * "Workaround : When using the 64-bit format, the plane + * output on each color channel has one quarter amplitude. + * It can be brought up to full amplitude by using pipe + * gamma correction or pipe color space conversion to + * multiply the plane output by four." + * + * There is no dedicated plane gamma for the primary plane, + * and using the pipe gamma/csc could conflict with other + * planes, so we choose not to expose fp16 on IVB primary + * planes. HSW primary planes no longer have this problem. + */ + if (IS_IVYBRIDGE(dev_priv)) { + formats = ivb_primary_formats; + num_formats = ARRAY_SIZE(ivb_primary_formats); + } else { + formats = i965_primary_formats; + num_formats = ARRAY_SIZE(i965_primary_formats); + } + } else { + formats = i8xx_primary_formats; + num_formats = ARRAY_SIZE(i8xx_primary_formats); + } + + if (INTEL_GEN(dev_priv) >= 4) + plane_funcs = &i965_plane_funcs; + else + plane_funcs = &i8xx_plane_funcs; + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + plane->min_cdclk = vlv_plane_min_cdclk; + else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + plane->min_cdclk = hsw_plane_min_cdclk; + else if (IS_IVYBRIDGE(dev_priv)) + plane->min_cdclk = ivb_plane_min_cdclk; + else + plane->min_cdclk = i9xx_plane_min_cdclk; + + plane->max_stride = i9xx_plane_max_stride; + plane->update_plane = i9xx_update_plane; + plane->disable_plane = i9xx_disable_plane; + plane->get_hw_state = i9xx_plane_get_hw_state; + plane->check_plane = i9xx_plane_check; + + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, + 0, plane_funcs, + formats, num_formats, + i9xx_format_modifiers, + DRM_PLANE_TYPE_PRIMARY, + "primary %c", pipe_name(pipe)); + else + ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, + 0, plane_funcs, + formats, num_formats, + i9xx_format_modifiers, + DRM_PLANE_TYPE_PRIMARY, + "plane %c", + plane_name(plane->i9xx_plane)); + if (ret) + goto fail; + + if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { + supported_rotations = + DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | + DRM_MODE_REFLECT_X; + } else if (INTEL_GEN(dev_priv) >= 4) { + supported_rotations = + DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; + } else { + supported_rotations = DRM_MODE_ROTATE_0; + } + + if (INTEL_GEN(dev_priv) >= 4) + drm_plane_create_rotation_property(&plane->base, + DRM_MODE_ROTATE_0, + supported_rotations); + + zpos = 0; + drm_plane_create_zpos_immutable_property(&plane->base, zpos); + + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); + + return plane; + +fail: + intel_plane_free(plane); + + return ERR_PTR(ret); +} + diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h new file mode 100644 index 000000000000..bc2834a62735 --- /dev/null +++ 
b/drivers/gpu/drm/i915/display/i9xx_plane.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef _I9XX_PLANE_H_ +#define _I9XX_PLANE_H_ + +#include <linux/types.h> + +enum pipe; +struct drm_i915_private; +struct intel_plane; +struct intel_plane_state; + +unsigned int i9xx_plane_max_stride(struct intel_plane *plane, + u32 pixel_format, u64 modifier, + unsigned int rotation); +int i9xx_check_plane_surface(struct intel_plane_state *plane_state); + +struct intel_plane * +intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe); + +#endif diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index a9439b415603..9d245a689323 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1535,6 +1535,9 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder, vdsc_cfg->convert_rgb = true; + /* FIXME: initialize from VBT */ + vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; + ret = intel_dsc_compute_params(encoder, crtc_state); if (ret) return ret; @@ -1616,10 +1619,6 @@ static void gen11_dsi_get_power_domains(struct intel_encoder *encoder, get_dsi_io_power_domains(i915, enc_to_intel_dsi(encoder)); - - if (crtc_state->dsc.compression_enable) - intel_display_power_get(i915, - intel_dsc_power_domain(crtc_state)); } static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 7e9f84b00859..b5e1ee99535c 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -312,10 +312,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ int ret; intel_plane_set_invisible(new_crtc_state, new_plane_state); + new_crtc_state->enabled_planes &= ~BIT(plane->id); if (!new_plane_state->hw.crtc && !old_plane_state->hw.crtc) return 0; + new_crtc_state->enabled_planes |= BIT(plane->id); + ret = plane->check_plane(new_crtc_state, new_plane_state); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 4cc949b228f2..987cf509337f 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1623,6 +1623,13 @@ static const u8 icp_ddc_pin_map[] = { [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP, }; +static const u8 rkl_pch_tgp_ddc_pin_map[] = { + [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT, + [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT, + [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP, + [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP, +}; + static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) { const u8 *ddc_pin_map; @@ -1630,6 +1637,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin) if (INTEL_PCH_TYPE(dev_priv) >= PCH_DG1) { return vbt_pin; + } else if (IS_ROCKETLAKE(dev_priv) && INTEL_PCH_TYPE(dev_priv) == PCH_TGP) { + ddc_pin_map = rkl_pch_tgp_ddc_pin_map; + n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map); } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) { ddc_pin_map = icp_ddc_pin_map; n_entries = ARRAY_SIZE(icp_ddc_pin_map); @@ -2555,16 +2565,11 @@ static void fill_dsc(struct intel_crtc_state *crtc_state, crtc_state->dsc.slice_count); /* - * FIXME: Use VBT rc_buffer_block_size and rc_buffer_size for the - * implementation specific physical rate buffer size. 
Currently we use - * the required rate buffer model size calculated in - * drm_dsc_compute_rc_parameters() according to VESA DSC Annex E. - * * The VBT rc_buffer_block_size and rc_buffer_size definitions - * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. The DP DSC - * implementation should also use the DPCD (or perhaps VBT for eDP) - * provided value for the buffer size. + * correspond to DP 1.4 DPCD offsets 0x62 and 0x63. */ + vdsc_cfg->rc_model_size = drm_dsc_dp_rc_buffer_size(dsc->rc_buffer_block_size, + dsc->rc_buffer_size); /* FIXME: DSI spec says bpc + 1 for this one */ vdsc_cfg->line_buf_depth = VBT_DSC_LINE_BUFFER_DEPTH(dsc->line_buffer_depth); diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index c449d28d0560..2e878cc274b7 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -2415,8 +2415,7 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state) if (ret) return ret; - ret = drm_atomic_add_affected_planes(&state->base, - &crtc->base); + ret = intel_atomic_add_affected_planes(state, crtc); if (ret) return ret; @@ -2710,8 +2709,8 @@ static int dg1_rawclk(struct drm_i915_private *dev_priv) * DG1 always uses a 38.4 MHz rawclk. The bspec tells us * "Program Numerator=2, Denominator=4, Divider=37 decimal." */ - I915_WRITE(PCH_RAWCLK_FREQ, - CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2)); + intel_de_write(dev_priv, PCH_RAWCLK_FREQ, + CNP_RAWCLK_DEN(4) | CNP_RAWCLK_DIV(37) | ICP_RAWCLK_NUM(2)); return 38400; } diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index d5ad61e4083e..996ae0608a62 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -427,10 +427,22 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv) u32 val; if (phy == PHY_A && - !icl_combo_phy_verify_state(dev_priv, phy)) - drm_warn(&dev_priv->drm, - "Combo PHY %c HW state changed unexpectedly\n", - phy_name(phy)); + !icl_combo_phy_verify_state(dev_priv, phy)) { + if (IS_TIGERLAKE(dev_priv) || IS_DG1(dev_priv)) { + /* + * A known problem with old ifwi: + * https://gitlab.freedesktop.org/drm/intel/-/issues/2411 + * Suppress the warning for CI. Remove ASAP! 
+ */ + drm_dbg_kms(&dev_priv->drm, + "Combo PHY %c HW state changed unexpectedly\n", + phy_name(phy)); + } else { + drm_warn(&dev_priv->drm, + "Combo PHY %c HW state changed unexpectedly\n", + phy_name(phy)); + } + } if (!has_phy_misc(dev_priv, phy)) goto skip_phy_misc; diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 406e96785c76..d5ceb7bdc14b 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -279,24 +279,17 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector) } void -intel_attach_colorspace_property(struct drm_connector *connector) +intel_attach_hdmi_colorspace_property(struct drm_connector *connector) { - switch (connector->connector_type) { - case DRM_MODE_CONNECTOR_HDMIA: - case DRM_MODE_CONNECTOR_HDMIB: - if (drm_mode_create_hdmi_colorspace_property(connector)) - return; - break; - case DRM_MODE_CONNECTOR_DisplayPort: - case DRM_MODE_CONNECTOR_eDP: - if (drm_mode_create_dp_colorspace_property(connector)) - return; - break; - default: - MISSING_CASE(connector->connector_type); - return; - } + if (!drm_mode_create_hdmi_colorspace_property(connector)) + drm_object_attach_property(&connector->base, + connector->colorspace_property, 0); +} - drm_object_attach_property(&connector->base, - connector->colorspace_property, 0); +void +intel_attach_dp_colorspace_property(struct drm_connector *connector) +{ + if (!drm_mode_create_dp_colorspace_property(connector)) + drm_object_attach_property(&connector->base, + connector->colorspace_property, 0); } diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h index 93a7375c8196..661a37a3c6d8 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.h +++ b/drivers/gpu/drm/i915/display/intel_connector.h @@ -30,6 +30,7 @@ int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); void intel_attach_force_audio_property(struct drm_connector *connector); void intel_attach_broadcast_rgb_property(struct drm_connector *connector); void intel_attach_aspect_ratio_property(struct drm_connector *connector); -void intel_attach_colorspace_property(struct drm_connector *connector); +void intel_attach_hdmi_colorspace_property(struct drm_connector *connector); +void intel_attach_dp_colorspace_property(struct drm_connector *connector); #endif /* __INTEL_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c new file mode 100644 index 000000000000..21fe4d2753e9 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -0,0 +1,806 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ +#include <linux/kernel.h> + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_atomic_uapi.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_fourcc.h> + +#include "intel_atomic.h" +#include "intel_atomic_plane.h" +#include "intel_cursor.h" +#include "intel_display_types.h" +#include "intel_display.h" + +#include "intel_frontbuffer.h" +#include "intel_pm.h" +#include "intel_psr.h" +#include "intel_sprite.h" + +/* Cursor formats */ +static const u32 intel_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +static const u64 cursor_format_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static u32 intel_cursor_base(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + 
to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + const struct drm_i915_gem_object *obj = intel_fb_obj(fb); + u32 base; + + if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) + base = sg_dma_address(obj->mm.pages->sgl); + else + base = intel_plane_ggtt_offset(plane_state); + + return base + plane_state->color_plane[0].offset; +} + +static u32 intel_cursor_position(const struct intel_plane_state *plane_state) +{ + int x = plane_state->uapi.dst.x1; + int y = plane_state->uapi.dst.y1; + u32 pos = 0; + + if (x < 0) { + pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; + x = -x; + } + pos |= x << CURSOR_X_SHIFT; + + if (y < 0) { + pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; + y = -y; + } + pos |= y << CURSOR_Y_SHIFT; + + return pos; +} + +static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) +{ + const struct drm_mode_config *config = + &plane_state->uapi.plane->dev->mode_config; + int width = drm_rect_width(&plane_state->uapi.dst); + int height = drm_rect_height(&plane_state->uapi.dst); + + return width > 0 && width <= config->cursor_width && + height > 0 && height <= config->cursor_height; +} + +static int intel_cursor_check_surface(struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->uapi.plane->dev); + unsigned int rotation = plane_state->hw.rotation; + int src_x, src_y; + u32 offset; + int ret; + + ret = intel_plane_compute_gtt(plane_state); + if (ret) + return ret; + + if (!plane_state->uapi.visible) + return 0; + + src_x = plane_state->uapi.src.x1 >> 16; + src_y = plane_state->uapi.src.y1 >> 16; + + intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); + offset = intel_plane_compute_aligned_offset(&src_x, &src_y, + plane_state, 0); + + if (src_x != 0 || src_y != 0) { + drm_dbg_kms(&dev_priv->drm, + "Arbitrary cursor panning not supported\n"); + return -EINVAL; + } + + /* + * Put the final coordinates back so that the src + * coordinate checks will see the right values. 
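+	 *
+	 * (For the cursor this is always (0,0): any other src offset
+	 * was just rejected above as unsupported panning.)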
+ */ + drm_rect_translate_to(&plane_state->uapi.src, + src_x << 16, src_y << 16); + + /* ILK+ do this automagically in hardware */ + if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) { + const struct drm_framebuffer *fb = plane_state->hw.fb; + int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; + int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; + + offset += (src_h * src_w - 1) * fb->format->cpp[0]; + } + + plane_state->color_plane[0].offset = offset; + plane_state->color_plane[0].x = src_x; + plane_state->color_plane[0].y = src_y; + + return 0; +} + +static int intel_check_cursor(struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_rect src = plane_state->uapi.src; + const struct drm_rect dst = plane_state->uapi.dst; + int ret; + + if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { + drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n"); + return -EINVAL; + } + + ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + true); + if (ret) + return ret; + + /* Use the unclipped src/dst rectangles, which we program to hw */ + plane_state->uapi.src = src; + plane_state->uapi.dst = dst; + + ret = intel_cursor_check_surface(plane_state); + if (ret) + return ret; + + if (!plane_state->uapi.visible) + return 0; + + ret = intel_plane_check_src_coordinates(plane_state); + if (ret) + return ret; + + return 0; +} + +static unsigned int +i845_cursor_max_stride(struct intel_plane *plane, + u32 pixel_format, u64 modifier, + unsigned int rotation) +{ + return 2048; +} + +static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) +{ + u32 cntl = 0; + + if (crtc_state->gamma_enable) + cntl |= CURSOR_GAMMA_ENABLE; + + return cntl; +} + +static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + return CURSOR_ENABLE | + CURSOR_FORMAT_ARGB | + CURSOR_STRIDE(plane_state->color_plane[0].stride); +} + +static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) +{ + int width = drm_rect_width(&plane_state->uapi.dst); + + /* + * 845g/865g are only limited by the width of their cursors, + * the height is arbitrary up to the precision of the register. 
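+	 *
+	 * In other words the check below only demands a width that is
+	 * a multiple of 64 (64, 128, 192, ...); the height merely has
+	 * to pass the generic mode_config limits applied in
+	 * intel_cursor_size_ok().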
+ */ + return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); +} + +static int i845_check_cursor(struct intel_crtc_state *crtc_state, + struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->hw.fb; + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + int ret; + + ret = intel_check_cursor(crtc_state, plane_state); + if (ret) + return ret; + + /* if we want to turn off the cursor ignore width and height */ + if (!fb) + return 0; + + /* Check for which cursor types we support */ + if (!i845_cursor_size_ok(plane_state)) { + drm_dbg_kms(&i915->drm, + "Cursor dimension %dx%d not supported\n", + drm_rect_width(&plane_state->uapi.dst), + drm_rect_height(&plane_state->uapi.dst)); + return -EINVAL; + } + + drm_WARN_ON(&i915->drm, plane_state->uapi.visible && + plane_state->color_plane[0].stride != fb->pitches[0]); + + switch (fb->pitches[0]) { + case 256: + case 512: + case 1024: + case 2048: + break; + default: + drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n", + fb->pitches[0]); + return -EINVAL; + } + + plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); + + return 0; +} + +static void i845_update_cursor(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + u32 cntl = 0, base = 0, pos = 0, size = 0; + unsigned long irqflags; + + if (plane_state && plane_state->uapi.visible) { + unsigned int width = drm_rect_width(&plane_state->uapi.dst); + unsigned int height = drm_rect_height(&plane_state->uapi.dst); + + cntl = plane_state->ctl | + i845_cursor_ctl_crtc(crtc_state); + + size = (height << 12) | width; + + base = intel_cursor_base(plane_state); + pos = intel_cursor_position(plane_state); + } + + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + /* On these chipsets we can only modify the base/size/stride + * whilst the cursor is disabled. 
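+	 * That is why the reprogramming path below first writes
+	 * CURCNTR(PIPE_A) = 0, then reprograms base/size, and only
+	 * re-enables via CURCNTR afterwards; a pure movement takes
+	 * the cheap CURPOS-only branch instead.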
+ */ + if (plane->cursor.base != base || + plane->cursor.size != size || + plane->cursor.cntl != cntl) { + intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0); + intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base); + intel_de_write_fw(dev_priv, CURSIZE, size); + intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); + intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl); + + plane->cursor.base = base; + plane->cursor.size = size; + plane->cursor.cntl = cntl; + } else { + intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); + } + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void i845_disable_cursor(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + i845_update_cursor(plane, crtc_state, NULL); +} + +static bool i845_cursor_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; + bool ret; + + power_domain = POWER_DOMAIN_PIPE(PIPE_A); + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) + return false; + + ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE; + + *pipe = PIPE_A; + + intel_display_power_put(dev_priv, power_domain, wakeref); + + return ret; +} + +static unsigned int +i9xx_cursor_max_stride(struct intel_plane *plane, + u32 pixel_format, u64 modifier, + unsigned int rotation) +{ + return plane->base.dev->mode_config.cursor_width * 4; +} + +static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 cntl = 0; + + if (INTEL_GEN(dev_priv) >= 11) + return cntl; + + if (crtc_state->gamma_enable) + cntl = MCURSOR_GAMMA_ENABLE; + + if (crtc_state->csc_enable) + cntl |= MCURSOR_PIPE_CSC_ENABLE; + + if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) + cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); + + return cntl; +} + +static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->uapi.plane->dev); + u32 cntl = 0; + + if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) + cntl |= MCURSOR_TRICKLE_FEED_DISABLE; + + switch (drm_rect_width(&plane_state->uapi.dst)) { + case 64: + cntl |= MCURSOR_MODE_64_ARGB_AX; + break; + case 128: + cntl |= MCURSOR_MODE_128_ARGB_AX; + break; + case 256: + cntl |= MCURSOR_MODE_256_ARGB_AX; + break; + default: + MISSING_CASE(drm_rect_width(&plane_state->uapi.dst)); + return 0; + } + + if (plane_state->hw.rotation & DRM_MODE_ROTATE_180) + cntl |= MCURSOR_ROTATE_180; + + return cntl; +} + +static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->uapi.plane->dev); + int width = drm_rect_width(&plane_state->uapi.dst); + int height = drm_rect_height(&plane_state->uapi.dst); + + if (!intel_cursor_size_ok(plane_state)) + return false; + + /* Cursor width is limited to a few power-of-two sizes */ + switch (width) { + case 256: + case 128: + case 64: + break; + default: + return false; + } + + /* + * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor + * height from 8 lines up to the cursor width, when the + * cursor is not rotated. Everything else requires square + * cursors. 
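+	 *
+	 * For example, a 256x8 cursor is legal on IVB+ when unrotated,
+	 * while parts without CUR_FBC only accept square sizes such
+	 * as 256x256.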
+	 */
+	if (HAS_CUR_FBC(dev_priv) &&
+	    plane_state->hw.rotation & DRM_MODE_ROTATE_0) {
+		if (height < 8 || height > width)
+			return false;
+	} else {
+		if (height != width)
+			return false;
+	}
+
+	return true;
+}
+
+static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
+			     struct intel_plane_state *plane_state)
+{
+	struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	const struct drm_framebuffer *fb = plane_state->hw.fb;
+	enum pipe pipe = plane->pipe;
+	int ret;
+
+	ret = intel_check_cursor(crtc_state, plane_state);
+	if (ret)
+		return ret;
+
+	/* if we want to turn off the cursor ignore width and height */
+	if (!fb)
+		return 0;
+
+	/* Check for which cursor types we support */
+	if (!i9xx_cursor_size_ok(plane_state)) {
+		drm_dbg(&dev_priv->drm,
+			"Cursor dimension %dx%d not supported\n",
+			drm_rect_width(&plane_state->uapi.dst),
+			drm_rect_height(&plane_state->uapi.dst));
+		return -EINVAL;
+	}
+
+	drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+		    plane_state->color_plane[0].stride != fb->pitches[0]);
+
+	if (fb->pitches[0] !=
+	    drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "Invalid cursor stride (%u) (cursor width %d)\n",
+			    fb->pitches[0],
+			    drm_rect_width(&plane_state->uapi.dst));
+		return -EINVAL;
+	}
+
+	/*
+	 * There's something wrong with the cursor on CHV pipe C.
+	 * If it straddles the left edge of the screen then
+	 * moving it away from the edge or disabling it often
+	 * results in a pipe underrun, and often that can lead to
+	 * dead pipe (constant underrun reported, and it scans
+	 * out just a solid color). To recover from that, the
+	 * display power well must be turned off and on again.
+	 * Refuse to put the cursor into that compromised position.
+	 */
+	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
+	    plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "CHV cursor C not allowed to straddle the left screen edge\n");
+		return -EINVAL;
+	}
+
+	plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
+
+	return 0;
+}
+
+static void i9xx_update_cursor(struct intel_plane *plane,
+			       const struct intel_crtc_state *crtc_state,
+			       const struct intel_plane_state *plane_state)
+{
+	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+	enum pipe pipe = plane->pipe;
+	u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
+	unsigned long irqflags;
+
+	if (plane_state && plane_state->uapi.visible) {
+		int width = drm_rect_width(&plane_state->uapi.dst);
+		int height = drm_rect_height(&plane_state->uapi.dst);
+
+		cntl = plane_state->ctl |
+			i9xx_cursor_ctl_crtc(crtc_state);
+
+		if (width != height)
+			fbc_ctl = CUR_FBC_CTL_EN | (height - 1);
+
+		base = intel_cursor_base(plane_state);
+		pos = intel_cursor_position(plane_state);
+	}
+
+	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+	/*
+	 * On some platforms writing CURCNTR first will also
+	 * cause CURPOS to be armed by the CURBASE write.
+	 * Without the CURCNTR write the CURPOS write would
+	 * arm itself. Thus we always update CURCNTR before
+	 * CURPOS.
+	 *
+	 * On other platforms CURPOS always requires the
+	 * CURBASE write to arm the update. Additionally
+	 * a write to any of the cursor registers will cancel
+	 * an already armed cursor update. Thus leaving out
+	 * the CURBASE write after CURPOS could lead to a
+	 * cursor that doesn't appear to move, or even change
+	 * shape. Thus we always write CURBASE.
+ * + * The other registers are armed by the CURBASE write + * except when the plane is getting enabled at which time + * the CURCNTR write arms the update. + */ + + if (INTEL_GEN(dev_priv) >= 9) + skl_write_cursor_wm(plane, crtc_state); + + if (!intel_crtc_needs_modeset(crtc_state)) + intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0); + + if (plane->cursor.base != base || + plane->cursor.size != fbc_ctl || + plane->cursor.cntl != cntl) { + if (HAS_CUR_FBC(dev_priv)) + intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe), + fbc_ctl); + intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl); + intel_de_write_fw(dev_priv, CURPOS(pipe), pos); + intel_de_write_fw(dev_priv, CURBASE(pipe), base); + + plane->cursor.base = base; + plane->cursor.size = fbc_ctl; + plane->cursor.cntl = cntl; + } else { + intel_de_write_fw(dev_priv, CURPOS(pipe), pos); + intel_de_write_fw(dev_priv, CURBASE(pipe), base); + } + + spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); +} + +static void i9xx_disable_cursor(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state) +{ + i9xx_update_cursor(plane, crtc_state, NULL); +} + +static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, + enum pipe *pipe) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + enum intel_display_power_domain power_domain; + intel_wakeref_t wakeref; + bool ret; + u32 val; + + /* + * Not 100% correct for planes that can move between pipes, + * but that's only the case for gen2-3 which don't have any + * display power wells. + */ + power_domain = POWER_DOMAIN_PIPE(plane->pipe); + wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); + if (!wakeref) + return false; + + val = intel_de_read(dev_priv, CURCNTR(plane->pipe)); + + ret = val & MCURSOR_MODE; + + if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) + *pipe = plane->pipe; + else + *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> + MCURSOR_PIPE_SELECT_SHIFT; + + intel_display_power_put(dev_priv, power_domain, wakeref); + + return ret; +} + +static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) +{ + return modifier == DRM_FORMAT_MOD_LINEAR && + format == DRM_FORMAT_ARGB8888; +} + +static int +intel_legacy_cursor_update(struct drm_plane *_plane, + struct drm_crtc *_crtc, + struct drm_framebuffer *fb, + int crtc_x, int crtc_y, + unsigned int crtc_w, unsigned int crtc_h, + u32 src_x, u32 src_y, + u32 src_w, u32 src_h, + struct drm_modeset_acquire_ctx *ctx) +{ + struct intel_plane *plane = to_intel_plane(_plane); + struct intel_crtc *crtc = to_intel_crtc(_crtc); + struct intel_plane_state *old_plane_state = + to_intel_plane_state(plane->base.state); + struct intel_plane_state *new_plane_state; + struct intel_crtc_state *crtc_state = + to_intel_crtc_state(crtc->base.state); + struct intel_crtc_state *new_crtc_state; + int ret; + + /* + * When crtc is inactive or there is a modeset pending, + * wait for it to complete in the slowpath + * + * FIXME bigjoiner fastpath would be good + */ + if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) || + crtc_state->update_pipe || crtc_state->bigjoiner) + goto slow; + + /* + * Don't do an async update if there is an outstanding commit modifying + * the plane. This prevents our async update's changes from getting + * overridden by a previous synchronous update's state. 
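+	 *
+	 * (try_wait_for_completion() below is a non-blocking probe: if
+	 * the previous commit's hw_done has not signalled yet we fall
+	 * back to the full atomic slowpath rather than stall here.)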
+ */ + if (old_plane_state->uapi.commit && + !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done)) + goto slow; + + /* + * If any parameters change that may affect watermarks, + * take the slowpath. Only changing fb or position should be + * in the fastpath. + */ + if (old_plane_state->uapi.crtc != &crtc->base || + old_plane_state->uapi.src_w != src_w || + old_plane_state->uapi.src_h != src_h || + old_plane_state->uapi.crtc_w != crtc_w || + old_plane_state->uapi.crtc_h != crtc_h || + !old_plane_state->uapi.fb != !fb) + goto slow; + + new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base)); + if (!new_plane_state) + return -ENOMEM; + + new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base)); + if (!new_crtc_state) { + ret = -ENOMEM; + goto out_free; + } + + drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb); + + new_plane_state->uapi.src_x = src_x; + new_plane_state->uapi.src_y = src_y; + new_plane_state->uapi.src_w = src_w; + new_plane_state->uapi.src_h = src_h; + new_plane_state->uapi.crtc_x = crtc_x; + new_plane_state->uapi.crtc_y = crtc_y; + new_plane_state->uapi.crtc_w = crtc_w; + new_plane_state->uapi.crtc_h = crtc_h; + + intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc); + + ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, + old_plane_state, new_plane_state); + if (ret) + goto out_free; + + ret = intel_plane_pin_fb(new_plane_state); + if (ret) + goto out_free; + + intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb), + ORIGIN_FLIP); + intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), + to_intel_frontbuffer(new_plane_state->hw.fb), + plane->frontbuffer_bit); + + /* Swap plane state */ + plane->base.state = &new_plane_state->uapi; + + /* + * We cannot swap crtc_state as it may be in use by an atomic commit or + * page flip that's running simultaneously. If we swap crtc_state and + * destroy the old state, we will cause a use-after-free there. + * + * Only update active_planes, which is needed for our internal + * bookkeeping. Either value will do the right thing when updating + * planes atomically. If the cursor was part of the atomic update then + * we would have taken the slowpath. 
+ */ + crtc_state->active_planes = new_crtc_state->active_planes; + + if (new_plane_state->uapi.visible) + intel_update_plane(plane, crtc_state, new_plane_state); + else + intel_disable_plane(plane, crtc_state); + + intel_plane_unpin_fb(old_plane_state); + +out_free: + if (new_crtc_state) + intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi); + if (ret) + intel_plane_destroy_state(&plane->base, &new_plane_state->uapi); + else + intel_plane_destroy_state(&plane->base, &old_plane_state->uapi); + return ret; + +slow: + return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb, + crtc_x, crtc_y, crtc_w, crtc_h, + src_x, src_y, src_w, src_h, ctx); +} + +static const struct drm_plane_funcs intel_cursor_plane_funcs = { + .update_plane = intel_legacy_cursor_update, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = intel_cursor_format_mod_supported, +}; + +struct intel_plane * +intel_cursor_plane_create(struct drm_i915_private *dev_priv, + enum pipe pipe) +{ + struct intel_plane *cursor; + int ret, zpos; + + cursor = intel_plane_alloc(); + if (IS_ERR(cursor)) + return cursor; + + cursor->pipe = pipe; + cursor->i9xx_plane = (enum i9xx_plane_id) pipe; + cursor->id = PLANE_CURSOR; + cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id); + + if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { + cursor->max_stride = i845_cursor_max_stride; + cursor->update_plane = i845_update_cursor; + cursor->disable_plane = i845_disable_cursor; + cursor->get_hw_state = i845_cursor_get_hw_state; + cursor->check_plane = i845_check_cursor; + } else { + cursor->max_stride = i9xx_cursor_max_stride; + cursor->update_plane = i9xx_update_cursor; + cursor->disable_plane = i9xx_disable_cursor; + cursor->get_hw_state = i9xx_cursor_get_hw_state; + cursor->check_plane = i9xx_check_cursor; + } + + cursor->cursor.base = ~0; + cursor->cursor.cntl = ~0; + + if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) + cursor->cursor.size = ~0; + + ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, + 0, &intel_cursor_plane_funcs, + intel_cursor_formats, + ARRAY_SIZE(intel_cursor_formats), + cursor_format_modifiers, + DRM_PLANE_TYPE_CURSOR, + "cursor %c", pipe_name(pipe)); + if (ret) + goto fail; + + if (INTEL_GEN(dev_priv) >= 4) + drm_plane_create_rotation_property(&cursor->base, + DRM_MODE_ROTATE_0, + DRM_MODE_ROTATE_0 | + DRM_MODE_ROTATE_180); + + zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; + drm_plane_create_zpos_immutable_property(&cursor->base, zpos); + + if (INTEL_GEN(dev_priv) >= 12) + drm_plane_enable_fb_damage_clips(&cursor->base); + + drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); + + return cursor; + +fail: + intel_plane_free(cursor); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/i915/display/intel_cursor.h b/drivers/gpu/drm/i915/display/intel_cursor.h new file mode 100644 index 000000000000..ce333bf4c2d5 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_cursor.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef _INTEL_CURSOR_H_ +#define _INTEL_CURSOR_H_ + +enum pipe; +struct drm_i915_private; +struct intel_plane; + +struct intel_plane * +intel_cursor_plane_create(struct drm_i915_private *dev_priv, + enum pipe pipe); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c 
b/drivers/gpu/drm/i915/display/intel_ddi.c index 92940a0c5ef8..ef9848001418 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -611,6 +611,34 @@ static const struct cnl_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2[] { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ }; +static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = { + /* NT mV Trans mV db */ + { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */ + { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */ + { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ + { 0xC, 0x60, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct cnl_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { + /* NT mV Trans mV db */ + { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x48, 0x35, 0x00, 0x0A }, /* 350 500 3.1 */ + { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */ + { 0xA, 0x43, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x60, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */ + { 0xC, 0x58, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + struct icl_mg_phy_ddi_buf_trans { u32 cri_txdeemph_override_11_6; u32 cri_txdeemph_override_5_0; @@ -766,6 +794,34 @@ static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_ho { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 2 1 */ }; +static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr[] = { + /* NT mV Trans mV db */ + { 0xA, 0x2F, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */ + { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */ + { 0x6, 0x7D, 0x2A, 0x00, 0x15 }, /* 350 900 8.2 */ + { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x6E, 0x3E, 0x00, 0x01 }, /* 650 700 0.6 */ + { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + +static const struct cnl_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = { + /* NT mV Trans mV db */ + { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */ + { 0xA, 0x50, 0x38, 0x00, 0x07 }, /* 350 500 3.1 */ + { 0xC, 0x61, 0x33, 0x00, 0x0C }, /* 350 700 6.0 */ + { 0x6, 0x7F, 0x2E, 0x00, 0x11 }, /* 350 900 8.2 */ + { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */ + { 0xC, 0x5F, 0x38, 0x00, 0x07 }, /* 500 700 2.9 */ + { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */ + { 0xC, 0x5F, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */ + { 0x6, 0x7E, 0x36, 0x00, 0x09 }, /* 600 900 3.5 */ + { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */ +}; + static bool is_hobl_buf_trans(const struct cnl_ddi_buf_trans *table) { return table == tgl_combo_phy_ddi_translations_edp_hbr2_hobl; @@ -1093,6 +1149,12 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder, } else if (dev_priv->vbt.edp.low_vswing) { *n_entries = ARRAY_SIZE(icl_combo_phy_ddi_translations_edp_hbr2); return icl_combo_phy_ddi_translations_edp_hbr2; + } else 
if (IS_DG1(dev_priv) && crtc_state->port_clock > 270000) { + *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_hbr2_hbr3); + return dg1_combo_phy_ddi_translations_dp_hbr2_hbr3; + } else if (IS_DG1(dev_priv)) { + *n_entries = ARRAY_SIZE(dg1_combo_phy_ddi_translations_dp_rbr_hbr); + return dg1_combo_phy_ddi_translations_dp_rbr_hbr; } return icl_get_combo_buf_trans_dp(encoder, crtc_state, n_entries); @@ -1259,7 +1321,10 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); if (crtc_state->port_clock > 270000) { - if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { + if (IS_ROCKETLAKE(dev_priv)) { + *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr2_hbr3); + return rkl_combo_phy_ddi_translations_dp_hbr2_hbr3; + } else if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { *n_entries = ARRAY_SIZE(tgl_uy_combo_phy_ddi_translations_dp_hbr2); return tgl_uy_combo_phy_ddi_translations_dp_hbr2; } else { @@ -1267,8 +1332,13 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder, return tgl_combo_phy_ddi_translations_dp_hbr2; } } else { - *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr); - return tgl_combo_phy_ddi_translations_dp_hbr; + if (IS_ROCKETLAKE(dev_priv)) { + *n_entries = ARRAY_SIZE(rkl_combo_phy_ddi_translations_dp_hbr); + return rkl_combo_phy_ddi_translations_dp_hbr; + } else { + *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr); + return tgl_combo_phy_ddi_translations_dp_hbr; + } } } @@ -2285,18 +2355,23 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, dig_port = enc_to_dig_port(encoder); if (!intel_phy_is_tc(dev_priv, phy) || - dig_port->tc_mode != TC_PORT_TBT_ALT) - intel_display_power_get(dev_priv, - dig_port->ddi_io_power_domain); + dig_port->tc_mode != TC_PORT_TBT_ALT) { + drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); + dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, + dig_port->ddi_io_power_domain); + } /* * AUX power is only needed for (e)DP mode, and for HDMI mode on TC * ports. */ if (intel_crtc_has_dp_encoder(crtc_state) || - intel_phy_is_tc(dev_priv, phy)) - intel_display_power_get(dev_priv, - intel_ddi_main_link_aux_domain(dig_port)); + intel_phy_is_tc(dev_priv, phy)) { + drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref); + dig_port->aux_wakeref = + intel_display_power_get(dev_priv, + intel_ddi_main_link_aux_domain(dig_port)); + } } void intel_ddi_enable_pipe_clock(struct intel_encoder *encoder, @@ -3507,12 +3582,6 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, val = intel_de_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); val |= DP_TP_CTL_FEC_ENABLE; intel_de_write(dev_priv, dp_tp_ctl_reg(encoder, crtc_state), val); - - if (intel_de_wait_for_set(dev_priv, - dp_tp_status_reg(encoder, crtc_state), - DP_TP_STATUS_FEC_ENABLE_LIVE, 1)) - drm_err(&dev_priv->drm, - "Timed out waiting for FEC Enable Status\n"); } static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, @@ -3577,9 +3646,11 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, /* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */ if (!intel_phy_is_tc(dev_priv, phy) || - dig_port->tc_mode != TC_PORT_TBT_ALT) - intel_display_power_get(dev_priv, - dig_port->ddi_io_power_domain); + dig_port->tc_mode != TC_PORT_TBT_ALT) { + drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); + dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, + dig_port->ddi_io_power_domain); + } /* 6. 
Program DP_MODE */ icl_program_mg_dp_mode(dig_port, crtc_state); @@ -3643,6 +3714,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, if (!is_mst) intel_dp_set_power(intel_dp, DP_SET_POWER_D0); + intel_dp_configure_protocol_converter(intel_dp, crtc_state); intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); /* * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit @@ -3651,6 +3723,9 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, */ intel_dp_sink_set_fec_ready(intel_dp, crtc_state); + intel_dp_check_frl_training(intel_dp); + intel_dp_pcon_dsc_configure(intel_dp, crtc_state); + /* * 7.i Follow DisplayPort specification training sequence (see notes for * failure handling) @@ -3698,9 +3773,11 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_clk_select(encoder, crtc_state); if (!intel_phy_is_tc(dev_priv, phy) || - dig_port->tc_mode != TC_PORT_TBT_ALT) - intel_display_power_get(dev_priv, - dig_port->ddi_io_power_domain); + dig_port->tc_mode != TC_PORT_TBT_ALT) { + drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); + dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, + dig_port->ddi_io_power_domain); + } icl_program_mg_dp_mode(dig_port, crtc_state); @@ -3725,7 +3802,7 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_init_dp_buf_reg(encoder, crtc_state); if (!is_mst) intel_dp_set_power(intel_dp, DP_SET_POWER_D0); - intel_dp_configure_protocol_converter(intel_dp); + intel_dp_configure_protocol_converter(intel_dp, crtc_state); intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); intel_dp_sink_set_fec_ready(intel_dp, crtc_state); @@ -3778,7 +3855,9 @@ static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); intel_ddi_clk_select(encoder, crtc_state); - intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); + drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref); + dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, + dig_port->ddi_io_power_domain); icl_program_mg_dp_mode(dig_port, crtc_state); @@ -3936,8 +4015,9 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state, if (!intel_phy_is_tc(dev_priv, phy) || dig_port->tc_mode != TC_PORT_TBT_ALT) - intel_display_power_put_unchecked(dev_priv, - dig_port->ddi_io_power_domain); + intel_display_power_put(dev_priv, + dig_port->ddi_io_power_domain, + fetch_and_zero(&dig_port->ddi_io_wakeref)); intel_ddi_clk_disable(encoder); } @@ -3958,8 +4038,9 @@ static void intel_ddi_post_disable_hdmi(struct intel_atomic_state *state, intel_disable_ddi_buf(encoder, old_crtc_state); - intel_display_power_put_unchecked(dev_priv, - dig_port->ddi_io_power_domain); + intel_display_power_put(dev_priv, + dig_port->ddi_io_power_domain, + fetch_and_zero(&dig_port->ddi_io_wakeref)); intel_ddi_clk_disable(encoder); @@ -4032,8 +4113,9 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state, icl_unmap_plls_to_ports(encoder); if (intel_crtc_has_dp_encoder(old_crtc_state) || is_tc_port) - intel_display_power_put_unchecked(dev_priv, - intel_ddi_main_link_aux_domain(dig_port)); + intel_display_power_put(dev_priv, + intel_ddi_main_link_aux_domain(dig_port), + fetch_and_zero(&dig_port->aux_wakeref)); if (is_tc_port) intel_tc_port_put_link(dig_port); @@ -4118,6 +4200,7 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct 
intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); enum port port = encoder->port; if (port == PORT_A && INTEL_GEN(dev_priv) < 9) @@ -4125,7 +4208,10 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state, intel_edp_backlight_on(crtc_state, conn_state); intel_psr_enable(intel_dp, crtc_state, conn_state); - intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); + + if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink) + intel_dp_set_infoframes(encoder, true, crtc_state, conn_state); + intel_edp_drrs_enable(intel_dp, crtc_state); if (crtc_state->has_audio) @@ -4368,9 +4454,12 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state, if (is_tc_port) intel_tc_port_get_link(dig_port, crtc_state->lane_count); - if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) - intel_display_power_get(dev_priv, - intel_ddi_main_link_aux_domain(dig_port)); + if (intel_crtc_has_dp_encoder(crtc_state) || is_tc_port) { + drm_WARN_ON(&dev_priv->drm, dig_port->aux_wakeref); + dig_port->aux_wakeref = + intel_display_power_get(dev_priv, + intel_ddi_main_link_aux_domain(dig_port)); + } if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT) /* @@ -4583,6 +4672,7 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->uapi.crtc); enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); u32 temp, flags = 0; temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)); @@ -4657,9 +4747,12 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, pipe_config->fec_enable); } - pipe_config->infoframes.enable |= - intel_hdmi_infoframes_enabled(encoder, pipe_config); - + if (dig_port->lspcon.active && dig_port->dp.has_hdmi_sink) + pipe_config->infoframes.enable |= + intel_lspcon_infoframes_enabled(encoder, pipe_config); + else + pipe_config->infoframes.enable |= + intel_hdmi_infoframes_enabled(encoder, pipe_config); break; case TRANS_DDI_MODE_SELECT_DP_MST: pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST); @@ -5262,8 +5355,7 @@ intel_ddi_max_lanes(struct intel_digital_port *dig_port) static bool hti_uses_phy(struct drm_i915_private *i915, enum phy phy) { return i915->hti_state & HDPORT_ENABLED && - (i915->hti_state & HDPORT_PHY_USED_DP(phy) || - i915->hti_state & HDPORT_PHY_USED_HDMI(phy)); + i915->hti_state & HDPORT_DDI_USED(phy); } static enum hpd_pin dg1_hpd_pin(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ba26545392bc..0189d379a55e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -45,6 +45,7 @@ #include "display/intel_crt.h" #include "display/intel_ddi.h" +#include "display/intel_display_debugfs.h" #include "display/intel_dp.h" #include "display/intel_dp_mst.h" #include "display/intel_dpll_mgr.h" @@ -68,6 +69,7 @@ #include "intel_cdclk.h" #include "intel_color.h" #include "intel_csr.h" +#include "intel_cursor.h" #include "intel_display_types.h" #include "intel_dp_link_training.h" #include "intel_fbc.h" @@ -85,66 +87,7 @@ #include "intel_sprite.h" #include "intel_tc.h" #include "intel_vga.h" - -/* Primary plane formats for gen <= 3 */ -static const u32 i8xx_primary_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_XRGB1555, - DRM_FORMAT_RGB565, 
- DRM_FORMAT_XRGB8888, -}; - -/* Primary plane formats for ivb (no fp16 due to hw issue) */ -static const u32 ivb_primary_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, -}; - -/* Primary plane formats for gen >= 4, except ivb */ -static const u32 i965_primary_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_XBGR16161616F, -}; - -/* Primary plane formats for vlv/chv */ -static const u32 vlv_primary_formats[] = { - DRM_FORMAT_C8, - DRM_FORMAT_RGB565, - DRM_FORMAT_XRGB8888, - DRM_FORMAT_XBGR8888, - DRM_FORMAT_ARGB8888, - DRM_FORMAT_ABGR8888, - DRM_FORMAT_XRGB2101010, - DRM_FORMAT_XBGR2101010, - DRM_FORMAT_ARGB2101010, - DRM_FORMAT_ABGR2101010, - DRM_FORMAT_XBGR16161616F, -}; - -static const u64 i9xx_format_modifiers[] = { - I915_FORMAT_MOD_X_TILED, - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; - -/* Cursor formats */ -static const u32 intel_cursor_formats[] = { - DRM_FORMAT_ARGB8888, -}; - -static const u64 cursor_format_modifiers[] = { - DRM_FORMAT_MOD_LINEAR, - DRM_FORMAT_MOD_INVALID -}; +#include "i9xx_plane.h" static void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); @@ -542,12 +485,6 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, } static bool -needs_modeset(const struct intel_crtc_state *state) -{ - return drm_atomic_crtc_needs_modeset(&state->uapi); -} - -static bool is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) { return crtc_state->master_transcoder != INVALID_TRANSCODER; @@ -994,7 +931,8 @@ chv_find_best_dpll(const struct intel_limit *limit, * set to 2. If requires to support 200Mhz refclk, we need to * revisit this because n may not 1 anymore. */ - clock.n = 1, clock.m1 = 2; + clock.n = 1; + clock.m1 = 2; target *= 5; /* fast clock */ for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) { @@ -2532,9 +2470,9 @@ static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv, return offset_aligned; } -static u32 intel_plane_compute_aligned_offset(int *x, int *y, - const struct intel_plane_state *state, - int color_plane) +u32 intel_plane_compute_aligned_offset(int *x, int *y, + const struct intel_plane_state *state, + int color_plane) { struct intel_plane *intel_plane = to_intel_plane(state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev); @@ -3271,7 +3209,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state) } } -static int +int intel_plane_compute_gtt(struct intel_plane_state *plane_state) { const struct intel_framebuffer *fb = @@ -3551,7 +3489,7 @@ intel_set_plane_visible(struct intel_crtc_state *crtc_state, crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base); } -static void fixup_active_planes(struct intel_crtc_state *crtc_state) +static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); struct drm_plane *plane; @@ -3561,11 +3499,14 @@ static void fixup_active_planes(struct intel_crtc_state *crtc_state) * have been used on the same (or wrong) pipe. plane_mask uses * unique ids, hence we can use that to reconstruct active_planes. 
*/ + crtc_state->enabled_planes = 0; crtc_state->active_planes = 0; drm_for_each_plane_mask(plane, &dev_priv->drm, - crtc_state->uapi.plane_mask) + crtc_state->uapi.plane_mask) { + crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id); crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); + } } static void intel_plane_disable_noatomic(struct intel_crtc *crtc, @@ -3583,7 +3524,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, crtc->base.base.id, crtc->base.name); intel_set_plane_visible(crtc_state, plane_state, false); - fixup_active_planes(crtc_state); + fixup_plane_bitmasks(crtc_state); crtc_state->data_rate[plane->id] = 0; crtc_state->min_cdclk[plane->id] = 0; @@ -3613,12 +3554,6 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, intel_disable_plane(plane, crtc_state); } -static struct intel_frontbuffer * -to_intel_frontbuffer(struct drm_framebuffer *fb) -{ - return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL; -} - static void intel_find_initial_plane_obj(struct intel_crtc *intel_crtc, struct intel_initial_plane_config *plane_config) @@ -3817,33 +3752,19 @@ static int intel_plane_max_height(struct intel_plane *plane, return INT_MAX; } -static int skl_check_main_surface(struct intel_plane_state *plane_state) +int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state, + int *x, int *y, u32 *offset) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_framebuffer *fb = plane_state->hw.fb; - unsigned int rotation = plane_state->hw.rotation; - int x = plane_state->uapi.src.x1 >> 16; - int y = plane_state->uapi.src.y1 >> 16; - int w = drm_rect_width(&plane_state->uapi.src) >> 16; - int h = drm_rect_height(&plane_state->uapi.src) >> 16; - int min_width = intel_plane_min_width(plane, fb, 0, rotation); - int max_width = intel_plane_max_width(plane, fb, 0, rotation); - int max_height = intel_plane_max_height(plane, fb, 0, rotation); - int aux_plane = intel_main_to_aux_plane(fb, 0); - u32 aux_offset = plane_state->color_plane[aux_plane].offset; - u32 alignment, offset; + const int aux_plane = intel_main_to_aux_plane(fb, 0); + const u32 aux_offset = plane_state->color_plane[aux_plane].offset; + const u32 alignment = intel_surf_alignment(fb, 0); + const int w = drm_rect_width(&plane_state->uapi.src) >> 16; - if (w > max_width || w < min_width || h > max_height) { - drm_dbg_kms(&dev_priv->drm, - "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n", - w, h, min_width, max_width, max_height); - return -EINVAL; - } - - intel_add_fb_offsets(&x, &y, plane_state, 0); - offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0); - alignment = intel_surf_alignment(fb, 0); + intel_add_fb_offsets(x, y, plane_state, 0); + *offset = intel_plane_compute_aligned_offset(x, y, plane_state, 0); if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment))) return -EINVAL; @@ -3852,9 +3773,10 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) * main surface offset, and it must be non-negative. Make * sure that is what we will get. 
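The refactored skl_calc_main_surface_offset() keeps the main surface offset at or below the AUX surface offset by rounding the latter down to the surface alignment, which is required to be a power of two. The rounding is the usual mask trick; a small standalone C sketch under that power-of-two assumption (the values are made up):

#include <assert.h>
#include <stdio.h>

static int is_power_of_2(unsigned int x)
{
	return x && !(x & (x - 1));
}

/*
 * Round an offset down to a power-of-two alignment, the
 * "aux_offset & ~(alignment - 1)" expression from the hunk.
 */
static unsigned int align_down(unsigned int offset, unsigned int alignment)
{
	assert(is_power_of_2(alignment));
	return offset & ~(alignment - 1);
}

int main(void)
{
	/* Illustrative numbers only. */
	printf("%#x -> %#x\n", 0x12345u, align_down(0x12345, 0x1000));
	return 0;
}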
*/ - if (aux_plane && offset > aux_offset) - offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, - offset, aux_offset & ~(alignment - 1)); + if (aux_plane && *offset > aux_offset) + *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0, + *offset, + aux_offset & ~(alignment - 1)); /* * When using an X-tiled surface, the plane blows up @@ -3865,18 +3787,51 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) if (fb->modifier == I915_FORMAT_MOD_X_TILED) { int cpp = fb->format->cpp[0]; - while ((x + w) * cpp > plane_state->color_plane[0].stride) { - if (offset == 0) { + while ((*x + w) * cpp > plane_state->color_plane[0].stride) { + if (*offset == 0) { drm_dbg_kms(&dev_priv->drm, "Unable to find suitable display surface offset due to X-tiling\n"); return -EINVAL; } - offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0, - offset, offset - alignment); + *offset = intel_plane_adjust_aligned_offset(x, y, plane_state, 0, + *offset, + *offset - alignment); } } + return 0; +} + +static int skl_check_main_surface(struct intel_plane_state *plane_state) +{ + struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + const unsigned int rotation = plane_state->hw.rotation; + int x = plane_state->uapi.src.x1 >> 16; + int y = plane_state->uapi.src.y1 >> 16; + const int w = drm_rect_width(&plane_state->uapi.src) >> 16; + const int h = drm_rect_height(&plane_state->uapi.src) >> 16; + const int min_width = intel_plane_min_width(plane, fb, 0, rotation); + const int max_width = intel_plane_max_width(plane, fb, 0, rotation); + const int max_height = intel_plane_max_height(plane, fb, 0, rotation); + const int aux_plane = intel_main_to_aux_plane(fb, 0); + const u32 alignment = intel_surf_alignment(fb, 0); + u32 offset; + int ret; + + if (w > max_width || w < min_width || h > max_height) { + drm_dbg_kms(&dev_priv->drm, + "requested Y/RGB source size %dx%d outside limits (min: %dx1 max: %dx%d)\n", + w, h, min_width, max_width, max_height); + return -EINVAL; + } + + ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset); + if (ret) + return ret; + /* * CCS AUX surface doesn't have its own x/y offsets, we must make sure * they match with the main surface x/y offsets. @@ -4063,422 +4018,6 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) return 0; } -static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - unsigned int *num, unsigned int *den) -{ - const struct drm_framebuffer *fb = plane_state->hw.fb; - unsigned int cpp = fb->format->cpp[0]; - - /* - * g4x bspec says 64bpp pixel rate can't exceed 80% - * of cdclk when the sprite plane is enabled on the - * same pipe. ilk/snb bspec says 64bpp pixel rate is - * never allowed to exceed 80% of cdclk. Let's just go - * with the ilk/snb limit always. - */ - if (cpp == 8) { - *num = 10; - *den = 8; - } else { - *num = 1; - *den = 1; - } -} - -static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - unsigned int pixel_rate; - unsigned int num, den; - - /* - * Note that crtc_state->pixel_rate accounts for both - * horizontal and vertical panel fitter downscaling factors. - * Pre-HSW bspec tells us to only consider the horizontal - * downscaling factor here. We ignore that and just consider - * both for simplicity. 
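The i9xx_plane_min_cdclk() being moved out computes the minimum CDCLK as the pixel rate scaled by a format-dependent num/den ratio, with the denominator doubled for double-wide pipes. A standalone C restatement of that arithmetic (the sample pixel rate is illustrative):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/*
 * 64bpp (cpp == 8) pixel rate may not exceed 80% of cdclk,
 * i.e. cdclk >= pixel_rate * 10 / 8; all other formats are 1:1.
 */
static void plane_ratio(unsigned int cpp, unsigned int *num, unsigned int *den)
{
	if (cpp == 8) {
		*num = 10;
		*den = 8;
	} else {
		*num = 1;
		*den = 1;
	}
}

static unsigned int min_cdclk(unsigned int pixel_rate, unsigned int cpp,
			      int double_wide)
{
	unsigned int num, den;

	plane_ratio(cpp, &num, &den);

	/* two pixels per clock with a double wide pipe */
	if (double_wide)
		den *= 2;

	return DIV_ROUND_UP(pixel_rate * num, den);
}

int main(void)
{
	/* 148500 kHz is an illustrative 1080p pixel rate. */
	printf("%u kHz\n", min_cdclk(148500, 8, 0));
	return 0;
}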
- */ - pixel_rate = crtc_state->pixel_rate; - - i9xx_plane_ratio(crtc_state, plane_state, &num, &den); - - /* two pixels per clock with double wide pipe */ - if (crtc_state->double_wide) - den *= 2; - - return DIV_ROUND_UP(pixel_rate * num, den); -} - -unsigned int -i9xx_plane_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - - if (!HAS_GMCH(dev_priv)) { - return 32*1024; - } else if (INTEL_GEN(dev_priv) >= 4) { - if (modifier == I915_FORMAT_MOD_X_TILED) - return 16*1024; - else - return 32*1024; - } else if (INTEL_GEN(dev_priv) >= 3) { - if (modifier == I915_FORMAT_MOD_X_TILED) - return 8*1024; - else - return 16*1024; - } else { - if (plane->i9xx_plane == PLANE_C) - return 4*1024; - else - return 8*1024; - } -} - -static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 dspcntr = 0; - - if (crtc_state->gamma_enable) - dspcntr |= DISPPLANE_GAMMA_ENABLE; - - if (crtc_state->csc_enable) - dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; - - if (INTEL_GEN(dev_priv) < 5) - dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); - - return dspcntr; -} - -static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = - to_i915(plane_state->uapi.plane->dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - unsigned int rotation = plane_state->hw.rotation; - u32 dspcntr; - - dspcntr = DISPLAY_PLANE_ENABLE; - - if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) || - IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) - dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; - - switch (fb->format->format) { - case DRM_FORMAT_C8: - dspcntr |= DISPPLANE_8BPP; - break; - case DRM_FORMAT_XRGB1555: - dspcntr |= DISPPLANE_BGRX555; - break; - case DRM_FORMAT_ARGB1555: - dspcntr |= DISPPLANE_BGRA555; - break; - case DRM_FORMAT_RGB565: - dspcntr |= DISPPLANE_BGRX565; - break; - case DRM_FORMAT_XRGB8888: - dspcntr |= DISPPLANE_BGRX888; - break; - case DRM_FORMAT_XBGR8888: - dspcntr |= DISPPLANE_RGBX888; - break; - case DRM_FORMAT_ARGB8888: - dspcntr |= DISPPLANE_BGRA888; - break; - case DRM_FORMAT_ABGR8888: - dspcntr |= DISPPLANE_RGBA888; - break; - case DRM_FORMAT_XRGB2101010: - dspcntr |= DISPPLANE_BGRX101010; - break; - case DRM_FORMAT_XBGR2101010: - dspcntr |= DISPPLANE_RGBX101010; - break; - case DRM_FORMAT_ARGB2101010: - dspcntr |= DISPPLANE_BGRA101010; - break; - case DRM_FORMAT_ABGR2101010: - dspcntr |= DISPPLANE_RGBA101010; - break; - case DRM_FORMAT_XBGR16161616F: - dspcntr |= DISPPLANE_RGBX161616; - break; - default: - MISSING_CASE(fb->format->format); - return 0; - } - - if (INTEL_GEN(dev_priv) >= 4 && - fb->modifier == I915_FORMAT_MOD_X_TILED) - dspcntr |= DISPPLANE_TILED; - - if (rotation & DRM_MODE_ROTATE_180) - dspcntr |= DISPPLANE_ROTATE_180; - - if (rotation & DRM_MODE_REFLECT_X) - dspcntr |= DISPPLANE_MIRROR; - - return dspcntr; -} - -int i9xx_check_plane_surface(struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = - to_i915(plane_state->uapi.plane->dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - int src_x, src_y, src_w; - u32 offset; - int ret; - - ret = intel_plane_compute_gtt(plane_state); - if (ret) - return ret; - - if (!plane_state->uapi.visible) - return 0; - - src_w = drm_rect_width(&plane_state->uapi.src) >> 16; - 
src_x = plane_state->uapi.src.x1 >> 16; - src_y = plane_state->uapi.src.y1 >> 16; - - /* Undocumented hardware limit on i965/g4x/vlv/chv */ - if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048) - return -EINVAL; - - intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); - - if (INTEL_GEN(dev_priv) >= 4) - offset = intel_plane_compute_aligned_offset(&src_x, &src_y, - plane_state, 0); - else - offset = 0; - - /* - * Put the final coordinates back so that the src - * coordinate checks will see the right values. - */ - drm_rect_translate_to(&plane_state->uapi.src, - src_x << 16, src_y << 16); - - /* HSW/BDW do this automagically in hardware */ - if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { - unsigned int rotation = plane_state->hw.rotation; - int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; - int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - - if (rotation & DRM_MODE_ROTATE_180) { - src_x += src_w - 1; - src_y += src_h - 1; - } else if (rotation & DRM_MODE_REFLECT_X) { - src_x += src_w - 1; - } - } - - plane_state->color_plane[0].offset = offset; - plane_state->color_plane[0].x = src_x; - plane_state->color_plane[0].y = src_y; - - return 0; -} - -static bool i9xx_plane_has_windowing(struct intel_plane *plane) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - - if (IS_CHERRYVIEW(dev_priv)) - return i9xx_plane == PLANE_B; - else if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) - return false; - else if (IS_GEN(dev_priv, 4)) - return i9xx_plane == PLANE_C; - else - return i9xx_plane == PLANE_B || - i9xx_plane == PLANE_C; -} - -static int -i9xx_plane_check(struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - int ret; - - ret = chv_plane_check_rotation(plane_state); - if (ret) - return ret; - - ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - i9xx_plane_has_windowing(plane)); - if (ret) - return ret; - - ret = i9xx_check_plane_surface(plane_state); - if (ret) - return ret; - - if (!plane_state->uapi.visible) - return 0; - - ret = intel_plane_check_src_coordinates(plane_state); - if (ret) - return ret; - - plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); - - return 0; -} - -static void i9xx_update_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - u32 linear_offset; - int x = plane_state->color_plane[0].x; - int y = plane_state->color_plane[0].y; - int crtc_x = plane_state->uapi.dst.x1; - int crtc_y = plane_state->uapi.dst.y1; - int crtc_w = drm_rect_width(&plane_state->uapi.dst); - int crtc_h = drm_rect_height(&plane_state->uapi.dst); - unsigned long irqflags; - u32 dspaddr_offset; - u32 dspcntr; - - dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); - - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - - if (INTEL_GEN(dev_priv) >= 4) - dspaddr_offset = plane_state->color_plane[0].offset; - else - dspaddr_offset = linear_offset; - - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - - intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane), - plane_state->color_plane[0].stride); - - if (INTEL_GEN(dev_priv) < 4) { - /* - * PLANE_A doesn't actually have a full window - * generator 
but let's assume we still need to - * program whatever is there. - */ - intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane), - (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), - ((crtc_h - 1) << 16) | (crtc_w - 1)); - } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) { - intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), - (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), - ((crtc_h - 1) << 16) | (crtc_w - 1)); - intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0); - } - - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane), - (y << 16) | x); - } else if (INTEL_GEN(dev_priv) >= 4) { - intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane), - linear_offset); - intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane), - (y << 16) | x); - } - - /* - * The control register self-arms if the plane was previously - * disabled. Try to make the plane enable atomic by writing - * the control register just before the surface register. - */ - intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); - if (INTEL_GEN(dev_priv) >= 4) - intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), - intel_plane_ggtt_offset(plane_state) + dspaddr_offset); - else - intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), - intel_plane_ggtt_offset(plane_state) + dspaddr_offset); - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); -} - -static void i9xx_disable_plane(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - unsigned long irqflags; - u32 dspcntr; - - /* - * DSPCNTR pipe gamma enable on g4x+ and pipe csc - * enable on ilk+ affect the pipe bottom color as - * well, so we must configure them even if the plane - * is disabled. - * - * On pre-g4x there is no way to gamma correct the - * pipe bottom color but we'll keep on doing this - * anyway so that the crtc state readout works correctly. - */ - dspcntr = i9xx_plane_ctl_crtc(crtc_state); - - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - - intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); - if (INTEL_GEN(dev_priv) >= 4) - intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0); - else - intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0); - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); -} - -static bool i9xx_plane_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum intel_display_power_domain power_domain; - enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - intel_wakeref_t wakeref; - bool ret; - u32 val; - - /* - * Not 100% correct for planes that can move between pipes, - * but that's only the case for gen2-4 which don't have any - * display power wells. 
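i9xx_update_plane() above depends on write ordering for atomicity: DSPCNTR self-arms when the plane was previously disabled, so it is written immediately before the DSPSURF/DSPADDR write that normally latches the update. The buffer-everything-then-arm pattern can be sketched in standalone C with a mock register file (the register names and the mmio stub below are stand-ins, not the real i915 accessors):

#include <stdio.h>

enum { REG_STRIDE, REG_POS, REG_CNTR, REG_SURF, NUM_REGS };

static unsigned int regs[NUM_REGS];

/*
 * Stand-in for a posted mmio write; the pretend hardware latches
 * the accumulated plane state when REG_SURF is written.
 */
static void mmio_write(int reg, unsigned int val)
{
	regs[reg] = val;
	if (reg == REG_SURF)
		printf("update armed: cntr=%#x surf=%#x\n",
		       regs[REG_CNTR], regs[REG_SURF]);
}

static void update_plane(unsigned int stride, unsigned int pos,
			 unsigned int cntr, unsigned int surf)
{
	mmio_write(REG_STRIDE, stride);
	mmio_write(REG_POS, pos);

	/*
	 * Control goes just before the surface address so that a
	 * plane enable and its new surface take effect together.
	 */
	mmio_write(REG_CNTR, cntr);
	mmio_write(REG_SURF, surf);
}

int main(void)
{
	update_plane(4096, (100 << 16) | 200, 0x80000000u, 0x10000);
	return 0;
}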
- */ - power_domain = POWER_DOMAIN_PIPE(plane->pipe); - wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); - if (!wakeref) - return false; - - val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); - - ret = val & DISPLAY_PLANE_ENABLE; - - if (INTEL_GEN(dev_priv) >= 5) - *pipe = plane->pipe; - else - *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> - DISPPLANE_SEL_PIPE_SHIFT; - - intel_display_power_put(dev_priv, power_domain, wakeref); - - return ret; -} - static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) { struct drm_device *dev = intel_crtc->base.dev; @@ -6470,7 +6009,7 @@ static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_s if (!old_crtc_state->ips_enabled) return false; - if (needs_modeset(new_crtc_state)) + if (intel_crtc_needs_modeset(new_crtc_state)) return true; /* @@ -6497,7 +6036,7 @@ static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_s if (!new_crtc_state->ips_enabled) return false; - if (needs_modeset(new_crtc_state)) + if (intel_crtc_needs_modeset(new_crtc_state)) return true; /* @@ -6550,7 +6089,7 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state) static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { - return (!old_crtc_state->active_planes || needs_modeset(new_crtc_state)) && + return (!old_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)) && new_crtc_state->active_planes; } @@ -6558,7 +6097,7 @@ static bool planes_disabling(const struct intel_crtc_state *old_crtc_state, const struct intel_crtc_state *new_crtc_state) { return old_crtc_state->active_planes && - (!new_crtc_state->active_planes || needs_modeset(new_crtc_state)); + (!new_crtc_state->active_planes || intel_crtc_needs_modeset(new_crtc_state)); } static void intel_post_plane_update(struct intel_atomic_state *state, @@ -6681,7 +6220,7 @@ static void intel_pre_plane_update(struct intel_atomic_state *state, * If we're doing a modeset we don't need to do any * pre-vblank watermark programming here. */ - if (!needs_modeset(new_crtc_state)) { + if (!intel_crtc_needs_modeset(new_crtc_state)) { /* * For platforms that support atomic watermarks, program the * 'intermediate' watermarks immediately. 
On pre-gen9 platforms, these @@ -7575,25 +7114,25 @@ modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state) enum intel_display_power_domain domain; u64 domains, new_domains, old_domains; - old_domains = crtc->enabled_power_domains; - crtc->enabled_power_domains = new_domains = - get_crtc_power_domains(crtc_state); + domains = get_crtc_power_domains(crtc_state); - domains = new_domains & ~old_domains; + new_domains = domains & ~crtc->enabled_power_domains.mask; + old_domains = crtc->enabled_power_domains.mask & ~domains; - for_each_power_domain(domain, domains) - intel_display_power_get(dev_priv, domain); + for_each_power_domain(domain, new_domains) + intel_display_power_get_in_set(dev_priv, + &crtc->enabled_power_domains, + domain); - return old_domains & ~new_domains; + return old_domains; } -static void modeset_put_power_domains(struct drm_i915_private *dev_priv, - u64 domains) +static void modeset_put_crtc_power_domains(struct intel_crtc *crtc, + u64 domains) { - enum intel_display_power_domain domain; - - for_each_power_domain(domain, domains) - intel_display_power_put_unchecked(dev_priv, domain); + intel_display_power_put_mask_in_set(to_i915(crtc->base.dev), + &crtc->enabled_power_domains, + domains); } static void valleyview_crtc_enable(struct intel_atomic_state *state, @@ -7789,12 +7328,10 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, to_intel_dbuf_state(dev_priv->dbuf.obj.state); struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); - enum intel_display_power_domain domain; struct intel_plane *plane; struct drm_atomic_state *state; struct intel_crtc_state *temp_crtc_state; enum pipe pipe = crtc->pipe; - u64 domains; int ret; if (!crtc_state->hw.active) @@ -7850,10 +7387,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc, intel_update_watermarks(crtc); intel_disable_shared_dpll(crtc_state); - domains = crtc->enabled_power_domains; - for_each_power_domain(domain, domains) - intel_display_power_put_unchecked(dev_priv, domain); - crtc->enabled_power_domains = 0; + intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains); dev_priv->active_pipes &= ~BIT(pipe); cdclk_state->min_cdclk[pipe] = 0; @@ -11226,16 +10760,13 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port, static bool hsw_get_transcoder_state(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config, - u64 *power_domain_mask, - intel_wakeref_t *wakerefs) + struct intel_display_power_domain_set *power_domain_set) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - enum intel_display_power_domain power_domain; unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP); unsigned long enabled_panel_transcoders = 0; enum transcoder panel_transcoder; - intel_wakeref_t wf; u32 tmp; if (INTEL_GEN(dev_priv) >= 11) @@ -11306,16 +10837,10 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) && enabled_panel_transcoders != BIT(TRANSCODER_EDP)); - power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder); - drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain)); - - wf = intel_display_power_get_if_enabled(dev_priv, power_domain); - if (!wf) + if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, + POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) return false; - wakerefs[power_domain] = wf; - *power_domain_mask |= BIT_ULL(power_domain); - tmp = 
intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder)); return tmp & PIPECONF_ENABLE; @@ -11323,14 +10848,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc, static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config, - u64 *power_domain_mask, - intel_wakeref_t *wakerefs) + struct intel_display_power_domain_set *power_domain_set) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - enum intel_display_power_domain power_domain; enum transcoder cpu_transcoder; - intel_wakeref_t wf; enum port port; u32 tmp; @@ -11340,16 +10862,10 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc, else cpu_transcoder = TRANSCODER_DSI_C; - power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); - drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain)); - - wf = intel_display_power_get_if_enabled(dev_priv, power_domain); - if (!wf) + if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set, + POWER_DOMAIN_TRANSCODER(cpu_transcoder))) continue; - wakerefs[power_domain] = wf; - *power_domain_mask |= BIT_ULL(power_domain); - /* * The PLL needs to be enabled with a valid divider * configuration, otherwise accessing DSI registers will hang @@ -11432,30 +10948,22 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf; - enum intel_display_power_domain power_domain; - u64 power_domain_mask; + struct intel_display_power_domain_set power_domain_set = { }; bool active; u32 tmp; pipe_config->master_transcoder = INVALID_TRANSCODER; - power_domain = POWER_DOMAIN_PIPE(crtc->pipe); - wf = intel_display_power_get_if_enabled(dev_priv, power_domain); - if (!wf) + if (!intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set, + POWER_DOMAIN_PIPE(crtc->pipe))) return false; - wakerefs[power_domain] = wf; - power_domain_mask = BIT_ULL(power_domain); - pipe_config->shared_dpll = NULL; - active = hsw_get_transcoder_state(crtc, pipe_config, - &power_domain_mask, wakerefs); + active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_set); if (IS_GEN9_LP(dev_priv) && - bxt_get_dsi_transcoder_state(crtc, pipe_config, - &power_domain_mask, wakerefs)) { + bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_set)) { drm_WARN_ON(&dev_priv->drm, active); active = true; } @@ -11519,14 +11027,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, pipe_config->ips_linetime = REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp); - power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); - drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain)); - - wf = intel_display_power_get_if_enabled(dev_priv, power_domain); - if (wf) { - wakerefs[power_domain] = wf; - power_domain_mask |= BIT_ULL(power_domain); - + if (intel_display_power_get_in_set_if_enabled(dev_priv, &power_domain_set, + POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe))) { if (INTEL_GEN(dev_priv) >= 9) skl_get_pfit_config(pipe_config); else @@ -11560,9 +11062,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, } out: - for_each_power_domain(power_domain, power_domain_mask) - intel_display_power_put(dev_priv, - power_domain, wakerefs[power_domain]); + intel_display_power_put_all_in_set(dev_priv, &power_domain_set); return active; } @@ -11582,569 +11082,6 @@ static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state) return true; 
} -static u32 intel_cursor_base(const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = - to_i915(plane_state->uapi.plane->dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - const struct drm_i915_gem_object *obj = intel_fb_obj(fb); - u32 base; - - if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) - base = sg_dma_address(obj->mm.pages->sgl); - else - base = intel_plane_ggtt_offset(plane_state); - - return base + plane_state->color_plane[0].offset; -} - -static u32 intel_cursor_position(const struct intel_plane_state *plane_state) -{ - int x = plane_state->uapi.dst.x1; - int y = plane_state->uapi.dst.y1; - u32 pos = 0; - - if (x < 0) { - pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; - x = -x; - } - pos |= x << CURSOR_X_SHIFT; - - if (y < 0) { - pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; - y = -y; - } - pos |= y << CURSOR_Y_SHIFT; - - return pos; -} - -static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) -{ - const struct drm_mode_config *config = - &plane_state->uapi.plane->dev->mode_config; - int width = drm_rect_width(&plane_state->uapi.dst); - int height = drm_rect_height(&plane_state->uapi.dst); - - return width > 0 && width <= config->cursor_width && - height > 0 && height <= config->cursor_height; -} - -static int intel_cursor_check_surface(struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = - to_i915(plane_state->uapi.plane->dev); - unsigned int rotation = plane_state->hw.rotation; - int src_x, src_y; - u32 offset; - int ret; - - ret = intel_plane_compute_gtt(plane_state); - if (ret) - return ret; - - if (!plane_state->uapi.visible) - return 0; - - src_x = plane_state->uapi.src.x1 >> 16; - src_y = plane_state->uapi.src.y1 >> 16; - - intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); - offset = intel_plane_compute_aligned_offset(&src_x, &src_y, - plane_state, 0); - - if (src_x != 0 || src_y != 0) { - drm_dbg_kms(&dev_priv->drm, - "Arbitrary cursor panning not supported\n"); - return -EINVAL; - } - - /* - * Put the final coordinates back so that the src - * coordinate checks will see the right values. 
- */ - drm_rect_translate_to(&plane_state->uapi.src, - src_x << 16, src_y << 16); - - /* ILK+ do this automagically in hardware */ - if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) { - const struct drm_framebuffer *fb = plane_state->hw.fb; - int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; - int src_h = drm_rect_height(&plane_state->uapi.src) >> 16; - - offset += (src_h * src_w - 1) * fb->format->cpp[0]; - } - - plane_state->color_plane[0].offset = offset; - plane_state->color_plane[0].x = src_x; - plane_state->color_plane[0].y = src_y; - - return 0; -} - -static int intel_check_cursor(struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state) -{ - const struct drm_framebuffer *fb = plane_state->hw.fb; - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); - const struct drm_rect src = plane_state->uapi.src; - const struct drm_rect dst = plane_state->uapi.dst; - int ret; - - if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) { - drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n"); - return -EINVAL; - } - - ret = intel_atomic_plane_check_clipping(plane_state, crtc_state, - DRM_PLANE_HELPER_NO_SCALING, - DRM_PLANE_HELPER_NO_SCALING, - true); - if (ret) - return ret; - - /* Use the unclipped src/dst rectangles, which we program to hw */ - plane_state->uapi.src = src; - plane_state->uapi.dst = dst; - - ret = intel_cursor_check_surface(plane_state); - if (ret) - return ret; - - if (!plane_state->uapi.visible) - return 0; - - ret = intel_plane_check_src_coordinates(plane_state); - if (ret) - return ret; - - return 0; -} - -static unsigned int -i845_cursor_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) -{ - return 2048; -} - -static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) -{ - u32 cntl = 0; - - if (crtc_state->gamma_enable) - cntl |= CURSOR_GAMMA_ENABLE; - - return cntl; -} - -static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - return CURSOR_ENABLE | - CURSOR_FORMAT_ARGB | - CURSOR_STRIDE(plane_state->color_plane[0].stride); -} - -static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) -{ - int width = drm_rect_width(&plane_state->uapi.dst); - - /* - * 845g/865g are only limited by the width of their cursors, - * the height is arbitrary up to the precision of the register. 
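The i845 cursor checks here accept only widths that are multiples of 64 and one of four fixed strides; the height is essentially unconstrained. A standalone C restatement (limits taken from the hunk; the helper name is ours and the mode-config max-size check is omitted):

#include <stdio.h>

/*
 * 845g/865g: cursor width must be a multiple of 64 and the stride
 * one of 256/512/1024/2048 bytes; the height is effectively free.
 */
static int i845_cursor_params_ok(int width, unsigned int stride)
{
	if (width <= 0 || width % 64)
		return 0;

	switch (stride) {
	case 256:
	case 512:
	case 1024:
	case 2048:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("%d %d\n", i845_cursor_params_ok(64, 256),
	       i845_cursor_params_ok(60, 300));
	return 0;
}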
- */ - return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64); -} - -static int i845_check_cursor(struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state) -{ - const struct drm_framebuffer *fb = plane_state->hw.fb; - struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); - int ret; - - ret = intel_check_cursor(crtc_state, plane_state); - if (ret) - return ret; - - /* if we want to turn off the cursor ignore width and height */ - if (!fb) - return 0; - - /* Check for which cursor types we support */ - if (!i845_cursor_size_ok(plane_state)) { - drm_dbg_kms(&i915->drm, - "Cursor dimension %dx%d not supported\n", - drm_rect_width(&plane_state->uapi.dst), - drm_rect_height(&plane_state->uapi.dst)); - return -EINVAL; - } - - drm_WARN_ON(&i915->drm, plane_state->uapi.visible && - plane_state->color_plane[0].stride != fb->pitches[0]); - - switch (fb->pitches[0]) { - case 256: - case 512: - case 1024: - case 2048: - break; - default: - drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n", - fb->pitches[0]); - return -EINVAL; - } - - plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); - - return 0; -} - -static void i845_update_cursor(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - u32 cntl = 0, base = 0, pos = 0, size = 0; - unsigned long irqflags; - - if (plane_state && plane_state->uapi.visible) { - unsigned int width = drm_rect_width(&plane_state->uapi.dst); - unsigned int height = drm_rect_height(&plane_state->uapi.dst); - - cntl = plane_state->ctl | - i845_cursor_ctl_crtc(crtc_state); - - size = (height << 12) | width; - - base = intel_cursor_base(plane_state); - pos = intel_cursor_position(plane_state); - } - - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - - /* On these chipsets we can only modify the base/size/stride - * whilst the cursor is disabled. 
- */ - if (plane->cursor.base != base || - plane->cursor.size != size || - plane->cursor.cntl != cntl) { - intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0); - intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base); - intel_de_write_fw(dev_priv, CURSIZE, size); - intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); - intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl); - - plane->cursor.base = base; - plane->cursor.size = size; - plane->cursor.cntl = cntl; - } else { - intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); - } - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); -} - -static void i845_disable_cursor(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) -{ - i845_update_cursor(plane, crtc_state, NULL); -} - -static bool i845_cursor_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum intel_display_power_domain power_domain; - intel_wakeref_t wakeref; - bool ret; - - power_domain = POWER_DOMAIN_PIPE(PIPE_A); - wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); - if (!wakeref) - return false; - - ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE; - - *pipe = PIPE_A; - - intel_display_power_put(dev_priv, power_domain, wakeref); - - return ret; -} - -static unsigned int -i9xx_cursor_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation) -{ - return plane->base.dev->mode_config.cursor_width * 4; -} - -static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - u32 cntl = 0; - - if (INTEL_GEN(dev_priv) >= 11) - return cntl; - - if (crtc_state->gamma_enable) - cntl = MCURSOR_GAMMA_ENABLE; - - if (crtc_state->csc_enable) - cntl |= MCURSOR_PIPE_CSC_ENABLE; - - if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) - cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); - - return cntl; -} - -static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = - to_i915(plane_state->uapi.plane->dev); - u32 cntl = 0; - - if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) - cntl |= MCURSOR_TRICKLE_FEED_DISABLE; - - switch (drm_rect_width(&plane_state->uapi.dst)) { - case 64: - cntl |= MCURSOR_MODE_64_ARGB_AX; - break; - case 128: - cntl |= MCURSOR_MODE_128_ARGB_AX; - break; - case 256: - cntl |= MCURSOR_MODE_256_ARGB_AX; - break; - default: - MISSING_CASE(drm_rect_width(&plane_state->uapi.dst)); - return 0; - } - - if (plane_state->hw.rotation & DRM_MODE_ROTATE_180) - cntl |= MCURSOR_ROTATE_180; - - return cntl; -} - -static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = - to_i915(plane_state->uapi.plane->dev); - int width = drm_rect_width(&plane_state->uapi.dst); - int height = drm_rect_height(&plane_state->uapi.dst); - - if (!intel_cursor_size_ok(plane_state)) - return false; - - /* Cursor width is limited to a few power-of-two sizes */ - switch (width) { - case 256: - case 128: - case 64: - break; - default: - return false; - } - - /* - * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor - * height from 8 lines up to the cursor width, when the - * cursor is not rotated. Everything else requires square - * cursors. 
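i9xx_cursor_size_ok() encodes two rules: the width must match one of the hardware cursor modes (64/128/256), and the cursor must be square unless CUR_FBC_CTL is usable (an unrotated cursor on hardware that has it), in which case the height may range from 8 lines up to the width. A standalone C restatement (has_cur_fbc and rotated are plain parameters here rather than platform and rotation checks):

#include <stdio.h>

static int i9xx_cursor_size_ok(int width, int height,
			       int has_cur_fbc, int rotated)
{
	/* Only these widths have a hardware cursor mode. */
	if (width != 64 && width != 128 && width != 256)
		return 0;

	/* CUR_FBC_CTL: arbitrary height from 8 lines up to the width. */
	if (has_cur_fbc && !rotated)
		return height >= 8 && height <= width;

	/* Everything else requires square cursors. */
	return height == width;
}

int main(void)
{
	printf("%d %d %d\n",
	       i9xx_cursor_size_ok(256, 64, 1, 0),	/* ok with CUR_FBC_CTL */
	       i9xx_cursor_size_ok(256, 64, 0, 0),	/* not square */
	       i9xx_cursor_size_ok(100, 100, 0, 0));	/* no such mode */
	return 0;
}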
- */ - if (HAS_CUR_FBC(dev_priv) && - plane_state->hw.rotation & DRM_MODE_ROTATE_0) { - if (height < 8 || height > width) - return false; - } else { - if (height != width) - return false; - } - - return true; -} - -static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, - struct intel_plane_state *plane_state) -{ - struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - const struct drm_framebuffer *fb = plane_state->hw.fb; - enum pipe pipe = plane->pipe; - int ret; - - ret = intel_check_cursor(crtc_state, plane_state); - if (ret) - return ret; - - /* if we want to turn off the cursor ignore width and height */ - if (!fb) - return 0; - - /* Check for which cursor types we support */ - if (!i9xx_cursor_size_ok(plane_state)) { - drm_dbg(&dev_priv->drm, - "Cursor dimension %dx%d not supported\n", - drm_rect_width(&plane_state->uapi.dst), - drm_rect_height(&plane_state->uapi.dst)); - return -EINVAL; - } - - drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible && - plane_state->color_plane[0].stride != fb->pitches[0]); - - if (fb->pitches[0] != - drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) { - drm_dbg_kms(&dev_priv->drm, - "Invalid cursor stride (%u) (cursor width %d)\n", - fb->pitches[0], - drm_rect_width(&plane_state->uapi.dst)); - return -EINVAL; - } - - /* - * There's something wrong with the cursor on CHV pipe C. - * If it straddles the left edge of the screen then - * moving it away from the edge or disabling it often - * results in a pipe underrun, and often that can lead to - * dead pipe (constant underrun reported, and it scans - * out just a solid color). To recover from that, the - * display power well must be turned off and on again. - * Refuse the put the cursor into that compromised position. - */ - if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && - plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) { - drm_dbg_kms(&dev_priv->drm, - "CHV cursor C not allowed to straddle the left screen edge\n"); - return -EINVAL; - } - - plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state); - - return 0; -} - -static void i9xx_update_cursor(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum pipe pipe = plane->pipe; - u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0; - unsigned long irqflags; - - if (plane_state && plane_state->uapi.visible) { - unsigned width = drm_rect_width(&plane_state->uapi.dst); - unsigned height = drm_rect_height(&plane_state->uapi.dst); - - cntl = plane_state->ctl | - i9xx_cursor_ctl_crtc(crtc_state); - - if (width != height) - fbc_ctl = CUR_FBC_CTL_EN | (height - 1); - - base = intel_cursor_base(plane_state); - pos = intel_cursor_position(plane_state); - } - - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - - /* - * On some platforms writing CURCNTR first will also - * cause CURPOS to be armed by the CURBASE write. - * Without the CURCNTR write the CURPOS write would - * arm itself. Thus we always update CURCNTR before - * CURPOS. - * - * On other platforms CURPOS always requires the - * CURBASE write to arm the update. Additonally - * a write to any of the cursor register will cancel - * an already armed cursor update. Thus leaving out - * the CURBASE write after CURPOS could lead to a - * cursor that doesn't appear to move, or even change - * shape. Thus we always write CURBASE. 
- * - * The other registers are armed by by the CURBASE write - * except when the plane is getting enabled at which time - * the CURCNTR write arms the update. - */ - - if (INTEL_GEN(dev_priv) >= 9) - skl_write_cursor_wm(plane, crtc_state); - - if (!needs_modeset(crtc_state)) - intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0); - - if (plane->cursor.base != base || - plane->cursor.size != fbc_ctl || - plane->cursor.cntl != cntl) { - if (HAS_CUR_FBC(dev_priv)) - intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe), - fbc_ctl); - intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl); - intel_de_write_fw(dev_priv, CURPOS(pipe), pos); - intel_de_write_fw(dev_priv, CURBASE(pipe), base); - - plane->cursor.base = base; - plane->cursor.size = fbc_ctl; - plane->cursor.cntl = cntl; - } else { - intel_de_write_fw(dev_priv, CURPOS(pipe), pos); - intel_de_write_fw(dev_priv, CURBASE(pipe), base); - } - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); -} - -static void i9xx_disable_cursor(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state) -{ - i9xx_update_cursor(plane, crtc_state, NULL); -} - -static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, - enum pipe *pipe) -{ - struct drm_i915_private *dev_priv = to_i915(plane->base.dev); - enum intel_display_power_domain power_domain; - intel_wakeref_t wakeref; - bool ret; - u32 val; - - /* - * Not 100% correct for planes that can move between pipes, - * but that's only the case for gen2-3 which don't have any - * display power wells. - */ - power_domain = POWER_DOMAIN_PIPE(plane->pipe); - wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); - if (!wakeref) - return false; - - val = intel_de_read(dev_priv, CURCNTR(plane->pipe)); - - ret = val & MCURSOR_MODE; - - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) - *pipe = plane->pipe; - else - *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> - MCURSOR_PIPE_SELECT_SHIFT; - - intel_display_power_put(dev_priv, power_domain, wakeref); - - return ret; -} - /* VESA 640x480x72Hz mode to set on the pipe */ static const struct drm_display_mode load_detect_mode = { DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, @@ -12651,7 +11588,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - bool mode_changed = needs_modeset(crtc_state); + bool mode_changed = intel_crtc_needs_modeset(crtc_state); bool was_crtc_enabled = old_crtc_state->hw.active; bool is_crtc_enabled = crtc_state->hw.active; bool turn_off, turn_on, visible, was_visible; @@ -12842,6 +11779,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) plane_state->planar_linked_plane = NULL; if (plane_state->planar_slave && !plane_state->uapi.visible) { + crtc_state->enabled_planes &= ~BIT(plane->id); crtc_state->active_planes &= ~BIT(plane->id); crtc_state->update_planes |= BIT(plane->id); } @@ -12885,6 +11823,7 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) linked_state->planar_slave = true; linked_state->planar_linked_plane = plane; + crtc_state->enabled_planes |= BIT(linked->id); crtc_state->active_planes |= BIT(linked->id); crtc_state->update_planes |= BIT(linked->id); drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n", @@ -13013,7 +11952,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state, struct 
drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - bool mode_changed = needs_modeset(crtc_state); + bool mode_changed = intel_crtc_needs_modeset(crtc_state); int ret; if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && @@ -14845,7 +13784,7 @@ intel_modeset_verify_crtc(struct intel_crtc *crtc, struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { - if (!needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) + if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) return; verify_wm_state(crtc, new_crtc_state); @@ -14940,7 +13879,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state) return; for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - if (!needs_modeset(new_crtc_state)) + if (!intel_crtc_needs_modeset(new_crtc_state)) continue; intel_release_shared_dplls(state, crtc); @@ -14965,7 +13904,7 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) /* look at all crtc's that are going to be enabled in during modeset */ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { if (!crtc_state->hw.active || - !needs_modeset(crtc_state)) + !intel_crtc_needs_modeset(crtc_state)) continue; if (first_crtc_state) { @@ -14990,7 +13929,7 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state) crtc_state->hsw_workaround_pipe = INVALID_PIPE; if (!crtc_state->hw.active || - needs_modeset(crtc_state)) + intel_crtc_needs_modeset(crtc_state)) continue; /* 2 or more enabled crtcs means no need for w/a */ @@ -15102,6 +14041,19 @@ static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, return 0; } +int intel_atomic_add_affected_planes(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + const struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + return intel_crtc_add_planes_to_state(state, crtc, + old_crtc_state->enabled_planes | + new_crtc_state->enabled_planes); +} + static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) { /* See {hsw,vlv,ivb}_plane_ratio() */ @@ -15296,7 +14248,7 @@ static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state, for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { if (new_crtc_state->hw.enable && transcoders & BIT(new_crtc_state->cpu_transcoder) && - needs_modeset(new_crtc_state)) + intel_crtc_needs_modeset(new_crtc_state)) return true; } @@ -15317,7 +14269,7 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, slave = crtc; master = old_crtc_state->bigjoiner_linked_crtc; master_crtc_state = intel_atomic_get_new_crtc_state(state, master); - if (!master_crtc_state || !needs_modeset(master_crtc_state)) + if (!master_crtc_state || !intel_crtc_needs_modeset(master_crtc_state)) goto claimed; } @@ -15355,21 +14307,16 @@ claimed: return -EINVAL; } -static int kill_bigjoiner_slave(struct intel_atomic_state *state, - struct intel_crtc_state *master_crtc_state) +static void kill_bigjoiner_slave(struct intel_atomic_state *state, + struct intel_crtc_state *master_crtc_state) { struct intel_crtc_state *slave_crtc_state = - intel_atomic_get_crtc_state(&state->base, - master_crtc_state->bigjoiner_linked_crtc); - - if (IS_ERR(slave_crtc_state)) - return PTR_ERR(slave_crtc_state); + 
intel_atomic_get_new_crtc_state(state, master_crtc_state->bigjoiner_linked_crtc); slave_crtc_state->bigjoiner = master_crtc_state->bigjoiner = false; slave_crtc_state->bigjoiner_slave = master_crtc_state->bigjoiner_slave = false; slave_crtc_state->bigjoiner_linked_crtc = master_crtc_state->bigjoiner_linked_crtc = NULL; intel_crtc_copy_uapi_to_hw_state(state, slave_crtc_state); - return 0; } /** @@ -15401,7 +14348,7 @@ static int intel_atomic_check_async(struct intel_atomic_state *state) for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (needs_modeset(new_crtc_state)) { + if (intel_crtc_needs_modeset(new_crtc_state)) { drm_dbg_kms(&i915->drm, "Modeset Required. Async flip not supported\n"); return -EINVAL; } @@ -15507,20 +14454,43 @@ static int intel_atomic_check_async(struct intel_atomic_state *state) static int intel_bigjoiner_add_affected_crtcs(struct intel_atomic_state *state) { - const struct intel_crtc_state *crtc_state; + struct intel_crtc_state *crtc_state; struct intel_crtc *crtc; int i; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { struct intel_crtc_state *linked_crtc_state; + struct intel_crtc *linked_crtc; + int ret; if (!crtc_state->bigjoiner) continue; - linked_crtc_state = intel_atomic_get_crtc_state(&state->base, - crtc_state->bigjoiner_linked_crtc); + linked_crtc = crtc_state->bigjoiner_linked_crtc; + linked_crtc_state = intel_atomic_get_crtc_state(&state->base, linked_crtc); if (IS_ERR(linked_crtc_state)) return PTR_ERR(linked_crtc_state); + + if (!intel_crtc_needs_modeset(crtc_state)) + continue; + + linked_crtc_state->uapi.mode_changed = true; + + ret = drm_atomic_add_affected_connectors(&state->base, + &linked_crtc->base); + if (ret) + return ret; + + ret = intel_atomic_add_affected_planes(state, linked_crtc); + if (ret) + return ret; + } + + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + /* Kill old bigjoiner link, we may re-establish afterwards */ + if (intel_crtc_needs_modeset(crtc_state) && + crtc_state->bigjoiner && !crtc_state->bigjoiner_slave) + kill_bigjoiner_slave(state, crtc_state); } return 0; @@ -15557,20 +14527,13 @@ static int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!needs_modeset(new_crtc_state)) { + if (!intel_crtc_needs_modeset(new_crtc_state)) { /* Light copy */ intel_crtc_copy_uapi_to_hw_state_nomodeset(state, new_crtc_state); continue; } - /* Kill old bigjoiner link, we may re-establish afterwards */ - if (old_crtc_state->bigjoiner && !old_crtc_state->bigjoiner_slave) { - ret = kill_bigjoiner_slave(state, new_crtc_state); - if (ret) - goto fail; - } - if (!new_crtc_state->uapi.enable) { if (!new_crtc_state->bigjoiner_slave) { intel_crtc_copy_uapi_to_hw_state(state, new_crtc_state); @@ -15595,7 +14558,7 @@ static int intel_atomic_check(struct drm_device *dev, for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!needs_modeset(new_crtc_state)) + if (!intel_crtc_needs_modeset(new_crtc_state)) continue; ret = intel_modeset_pipe_config_late(new_crtc_state); @@ -15617,7 +14580,7 @@ static int intel_atomic_check(struct drm_device *dev, * forced a full modeset. 
*/ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - if (!new_crtc_state->hw.enable || needs_modeset(new_crtc_state)) + if (!new_crtc_state->hw.enable || intel_crtc_needs_modeset(new_crtc_state)) continue; if (intel_dp_mst_is_slave_trans(new_crtc_state)) { @@ -15640,11 +14603,21 @@ static int intel_atomic_check(struct drm_device *dev, new_crtc_state->update_pipe = false; } } + + if (new_crtc_state->bigjoiner) { + struct intel_crtc_state *linked_crtc_state = + intel_atomic_get_new_crtc_state(state, new_crtc_state->bigjoiner_linked_crtc); + + if (intel_crtc_needs_modeset(linked_crtc_state)) { + new_crtc_state->uapi.mode_changed = true; + new_crtc_state->update_pipe = false; + } + } } for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (needs_modeset(new_crtc_state)) { + if (intel_crtc_needs_modeset(new_crtc_state)) { any_ms = true; continue; } @@ -15721,12 +14694,12 @@ static int intel_atomic_check(struct drm_device *dev, goto fail; } - if (!needs_modeset(new_crtc_state) && + if (!intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->update_pipe) continue; intel_dump_pipe_config(new_crtc_state, state, - needs_modeset(new_crtc_state) ? + intel_crtc_needs_modeset(new_crtc_state) ? "[modeset]" : "[fastset]"); } @@ -15758,7 +14731,7 @@ static int intel_atomic_prepare_commit(struct intel_atomic_state *state) return ret; for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { - bool mode_changed = needs_modeset(crtc_state); + bool mode_changed = intel_crtc_needs_modeset(crtc_state); if (mode_changed || crtc_state->update_pipe || crtc_state->uapi.color_mgmt_changed) { @@ -15849,7 +14822,7 @@ static void commit_pipe_config(struct intel_atomic_state *state, intel_atomic_get_old_crtc_state(state, crtc); const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - bool modeset = needs_modeset(new_crtc_state); + bool modeset = intel_crtc_needs_modeset(new_crtc_state); /* * During modesets pipe configuration was programmed as the @@ -15883,7 +14856,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state, const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - if (!needs_modeset(new_crtc_state)) + if (!intel_crtc_needs_modeset(new_crtc_state)) return; intel_crtc_update_active_timings(new_crtc_state); @@ -15905,7 +14878,7 @@ static void intel_update_crtc(struct intel_atomic_state *state, intel_atomic_get_old_crtc_state(state, crtc); struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - bool modeset = needs_modeset(new_crtc_state); + bool modeset = intel_crtc_needs_modeset(new_crtc_state); if (!modeset) { if (new_crtc_state->preload_luts && @@ -15997,7 +14970,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) /* Only disable port sync and MST slaves */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner) + if (!intel_crtc_needs_modeset(new_crtc_state) || old_crtc_state->bigjoiner) continue; if (!old_crtc_state->hw.active) @@ -16021,7 +14994,7 @@ static void intel_commit_modeset_disables(struct intel_atomic_state *state) /* Disable everything else left on */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (!needs_modeset(new_crtc_state) || + if (!intel_crtc_needs_modeset(new_crtc_state) || (handled & BIT(crtc->pipe)) || 
old_crtc_state->bigjoiner_slave) continue; @@ -16071,7 +15044,7 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state) continue; /* ignore allocations for crtc's that have been turned off. */ - if (!needs_modeset(new_crtc_state)) { + if (!intel_crtc_needs_modeset(new_crtc_state)) { entries[pipe] = old_crtc_state->wm.skl.ddb; update_pipes |= BIT(pipe); } else { @@ -16266,7 +15239,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { - if (needs_modeset(new_crtc_state) || + if (intel_crtc_needs_modeset(new_crtc_state) || new_crtc_state->update_pipe) { put_domains[crtc->pipe] = @@ -16292,7 +15265,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) /* Complete the events for pipes that have now been disabled */ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) { - bool modeset = needs_modeset(new_crtc_state); + bool modeset = intel_crtc_needs_modeset(new_crtc_state); /* Complete events for now disable pipes here. */ if (modeset && !new_crtc_state->hw.active && new_crtc_state->uapi.event) { @@ -16340,7 +15313,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) skl_disable_flip_done(crtc); if (new_crtc_state->hw.active && - !needs_modeset(new_crtc_state) && + !intel_crtc_needs_modeset(new_crtc_state) && !new_crtc_state->preload_luts && (new_crtc_state->uapi.color_mgmt_changed || new_crtc_state->update_pipe)) @@ -16376,8 +15349,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { intel_post_plane_update(state, crtc); - if (put_domains[i]) - modeset_put_power_domains(dev_priv, put_domains[i]); + modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]); intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state); @@ -16620,7 +15592,7 @@ static void add_rps_boost_after_vblank(struct drm_crtc *crtc, add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait); } -static int intel_plane_pin_fb(struct intel_plane_state *plane_state) +int intel_plane_pin_fb(struct intel_plane_state *plane_state) { struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); struct drm_i915_private *dev_priv = to_i915(plane->base.dev); @@ -16650,7 +15622,7 @@ static int intel_plane_pin_fb(struct intel_plane_state *plane_state) return 0; } -static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) +void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) { struct i915_vma *vma; @@ -16712,7 +15684,7 @@ intel_prepare_plane_fb(struct drm_plane *_plane, * This should only fail upon a hung GPU, in which case we * can safely continue. 
*/ - if (needs_modeset(crtc_state)) { + if (intel_crtc_needs_modeset(crtc_state)) { ret = i915_sw_fence_await_reservation(&state->commit_ready, old_obj->base.resv, NULL, false, 0, @@ -16833,441 +15805,13 @@ void intel_plane_destroy(struct drm_plane *plane) kfree(to_intel_plane(plane)); } -static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane, - u32 format, u64 modifier) -{ - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: - return false; - } - - switch (format) { - case DRM_FORMAT_C8: - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XRGB1555: - case DRM_FORMAT_XRGB8888: - return modifier == DRM_FORMAT_MOD_LINEAR || - modifier == I915_FORMAT_MOD_X_TILED; - default: - return false; - } -} - -static bool i965_plane_format_mod_supported(struct drm_plane *_plane, - u32 format, u64 modifier) -{ - switch (modifier) { - case DRM_FORMAT_MOD_LINEAR: - case I915_FORMAT_MOD_X_TILED: - break; - default: - return false; - } - - switch (format) { - case DRM_FORMAT_C8: - case DRM_FORMAT_RGB565: - case DRM_FORMAT_XRGB8888: - case DRM_FORMAT_XBGR8888: - case DRM_FORMAT_ARGB8888: - case DRM_FORMAT_ABGR8888: - case DRM_FORMAT_XRGB2101010: - case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_ARGB2101010: - case DRM_FORMAT_ABGR2101010: - case DRM_FORMAT_XBGR16161616F: - return modifier == DRM_FORMAT_MOD_LINEAR || - modifier == I915_FORMAT_MOD_X_TILED; - default: - return false; - } -} - -static bool intel_cursor_format_mod_supported(struct drm_plane *_plane, - u32 format, u64 modifier) -{ - return modifier == DRM_FORMAT_MOD_LINEAR && - format == DRM_FORMAT_ARGB8888; -} - -static const struct drm_plane_funcs i965_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = intel_plane_destroy, - .atomic_duplicate_state = intel_plane_duplicate_state, - .atomic_destroy_state = intel_plane_destroy_state, - .format_mod_supported = i965_plane_format_mod_supported, -}; - -static const struct drm_plane_funcs i8xx_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = intel_plane_destroy, - .atomic_duplicate_state = intel_plane_duplicate_state, - .atomic_destroy_state = intel_plane_destroy_state, - .format_mod_supported = i8xx_plane_format_mod_supported, -}; - -static int -intel_legacy_cursor_update(struct drm_plane *_plane, - struct drm_crtc *_crtc, - struct drm_framebuffer *fb, - int crtc_x, int crtc_y, - unsigned int crtc_w, unsigned int crtc_h, - u32 src_x, u32 src_y, - u32 src_w, u32 src_h, - struct drm_modeset_acquire_ctx *ctx) -{ - struct intel_plane *plane = to_intel_plane(_plane); - struct intel_crtc *crtc = to_intel_crtc(_crtc); - struct intel_plane_state *old_plane_state = - to_intel_plane_state(plane->base.state); - struct intel_plane_state *new_plane_state; - struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->base.state); - struct intel_crtc_state *new_crtc_state; - int ret; - - /* - * When crtc is inactive or there is a modeset pending, - * wait for it to complete in the slowpath - * - * FIXME bigjoiner fastpath would be good - */ - if (!crtc_state->hw.active || needs_modeset(crtc_state) || - crtc_state->update_pipe || crtc_state->bigjoiner) - goto slow; - - /* - * Don't do an async update if there is an outstanding commit modifying - * the plane. This prevents our async update's changes from getting - * overridden by a previous synchronous update's state. 
- */ - if (old_plane_state->uapi.commit && - !try_wait_for_completion(&old_plane_state->uapi.commit->hw_done)) - goto slow; - - /* - * If any parameters change that may affect watermarks, - * take the slowpath. Only changing fb or position should be - * in the fastpath. - */ - if (old_plane_state->uapi.crtc != &crtc->base || - old_plane_state->uapi.src_w != src_w || - old_plane_state->uapi.src_h != src_h || - old_plane_state->uapi.crtc_w != crtc_w || - old_plane_state->uapi.crtc_h != crtc_h || - !old_plane_state->uapi.fb != !fb) - goto slow; - - new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base)); - if (!new_plane_state) - return -ENOMEM; - - new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base)); - if (!new_crtc_state) { - ret = -ENOMEM; - goto out_free; - } - - drm_atomic_set_fb_for_plane(&new_plane_state->uapi, fb); - - new_plane_state->uapi.src_x = src_x; - new_plane_state->uapi.src_y = src_y; - new_plane_state->uapi.src_w = src_w; - new_plane_state->uapi.src_h = src_h; - new_plane_state->uapi.crtc_x = crtc_x; - new_plane_state->uapi.crtc_y = crtc_y; - new_plane_state->uapi.crtc_w = crtc_w; - new_plane_state->uapi.crtc_h = crtc_h; - - intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state, crtc); - - ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, - old_plane_state, new_plane_state); - if (ret) - goto out_free; - - ret = intel_plane_pin_fb(new_plane_state); - if (ret) - goto out_free; - - intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb), - ORIGIN_FLIP); - intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb), - to_intel_frontbuffer(new_plane_state->hw.fb), - plane->frontbuffer_bit); - - /* Swap plane state */ - plane->base.state = &new_plane_state->uapi; - - /* - * We cannot swap crtc_state as it may be in use by an atomic commit or - * page flip that's running simultaneously. If we swap crtc_state and - * destroy the old state, we will cause a use-after-free there. - * - * Only update active_planes, which is needed for our internal - * bookkeeping. Either value will do the right thing when updating - * planes atomically. If the cursor was part of the atomic update then - * we would have taken the slowpath. 
- */ - crtc_state->active_planes = new_crtc_state->active_planes; - - if (new_plane_state->uapi.visible) - intel_update_plane(plane, crtc_state, new_plane_state); - else - intel_disable_plane(plane, crtc_state); - - intel_plane_unpin_fb(old_plane_state); - -out_free: - if (new_crtc_state) - intel_crtc_destroy_state(&crtc->base, &new_crtc_state->uapi); - if (ret) - intel_plane_destroy_state(&plane->base, &new_plane_state->uapi); - else - intel_plane_destroy_state(&plane->base, &old_plane_state->uapi); - return ret; - -slow: - return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb, - crtc_x, crtc_y, crtc_w, crtc_h, - src_x, src_y, src_w, src_h, ctx); -} - -static const struct drm_plane_funcs intel_cursor_plane_funcs = { - .update_plane = intel_legacy_cursor_update, - .disable_plane = drm_atomic_helper_disable_plane, - .destroy = intel_plane_destroy, - .atomic_duplicate_state = intel_plane_duplicate_state, - .atomic_destroy_state = intel_plane_destroy_state, - .format_mod_supported = intel_cursor_format_mod_supported, -}; - -static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv, - enum i9xx_plane_id i9xx_plane) -{ - if (!HAS_FBC(dev_priv)) - return false; - - if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) - return i9xx_plane == PLANE_A; /* tied to pipe A */ - else if (IS_IVYBRIDGE(dev_priv)) - return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B || - i9xx_plane == PLANE_C; - else if (INTEL_GEN(dev_priv) >= 4) - return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B; - else - return i9xx_plane == PLANE_A; -} - -static struct intel_plane * -intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) +static int intel_crtc_late_register(struct drm_crtc *crtc) { - struct intel_plane *plane; - const struct drm_plane_funcs *plane_funcs; - unsigned int supported_rotations; - const u32 *formats; - int num_formats; - int ret, zpos; - - if (INTEL_GEN(dev_priv) >= 9) - return skl_universal_plane_create(dev_priv, pipe, - PLANE_PRIMARY); - - plane = intel_plane_alloc(); - if (IS_ERR(plane)) - return plane; - - plane->pipe = pipe; - /* - * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS - * port is hooked to pipe B. Hence we want plane A feeding pipe B. - */ - if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4 && - INTEL_NUM_PIPES(dev_priv) == 2) - plane->i9xx_plane = (enum i9xx_plane_id) !pipe; - else - plane->i9xx_plane = (enum i9xx_plane_id) pipe; - plane->id = PLANE_PRIMARY; - plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id); - - plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane); - if (plane->has_fbc) { - struct intel_fbc *fbc = &dev_priv->fbc; - - fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; - } - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - formats = vlv_primary_formats; - num_formats = ARRAY_SIZE(vlv_primary_formats); - } else if (INTEL_GEN(dev_priv) >= 4) { - /* - * WaFP16GammaEnabling:ivb - * "Workaround : When using the 64-bit format, the plane - * output on each color channel has one quarter amplitude. - * It can be brought up to full amplitude by using pipe - * gamma correction or pipe color space conversion to - * multiply the plane output by four." - * - * There is no dedicated plane gamma for the primary plane, - * and using the pipe gamma/csc could conflict with other - * planes, so we choose not to expose fp16 on IVB primary - * planes. HSW primary planes no longer have this problem. 
- */ - if (IS_IVYBRIDGE(dev_priv)) { - formats = ivb_primary_formats; - num_formats = ARRAY_SIZE(ivb_primary_formats); - } else { - formats = i965_primary_formats; - num_formats = ARRAY_SIZE(i965_primary_formats); - } - } else { - formats = i8xx_primary_formats; - num_formats = ARRAY_SIZE(i8xx_primary_formats); - } - - if (INTEL_GEN(dev_priv) >= 4) - plane_funcs = &i965_plane_funcs; - else - plane_funcs = &i8xx_plane_funcs; - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - plane->min_cdclk = vlv_plane_min_cdclk; - else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) - plane->min_cdclk = hsw_plane_min_cdclk; - else if (IS_IVYBRIDGE(dev_priv)) - plane->min_cdclk = ivb_plane_min_cdclk; - else - plane->min_cdclk = i9xx_plane_min_cdclk; - - plane->max_stride = i9xx_plane_max_stride; - plane->update_plane = i9xx_update_plane; - plane->disable_plane = i9xx_disable_plane; - plane->get_hw_state = i9xx_plane_get_hw_state; - plane->check_plane = i9xx_plane_check; - - if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) - ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, - 0, plane_funcs, - formats, num_formats, - i9xx_format_modifiers, - DRM_PLANE_TYPE_PRIMARY, - "primary %c", pipe_name(pipe)); - else - ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, - 0, plane_funcs, - formats, num_formats, - i9xx_format_modifiers, - DRM_PLANE_TYPE_PRIMARY, - "plane %c", - plane_name(plane->i9xx_plane)); - if (ret) - goto fail; - - if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { - supported_rotations = - DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | - DRM_MODE_REFLECT_X; - } else if (INTEL_GEN(dev_priv) >= 4) { - supported_rotations = - DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180; - } else { - supported_rotations = DRM_MODE_ROTATE_0; - } - - if (INTEL_GEN(dev_priv) >= 4) - drm_plane_create_rotation_property(&plane->base, - DRM_MODE_ROTATE_0, - supported_rotations); - - zpos = 0; - drm_plane_create_zpos_immutable_property(&plane->base, zpos); - - drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); - - return plane; - -fail: - intel_plane_free(plane); - - return ERR_PTR(ret); -} - -static struct intel_plane * -intel_cursor_plane_create(struct drm_i915_private *dev_priv, - enum pipe pipe) -{ - struct intel_plane *cursor; - int ret, zpos; - - cursor = intel_plane_alloc(); - if (IS_ERR(cursor)) - return cursor; - - cursor->pipe = pipe; - cursor->i9xx_plane = (enum i9xx_plane_id) pipe; - cursor->id = PLANE_CURSOR; - cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id); - - if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { - cursor->max_stride = i845_cursor_max_stride; - cursor->update_plane = i845_update_cursor; - cursor->disable_plane = i845_disable_cursor; - cursor->get_hw_state = i845_cursor_get_hw_state; - cursor->check_plane = i845_check_cursor; - } else { - cursor->max_stride = i9xx_cursor_max_stride; - cursor->update_plane = i9xx_update_cursor; - cursor->disable_plane = i9xx_disable_cursor; - cursor->get_hw_state = i9xx_cursor_get_hw_state; - cursor->check_plane = i9xx_check_cursor; - } - - cursor->cursor.base = ~0; - cursor->cursor.cntl = ~0; - - if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv)) - cursor->cursor.size = ~0; - - ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base, - 0, &intel_cursor_plane_funcs, - intel_cursor_formats, - ARRAY_SIZE(intel_cursor_formats), - cursor_format_modifiers, - DRM_PLANE_TYPE_CURSOR, - "cursor %c", pipe_name(pipe)); - if (ret) - goto fail; - - if (INTEL_GEN(dev_priv) >= 4) - 
drm_plane_create_rotation_property(&cursor->base, - DRM_MODE_ROTATE_0, - DRM_MODE_ROTATE_0 | - DRM_MODE_ROTATE_180); - - zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; - drm_plane_create_zpos_immutable_property(&cursor->base, zpos); - - if (INTEL_GEN(dev_priv) >= 12) - drm_plane_enable_fb_damage_clips(&cursor->base); - - drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); - - return cursor; - -fail: - intel_plane_free(cursor); - - return ERR_PTR(ret); + intel_crtc_debugfs_add(crtc); + return 0; } #define INTEL_CRTC_FUNCS \ - .gamma_set = drm_atomic_helper_legacy_gamma_set, \ .set_config = drm_atomic_helper_set_config, \ .destroy = intel_crtc_destroy, \ .page_flip = drm_atomic_helper_page_flip, \ @@ -17275,7 +15819,8 @@ fail: .atomic_destroy_state = intel_crtc_destroy_state, \ .set_crc_source = intel_crtc_set_crc_source, \ .verify_crc_source = intel_crtc_verify_crc_source, \ - .get_crc_sources = intel_crtc_get_crc_sources + .get_crc_sources = intel_crtc_get_crc_sources, \ + .late_register = intel_crtc_late_register static const struct drm_crtc_funcs bdw_crtc_funcs = { INTEL_CRTC_FUNCS, @@ -18748,7 +17293,7 @@ int intel_modeset_init(struct drm_i915_private *i915) */ ret = intel_initial_commit(&i915->drm); if (ret) - return ret; + drm_dbg_kms(&i915->drm, "Initial modeset failed, %d\n", ret); intel_overlay_setup(i915); @@ -19165,7 +17710,7 @@ static void readout_plane_state(struct drm_i915_private *dev_priv) struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state); - fixup_active_planes(crtc_state); + fixup_plane_bitmasks(crtc_state); } } @@ -19588,7 +18133,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev, put_domains = modeset_get_crtc_power_domains(crtc_state); if (drm_WARN_ON(dev, put_domains)) - modeset_put_power_domains(dev_priv, put_domains); + modeset_put_crtc_power_domains(crtc, put_domains); } intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 5e0d42d82c11..7ddbc00a0f41 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -499,6 +499,8 @@ enum phy_fia { ((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \ (new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1)) +int intel_atomic_add_affected_planes(struct intel_atomic_state *state, + struct intel_crtc *crtc); u8 intel_calc_active_pipes(struct intel_atomic_state *state, u8 active_pipes); void intel_link_compute_m_n(u16 bpp, int nlanes, @@ -628,11 +630,9 @@ u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state); u32 skl_plane_stride(const struct intel_plane_state *plane_state, int plane); int skl_check_plane_surface(struct intel_plane_state *plane_state); -int i9xx_check_plane_surface(struct intel_plane_state *plane_state); +int skl_calc_main_surface_offset(const struct intel_plane_state *plane_state, + int *x, int *y, u32 *offset); int skl_format_to_fourcc(int format, bool rgb_order, bool alpha); -unsigned int i9xx_plane_max_stride(struct intel_plane *plane, - u32 pixel_format, u64 modifier, - unsigned int rotation); int bdw_get_pipemisc_bpp(struct intel_crtc *crtc); unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state); @@ -645,6 +645,13 @@ bool intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info, uint64_t modifier); +int intel_plane_compute_gtt(struct intel_plane_state 
*plane_state); +u32 intel_plane_compute_aligned_offset(int *x, int *y, + const struct intel_plane_state *state, + int color_plane); +int intel_plane_pin_fb(struct intel_plane_state *plane_state); +void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state); + /* modesetting */ void intel_modeset_init_hw(struct drm_i915_private *i915); int intel_modeset_init_noirq(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index ca41e8c00ad7..cd7e5519ee7d 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -18,6 +18,7 @@ #include "intel_pm.h" #include "intel_psr.h" #include "intel_sideband.h" +#include "intel_sprite.h" static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node) { @@ -865,6 +866,110 @@ static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc) } } +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) +static void crtc_updates_info(struct seq_file *m, + struct intel_crtc *crtc, + const char *hdr) +{ + u64 count; + int row; + + count = 0; + for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) + count += crtc->debug.vbl.times[row]; + seq_printf(m, "%sUpdates: %llu\n", hdr, count); + if (!count) + return; + + for (row = 0; row < ARRAY_SIZE(crtc->debug.vbl.times); row++) { + char columns[80] = " |"; + unsigned int x; + + if (row & 1) { + const char *units; + + if (row > 10) { + x = 1000000; + units = "ms"; + } else { + x = 1000; + units = "us"; + } + + snprintf(columns, sizeof(columns), "%4ld%s |", + DIV_ROUND_CLOSEST(BIT(row + 9), x), units); + } + + if (crtc->debug.vbl.times[row]) { + x = ilog2(crtc->debug.vbl.times[row]); + memset(columns + 8, '*', x); + columns[8 + x] = '\0'; + } + + seq_printf(m, "%s%s\n", hdr, columns); + } + + seq_printf(m, "%sMin update: %lluns\n", + hdr, crtc->debug.vbl.min); + seq_printf(m, "%sMax update: %lluns\n", + hdr, crtc->debug.vbl.max); + seq_printf(m, "%sAverage update: %lluns\n", + hdr, div64_u64(crtc->debug.vbl.sum, count)); + seq_printf(m, "%sOverruns > %uus: %u\n", + hdr, VBLANK_EVASION_TIME_US, crtc->debug.vbl.over); +} + +static int crtc_updates_show(struct seq_file *m, void *data) +{ + crtc_updates_info(m, m->private, ""); + return 0; +} + +static int crtc_updates_open(struct inode *inode, struct file *file) +{ + return single_open(file, crtc_updates_show, inode->i_private); +} + +static ssize_t crtc_updates_write(struct file *file, + const char __user *ubuf, + size_t len, loff_t *offp) +{ + struct seq_file *m = file->private_data; + struct intel_crtc *crtc = m->private; + + /* May race with an update. Meh. 
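 *
 * For reference, the times[] rows this write resets form a log2
 * histogram: row r appears to cover update durations in
 * [2^(r+9), 2^(r+10)) ns, so row 0 starts at 512 ns and row 16 tops
 * out around 16 ms, with the bar length drawn as ilog2(count). A
 * hedged sketch of the implied bucketing (helper name hypothetical):
 *
 *	static int vbl_row(u64 duration_ns)
 *	{
 *		return clamp(ilog2(duration_ns | 1) - 9, 0, 16);
 *	}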
*/ + memset(&crtc->debug.vbl, 0, sizeof(crtc->debug.vbl)); + + return len; +} + +static const struct file_operations crtc_updates_fops = { + .owner = THIS_MODULE, + .open = crtc_updates_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = crtc_updates_write +}; + +static void crtc_updates_add(struct drm_crtc *crtc) +{ + debugfs_create_file("i915_update_info", 0644, crtc->debugfs_entry, + to_intel_crtc(crtc), &crtc_updates_fops); +} + +#else +static void crtc_updates_info(struct seq_file *m, + struct intel_crtc *crtc, + const char *hdr) +{ +} + +static void crtc_updates_add(struct drm_crtc *crtc) +{ +} +#endif + static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -907,6 +1012,8 @@ static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc) seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n", yesno(!crtc->cpu_fifo_underrun_disabled), yesno(!crtc->pch_fifo_underrun_disabled)); + + crtc_updates_info(m, crtc, "\t"); } static int i915_display_info(struct seq_file *m, void *unused) @@ -2278,3 +2385,20 @@ int intel_connector_debugfs_add(struct drm_connector *connector) return 0; } + +/** + * intel_crtc_debugfs_add - add i915 specific crtc debugfs files + * @crtc: pointer to a drm_crtc + * + * Returns 0 on success, negative error codes on error. + * + * Failure to add debugfs entries should generally be ignored. + */ +int intel_crtc_debugfs_add(struct drm_crtc *crtc) +{ + if (!crtc->debugfs_entry) + return -ENODEV; + + crtc_updates_add(crtc); + return 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h index c922c1745bfe..557901f3eb90 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h @@ -7,14 +7,17 @@ #define __INTEL_DISPLAY_DEBUGFS_H__ struct drm_connector; +struct drm_crtc; struct drm_i915_private; #ifdef CONFIG_DEBUG_FS void intel_display_debugfs_register(struct drm_i915_private *i915); int intel_connector_debugfs_add(struct drm_connector *connector); +int intel_crtc_debugfs_add(struct drm_crtc *crtc); #else static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {} static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; } +static inline int intel_crtc_debugfs_add(struct drm_crtc *crtc) { return 0; } #endif #endif /* __INTEL_DISPLAY_DEBUGFS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index fe2d90bba536..d52374f01316 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -2184,26 +2184,6 @@ static void __intel_display_power_put(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); } -/** - * intel_display_power_put_unchecked - release an unchecked power domain reference - * @dev_priv: i915 device instance - * @domain: power domain to reference - * - * This function drops the power domain reference obtained by - * intel_display_power_get() and might power down the corresponding hardware - * block right away if this is the last reference. - * - * This function exists only for historical reasons and should be avoided in - * new code, as the correctness of its use cannot be checked. Always use - * intel_display_power_put() instead. 
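 *
 * Callers converted in this patch instead hold the wakeref returned by
 * intel_display_power_get() and hand it back explicitly, the pattern
 * used for vdd_wakeref and disable_wakeref below (a sketch):
 *
 *	intel_wakeref_t wakeref;
 *
 *	wakeref = intel_display_power_get(i915, domain);
 *	...
 *	intel_display_power_put(i915, domain, fetch_and_zero(&wakeref));
 *
 * fetch_and_zero() both clears the stored wakeref and passes it to the
 * tracked put, so a double put is caught by the wakeref tracking.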
- */ -void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain) -{ - __intel_display_power_put(dev_priv, domain); - intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); -} - static void queue_async_put_domains_work(struct i915_power_domains *power_domains, intel_wakeref_t wakeref) @@ -2410,8 +2390,85 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, __intel_display_power_put(dev_priv, domain); intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } +#else +/** + * intel_display_power_put_unchecked - release an unchecked power domain reference + * @dev_priv: i915 device instance + * @domain: power domain to reference + * + * This function drops the power domain reference obtained by + * intel_display_power_get() and might power down the corresponding hardware + * block right away if this is the last reference. + * + * This function is only for the power domain code's internal use to suppress wakeref + * tracking when the corresponding debug kconfig option is disabled, and should not + * be used otherwise. + */ +void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain) +{ + __intel_display_power_put(dev_priv, domain); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); +} #endif +void +intel_display_power_get_in_set(struct drm_i915_private *i915, + struct intel_display_power_domain_set *power_domain_set, + enum intel_display_power_domain domain) +{ + intel_wakeref_t __maybe_unused wf; + + drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); + + wf = intel_display_power_get(i915, domain); +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + power_domain_set->wakerefs[domain] = wf; +#endif + power_domain_set->mask |= BIT_ULL(domain); +} + +bool +intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, + struct intel_display_power_domain_set *power_domain_set, + enum intel_display_power_domain domain) +{ + intel_wakeref_t wf; + + drm_WARN_ON(&i915->drm, power_domain_set->mask & BIT_ULL(domain)); + + wf = intel_display_power_get_if_enabled(i915, domain); + if (!wf) + return false; + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + power_domain_set->wakerefs[domain] = wf; +#endif + power_domain_set->mask |= BIT_ULL(domain); + + return true; +} + +void +intel_display_power_put_mask_in_set(struct drm_i915_private *i915, + struct intel_display_power_domain_set *power_domain_set, + u64 mask) +{ + enum intel_display_power_domain domain; + + drm_WARN_ON(&i915->drm, mask & ~power_domain_set->mask); + + for_each_power_domain(domain, mask) { + intel_wakeref_t __maybe_unused wf = -1; + +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) + wf = fetch_and_zero(&power_domain_set->wakerefs[domain]); +#endif + intel_display_power_put(i915, domain, wf); + power_domain_set->mask &= ~BIT_ULL(domain); + } +} + #define I830_PIPES_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_A) | \ BIT_ULL(POWER_DOMAIN_PIPE_B) | \ @@ -5601,12 +5658,16 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) * resources powered until display HW readout is complete. We drop * this reference in intel_power_domains_enable(). */ - power_domains->wakeref = + drm_WARN_ON(&i915->drm, power_domains->init_wakeref); + power_domains->init_wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); /* Disable power support if the user asked so.
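 *
 * (The intel_display_power_domain_set helpers added in this hunk wrap
 * the same tracked-wakeref discipline for whole groups of domains;
 * expected usage, sketched under the assumption of a caller-owned set:
 *
 *	struct intel_display_power_domain_set set = {};
 *
 *	intel_display_power_get_in_set(i915, &set, POWER_DOMAIN_PIPE_A);
 *	... use the hardware ...
 *	intel_display_power_put_all_in_set(i915, &set);
 *
 * which is the shape the crtc's enabled_power_domains is converted to
 * in intel_display_types.h further down.)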
*/ - if (!i915->params.disable_power_well) - intel_display_power_get(i915, POWER_DOMAIN_INIT); + if (!i915->params.disable_power_well) { + drm_WARN_ON(&i915->drm, power_domains->disable_wakeref); + i915->power_domains.disable_wakeref = intel_display_power_get(i915, + POWER_DOMAIN_INIT); + } intel_power_domains_sync_hw(i915); power_domains->initializing = false; @@ -5626,11 +5687,12 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume) void intel_power_domains_driver_remove(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&i915->power_domains.wakeref); + fetch_and_zero(&i915->power_domains.init_wakeref); /* Remove the refcount we took to keep power well support disabled. */ if (!i915->params.disable_power_well) - intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); + intel_display_power_put(i915, POWER_DOMAIN_INIT, + fetch_and_zero(&i915->power_domains.disable_wakeref)); intel_display_power_flush_work_sync(i915); @@ -5655,7 +5717,7 @@ void intel_power_domains_driver_remove(struct drm_i915_private *i915) void intel_power_domains_enable(struct drm_i915_private *i915) { intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&i915->power_domains.wakeref); + fetch_and_zero(&i915->power_domains.init_wakeref); intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); intel_power_domains_verify_state(i915); @@ -5672,8 +5734,8 @@ void intel_power_domains_disable(struct drm_i915_private *i915) { struct i915_power_domains *power_domains = &i915->power_domains; - drm_WARN_ON(&i915->drm, power_domains->wakeref); - power_domains->wakeref = + drm_WARN_ON(&i915->drm, power_domains->init_wakeref); + power_domains->init_wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); intel_power_domains_verify_state(i915); @@ -5695,7 +5757,7 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, { struct i915_power_domains *power_domains = &i915->power_domains; intel_wakeref_t wakeref __maybe_unused = - fetch_and_zero(&power_domains->wakeref); + fetch_and_zero(&power_domains->init_wakeref); intel_display_power_put(i915, POWER_DOMAIN_INIT, wakeref); @@ -5719,7 +5781,8 @@ void intel_power_domains_suspend(struct drm_i915_private *i915, * power wells if power domains must be deinitialized for suspend. 
*/ if (!i915->params.disable_power_well) - intel_display_power_put_unchecked(i915, POWER_DOMAIN_INIT); + intel_display_power_put(i915, POWER_DOMAIN_INIT, + fetch_and_zero(&i915->power_domains.disable_wakeref)); intel_display_power_flush_work(i915); intel_power_domains_verify_state(i915); @@ -5754,8 +5817,8 @@ void intel_power_domains_resume(struct drm_i915_private *i915) intel_power_domains_init_hw(i915, true); power_domains->display_core_suspended = false; } else { - drm_WARN_ON(&i915->drm, power_domains->wakeref); - power_domains->wakeref = + drm_WARN_ON(&i915->drm, power_domains->init_wakeref); + power_domains->init_wakeref = intel_display_power_get(i915, POWER_DOMAIN_INIT); } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index 4aa0a09cf14f..bc30c479be53 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -212,7 +212,8 @@ struct i915_power_domains { bool display_core_suspended; int power_well_count; - intel_wakeref_t wakeref; + intel_wakeref_t init_wakeref; + intel_wakeref_t disable_wakeref; struct mutex lock; int domain_use_count[POWER_DOMAIN_NUM]; @@ -224,6 +225,13 @@ struct i915_power_domains { struct i915_power_well *power_wells; }; +struct intel_display_power_domain_set { + u64 mask; +#ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM + intel_wakeref_t wakerefs[POWER_DOMAIN_NUM]; +#endif +}; + #define for_each_power_domain(domain, mask) \ for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \ for_each_if(BIT_ULL(domain) & (mask)) @@ -279,8 +287,6 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, intel_wakeref_t intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); -void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, - enum intel_display_power_domain domain); void __intel_display_power_put_async(struct drm_i915_private *i915, enum intel_display_power_domain domain, intel_wakeref_t wakeref); @@ -297,6 +303,9 @@ intel_display_power_put_async(struct drm_i915_private *i915, __intel_display_power_put_async(i915, domain, wakeref); } #else +void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, + enum intel_display_power_domain domain); + static inline void intel_display_power_put(struct drm_i915_private *i915, enum intel_display_power_domain domain, @@ -314,6 +323,28 @@ intel_display_power_put_async(struct drm_i915_private *i915, } #endif +void +intel_display_power_get_in_set(struct drm_i915_private *i915, + struct intel_display_power_domain_set *power_domain_set, + enum intel_display_power_domain domain); + +bool +intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915, + struct intel_display_power_domain_set *power_domain_set, + enum intel_display_power_domain domain); + +void +intel_display_power_put_mask_in_set(struct drm_i915_private *i915, + struct intel_display_power_domain_set *power_domain_set, + u64 mask); + +static inline void +intel_display_power_put_all_in_set(struct drm_i915_private *i915, + struct intel_display_power_domain_set *power_domain_set) +{ + intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask); +} + enum dbuf_slice { DBUF_S1, DBUF_S2, diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index ce82d654d0f2..9dfad41eb3dc 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ 
b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -225,6 +225,17 @@ struct intel_encoder { const struct drm_connector *audio_connector; }; +struct intel_panel_bl_funcs { + /* Connector and platform specific backlight functions */ + int (*setup)(struct intel_connector *connector, enum pipe pipe); + u32 (*get)(struct intel_connector *connector); + void (*set)(const struct drm_connector_state *conn_state, u32 level); + void (*disable)(const struct drm_connector_state *conn_state, u32 level); + void (*enable)(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, u32 level); + u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz); +}; + struct intel_panel { struct drm_display_mode *fixed_mode; struct drm_display_mode *downclock_mode; @@ -251,14 +262,7 @@ struct intel_panel { struct backlight_device *device; - /* Connector and platform specific backlight functions */ - int (*setup)(struct intel_connector *connector, enum pipe pipe); - u32 (*get)(struct intel_connector *connector); - void (*set)(const struct drm_connector_state *conn_state, u32 level); - void (*disable)(const struct drm_connector_state *conn_state); - void (*enable)(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state); - u32 (*hz_to_pwm)(struct intel_connector *connector, u32 hz); + const struct intel_panel_bl_funcs *funcs; void (*power)(struct intel_connector *, bool enable); } backlight; }; @@ -604,6 +608,8 @@ struct intel_plane_state { u32 planar_slave; struct drm_intel_sprite_colorkey ckey; + + struct drm_rect psr2_sel_fetch_area; }; struct intel_initial_plane_config { @@ -1047,7 +1053,10 @@ struct intel_crtc_state { u32 cgm_mode; }; - /* bitmask of visible planes (enum plane_id) */ + /* bitmask of logically enabled planes (enum plane_id) */ + u8 enabled_planes; + + /* bitmask of actually visible planes (enum plane_id) */ u8 active_planes; u8 nv12_planes; u8 c8_planes; @@ -1160,7 +1169,7 @@ struct intel_crtc { /* I915_MODE_FLAG_* */ u8 mode_flags; - unsigned long long enabled_power_domains; + struct intel_display_power_domain_set enabled_power_domains; struct intel_overlay *overlay; struct intel_crtc_state *config; @@ -1186,6 +1195,15 @@ struct intel_crtc { ktime_t start_vbl_time; int min_vbl, max_vbl; int scanline_start; +#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE + struct { + u64 min; + u64 max; + u64 sum; + unsigned int over; + unsigned int times[17]; /* [1us, 16ms] */ + } vbl; +#endif } debug; /* scalers available on this crtc */ @@ -1321,6 +1339,11 @@ struct intel_dp_compliance { u8 test_lane_count; }; +struct intel_dp_pcon_frl { + bool is_trained; + int trained_rate_gbps; +}; + struct intel_dp { i915_reg_t output_reg; u32 DP; @@ -1331,6 +1354,7 @@ struct intel_dp { bool has_hdmi_sink; bool has_audio; bool reset_link_params; + bool use_max_params; u8 dpcd[DP_RECEIVER_CAP_SIZE]; u8 psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; @@ -1339,6 +1363,7 @@ struct intel_dp { u8 lttpr_common_caps[DP_LTTPR_COMMON_CAP_SIZE]; u8 lttpr_phy_caps[DP_MAX_LTTPR_COUNT][DP_LTTPR_PHY_CAP_SIZE]; u8 fec_capable; + u8 pcon_dsc_dpcd[DP_PCON_DSC_ENCODER_CAP_SIZE]; /* source rates */ int num_source_rates; const int *source_rates; @@ -1369,6 +1394,7 @@ struct intel_dp { unsigned long last_power_on; unsigned long last_backlight_off; ktime_t panel_power_off_time; + intel_wakeref_t vdd_wakeref; /* * Pipe whose power sequencer is currently locked into @@ -1432,15 +1458,22 @@ struct intel_dp { struct { int min_tmds_clock, 
max_tmds_clock; int max_dotclock; + int pcon_max_frl_bw; u8 max_bpc; bool ycbcr_444_to_420; + bool rgb_to_ycbcr; } dfp; + /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */ + struct pm_qos_request pm_qos; + /* Display stream compression testing */ bool force_dsc_en; bool hobl_failed; bool hobl_active; + + struct intel_dp_pcon_frl frl; }; enum lspcon_vendor { @@ -1450,6 +1483,7 @@ enum lspcon_vendor { struct intel_lspcon { bool active; + bool hdr_supported; enum drm_lspcon_mode mode; enum lspcon_vendor vendor; }; @@ -1466,6 +1500,8 @@ struct intel_digital_port { /* Used for DP and ICL+ TypeC/DP and TypeC/HDMI ports. */ enum aux_ch aux_ch; enum intel_display_power_domain ddi_io_power_domain; + intel_wakeref_t ddi_io_wakeref; + intel_wakeref_t aux_wakeref; struct mutex tc_lock; /* protects the TypeC port mode */ intel_wakeref_t tc_lock_wakeref; int tc_link_refcount; @@ -1755,6 +1791,12 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state) (1 << INTEL_OUTPUT_EDP)); } +static inline bool +intel_crtc_needs_modeset(const struct intel_crtc_state *crtc_state) +{ + return drm_atomic_crtc_needs_modeset(&crtc_state->uapi); +} + static inline void intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) { @@ -1777,4 +1819,10 @@ static inline u32 intel_plane_ggtt_offset(const struct intel_plane_state *state) return i915_ggtt_offset(state->vma); } +static inline struct intel_frontbuffer * +to_intel_frontbuffer(struct drm_framebuffer *fb) +{ + return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL; +} + #endif /* __INTEL_DISPLAY_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 3896d08c4177..469e765a1b7b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -480,6 +480,13 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, return -1; } + if (intel_dp_is_edp(intel_dp) && !intel_dp->use_max_params) { + drm_dbg_kms(&i915->drm, + "Retrying Link training for eDP with max parameters\n"); + intel_dp->use_max_params = true; + return 0; + } + index = intel_dp_rate_index(intel_dp->common_rates, intel_dp->num_common_rates, link_rate); @@ -615,7 +622,7 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, return 0; } /* Also take into account max slice width */ - min_slice_count = min_t(u8, min_slice_count, + min_slice_count = max_t(u8, min_slice_count, DIV_ROUND_UP(mode_hdisplay, max_slice_width)); @@ -651,6 +658,10 @@ intel_dp_output_format(struct drm_connector *connector, !drm_mode_is_420_only(info, mode)) return INTEL_OUTPUT_FORMAT_RGB; + if (intel_dp->dfp.rgb_to_ycbcr && + intel_dp->dfp.ycbcr_444_to_420) + return INTEL_OUTPUT_FORMAT_RGB; + if (intel_dp->dfp.ycbcr_444_to_420) return INTEL_OUTPUT_FORMAT_YCBCR444; else @@ -716,6 +727,25 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector, const struct drm_display_info *info = &connector->base.display_info; int tmds_clock; + /* If PCON supports FRL MODE, check FRL bandwidth constraints */ + if (intel_dp->dfp.pcon_max_frl_bw) { + int target_bw; + int max_frl_bw; + int bpp = intel_dp_mode_min_output_bpp(&connector->base, mode); + + target_bw = bpp * target_clock; + + max_frl_bw = intel_dp->dfp.pcon_max_frl_bw; + + /* converting bw from Gbps to Kbps*/ + max_frl_bw = max_frl_bw * 1000000; + + if (target_bw > max_frl_bw) + return MODE_CLOCK_HIGH; + + return MODE_OK; + } + if (intel_dp->dfp.max_dotclock && target_clock > intel_dp->dfp.max_dotclock) return 
MODE_CLOCK_HIGH; @@ -872,9 +902,7 @@ pps_lock(struct intel_dp *intel_dp) * See intel_power_sequencer_reset() why we need * a power domain reference here. */ - wakeref = intel_display_power_get(dev_priv, - intel_aux_power_domain(dp_to_dig_port(intel_dp))); - + wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE); mutex_lock(&dev_priv->pps_mutex); return wakeref; @@ -886,9 +914,7 @@ pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref) struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); mutex_unlock(&dev_priv->pps_mutex); - intel_display_power_put(dev_priv, - intel_aux_power_domain(dp_to_dig_port(intel_dp)), - wakeref); + intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); return 0; } @@ -1489,7 +1515,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, * lowest possible wakeup latency and so prevent the cpu from going into * deep sleep states. */ - cpu_latency_qos_update_request(&i915->pm_qos, 0); + cpu_latency_qos_update_request(&intel_dp->pm_qos, 0); intel_dp_check_edp(intel_dp); @@ -1622,7 +1648,7 @@ done: ret = recv_bytes; out: - cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE); + cpu_latency_qos_update_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE); if (vdd) edp_panel_vdd_off(intel_dp, false); @@ -1898,6 +1924,9 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index) static void intel_dp_aux_fini(struct intel_dp *intel_dp) { + if (cpu_latency_qos_request_active(&intel_dp->pm_qos)) + cpu_latency_qos_remove_request(&intel_dp->pm_qos); + kfree(intel_dp->aux.name); } @@ -1950,6 +1979,7 @@ intel_dp_aux_init(struct intel_dp *intel_dp) encoder->base.name); intel_dp->aux.transfer = intel_dp_aux_transfer; + cpu_latency_qos_add_request(&intel_dp->pm_qos, PM_QOS_DEFAULT_VALUE); } bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp) @@ -2263,6 +2293,44 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp, return -EINVAL; } +/* Optimize link config in order: max bpp, min lanes, min clock */ +static int +intel_dp_compute_link_config_fast(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + const struct link_config_limits *limits) +{ + const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; + int bpp, clock, lane_count; + int mode_rate, link_clock, link_avail; + + for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { + int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp); + + mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, + output_bpp); + + for (lane_count = limits->min_lane_count; + lane_count <= limits->max_lane_count; + lane_count <<= 1) { + for (clock = limits->min_clock; clock <= limits->max_clock; clock++) { + link_clock = intel_dp->common_rates[clock]; + link_avail = intel_dp_max_data_rate(link_clock, + lane_count); + + if (mode_rate <= link_avail) { + pipe_config->lane_count = lane_count; + pipe_config->pipe_bpp = bpp; + pipe_config->port_clock = link_clock; + + return 0; + } + } + } + } + + return -EINVAL; +} + static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc) { int i, num_bpc; @@ -2289,6 +2357,14 @@ static int intel_dp_dsc_compute_params(struct intel_encoder *encoder, u8 line_buf_depth; int ret; + /* + * RC_MODEL_SIZE is currently a constant across all configurations. + * + * FIXME: Look into using sink defined DPCD DP_DSC_RC_BUF_BLK_SIZE and + * DP_DSC_RC_BUF_SIZE for this. 
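 *
 * (Aside on intel_dp_compute_link_config_fast() added above: it walks
 * bpp downward in steps of 2 * 3 (two bits per channel, three
 * channels), then lane count upward through 1/2/4, then link rate
 * upward, and takes the first configuration that fits. A condensed
 * sketch of that search order, assuming the driver's usual required
 * data rate of DIV_ROUND_UP(clock * bpp, 8), with fill_config() as a
 * hypothetical stand-in for writing pipe_config:
 *
 *	for (bpp = max_bpp; bpp >= min_bpp; bpp -= 2 * 3)
 *		for (lanes = min_lanes; lanes <= max_lanes; lanes <<= 1)
 *			for (i = min_clock; i <= max_clock; i++)
 *				if (DIV_ROUND_UP(clock * bpp, 8) <=
 *				    intel_dp_max_data_rate(rates[i], lanes))
 *					return fill_config(bpp, lanes, rates[i]);
 *
 * eDP panels thus get the narrowest, slowest link that still carries
 * the mode, matching the eDP spec recommendation quoted below.)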
+ */ + vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; + ret = intel_dsc_compute_params(encoder, crtc_state); if (ret) return ret; @@ -2478,13 +2554,14 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format); limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config); - if (intel_dp_is_edp(intel_dp)) { + if (intel_dp->use_max_params) { /* * Use the maximum clock and number of lanes the eDP panel - * advertizes being capable of. The panels are generally + * advertizes being capable of in case the initial fast + * optimal params failed us. The panels are generally * designed to support only a single clock and lane - * configuration, and typically these values correspond to the - * native resolution of the panel. + * configuration, and typically on older panels these + * values correspond to the native resolution of the panel. */ limits.min_lane_count = limits.max_lane_count; limits.min_clock = limits.max_clock; @@ -2503,11 +2580,22 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, intel_dp_can_bigjoiner(intel_dp)) pipe_config->bigjoiner = true; - /* - * Optimize for slow and wide. This is the place to add alternative - * optimization policy. - */ - ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); + if (intel_dp_is_edp(intel_dp)) + /* + * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4 + * section A.1: "It is recommended that the minimum number of + * lanes be used, using the minimum link rate allowed for that + * lane configuration." + * + * Note that we fall back to the max clock and lane count for eDP + * panels that fail with the fast optimal settings (see + * intel_dp->use_max_params), in which case the fast vs. wide + * choice doesn't matter. + */ + ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config, &limits); + else + /* Optimize for slow and wide. */ + ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits); /* enable compression if the mode doesn't fit available BW */ drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en); @@ -3094,8 +3182,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) if (edp_have_panel_vdd(intel_dp)) return need_to_disable; - intel_display_power_get(dev_priv, - intel_aux_power_domain(dig_port)); + drm_WARN_ON(&dev_priv->drm, intel_dp->vdd_wakeref); + intel_dp->vdd_wakeref = intel_display_power_get(dev_priv, + intel_aux_power_domain(dig_port)); drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n", dig_port->base.base.base.id, @@ -3188,8 +3277,9 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) if ((pp & PANEL_POWER_ON) == 0) intel_dp->panel_power_off_time = ktime_get_boottime(); - intel_display_power_put_unchecked(dev_priv, - intel_aux_power_domain(dig_port)); + intel_display_power_put(dev_priv, + intel_aux_power_domain(dig_port), + fetch_and_zero(&intel_dp->vdd_wakeref)); } static void edp_panel_vdd_work(struct work_struct *__work) @@ -3341,7 +3431,9 @@ static void edp_panel_off(struct intel_dp *intel_dp) intel_dp->panel_power_off_time = ktime_get_boottime(); /* We got a reference when we enabled the VDD. */ - intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port)); + intel_display_power_put(dev_priv, + intel_aux_power_domain(dig_port), + fetch_and_zero(&intel_dp->vdd_wakeref)); } void intel_edp_panel_off(struct intel_dp *intel_dp) @@ -3579,6 +3671,29 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, enable ? 
"enable" : "disable"); } +static void +intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + u8 oui[] = { 0x00, 0xaa, 0x01 }; + u8 buf[3] = { 0 }; + + /* + * During driver init, we want to be careful and avoid changing the source OUI if it's + * already set to what we want, so as to avoid clearing any state by accident + */ + if (careful) { + if (drm_dp_dpcd_read(&intel_dp->aux, DP_SOURCE_OUI, buf, sizeof(buf)) < 0) + drm_err(&i915->drm, "Failed to read source OUI\n"); + + if (memcmp(oui, buf, sizeof(oui)) == 0) + return; + } + + if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0) + drm_err(&i915->drm, "Failed to write source OUI\n"); +} + /* If the device supports it, try to set the power state appropriately */ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) { @@ -3600,6 +3715,10 @@ void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode) lspcon_resume(dp_to_dig_port(intel_dp)); + /* Write the source OUI as early as possible */ + if (intel_dp_is_edp(intel_dp)) + intel_edp_init_source_oui(intel_dp, false); + /* * When turning on, we need to retry for 1ms to give the sink * time to wake up. @@ -3860,6 +3979,8 @@ static void intel_disable_dp(struct intel_atomic_state *state, intel_edp_backlight_off(old_conn_state); intel_dp_set_power(intel_dp, DP_SET_POWER_D3); intel_edp_panel_off(intel_dp); + intel_dp->frl.is_trained = false; + intel_dp->frl.trained_rate_gbps = 0; } static void g4x_disable_dp(struct intel_atomic_state *state, @@ -3955,6 +4076,280 @@ cpt_set_link_train(struct intel_dp *intel_dp, intel_de_posting_read(dev_priv, intel_dp->output_reg); } +static void intel_dp_get_pcon_dsc_cap(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + /* Clear the cached register set to avoid using stale values */ + + memset(intel_dp->pcon_dsc_dpcd, 0, sizeof(intel_dp->pcon_dsc_dpcd)); + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_PCON_DSC_ENCODER, + intel_dp->pcon_dsc_dpcd, + sizeof(intel_dp->pcon_dsc_dpcd)) < 0) + drm_err(&i915->drm, "Failed to read DPCD register 0x%x\n", + DP_PCON_DSC_ENCODER); + + drm_dbg_kms(&i915->drm, "PCON ENCODER DSC DPCD: %*ph\n", + (int)sizeof(intel_dp->pcon_dsc_dpcd), intel_dp->pcon_dsc_dpcd); +} + +static int intel_dp_pcon_get_frl_mask(u8 frl_bw_mask) +{ + int bw_gbps[] = {9, 18, 24, 32, 40, 48}; + int i; + + for (i = ARRAY_SIZE(bw_gbps) - 1; i >= 0; i--) { + if (frl_bw_mask & (1 << i)) + return bw_gbps[i]; + } + return 0; +} + +static int intel_dp_pcon_set_frl_mask(int max_frl) +{ + switch (max_frl) { + case 48: + return DP_PCON_FRL_BW_MASK_48GBPS; + case 40: + return DP_PCON_FRL_BW_MASK_40GBPS; + case 32: + return DP_PCON_FRL_BW_MASK_32GBPS; + case 24: + return DP_PCON_FRL_BW_MASK_24GBPS; + case 18: + return DP_PCON_FRL_BW_MASK_18GBPS; + case 9: + return DP_PCON_FRL_BW_MASK_9GBPS; + } + + return 0; +} + +static int intel_dp_hdmi_sink_max_frl(struct intel_dp *intel_dp) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; + struct drm_connector *connector = &intel_connector->base; + int max_frl_rate; + int max_lanes, rate_per_lane; + int max_dsc_lanes, dsc_rate_per_lane; + + max_lanes = connector->display_info.hdmi.max_lanes; + rate_per_lane = connector->display_info.hdmi.max_frl_rate_per_lane; + max_frl_rate = max_lanes * rate_per_lane; + + if (connector->display_info.hdmi.dsc_cap.v_1p2) { + max_dsc_lanes = connector->display_info.hdmi.dsc_cap.max_lanes; + dsc_rate_per_lane = 
connector->display_info.hdmi.dsc_cap.max_frl_rate_per_lane; + if (max_dsc_lanes && dsc_rate_per_lane) + max_frl_rate = min(max_frl_rate, max_dsc_lanes * dsc_rate_per_lane); + } + + return max_frl_rate; +} + +static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) +{ +#define PCON_EXTENDED_TRAIN_MODE (1 > 0) +#define PCON_CONCURRENT_MODE (1 > 0) +#define PCON_SEQUENTIAL_MODE !PCON_CONCURRENT_MODE +#define PCON_NORMAL_TRAIN_MODE !PCON_EXTENDED_TRAIN_MODE +#define TIMEOUT_FRL_READY_MS 500 +#define TIMEOUT_HDMI_LINK_ACTIVE_MS 1000 + + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + int max_frl_bw, max_pcon_frl_bw, max_edid_frl_bw, ret; + u8 max_frl_bw_mask = 0, frl_trained_mask; + bool is_active; + + ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); + if (ret < 0) + return ret; + + max_pcon_frl_bw = intel_dp->dfp.pcon_max_frl_bw; + drm_dbg(&i915->drm, "PCON max rate = %d Gbps\n", max_pcon_frl_bw); + + max_edid_frl_bw = intel_dp_hdmi_sink_max_frl(intel_dp); + drm_dbg(&i915->drm, "Sink max rate from EDID = %d Gbps\n", max_edid_frl_bw); + + max_frl_bw = min(max_edid_frl_bw, max_pcon_frl_bw); + + if (max_frl_bw <= 0) + return -EINVAL; + + ret = drm_dp_pcon_frl_prepare(&intel_dp->aux, false); + if (ret < 0) + return ret; + /* Wait for PCON to be FRL Ready */ + wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS); + + if (!is_active) + return -ETIMEDOUT; + + max_frl_bw_mask = intel_dp_pcon_set_frl_mask(max_frl_bw); + ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, PCON_SEQUENTIAL_MODE); + if (ret < 0) + return ret; + ret = drm_dp_pcon_frl_configure_2(&intel_dp->aux, max_frl_bw_mask, PCON_NORMAL_TRAIN_MODE); + if (ret < 0) + return ret; + ret = drm_dp_pcon_frl_enable(&intel_dp->aux); + if (ret < 0) + return ret; + /* + * Wait for FRL to be completed + * Check if the HDMI Link is up and active. 
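 *
 * Worked example of the bandwidth negotiation above: an HDMI 2.1 sink
 * advertising 4 lanes at 12 Gbps each in its EDID yields
 * max_edid_frl_bw = 48, and a PCON whose DPCD (read via
 * drm_dp_get_pcon_max_frl_bw()) caps out at 24 Gbps gives
 *
 *	max_frl_bw = min(48, 24) = 24;
 *	max_frl_bw_mask = DP_PCON_FRL_BW_MASK_24GBPS;
 *
 * After training, drm_dp_pcon_hdmi_link_mode() reports the trained
 * mask, which intel_dp_pcon_get_frl_mask() maps back to Gbps.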
+ */ + wait_for(is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux) == true, TIMEOUT_HDMI_LINK_ACTIVE_MS); + + if (!is_active) + return -ETIMEDOUT; + + /* Verify HDMI Link configuration shows FRL Mode */ + if (drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, &frl_trained_mask) != + DP_PCON_HDMI_MODE_FRL) { + drm_dbg(&i915->drm, "HDMI couldn't be trained in FRL Mode\n"); + return -EINVAL; + } + drm_dbg(&i915->drm, "MAX_FRL_MASK = %u, FRL_TRAINED_MASK = %u\n", max_frl_bw_mask, frl_trained_mask); + + intel_dp->frl.trained_rate_gbps = intel_dp_pcon_get_frl_mask(frl_trained_mask); + intel_dp->frl.is_trained = true; + drm_dbg(&i915->drm, "FRL trained with: %d Gbps\n", intel_dp->frl.trained_rate_gbps); + + return 0; +} + +static bool intel_dp_is_hdmi_2_1_sink(struct intel_dp *intel_dp) +{ + if (drm_dp_is_branch(intel_dp->dpcd) && + intel_dp->has_hdmi_sink && + intel_dp_hdmi_sink_max_frl(intel_dp) > 0) + return true; + + return false; +} + +void intel_dp_check_frl_training(struct intel_dp *intel_dp) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + + /* Always go for FRL training if supported */ + if (!intel_dp_is_hdmi_2_1_sink(intel_dp) || + intel_dp->frl.is_trained) + return; + + if (intel_dp_pcon_start_frl_training(intel_dp) < 0) { + int ret, mode; + + drm_dbg(&dev_priv->drm, "Couldn't set FRL mode, continuing with TMDS mode\n"); + ret = drm_dp_pcon_reset_frl_config(&intel_dp->aux); + mode = drm_dp_pcon_hdmi_link_mode(&intel_dp->aux, NULL); + + if (ret < 0 || mode != DP_PCON_HDMI_MODE_TMDS) + drm_dbg(&dev_priv->drm, "Issue with PCON, cannot set TMDS mode\n"); + } else { + drm_dbg(&dev_priv->drm, "FRL training completed\n"); + } +} + +static int +intel_dp_pcon_dsc_enc_slice_height(const struct intel_crtc_state *crtc_state) +{ + int vactive = crtc_state->hw.adjusted_mode.vdisplay; + + return intel_hdmi_dsc_get_slice_height(vactive); +} + +static int +intel_dp_pcon_dsc_enc_slices(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; + struct drm_connector *connector = &intel_connector->base; + int hdmi_throughput = connector->display_info.hdmi.dsc_cap.clk_per_slice; + int hdmi_max_slices = connector->display_info.hdmi.dsc_cap.max_slices; + int pcon_max_slices = drm_dp_pcon_dsc_max_slices(intel_dp->pcon_dsc_dpcd); + int pcon_max_slice_width = drm_dp_pcon_dsc_max_slice_width(intel_dp->pcon_dsc_dpcd); + + return intel_hdmi_dsc_get_num_slices(crtc_state, pcon_max_slices, + pcon_max_slice_width, + hdmi_max_slices, hdmi_throughput); +} + +static int +intel_dp_pcon_dsc_enc_bpp(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + int num_slices, int slice_width) +{ + struct intel_connector *intel_connector = intel_dp->attached_connector; + struct drm_connector *connector = &intel_connector->base; + int output_format = crtc_state->output_format; + bool hdmi_all_bpp = connector->display_info.hdmi.dsc_cap.all_bpp; + int pcon_fractional_bpp = drm_dp_pcon_dsc_bpp_incr(intel_dp->pcon_dsc_dpcd); + int hdmi_max_chunk_bytes = + connector->display_info.hdmi.dsc_cap.total_chunk_kbytes * 1024; + + return intel_hdmi_dsc_get_bpp(pcon_fractional_bpp, slice_width, + num_slices, output_format, hdmi_all_bpp, + hdmi_max_chunk_bytes); +} + +void +intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + u8 pps_param[6]; + int slice_height; + int slice_width; + int num_slices; + int bits_per_pixel; + int ret; + struct intel_connector *intel_connector =
intel_dp->attached_connector; + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct drm_connector *connector; + bool hdmi_is_dsc_1_2; + + if (!intel_dp_is_hdmi_2_1_sink(intel_dp)) + return; + + if (!intel_connector) + return; + connector = &intel_connector->base; + hdmi_is_dsc_1_2 = connector->display_info.hdmi.dsc_cap.v_1p2; + + if (!drm_dp_pcon_enc_is_dsc_1_2(intel_dp->pcon_dsc_dpcd) || + !hdmi_is_dsc_1_2) + return; + + slice_height = intel_dp_pcon_dsc_enc_slice_height(crtc_state); + if (!slice_height) + return; + + num_slices = intel_dp_pcon_dsc_enc_slices(intel_dp, crtc_state); + if (!num_slices) + return; + + slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, + num_slices); + + bits_per_pixel = intel_dp_pcon_dsc_enc_bpp(intel_dp, crtc_state, + num_slices, slice_width); + if (!bits_per_pixel) + return; + + pps_param[0] = slice_height & 0xFF; + pps_param[1] = slice_height >> 8; + pps_param[2] = slice_width & 0xFF; + pps_param[3] = slice_width >> 8; + pps_param[4] = bits_per_pixel & 0xFF; + pps_param[5] = (bits_per_pixel >> 8) & 0x3; + + ret = drm_dp_pcon_pps_override_param(&intel_dp->aux, pps_param); + if (ret < 0) + drm_dbg_kms(&i915->drm, "Failed to set pcon DSC\n"); +} + static void g4x_set_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, @@ -4010,7 +4405,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp, intel_de_posting_read(dev_priv, intel_dp->output_reg); } -void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp) +void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 tmp; @@ -4039,12 +4435,42 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp) enableddisabled(intel_dp->dfp.ycbcr_444_to_420)); tmp = 0; + if (intel_dp->dfp.rgb_to_ycbcr) { + bool bt2020, bt709; - if (drm_dp_dpcd_writeb(&intel_dp->aux, - DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0) + /* + * FIXME: Currently if userspace selects BT2020 or BT709, but PCON supports only + * RGB->YCbCr for BT601 colorspace, we go ahead with BT601, as default. + * + */ + tmp = DP_CONVERSION_BT601_RGB_YCBCR_ENABLE; + + bt2020 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, + intel_dp->downstream_ports, + DP_DS_HDMI_BT2020_RGB_YCBCR_CONV); + bt709 = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, + intel_dp->downstream_ports, + DP_DS_HDMI_BT709_RGB_YCBCR_CONV); + switch (crtc_state->infoframes.vsc.colorimetry) { + case DP_COLORIMETRY_BT2020_RGB: + case DP_COLORIMETRY_BT2020_YCC: + if (bt2020) + tmp = DP_CONVERSION_BT2020_RGB_YCBCR_ENABLE; + break; + case DP_COLORIMETRY_BT709_YCC: + case DP_COLORIMETRY_XVYCC_709: + if (bt709) + tmp = DP_CONVERSION_BT709_RGB_YCBCR_ENABLE; + break; + default: + break; + } + } + + if (drm_dp_pcon_convert_rgb_to_ycbcr(&intel_dp->aux, tmp) < 0) drm_dbg_kms(&i915->drm, - "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n", - enableddisabled(false)); + "Failed to set protocol converter RGB->YCbCr conversion mode to %s\n", + enableddisabled(tmp ? 
true : false)); } static void intel_enable_dp(struct intel_atomic_state *state, @@ -4084,7 +4510,9 @@ static void intel_enable_dp(struct intel_atomic_state *state, } intel_dp_set_power(intel_dp, DP_SET_POWER_D0); - intel_dp_configure_protocol_converter(intel_dp); + intel_dp_configure_protocol_converter(intel_dp, pipe_config); + intel_dp_check_frl_training(intel_dp); + intel_dp_pcon_dsc_configure(intel_dp, pipe_config); intel_dp_start_link_train(intel_dp, pipe_config); intel_dp_stop_link_train(intel_dp, pipe_config); @@ -4865,6 +5293,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp) if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) intel_dp_get_dsc_sink_cap(intel_dp); + /* + * If needed, program our source OUI so we can make various Intel-specific AUX services + * available (such as HDR backlight controls) + */ + intel_edp_init_source_oui(intel_dp, true); + return true; } @@ -5833,6 +6267,28 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) return link_ok; } +static void +intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp) +{ + bool is_active; + u8 buf = 0; + + is_active = drm_dp_pcon_hdmi_link_active(&intel_dp->aux); + if (intel_dp->frl.is_trained && !is_active) { + if (drm_dp_dpcd_readb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf) < 0) + return; + + buf &= ~DP_PCON_ENABLE_HDMI_LINK; + if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PCON_HDMI_LINK_CONFIG_1, buf) < 0) + return; + + drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base); + + /* Restart FRL training or fall back to TMDS mode */ + intel_dp_check_frl_training(intel_dp); + } +} + static bool intel_dp_needs_link_retrain(struct intel_dp *intel_dp) { @@ -6006,6 +6462,8 @@ int intel_dp_retrain_link(struct intel_encoder *encoder, !intel_dp_mst_is_master_trans(crtc_state)) continue; + intel_dp_check_frl_training(intel_dp); + intel_dp_pcon_dsc_configure(intel_dp, crtc_state); intel_dp_start_link_train(intel_dp, crtc_state); intel_dp_stop_link_train(intel_dp, crtc_state); break; @@ -6197,7 +6655,7 @@ intel_dp_hotplug(struct intel_encoder *encoder, return state; } -static void intel_dp_check_service_irq(struct intel_dp *intel_dp) +static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 val; @@ -6221,6 +6679,30 @@ static void intel_dp_check_service_irq(struct intel_dp *intel_dp) drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n"); } +static void intel_dp_check_link_service_irq(struct intel_dp *intel_dp) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + u8 val; + + if (intel_dp->dpcd[DP_DPCD_REV] < 0x11) + return; + + if (drm_dp_dpcd_readb(&intel_dp->aux, + DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val) { + drm_dbg_kms(&i915->drm, "Error in reading link service irq vector\n"); + return; + } + + if (drm_dp_dpcd_writeb(&intel_dp->aux, + DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1) { + drm_dbg_kms(&i915->drm, "Error in writing link service irq vector\n"); + return; + } + + if (val & HDMI_LINK_STATUS_CHANGED) + intel_dp_handle_hdmi_link_status_change(intel_dp); +} + /* * According to DP spec * 5.1.2: @@ -6260,7 +6742,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp) return false; } - intel_dp_check_service_irq(intel_dp); + intel_dp_check_device_service_irq(intel_dp); + intel_dp_check_link_service_irq(intel_dp); /* Handle CEC interrupts, if any */ drm_dp_cec_irq(&intel_dp->aux); @@ -6480,13 +6963,20 @@ intel_dp_update_dfp(struct intel_dp *intel_dp, intel_dp->downstream_ports, 
edid); + intel_dp->dfp.pcon_max_frl_bw = + drm_dp_get_pcon_max_frl_bw(intel_dp->dpcd, + intel_dp->downstream_ports); + drm_dbg_kms(&i915->drm, - "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n", + "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d, PCON Max FRL BW %dGbps\n", connector->base.base.id, connector->base.name, intel_dp->dfp.max_bpc, intel_dp->dfp.max_dotclock, intel_dp->dfp.min_tmds_clock, - intel_dp->dfp.max_tmds_clock); + intel_dp->dfp.max_tmds_clock, + intel_dp->dfp.pcon_max_frl_bw); + + intel_dp_get_pcon_dsc_cap(intel_dp); } static void @@ -6494,7 +6984,7 @@ intel_dp_update_420(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; - bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420; + bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420, rgb_to_ycbcr; /* No YCbCr output support on gmch platforms */ if (HAS_GMCH(i915)) @@ -6516,14 +7006,26 @@ intel_dp_update_420(struct intel_dp *intel_dp) dp_to_dig_port(intel_dp)->lspcon.active || drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd, intel_dp->downstream_ports); + rgb_to_ycbcr = drm_dp_downstream_rgb_to_ycbcr_conversion(intel_dp->dpcd, + intel_dp->downstream_ports, + DP_DS_HDMI_BT601_RGB_YCBCR_CONV | + DP_DS_HDMI_BT709_RGB_YCBCR_CONV | + DP_DS_HDMI_BT2020_RGB_YCBCR_CONV); if (INTEL_GEN(i915) >= 11) { + /* Let PCON convert from RGB->YCbCr if possible */ + if (is_branch && rgb_to_ycbcr && ycbcr_444_to_420) { + intel_dp->dfp.rgb_to_ycbcr = true; + intel_dp->dfp.ycbcr_444_to_420 = true; + connector->base.ycbcr_420_allowed = true; + } else { /* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */ - intel_dp->dfp.ycbcr_444_to_420 = - ycbcr_444_to_420 && !ycbcr_420_passthrough; + intel_dp->dfp.ycbcr_444_to_420 = + ycbcr_444_to_420 && !ycbcr_420_passthrough; - connector->base.ycbcr_420_allowed = - !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough; + connector->base.ycbcr_420_allowed = + !is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough; + } } else { /* 4:4:4->4:2:0 conversion is the only way */ intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420; @@ -6532,8 +7034,9 @@ intel_dp_update_420(struct intel_dp *intel_dp) } drm_dbg_kms(&i915->drm, - "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n", + "[CONNECTOR:%d:%s] RGB->YCbCr conversion? %s, YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? 
%s\n", connector->base.base.id, connector->base.name, + yesno(intel_dp->dfp.rgb_to_ycbcr), yesno(connector->base.ycbcr_420_allowed), yesno(intel_dp->dfp.ycbcr_444_to_420)); } @@ -6578,6 +7081,8 @@ intel_dp_unset_edid(struct intel_dp *intel_dp) intel_dp->dfp.min_tmds_clock = 0; intel_dp->dfp.max_tmds_clock = 0; + intel_dp->dfp.pcon_max_frl_bw = 0; + intel_dp->dfp.ycbcr_444_to_420 = false; connector->base.ycbcr_420_allowed = false; } @@ -6683,7 +7188,7 @@ intel_dp_detect(struct drm_connector *connector, to_intel_connector(connector)->detect_edid) status = connector_status_connected; - intel_dp_check_service_irq(intel_dp); + intel_dp_check_device_service_irq(intel_dp); out: if (status != connector_status_connected && !intel_dp->is_mst) @@ -6774,6 +7279,8 @@ intel_dp_connector_register(struct drm_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->dev); struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector)); + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct intel_lspcon *lspcon = &dig_port->lspcon; int ret; ret = intel_connector_register(connector); @@ -6787,6 +7294,22 @@ intel_dp_connector_register(struct drm_connector *connector) ret = drm_dp_aux_register(&intel_dp->aux); if (!ret) drm_dp_cec_register_connector(&intel_dp->aux, connector); + + if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) + return ret; + + /* + * ToDo: Clean this up to handle lspcon init and resume more + * efficiently and streamlined. + */ + if (lspcon_init(dig_port)) { + lspcon_detect_hdr_capability(lspcon); + if (lspcon->hdr_supported) + drm_object_attach_property(&connector->base, + connector->dev->mode_config.hdr_output_metadata_property, + 0); + } + return ret; } @@ -6876,7 +7399,9 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp) */ drm_dbg_kms(&dev_priv->drm, "VDD left on by BIOS, adjusting state tracking\n"); - intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port)); + drm_WARN_ON(&dev_priv->drm, intel_dp->vdd_wakeref); + intel_dp->vdd_wakeref = intel_display_power_get(dev_priv, + intel_aux_power_domain(dig_port)); edp_panel_vdd_schedule_off(intel_dp); } @@ -7175,7 +7700,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect else if (INTEL_GEN(dev_priv) >= 5) drm_connector_attach_max_bpc_property(connector, 6, 12); - intel_attach_colorspace_property(connector); + /* Register HDMI colorspace for case of lspcon */ + if (intel_bios_is_lspcon_present(dev_priv, port)) { + drm_connector_attach_content_type_property(connector); + intel_attach_hdmi_colorspace_property(connector); + } else { + intel_attach_dp_colorspace_property(connector); + } if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11) drm_object_attach_property(&connector->base, @@ -8146,6 +8677,9 @@ intel_dp_init_connector(struct intel_digital_port *dig_port, (temp & ~0xf) | 0xd); } + intel_dp->frl.is_trained = false; + intel_dp->frl.trained_rate_gbps = 0; + return true; fail: diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index b871a09b6901..4280a09fd8fd 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -51,7 +51,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx); void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode); -void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp); 
+void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, bool enable); @@ -144,4 +145,8 @@ bool intel_dp_initial_fastset_check(struct intel_encoder *encoder, void intel_dp_sync_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state); +void intel_dp_check_frl_training(struct intel_dp *intel_dp); +void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); + #endif /* __INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 51d27fc98d48..9775f33d1aac 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -25,7 +25,60 @@ #include "intel_display_types.h" #include "intel_dp_aux_backlight.h" -static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable) +/* + * DP AUX registers for Intel's proprietary HDR backlight interface. We define + * them here since we'll likely be the only driver to ever use these. + */ +#define INTEL_EDP_HDR_TCON_CAP0 0x340 + +#define INTEL_EDP_HDR_TCON_CAP1 0x341 +# define INTEL_EDP_HDR_TCON_2084_DECODE_CAP BIT(0) +# define INTEL_EDP_HDR_TCON_2020_GAMUT_CAP BIT(1) +# define INTEL_EDP_HDR_TCON_TONE_MAPPING_CAP BIT(2) +# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_CAP BIT(3) +# define INTEL_EDP_HDR_TCON_BRIGHTNESS_NITS_CAP BIT(4) +# define INTEL_EDP_HDR_TCON_OPTIMIZATION_CAP BIT(5) +# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_CAP BIT(6) +# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_CONVERSION_CAP BIT(7) + +#define INTEL_EDP_HDR_TCON_CAP2 0x342 +# define INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP BIT(0) + +#define INTEL_EDP_HDR_TCON_CAP3 0x343 + +#define INTEL_EDP_HDR_GETSET_CTRL_PARAMS 0x344 +# define INTEL_EDP_HDR_TCON_2084_DECODE_ENABLE BIT(0) +# define INTEL_EDP_HDR_TCON_2020_GAMUT_ENABLE BIT(1) +# define INTEL_EDP_HDR_TCON_TONE_MAPPING_ENABLE BIT(2) /* Pre-TGL+ */ +# define INTEL_EDP_HDR_TCON_SEGMENTED_BACKLIGHT_ENABLE BIT(3) +# define INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE BIT(4) +# define INTEL_EDP_HDR_TCON_SRGB_TO_PANEL_GAMUT_ENABLE BIT(5) +/* Bit 6 is reserved */ +# define INTEL_EDP_HDR_TCON_SDP_COLORIMETRY_ENABLE BIT(7) + +#define INTEL_EDP_HDR_CONTENT_LUMINANCE 0x346 /* Pre-TGL+ */ +#define INTEL_EDP_HDR_PANEL_LUMINANCE_OVERRIDE 0x34A +#define INTEL_EDP_SDR_LUMINANCE_LEVEL 0x352 +#define INTEL_EDP_BRIGHTNESS_NITS_LSB 0x354 +#define INTEL_EDP_BRIGHTNESS_NITS_MSB 0x355 +#define INTEL_EDP_BRIGHTNESS_DELAY_FRAMES 0x356 +#define INTEL_EDP_BRIGHTNESS_PER_FRAME_STEPS 0x357 + +#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_0 0x358 +# define INTEL_EDP_TCON_USAGE_MASK GENMASK(3, 0) +# define INTEL_EDP_TCON_USAGE_UNKNOWN 0x0 +# define INTEL_EDP_TCON_USAGE_DESKTOP 0x1 +# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_MEDIA 0x2 +# define INTEL_EDP_TCON_USAGE_FULL_SCREEN_GAMING 0x3 +# define INTEL_EDP_TCON_POWER_MASK BIT(4) +# define INTEL_EDP_TCON_POWER_DC (0 << 4) +# define INTEL_EDP_TCON_POWER_AC (1 << 4) +# define INTEL_EDP_TCON_OPTIMIZATION_STRENGTH_MASK GENMASK(7, 5) + +#define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1 0x359 + +/* VESA backlight callbacks */ +static void set_vesa_backlight_enable(struct intel_dp *intel_dp, bool enable) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 reg_val = 0; @@ -52,7 +105,7 @@ static void set_aux_backlight_enable(struct 
intel_dp *intel_dp, bool enable) } } -static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector) +static bool intel_dp_aux_vesa_backlight_dpcd_mode(struct intel_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_i915_private *i915 = dp_to_i915(intel_dp); @@ -75,7 +128,7 @@ static bool intel_dp_aux_backlight_dpcd_mode(struct intel_connector *connector) * Read the current backlight value from DPCD register(s) based * on if 8-bit(MSB) or 16-bit(MSB and LSB) values are supported */ -static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) +static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_i915_private *i915 = dp_to_i915(intel_dp); @@ -86,7 +139,7 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) * If we're not in DPCD control mode yet, the programmed brightness * value is meaningless and we should assume max brightness */ - if (!intel_dp_aux_backlight_dpcd_mode(connector)) + if (!intel_dp_aux_vesa_backlight_dpcd_mode(connector)) return connector->panel.backlight.max; if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, @@ -107,7 +160,8 @@ static u32 intel_dp_aux_get_backlight(struct intel_connector *connector) * 8-bit or 16 bit value (MSB and LSB) */ static void -intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level) +intel_dp_aux_vesa_set_backlight(const struct drm_connector_state *conn_state, + u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_dp *intel_dp = intel_attached_dp(connector); @@ -137,7 +191,7 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev * - Where P = 2^Pn, where Pn is the value programmed by field 4:0 of the * EDP_PWMGEN_BIT_COUNT register (DPCD Address 00724h) */ -static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector) +static bool intel_dp_aux_vesa_set_pwm_freq(struct intel_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_dp *intel_dp = intel_attached_dp(connector); @@ -173,8 +227,9 @@ static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector) return true; } -static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +static void +intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_dp *intel_dp = intel_attached_dp(connector); @@ -214,7 +269,7 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st } if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_FREQ_AUX_SET_CAP) - if (intel_dp_aux_set_pwm_freq(connector)) + if (intel_dp_aux_vesa_set_pwm_freq(connector)) new_dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; if (new_dpcd_buf != dpcd_buf) { @@ -225,18 +280,18 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st } } - intel_dp_aux_set_backlight(conn_state, - connector->panel.backlight.level); - set_aux_backlight_enable(intel_dp, true); + intel_dp_aux_vesa_set_backlight(conn_state, level); + set_vesa_backlight_enable(intel_dp, true); } -static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state) +static void 
intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state *old_conn_state, + u32 level) { - set_aux_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)), - false); + set_vesa_backlight_enable(enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder)), + false); } -static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector) +static u32 intel_dp_aux_vesa_calc_max_backlight(struct intel_connector *connector) { struct drm_i915_private *i915 = to_i915(connector->base.dev); struct intel_dp *intel_dp = intel_attached_dp(connector); @@ -316,25 +371,25 @@ static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector) return max_backlight; } -static int intel_dp_aux_setup_backlight(struct intel_connector *connector, - enum pipe pipe) +static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, + enum pipe pipe) { struct intel_panel *panel = &connector->panel; - panel->backlight.max = intel_dp_aux_calc_max_backlight(connector); + panel->backlight.max = intel_dp_aux_vesa_calc_max_backlight(connector); if (!panel->backlight.max) return -ENODEV; panel->backlight.min = 0; - panel->backlight.level = intel_dp_aux_get_backlight(connector); - panel->backlight.enabled = intel_dp_aux_backlight_dpcd_mode(connector) && + panel->backlight.level = intel_dp_aux_vesa_get_backlight(connector); + panel->backlight.enabled = intel_dp_aux_vesa_backlight_dpcd_mode(connector) && panel->backlight.level != 0; return 0; } static bool -intel_dp_aux_display_control_capable(struct intel_connector *connector) +intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); struct drm_i915_private *i915 = dp_to_i915(intel_dp); @@ -350,6 +405,14 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector) return false; } +static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = { + .setup = intel_dp_aux_vesa_setup_backlight, + .enable = intel_dp_aux_vesa_enable_backlight, + .disable = intel_dp_aux_vesa_disable_backlight, + .set = intel_dp_aux_vesa_set_backlight, + .get = intel_dp_aux_vesa_get_backlight, +}; + int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) { struct intel_panel *panel = &intel_connector->panel; @@ -357,7 +420,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) struct drm_i915_private *i915 = dp_to_i915(intel_dp); if (i915->params.enable_dpcd_backlight == 0 || - !intel_dp_aux_display_control_capable(intel_connector)) + !intel_dp_aux_supports_vesa_backlight(intel_connector)) return -ENODEV; /* @@ -379,11 +442,7 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector) return -ENODEV; } - panel->backlight.setup = intel_dp_aux_setup_backlight; - panel->backlight.enable = intel_dp_aux_enable_backlight; - panel->backlight.disable = intel_dp_aux_disable_backlight; - panel->backlight.set = intel_dp_aux_set_backlight; - panel->backlight.get = intel_dp_aux_get_backlight; + panel->backlight.funcs = &intel_dp_vesa_bl_funcs; return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 0c8684634fca..27f04aed8764 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -23,6 +23,7 @@ * */ +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_edid.h> #include <drm/drm_probe_helper.h> @@ -719,11 +720,13 @@ 
intel_dp_mst_mode_valid_ctx(struct drm_connector *connector, } static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector, - struct drm_connector_state *state) + struct drm_atomic_state *state) { + struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, + connector); struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; - struct intel_crtc *crtc = to_intel_crtc(state->crtc); + struct intel_crtc *crtc = to_intel_crtc(connector_state->crtc); return &intel_dp->mst_encoders[crtc->pipe]->base.base; } diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c index b53c50372918..88628764956d 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c @@ -77,7 +77,7 @@ static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 } } -static void dcs_disable_backlight(const struct drm_connector_state *conn_state) +static void dcs_disable_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); struct mipi_dsi_device *dsi_device; @@ -111,10 +111,9 @@ static void dcs_disable_backlight(const struct drm_connector_state *conn_state) } static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) + const struct drm_connector_state *conn_state, u32 level) { struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder)); - struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; struct mipi_dsi_device *dsi_device; enum port port; @@ -142,7 +141,7 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state, &cabc, sizeof(cabc)); } - dcs_set_backlight(conn_state, panel->backlight.level); + dcs_set_backlight(conn_state, level); } static int dcs_setup_backlight(struct intel_connector *connector, @@ -156,6 +155,14 @@ static int dcs_setup_backlight(struct intel_connector *connector, return 0; } +static const struct intel_panel_bl_funcs dcs_bl_funcs = { + .setup = dcs_setup_backlight, + .enable = dcs_enable_backlight, + .disable = dcs_disable_backlight, + .set = dcs_set_backlight, + .get = dcs_get_backlight, +}; + int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector) { struct drm_device *dev = intel_connector->base.dev; @@ -169,11 +176,7 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector) if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI)) return -EINVAL; - panel->backlight.setup = dcs_setup_backlight; - panel->backlight.enable = dcs_enable_backlight; - panel->backlight.disable = dcs_disable_backlight; - panel->backlight.set = dcs_set_backlight; - panel->backlight.get = dcs_get_backlight; + panel->backlight.funcs = &dcs_bl_funcs; return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 237dbb1ba0ee..090cd76266c6 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -301,12 +301,8 @@ static void intel_dvo_pre_enable(struct intel_atomic_state *state, if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) dvo_val |= DVO_VSYNC_ACTIVE_HIGH; - /*I915_WRITE(DVOB_SRCDIM, - (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | - 
(adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ intel_de_write(dev_priv, dvo_srcdim_reg, (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT)); - /*I915_WRITE(DVOB, dvo_val);*/ intel_de_write(dev_priv, dvo_reg, dvo_val); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index a5b072816a7b..33200b5cfad0 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -742,6 +742,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc, cache->fence_id = plane_state->vma->fence->id; else cache->fence_id = -1; + + cache->psr2_active = crtc_state->has_psr2; } static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv) @@ -914,6 +916,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc) return false; } + /* + * Tigerlake is not supporting FBC with PSR2. + * Recommendation is to keep this combination disabled + * Bspec: 50422 HSD: 14010260002 + */ + if (fbc->state_cache.psr2_active && IS_TIGERLAKE(dev_priv)) { + fbc->no_fbc_reason = "not supported with PSR2"; + return false; + } + return true; } @@ -1433,13 +1445,6 @@ static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv) if (!HAS_FBC(dev_priv)) return 0; - /* - * Fbc is causing random underruns in CI execution on TGL platforms. - * Disabling the same while the problem is being debugged and analyzed. - */ - if (IS_TIGERLAKE(dev_priv)) - return 0; - if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) return 1; diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 82674a8853c6..c5959590562b 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -518,10 +518,10 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder, VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP); } -static void hsw_write_infoframe(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - unsigned int type, - const void *frame, ssize_t len) +void hsw_write_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + const void *frame, ssize_t len) { const u32 *data = frame; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -555,10 +555,9 @@ static void hsw_write_infoframe(struct intel_encoder *encoder, intel_de_posting_read(dev_priv, ctl_reg); } -static void hsw_read_infoframe(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - unsigned int type, - void *frame, ssize_t len) +void hsw_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, void *frame, ssize_t len) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; @@ -2950,21 +2949,12 @@ static void intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) { struct drm_i915_private *dev_priv = to_i915(connector->dev); - struct intel_digital_port *dig_port = - hdmi_to_dig_port(intel_hdmi); intel_attach_force_audio_property(connector); intel_attach_broadcast_rgb_property(connector); intel_attach_aspect_ratio_property(connector); - /* - * Attach Colorspace property for Non LSPCON based device - * ToDo: This needs to be extended for LSPCON implementation - * as well. Will be implemented separately. 
- */ - if (!dig_port->lspcon.active) - intel_attach_colorspace_property(connector); - + intel_attach_hdmi_colorspace_property(connector); drm_connector_attach_content_type_property(connector); if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) @@ -3438,3 +3428,236 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port); intel_hdmi_init_connector(dig_port, intel_connector); } + +/* + * intel_hdmi_dsc_get_slice_height - get the dsc slice_height + * @vactive: Vactive of a display mode + * + * @return: appropriate dsc slice height for a given mode. + */ +int intel_hdmi_dsc_get_slice_height(int vactive) +{ + int slice_height; + + /* + * Slice Height determination: HDMI2.1 Section 7.7.5.2 + * Select the smallest slice height >= 96 that results in a valid PPS + * and requires the minimum number of padding lines for the final + * slice. + * + * Assumption: Vactive is even. + */ + for (slice_height = 96; slice_height <= vactive; slice_height += 2) + if (vactive % slice_height == 0) + return slice_height; + + return 0; +} + +/* + * intel_hdmi_dsc_get_num_slices - get no. of dsc slices based on dsc encoder + * and dsc decoder capabilities + * + * @crtc_state: intel crtc_state + * @src_max_slices: maximum slices supported by the DSC encoder + * @src_max_slice_width: maximum slice width supported by DSC encoder + * @hdmi_max_slices: maximum slices supported by sink DSC decoder + * @hdmi_throughput: maximum clock per slice (MHz) supported by HDMI sink + * + * @return: num of dsc slices that can be supported by the dsc encoder + * and decoder. + */ +int +intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state, + int src_max_slices, int src_max_slice_width, + int hdmi_max_slices, int hdmi_throughput) +{ +/* Pixel rates in KPixels/sec */ +#define HDMI_DSC_PEAK_PIXEL_RATE 2720000 +/* + * The rate at which the source and sink are required to process pixels in + * each slice can be at one of two levels: either at least 340000 kHz or at + * least 400000 kHz. + */ +#define HDMI_DSC_MAX_ENC_THROUGHPUT_0 340000 +#define HDMI_DSC_MAX_ENC_THROUGHPUT_1 400000 + +/* Spec limits the slice width to 2720 pixels */ +#define MAX_HDMI_SLICE_WIDTH 2720 + int kslice_adjust; + int adjusted_clk_khz; + int min_slices; + int target_slices; + int max_throughput; /* max clock freq. in khz per slice */ + int max_slice_width; + int slice_width; + int pixel_clock = crtc_state->hw.adjusted_mode.crtc_clock; + + if (!hdmi_throughput) + return 0; + + /* + * Slice Width determination: HDMI2.1 Section 7.7.5.1 + * The kslice_adjust factor for 4:2:0 and 4:2:2 formats is 0.5, whereas + * for 4:4:4 it is 1.0. Multiplying these factors by 10 and later + * dividing adjusted clock value by 10. + */ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 || + crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) + kslice_adjust = 10; + else + kslice_adjust = 5; + + /* + * As per the spec, the rate at which the source and the sink process + * the pixels per slice is at one of two levels: at least 340 MHz or + * 400 MHz. This depends upon the pixel clock rate and output formats + * (kslice adjust). + * If pixel clock * kslice adjust is at most 2720 MHz, slices can be + * processed at up to 340 MHz; otherwise they can be processed at up + * to 400 MHz. 
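+ * + * Illustrative example (numbers assumed for this note, not taken from the + * patch): a 4:2:0 mode with a 1188000 kHz pixel clock gets kslice_adjust = 5, + * so adjusted_clk_khz = DIV_ROUND_UP(5 * 1188000, 10) = 594000 kHz; that is + * below the 2720000 kHz peak rate, so max_throughput starts at 340000 kHz + * and, assuming the sink's HF-VSDB throughput does not clamp it further, + * min_slices = DIV_ROUND_UP(594000, 340000) = 2.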
+ */ + + adjusted_clk_khz = DIV_ROUND_UP(kslice_adjust * pixel_clock, 10); + + if (adjusted_clk_khz <= HDMI_DSC_PEAK_PIXEL_RATE) + max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_0; + else + max_throughput = HDMI_DSC_MAX_ENC_THROUGHPUT_1; + + /* + * Taking into account the sink's capability for maximum + * clock per slice (in MHz) as read from HF-VSDB. + */ + max_throughput = min(max_throughput, hdmi_throughput * 1000); + + min_slices = DIV_ROUND_UP(adjusted_clk_khz, max_throughput); + max_slice_width = min(MAX_HDMI_SLICE_WIDTH, src_max_slice_width); + + /* + * Keep on increasing the num of slices/line, starting from min_slices + * per line till we get such a number, for which the slice_width is + * just less than max_slice_width. The slices/line selected should be + * less than or equal to the max horizontal slices that the combination + * of PCON encoder and HDMI decoder can support. + */ + slice_width = max_slice_width; + + do { + if (min_slices <= 1 && src_max_slices >= 1 && hdmi_max_slices >= 1) + target_slices = 1; + else if (min_slices <= 2 && src_max_slices >= 2 && hdmi_max_slices >= 2) + target_slices = 2; + else if (min_slices <= 4 && src_max_slices >= 4 && hdmi_max_slices >= 4) + target_slices = 4; + else if (min_slices <= 8 && src_max_slices >= 8 && hdmi_max_slices >= 8) + target_slices = 8; + else if (min_slices <= 12 && src_max_slices >= 12 && hdmi_max_slices >= 12) + target_slices = 12; + else if (min_slices <= 16 && src_max_slices >= 16 && hdmi_max_slices >= 16) + target_slices = 16; + else + return 0; + + slice_width = DIV_ROUND_UP(crtc_state->hw.adjusted_mode.hdisplay, target_slices); + if (slice_width >= max_slice_width) + min_slices = target_slices + 1; + } while (slice_width >= max_slice_width); + + return target_slices; +} + +/* + * intel_hdmi_dsc_get_bpp - get the appropriate compressed bits_per_pixel based on + * source and sink capabilities. + * + * @src_fraction_bpp: fractional bpp supported by the source + * @slice_width: dsc slice width supported by the source and sink + * @num_slices: num of slices supported by the source and sink + * @output_format: video output format + * @hdmi_all_bpp: sink supports decoding of 1/16th bpp setting + * @hdmi_max_chunk_bytes: max bytes in a line of chunks supported by sink + * + * @return: compressed bits_per_pixel in step of 1/16 of bits_per_pixel + */ +int +intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width, int num_slices, + int output_format, bool hdmi_all_bpp, + int hdmi_max_chunk_bytes) +{ + int max_dsc_bpp, min_dsc_bpp; + int target_bytes; + bool bpp_found = false; + int bpp_decrement_x16; + int bpp_target; + int bpp_target_x16; + + /* + * Get min bpp and max bpp as per Table 7.23, in HDMI2.1 spec + * Start with the max bpp and keep on decrementing with + * fractional bpp, if supported by PCON DSC encoder + * + * for each bpp we check if no of bytes can be supported by HDMI sink + */ + + /* Assuming: bpc as 8*/ + if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { + min_dsc_bpp = 6; + max_dsc_bpp = 3 * 4; /* 3*bpc/2 */ + } else if (output_format == INTEL_OUTPUT_FORMAT_YCBCR444 || + output_format == INTEL_OUTPUT_FORMAT_RGB) { + min_dsc_bpp = 8; + max_dsc_bpp = 3 * 8; /* 3*bpc */ + } else { + /* Assuming 4:2:2 encoding */ + min_dsc_bpp = 7; + max_dsc_bpp = 2 * 8; /* 2*bpc */ + } + + /* + * Taking into account if all dsc_all_bpp supported by HDMI2.1 sink + * Section 7.7.34 : Source shall not enable compressed Video + * Transport with bpp_target settings above 12 bpp unless + * DSC_all_bpp is set to 1. 
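+ * + * Worked example (capabilities assumed for this note): with 8 bpc RGB + * output, max_dsc_bpp = 24, clamped to 12 here when DSC_all_bpp is not set. + * A source with 1/2 fractional bpp support then steps bpp_target_x16 down in + * units of DIV_ROUND_UP(16, 2) = 8, i.e. 184, 176, 168, ..., until the + * per-line chunk bytes fit within hdmi_max_chunk_bytes or min_dsc_bpp is + * reached, as described below.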
+ */ + if (!hdmi_all_bpp) + max_dsc_bpp = min(max_dsc_bpp, 12); + + /* + * The Sink has a limit of compressed data in bytes for a scanline, + * as described in max_chunk_bytes field in HFVSDB block of edid. + * The no. of bytes depend on the target bits per pixel that the + * source configures. So we start with the max_bpp and calculate + * the target_chunk_bytes. We keep on decrementing the target_bpp, + * till we get the target_chunk_bytes just less than what the sink's + * max_chunk_bytes, or else till we reach the min_dsc_bpp. + * + * The decrement is according to the fractional support from PCON DSC + * encoder. For fractional BPP we use bpp_target as a multiple of 16. + * + * bpp_target_x16 = bpp_target * 16 + * So we need to decrement by {1, 2, 4, 8, 16} for fractional bpps + * {1/16, 1/8, 1/4, 1/2, 1} respectively. + */ + + bpp_target = max_dsc_bpp; + + /* src does not support fractional bpp implies decrement by 16 for bppx16 */ + if (!src_fractional_bpp) + src_fractional_bpp = 1; + bpp_decrement_x16 = DIV_ROUND_UP(16, src_fractional_bpp); + bpp_target_x16 = (bpp_target * 16) - bpp_decrement_x16; + + while (bpp_target_x16 > (min_dsc_bpp * 16)) { + int bpp; + + bpp = DIV_ROUND_UP(bpp_target_x16, 16); + target_bytes = DIV_ROUND_UP((num_slices * slice_width * bpp), 8); + if (target_bytes <= hdmi_max_chunk_bytes) { + bpp_found = true; + break; + } + bpp_target_x16 -= bpp_decrement_x16; + } + if (bpp_found) + return bpp_target_x16; + + return 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 15eb0ccde76e..fa1a9b030850 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -50,5 +50,12 @@ bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc, bool has_hdmi_sink, bool ycbcr420_output); +int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width, + int num_slices, int output_format, bool hdmi_all_bpp, + int hdmi_max_chunk_bytes); +int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state, + int src_max_slices, int src_max_slice_width, + int hdmi_max_slices, int hdmi_throughput); +int intel_hdmi_dsc_get_slice_height(int vactive); #endif /* __INTEL_HDMI_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index ad5cc13037ae..1c939f9c9bc9 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -297,13 +297,9 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv) */ void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv) { - struct irq_desc *desc; - if (!HAS_LPE_AUDIO(dev_priv)) return; - desc = irq_to_desc(dev_priv->lpe_audio.irq); - lpe_audio_platdev_destroy(dev_priv); irq_free_desc(dev_priv->lpe_audio.irq); diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index e37d45e531df..e4ff533e3a69 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -30,11 +30,15 @@ #include "intel_display_types.h" #include "intel_dp.h" #include "intel_lspcon.h" +#include "intel_hdmi.h" /* LSPCON OUI Vendor ID(signatures) */ #define LSPCON_VENDOR_PARADE_OUI 0x001CF8 #define LSPCON_VENDOR_MCA_OUI 0x0060AD +#define DPCD_MCA_LSPCON_HDR_STATUS 0x70003 +#define 
DPCD_PARADE_LSPCON_HDR_STATUS 0x00511 + /* AUX addresses to write MCA AVI IF */ #define LSPCON_MCA_AVI_IF_WRITE_OFFSET 0x5C0 #define LSPCON_MCA_AVI_IF_CTRL 0x5DF @@ -104,6 +108,35 @@ static bool lspcon_detect_vendor(struct intel_lspcon *lspcon) return true; } +static u32 get_hdr_status_reg(struct intel_lspcon *lspcon) +{ + if (lspcon->vendor == LSPCON_VENDOR_MCA) + return DPCD_MCA_LSPCON_HDR_STATUS; + else + return DPCD_PARADE_LSPCON_HDR_STATUS; +} + +void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon) +{ + struct intel_digital_port *dig_port = + container_of(lspcon, struct intel_digital_port, lspcon); + struct drm_device *dev = dig_port->base.base.dev; + struct intel_dp *dp = lspcon_to_intel_dp(lspcon); + u8 hdr_caps; + int ret; + + ret = drm_dp_dpcd_read(&dp->aux, get_hdr_status_reg(lspcon), + &hdr_caps, 1); + + if (ret < 0) { + drm_dbg_kms(dev, "HDR capability detection failed\n"); + lspcon->hdr_supported = false; + } else if (hdr_caps & 0x1) { + drm_dbg_kms(dev, "LSPCON capable of HDR\n"); + lspcon->hdr_supported = true; + } +} + static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) { enum drm_lspcon_mode current_mode; @@ -418,27 +451,32 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, unsigned int type, const void *frame, ssize_t len) { - bool ret; + bool ret = true; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); - /* LSPCON only needs AVI IF */ - if (type != HDMI_INFOFRAME_TYPE_AVI) + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + if (lspcon->vendor == LSPCON_VENDOR_MCA) + ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux, + frame, len); + else + ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux, + frame, len); + break; + case HDMI_PACKET_TYPE_GAMUT_METADATA: + drm_dbg_kms(encoder->base.dev, "Update HDR metadata for lspcon\n"); + /* It uses the legacy hsw implementation for the same */ + hsw_write_infoframe(encoder, crtc_state, type, frame, len); + break; + default: return; - - if (lspcon->vendor == LSPCON_VENDOR_MCA) - ret = _lspcon_write_avi_infoframe_mca(&intel_dp->aux, - frame, len); - else - ret = _lspcon_write_avi_infoframe_parade(&intel_dp->aux, - frame, len); + } if (!ret) { - DRM_ERROR("Failed to write AVI infoframes\n"); + DRM_ERROR("Failed to write infoframes\n"); return; } - - DRM_DEBUG_DRIVER("AVI infoframes updated successfully\n"); } void lspcon_read_infoframe(struct intel_encoder *encoder, @@ -446,7 +484,10 @@ void lspcon_read_infoframe(struct intel_encoder *encoder, unsigned int type, void *frame, ssize_t len) { - /* FIXME implement this */ + /* FIXME implement for AVI Infoframe as well */ + if (type == HDMI_PACKET_TYPE_GAMUT_METADATA) + hsw_read_infoframe(encoder, crtc_state, type, + frame, len); } void lspcon_set_infoframes(struct intel_encoder *encoder, @@ -491,12 +532,26 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, else frame.avi.colorspace = HDMI_COLORSPACE_RGB; - drm_hdmi_avi_infoframe_quant_range(&frame.avi, - conn_state->connector, - adjusted_mode, - crtc_state->limited_color_range ? 
- HDMI_QUANTIZATION_RANGE_LIMITED : - HDMI_QUANTIZATION_RANGE_FULL); + /* Set the Colorspace as per the HDMI spec */ + drm_hdmi_avi_infoframe_colorspace(&frame.avi, conn_state); + + /* nonsense combination */ + drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); + + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) { + drm_hdmi_avi_infoframe_quant_range(&frame.avi, + conn_state->connector, + adjusted_mode, + crtc_state->limited_color_range ? + HDMI_QUANTIZATION_RANGE_LIMITED : + HDMI_QUANTIZATION_RANGE_FULL); + } else { + frame.avi.quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; + frame.avi.ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED; + } + + drm_hdmi_avi_infoframe_content_type(&frame.avi, conn_state); ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf)); if (ret < 0) { @@ -508,11 +563,64 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, buf, ret); } +static bool _lspcon_read_avi_infoframe_enabled_mca(struct drm_dp_aux *aux) +{ + int ret; + u32 val = 0; + u16 reg = LSPCON_MCA_AVI_IF_CTRL; + + ret = drm_dp_dpcd_read(aux, reg, &val, 1); + if (ret < 0) { + DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + return false; + } + + return val & LSPCON_MCA_AVI_IF_KICKOFF; +} + +static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux) +{ + int ret; + u32 val = 0; + u16 reg = LSPCON_PARADE_AVI_IF_CTRL; + + ret = drm_dp_dpcd_read(aux, reg, &val, 1); + if (ret < 0) { + DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + return false; + } + + return val & LSPCON_PARADE_AVI_IF_KICKOFF; +} + u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config) { - /* FIXME actually read this from the hw */ - return 0; + struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + bool infoframes_enabled; + u32 val = 0; + u32 mask, tmp; + + if (lspcon->vendor == LSPCON_VENDOR_MCA) + infoframes_enabled = _lspcon_read_avi_infoframe_enabled_mca(&intel_dp->aux); + else + infoframes_enabled = _lspcon_read_avi_infoframe_enabled_parade(&intel_dp->aux); + + if (infoframes_enabled) + val |= intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI); + + if (lspcon->hdr_supported) { + tmp = intel_de_read(dev_priv, + HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder)); + mask = VIDEO_DIP_ENABLE_GMP_HSW; + + if (tmp & mask) + val |= intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA); + } + + return val; } void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon) @@ -520,7 +628,7 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon) lspcon_wait_mode(lspcon, DRM_LSPCON_MODE_PCON); } -static bool lspcon_init(struct intel_digital_port *dig_port) +bool lspcon_init(struct intel_digital_port *dig_port) { struct intel_dp *dp = &dig_port->dp; struct intel_lspcon *lspcon = &dig_port->lspcon; @@ -550,6 +658,14 @@ static bool lspcon_init(struct intel_digital_port *dig_port) return true; } +u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config) +{ + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + + return dig_port->infoframes_enabled(encoder, pipe_config); +} + void lspcon_resume(struct intel_digital_port *dig_port) { struct intel_lspcon *lspcon = &dig_port->lspcon; diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.h 
b/drivers/gpu/drm/i915/display/intel_lspcon.h index b03dcb7076d8..e19e10492b05 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.h +++ b/drivers/gpu/drm/i915/display/intel_lspcon.h @@ -15,6 +15,8 @@ struct intel_digital_port; struct intel_encoder; struct intel_lspcon; +bool lspcon_init(struct intel_digital_port *dig_port); +void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon); void lspcon_resume(struct intel_digital_port *dig_port); void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon); void lspcon_write_infoframe(struct intel_encoder *encoder, @@ -31,5 +33,15 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, const struct drm_connector_state *conn_state); u32 lspcon_infoframes_enabled(struct intel_encoder *encoder, const struct intel_crtc_state *pipe_config); +u32 intel_lspcon_infoframes_enabled(struct intel_encoder *encoder, + const struct intel_crtc_state *pipe_config); +void hsw_write_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + const void *frame, ssize_t len); +void hsw_read_infoframe(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + unsigned int type, + void *frame, ssize_t len); #endif /* __INTEL_LSPCON_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 52b4f6193b4c..6be5d8946c69 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -29,6 +29,7 @@ #include <drm/drm_fourcc.h> #include "gem/i915_gem_pm.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index 9f23bac0d792..7a4239d1c241 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -589,7 +589,7 @@ static u32 bxt_get_backlight(struct intel_connector *connector) BXT_BLC_PWM_DUTY(panel->backlight.controller)); } -static u32 pwm_get_backlight(struct intel_connector *connector) +static u32 ext_pwm_get_backlight(struct intel_connector *connector) { struct intel_panel *panel = &connector->panel; struct pwm_state state; @@ -666,7 +666,7 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 BXT_BLC_PWM_DUTY(panel->backlight.controller), level); } -static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) +static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level) { struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel; @@ -684,7 +684,7 @@ intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", level); level = intel_panel_compute_brightness(connector, level); - panel->backlight.set(conn_state, level); + panel->backlight.funcs->set(conn_state, level); } /* set backlight brightness to level in range [0..max], assuming hw min is @@ -726,13 +726,13 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state mutex_unlock(&dev_priv->backlight_lock); } -static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state) +static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); u32 tmp; - 
intel_panel_actually_set_backlight(old_conn_state, 0); + intel_panel_actually_set_backlight(old_conn_state, level); /* * Although we don't support or enable CPU PWM with LPT/SPT based @@ -754,13 +754,13 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); } -static void pch_disable_backlight(const struct drm_connector_state *old_conn_state) +static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); u32 tmp; - intel_panel_actually_set_backlight(old_conn_state, 0); + intel_panel_actually_set_backlight(old_conn_state, val); tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2); intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE); @@ -769,44 +769,44 @@ static void pch_disable_backlight(const struct drm_connector_state *old_conn_sta intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE); } -static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state) +static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { - intel_panel_actually_set_backlight(old_conn_state, 0); + intel_panel_actually_set_backlight(old_conn_state, val); } -static void i965_disable_backlight(const struct drm_connector_state *old_conn_state) +static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev); u32 tmp; - intel_panel_actually_set_backlight(old_conn_state, 0); + intel_panel_actually_set_backlight(old_conn_state, val); tmp = intel_de_read(dev_priv, BLC_PWM_CTL2); intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE); } -static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state) +static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe; u32 tmp; - intel_panel_actually_set_backlight(old_conn_state, 0); + intel_panel_actually_set_backlight(old_conn_state, val); tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe)); intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE); } -static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state) +static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; - u32 tmp, val; + u32 tmp; - intel_panel_actually_set_backlight(old_conn_state, 0); + intel_panel_actually_set_backlight(old_conn_state, val); tmp = intel_de_read(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller)); @@ -820,14 +820,14 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta } } -static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state) +static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct drm_i915_private *dev_priv = 
to_i915(connector->base.dev); struct intel_panel *panel = &connector->panel; u32 tmp; - intel_panel_actually_set_backlight(old_conn_state, 0); + intel_panel_actually_set_backlight(old_conn_state, val); tmp = intel_de_read(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller)); @@ -835,7 +835,7 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta tmp & ~BXT_BLC_PWM_ENABLE); } -static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state) +static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct intel_panel *panel = &connector->panel; @@ -870,13 +870,13 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st if (panel->backlight.device) panel->backlight.device->props.power = FB_BLANK_POWERDOWN; panel->backlight.enabled = false; - panel->backlight.disable(old_conn_state); + panel->backlight.funcs->disable(old_conn_state, 0); mutex_unlock(&dev_priv->backlight_lock); } static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) + const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -923,11 +923,11 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state, pch_ctl1 | BLM_PCH_PWM_ENABLE); /* This won't stick until the above enable. */ - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); + intel_panel_actually_set_backlight(conn_state, level); } static void pch_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) + const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -958,7 +958,7 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state, intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE); /* This won't stick until the above enable. */ - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); + intel_panel_actually_set_backlight(conn_state, level); pch_ctl2 = panel->backlight.max << 16; intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2); @@ -974,7 +974,7 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state, } static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) + const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -1001,7 +1001,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, intel_de_posting_read(dev_priv, BLC_PWM_CTL); /* XXX: combine this into above write? */ - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); + intel_panel_actually_set_backlight(conn_state, level); /* * Needed to enable backlight on some 855gm models. 
BLC_HIST_CTL is @@ -1013,7 +1013,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state, } static void i965_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) + const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -1044,11 +1044,11 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state, intel_de_posting_read(dev_priv, BLC_PWM_CTL2); intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE); - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); + intel_panel_actually_set_backlight(conn_state, level); } static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) + const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -1067,7 +1067,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state, intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl); /* XXX: combine this into above write? */ - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); + intel_panel_actually_set_backlight(conn_state, level); ctl2 = 0; if (panel->backlight.active_low_pwm) @@ -1079,7 +1079,7 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state, } static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) + const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -1118,7 +1118,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, BXT_BLC_PWM_FREQ(panel->backlight.controller), panel->backlight.max); - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); + intel_panel_actually_set_backlight(conn_state, level); pwm_ctl = 0; if (panel->backlight.active_low_pwm) @@ -1133,7 +1133,7 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state, } static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) + const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(connector->base.dev); @@ -1154,7 +1154,7 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state, BXT_BLC_PWM_FREQ(panel->backlight.controller), panel->backlight.max); - intel_panel_actually_set_backlight(conn_state, panel->backlight.level); + intel_panel_actually_set_backlight(conn_state, level); pwm_ctl = 0; if (panel->backlight.active_low_pwm) @@ -1168,12 +1168,11 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state, pwm_ctl | BXT_BLC_PWM_ENABLE); } -static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state, u32 level) { struct intel_connector *connector = to_intel_connector(conn_state->connector); struct intel_panel 
*panel = &connector->panel; - int level = panel->backlight.level; level = intel_panel_compute_brightness(connector, level); pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100); @@ -1198,7 +1197,7 @@ static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_s panel->backlight.device->props.max_brightness); } - panel->backlight.enable(crtc_state, conn_state); + panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level); panel->backlight.enabled = true; if (panel->backlight.device) panel->backlight.device->props.power = FB_BLANK_UNBLANK; @@ -1234,7 +1233,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector) mutex_lock(&dev_priv->backlight_lock); if (panel->backlight.enabled) { - val = panel->backlight.get(connector); + val = panel->backlight.funcs->get(connector); val = intel_panel_compute_brightness(connector, val); } @@ -1567,13 +1566,13 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector) u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv); u32 pwm; - if (!panel->backlight.hz_to_pwm) { + if (!panel->backlight.funcs->hz_to_pwm) { drm_dbg_kms(&dev_priv->drm, "backlight frequency conversion not supported\n"); return 0; } - pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz); + pwm = panel->backlight.funcs->hz_to_pwm(connector, pwm_freq_hz); if (!pwm) { drm_dbg_kms(&dev_priv->drm, "backlight frequency conversion failed\n"); @@ -1650,16 +1649,13 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus val = pch_get_backlight(connector); else val = lpt_get_backlight(connector); - val = intel_panel_compute_brightness(connector, val); - panel->backlight.level = clamp(val, panel->backlight.min, - panel->backlight.max); if (cpu_mode) { drm_dbg_kms(&dev_priv->drm, "CPU backlight register was enabled, switching to PCH override\n"); /* Write converted CPU PWM value to PCH override register */ - lpt_set_backlight(connector->base.state, panel->backlight.level); + lpt_set_backlight(connector->base.state, val); intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE); @@ -1667,6 +1663,10 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus cpu_ctl2 & ~BLM_PWM_ENABLE); } + val = intel_panel_compute_brightness(connector, val); + panel->backlight.level = clamp(val, panel->backlight.min, + panel->backlight.max); + return 0; } @@ -1890,8 +1890,8 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused) return 0; } -static int pwm_setup_backlight(struct intel_connector *connector, - enum pipe pipe) +static int ext_pwm_setup_backlight(struct intel_connector *connector, + enum pipe pipe) { struct drm_device *dev = connector->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -1981,12 +1981,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe) } /* ensure intel_panel has been initialized first */ - if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.setup)) + if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs)) return -ENODEV; /* set level and max in panel struct */ mutex_lock(&dev_priv->backlight_lock); - ret = panel->backlight.setup(intel_connector, pipe); + ret = panel->backlight.funcs->setup(intel_connector, pipe); mutex_unlock(&dev_priv->backlight_lock); if (ret) { @@ -2016,6 +2016,86 @@ static void intel_panel_destroy_backlight(struct intel_panel *panel) panel->backlight.present = false; } +static const struct intel_panel_bl_funcs bxt_funcs = { + .setup = 
bxt_setup_backlight, + .enable = bxt_enable_backlight, + .disable = bxt_disable_backlight, + .set = bxt_set_backlight, + .get = bxt_get_backlight, + .hz_to_pwm = bxt_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs cnp_funcs = { + .setup = cnp_setup_backlight, + .enable = cnp_enable_backlight, + .disable = cnp_disable_backlight, + .set = bxt_set_backlight, + .get = bxt_get_backlight, + .hz_to_pwm = cnp_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs lpt_funcs = { + .setup = lpt_setup_backlight, + .enable = lpt_enable_backlight, + .disable = lpt_disable_backlight, + .set = lpt_set_backlight, + .get = lpt_get_backlight, + .hz_to_pwm = lpt_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs spt_funcs = { + .setup = lpt_setup_backlight, + .enable = lpt_enable_backlight, + .disable = lpt_disable_backlight, + .set = lpt_set_backlight, + .get = lpt_get_backlight, + .hz_to_pwm = spt_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs pch_funcs = { + .setup = pch_setup_backlight, + .enable = pch_enable_backlight, + .disable = pch_disable_backlight, + .set = pch_set_backlight, + .get = pch_get_backlight, + .hz_to_pwm = pch_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs ext_pwm_funcs = { + .setup = ext_pwm_setup_backlight, + .enable = ext_pwm_enable_backlight, + .disable = ext_pwm_disable_backlight, + .set = ext_pwm_set_backlight, + .get = ext_pwm_get_backlight, +}; + +static const struct intel_panel_bl_funcs vlv_funcs = { + .setup = vlv_setup_backlight, + .enable = vlv_enable_backlight, + .disable = vlv_disable_backlight, + .set = vlv_set_backlight, + .get = vlv_get_backlight, + .hz_to_pwm = vlv_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs i965_funcs = { + .setup = i965_setup_backlight, + .enable = i965_enable_backlight, + .disable = i965_disable_backlight, + .set = i9xx_set_backlight, + .get = i9xx_get_backlight, + .hz_to_pwm = i965_hz_to_pwm, +}; + +static const struct intel_panel_bl_funcs i9xx_funcs = { + .setup = i9xx_setup_backlight, + .enable = i9xx_enable_backlight, + .disable = i9xx_disable_backlight, + .set = i9xx_set_backlight, + .get = i9xx_get_backlight, + .hz_to_pwm = i9xx_hz_to_pwm, +}; + /* Set up chip specific backlight functions */ static void intel_panel_init_backlight_funcs(struct intel_panel *panel) @@ -2033,65 +2113,26 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel) return; if (IS_GEN9_LP(dev_priv)) { - panel->backlight.setup = bxt_setup_backlight; - panel->backlight.enable = bxt_enable_backlight; - panel->backlight.disable = bxt_disable_backlight; - panel->backlight.set = bxt_set_backlight; - panel->backlight.get = bxt_get_backlight; - panel->backlight.hz_to_pwm = bxt_hz_to_pwm; + panel->backlight.funcs = &bxt_funcs; } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) { - panel->backlight.setup = cnp_setup_backlight; - panel->backlight.enable = cnp_enable_backlight; - panel->backlight.disable = cnp_disable_backlight; - panel->backlight.set = bxt_set_backlight; - panel->backlight.get = bxt_get_backlight; - panel->backlight.hz_to_pwm = cnp_hz_to_pwm; + panel->backlight.funcs = &cnp_funcs; } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) { - panel->backlight.setup = lpt_setup_backlight; - panel->backlight.enable = lpt_enable_backlight; - panel->backlight.disable = lpt_disable_backlight; - panel->backlight.set = lpt_set_backlight; - panel->backlight.get = lpt_get_backlight; if (HAS_PCH_LPT(dev_priv)) - panel->backlight.hz_to_pwm = lpt_hz_to_pwm; + panel->backlight.funcs = &lpt_funcs; else - 
panel->backlight.hz_to_pwm = spt_hz_to_pwm; + panel->backlight.funcs = &spt_funcs; } else if (HAS_PCH_SPLIT(dev_priv)) { - panel->backlight.setup = pch_setup_backlight; - panel->backlight.enable = pch_enable_backlight; - panel->backlight.disable = pch_disable_backlight; - panel->backlight.set = pch_set_backlight; - panel->backlight.get = pch_get_backlight; - panel->backlight.hz_to_pwm = pch_hz_to_pwm; + panel->backlight.funcs = &pch_funcs; } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) { - panel->backlight.setup = pwm_setup_backlight; - panel->backlight.enable = pwm_enable_backlight; - panel->backlight.disable = pwm_disable_backlight; - panel->backlight.set = pwm_set_backlight; - panel->backlight.get = pwm_get_backlight; + panel->backlight.funcs = &ext_pwm_funcs; } else { - panel->backlight.setup = vlv_setup_backlight; - panel->backlight.enable = vlv_enable_backlight; - panel->backlight.disable = vlv_disable_backlight; - panel->backlight.set = vlv_set_backlight; - panel->backlight.get = vlv_get_backlight; - panel->backlight.hz_to_pwm = vlv_hz_to_pwm; + panel->backlight.funcs = &vlv_funcs; } } else if (IS_GEN(dev_priv, 4)) { - panel->backlight.setup = i965_setup_backlight; - panel->backlight.enable = i965_enable_backlight; - panel->backlight.disable = i965_disable_backlight; - panel->backlight.set = i9xx_set_backlight; - panel->backlight.get = i9xx_get_backlight; - panel->backlight.hz_to_pwm = i965_hz_to_pwm; + panel->backlight.funcs = &i965_funcs; } else { - panel->backlight.setup = i9xx_setup_backlight; - panel->backlight.enable = i9xx_enable_backlight; - panel->backlight.disable = i9xx_disable_backlight; - panel->backlight.set = i9xx_set_backlight; - panel->backlight.get = i9xx_get_backlight; - panel->backlight.hz_to_pwm = i9xx_hz_to_pwm; + panel->backlight.funcs = &i9xx_funcs; } } diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index b3631b722de3..c24ae69426cf 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -1185,7 +1185,9 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum pipe pipe = plane->pipe; - u32 val; + const struct drm_rect *clip; + u32 val, offset; + int ret, x, y; if (!crtc_state->enable_psr2_sel_fetch) return; @@ -1196,16 +1198,25 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane, if (!val || plane->id == PLANE_CURSOR) return; - val = plane_state->uapi.dst.y1 << 16 | plane_state->uapi.dst.x1; + clip = &plane_state->psr2_sel_fetch_area; + + val = (clip->y1 + plane_state->uapi.dst.y1) << 16; + val |= plane_state->uapi.dst.x1; intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_POS(pipe, plane->id), val); - val = plane_state->color_plane[color_plane].y << 16; - val |= plane_state->color_plane[color_plane].x; + /* TODO: consider auxiliary surfaces */ + x = plane_state->uapi.src.x1 >> 16; + y = (plane_state->uapi.src.y1 >> 16) + clip->y1; + ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset); + if (ret) + drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n", + ret); + val = y << 16 | x; intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id), val); /* Sizes are 0 based */ - val = ((drm_rect_height(&plane_state->uapi.src) >> 16) - 1) << 16; + val = (drm_rect_height(clip) - 1) << 16; val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1; 
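The size value built just above (and written to PLANE_SEL_FETCH_SIZE right after) packs height and width into a single 32-bit register, with both fields stored 0-based so that hardware treats 0 as "one line/pixel". A self-contained sketch of that encoding follows; the helper names and the assumption that each field is 16 bits wide are illustrative, not taken from the hardware documentation:

#include <assert.h>
#include <stdint.h>

/* Pack a width/height pair the way the hunk above does: 0-based, height in the high half. */
static uint32_t pack_size(uint32_t width, uint32_t height)
{
	return (height - 1) << 16 | (width - 1);
}

static void unpack_size(uint32_t val, uint32_t *width, uint32_t *height)
{
	*height = (val >> 16) + 1;	/* high half holds height - 1 */
	*width = (val & 0xffff) + 1;	/* low half holds width - 1 */
}

int main(void)
{
	uint32_t w, h;

	unpack_size(pack_size(3840, 2160), &w, &h);
	assert(w == 3840 && h == 2160);
	return 0;
}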
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val); } @@ -1237,9 +1248,11 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state, if (clip->y1 == -1) goto exit; + drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4); + val |= PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE; val |= PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1 / 4 + 1); - val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(DIV_ROUND_UP(clip->y2, 4) + 1); + val |= PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 / 4 + 1); exit: crtc_state->psr2_man_track_ctl = val; } @@ -1264,8 +1277,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, struct intel_crtc *crtc) { struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 }; struct intel_plane_state *new_plane_state, *old_plane_state; - struct drm_rect pipe_clip = { .y1 = -1 }; struct intel_plane *plane; bool full_update = false; int i, ret; @@ -1277,13 +1290,25 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, if (ret) return ret; + /* + * Calculate minimal selective fetch area of each plane and calculate + * the pipe damaged area. + * In the next loop the plane selective fetch area will actually be set + * using whole pipe damaged area. + */ for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { - struct drm_rect temp; + struct drm_rect src, damaged_area = { .y1 = -1 }; + struct drm_mode_rect *damaged_clips; + u32 num_clips, j; if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc) continue; + if (!new_plane_state->uapi.visible && + !old_plane_state->uapi.visible) + continue; + /* * TODO: Not clear how to handle planes with negative position, * also planes are not updated if they have a negative X @@ -1295,18 +1320,94 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state, break; } - if (!new_plane_state->uapi.visible) - continue; + num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi); /* - * For now doing a selective fetch in the whole plane area, - * optimizations will come in the future. + * If visibility or plane moved, mark the whole plane area as + * damaged as it needs to be complete redraw in the new and old + * position. */ - temp.y1 = new_plane_state->uapi.dst.y1; - temp.y2 = new_plane_state->uapi.dst.y2; - clip_area_update(&pipe_clip, &temp); + if (new_plane_state->uapi.visible != old_plane_state->uapi.visible || + !drm_rect_equals(&new_plane_state->uapi.dst, + &old_plane_state->uapi.dst)) { + if (old_plane_state->uapi.visible) { + damaged_area.y1 = old_plane_state->uapi.dst.y1; + damaged_area.y2 = old_plane_state->uapi.dst.y2; + clip_area_update(&pipe_clip, &damaged_area); + } + + if (new_plane_state->uapi.visible) { + damaged_area.y1 = new_plane_state->uapi.dst.y1; + damaged_area.y2 = new_plane_state->uapi.dst.y2; + clip_area_update(&pipe_clip, &damaged_area); + } + continue; + } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha || + (!num_clips && + new_plane_state->uapi.fb != old_plane_state->uapi.fb)) { + /* + * If the plane don't have damaged areas but the + * framebuffer changed or alpha changed, mark the whole + * plane area as damaged. 
+ */ + damaged_area.y1 = new_plane_state->uapi.dst.y1; + damaged_area.y2 = new_plane_state->uapi.dst.y2; + clip_area_update(&pipe_clip, &damaged_area); + continue; + } + + drm_rect_fp_to_int(&src, &new_plane_state->uapi.src); + damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi); + + for (j = 0; j < num_clips; j++) { + struct drm_rect clip; + + clip.x1 = damaged_clips[j].x1; + clip.y1 = damaged_clips[j].y1; + clip.x2 = damaged_clips[j].x2; + clip.y2 = damaged_clips[j].y2; + if (drm_rect_intersect(&clip, &src)) + clip_area_update(&damaged_area, &clip); + } + + if (damaged_area.y1 == -1) + continue; + + damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1; + damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1; + clip_area_update(&pipe_clip, &damaged_area); + } + + if (full_update) + goto skip_sel_fetch_set_loop; + + /* It must be aligned to 4 lines */ + pipe_clip.y1 -= pipe_clip.y1 % 4; + if (pipe_clip.y2 % 4) + pipe_clip.y2 = ((pipe_clip.y2 / 4) + 1) * 4; + + /* + * Now that we have the pipe damaged area, check if it intersects with + * every plane; if it does, set the plane selective fetch area. + */ + for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state, + new_plane_state, i) { + struct drm_rect *sel_fetch_area, inter; + + if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc || + !new_plane_state->uapi.visible) + continue; + + inter = pipe_clip; + if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst)) + continue; + + sel_fetch_area = &new_plane_state->psr2_sel_fetch_area; + sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1; + sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1; } +skip_sel_fetch_set_loop: psr2_man_trk_ctl_calc(crtc_state, &pipe_clip, full_update); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 019a2d6d807a..cf3589fd0ddb 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -49,6 +49,7 @@ #include "intel_psr.h" #include "intel_dsi.h" #include "intel_sprite.h" +#include "i9xx_plane.h" int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs) @@ -61,14 +62,6 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, 1000 * adjusted_mode->crtc_htotal); } -/* FIXME: We should instead only take spinlocks once for the entire update * instead of once per mmio.
*/ -#if IS_ENABLED(CONFIG_PROVE_LOCKING) -#define VBLANK_EVASION_TIME_US 250 -#else -#define VBLANK_EVASION_TIME_US 100 -#endif - /** * intel_pipe_update_start() - start update of a set of display registers * @new_crtc_state: the new crtc state @@ -187,6 +180,36 @@ irq_disable: local_irq_disable(); } +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) +static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) +{ + u64 delta = ktime_to_ns(ktime_sub(end, crtc->debug.start_vbl_time)); + unsigned int h; + + h = ilog2(delta >> 9); + if (h >= ARRAY_SIZE(crtc->debug.vbl.times)) + h = ARRAY_SIZE(crtc->debug.vbl.times) - 1; + crtc->debug.vbl.times[h]++; + + crtc->debug.vbl.sum += delta; + if (!crtc->debug.vbl.min || delta < crtc->debug.vbl.min) + crtc->debug.vbl.min = delta; + if (delta > crtc->debug.vbl.max) + crtc->debug.vbl.max = delta; + + if (delta > 1000 * VBLANK_EVASION_TIME_US) { + drm_dbg_kms(crtc->base.dev, + "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n", + pipe_name(crtc->pipe), + div_u64(delta, 1000), + VBLANK_EVASION_TIME_US); + crtc->debug.vbl.over++; + } +} +#else +static void dbg_vblank_evade(struct intel_crtc *crtc, ktime_t end) {} +#endif + /** * intel_pipe_update_end() - end update of a set of display registers * @new_crtc_state: the new crtc state @@ -249,15 +272,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state) crtc->debug.min_vbl, crtc->debug.max_vbl, crtc->debug.scanline_start, scanline_end); } -#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE - else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) > - VBLANK_EVASION_TIME_US) - drm_warn(&dev_priv->drm, - "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n", - pipe_name(pipe), - ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time), - VBLANK_EVASION_TIME_US); -#endif + + dbg_vblank_evade(crtc, end_vbl_time); } int intel_plane_check_stride(const struct intel_plane_state *plane_state) diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index cd2104ba1ca1..76126dd8d584 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -17,6 +17,16 @@ struct drm_i915_private; struct intel_crtc_state; struct intel_plane_state; +/* + * FIXME: We should instead only take spinlocks once for the entire update + * instead of once per mmio. + */ +#if IS_ENABLED(CONFIG_PROVE_LOCKING) +#define VBLANK_EVASION_TIME_US 250 +#else +#define VBLANK_EVASION_TIME_US 100 +#endif + int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs); struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 4346bc1a747a..27dc2dad6809 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -262,7 +262,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) mask |= BIT(TC_PORT_LEGACY); /* The sink can be connected only in a single mode. 
*/ - if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1)) + if (!drm_WARN_ON_ONCE(&i915->drm, hweight32(mask) > 1)) tc_port_fixup_legacy_flag(dig_port, mask); return mask; diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 49b4b5fca941..187ec573de59 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -319,6 +319,8 @@ enum vbt_gmbus_ddi { ICL_DDC_BUS_DDI_A = 0x1, ICL_DDC_BUS_DDI_B, TGL_DDC_BUS_DDI_C, + RKL_DDC_BUS_DDI_D = 0x3, + RKL_DDC_BUS_DDI_E, ICL_DDC_BUS_PORT_1 = 0x4, ICL_DDC_BUS_PORT_2, ICL_DDC_BUS_PORT_3, diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index e2716a67b281..f58cc5700784 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -454,8 +454,6 @@ int intel_dsc_compute_params(struct intel_encoder *encoder, else if (vdsc_cfg->bits_per_component == 12) vdsc_cfg->mux_word_size = DSC_MUX_WORD_SIZE_12_BPC; - /* RC_MODEL_SIZE is a constant across all configurations */ - vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST; /* InitialScaleValue is a 6 bit value with 3 fractional bits (U3.3) */ vdsc_cfg->initial_scale_value = (vdsc_cfg->rc_model_size << 3) / (vdsc_cfg->rc_model_size - vdsc_cfg->initial_offset); @@ -741,7 +739,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state) /* Populate PICTURE_PARAMETER_SET_9 registers */ pps_val = 0; - pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) | + pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) | DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST); drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val); if (!is_pipe_dsc(crtc_state)) { diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index d52f9c177908..f94025ec603a 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -812,10 +812,20 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, intel_dsi_prepare(encoder, pipe_config); intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); - intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); - /* Deassert reset */ - intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); + /* + * Give the panel time to power-on and then deassert its reset. + * Depending on the VBT MIPI sequences version the deassert-seq + * may contain the necessary delay, intel_dsi_msleep() will skip + * the delay in that case. If there is no deassert-seq, then an + * unconditional msleep is used to give the panel time to power-on. 
+ */ + if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) { + intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); + } else { + msleep(intel_dsi->panel_on_delay); + } if (IS_GEMINILAKE(dev_priv)) { glk_cold_boot = glk_dsi_enable_io(encoder); diff --git a/drivers/gpu/drm/i915/dma_resv_utils.c b/drivers/gpu/drm/i915/dma_resv_utils.c new file mode 100644 index 000000000000..9e508e7d4629 --- /dev/null +++ b/drivers/gpu/drm/i915/dma_resv_utils.c @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020 Intel Corporation + */ + +#include <linux/dma-resv.h> + +#include "dma_resv_utils.h" + +void dma_resv_prune(struct dma_resv *resv) +{ + if (dma_resv_trylock(resv)) { + if (dma_resv_test_signaled_rcu(resv, true)) + dma_resv_add_excl_fence(resv, NULL); + dma_resv_unlock(resv); + } +} diff --git a/drivers/gpu/drm/i915/dma_resv_utils.h b/drivers/gpu/drm/i915/dma_resv_utils.h new file mode 100644 index 000000000000..b9d8fb5f8367 --- /dev/null +++ b/drivers/gpu/drm/i915/dma_resv_utils.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2020 Intel Corporation + */ + +#ifndef DMA_RESV_UTILS_H +#define DMA_RESV_UTILS_H + +struct dma_resv; + +void dma_resv_prune(struct dma_resv *resv); + +#endif /* DMA_RESV_UTILS_H */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 4fd38101bb56..68f58762d5e3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -72,6 +72,8 @@ #include "gt/intel_context_param.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_engine_user.h" +#include "gt/intel_execlists_submission.h" /* virtual_engine */ +#include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" #include "i915_gem_context.h" @@ -333,13 +335,12 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) return e; } -static void i915_gem_context_free(struct i915_gem_context *ctx) +void i915_gem_context_release(struct kref *ref) { - GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); + struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); - spin_lock(&ctx->i915->gem.contexts.lock); - list_del(&ctx->link); - spin_unlock(&ctx->i915->gem.contexts.lock); + trace_i915_context_free(ctx); + GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); mutex_destroy(&ctx->engines_mutex); mutex_destroy(&ctx->lut_mutex); @@ -353,37 +354,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) kfree_rcu(ctx, rcu); } -static void contexts_free_all(struct llist_node *list) -{ - struct i915_gem_context *ctx, *cn; - - llist_for_each_entry_safe(ctx, cn, list, free_link) - i915_gem_context_free(ctx); -} - -static void contexts_flush_free(struct i915_gem_contexts *gc) -{ - contexts_free_all(llist_del_all(&gc->free_list)); -} - -static void contexts_free_worker(struct work_struct *work) -{ - struct i915_gem_contexts *gc = - container_of(work, typeof(*gc), free_work); - - contexts_flush_free(gc); -} - -void i915_gem_context_release(struct kref *ref) -{ - struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); - struct i915_gem_contexts *gc = &ctx->i915->gem.contexts; - - trace_i915_context_free(ctx); - if (llist_add(&ctx->free_link, &gc->free_list)) - schedule_work(&gc->free_work); -} - static inline struct i915_gem_engines * __context_engines_static(const struct i915_gem_context *ctx) { @@ -453,6 +423,9 @@ static struct intel_engine_cs *active_engine(struct 
intel_context *ce) struct intel_engine_cs *engine = NULL; struct i915_request *rq; + if (intel_context_has_inflight(ce)) + return intel_context_inflight(ce); + if (!ce->timeline) return NULL; @@ -632,6 +605,10 @@ static void context_close(struct i915_gem_context *ctx) */ lut_close(ctx); + spin_lock(&ctx->i915->gem.contexts.lock); + list_del(&ctx->link); + spin_unlock(&ctx->i915->gem.contexts.lock); + mutex_unlock(&ctx->mutex); /* @@ -849,9 +826,6 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) !HAS_EXECLISTS(i915)) return ERR_PTR(-EINVAL); - /* Reap the stale contexts */ - contexts_flush_free(&i915->gem.contexts); - ctx = __create_context(i915); if (IS_ERR(ctx)) return ctx; @@ -896,23 +870,11 @@ static void init_contexts(struct i915_gem_contexts *gc) { spin_lock_init(&gc->lock); INIT_LIST_HEAD(&gc->list); - - INIT_WORK(&gc->free_work, contexts_free_worker); - init_llist_head(&gc->free_list); } void i915_gem_init__contexts(struct drm_i915_private *i915) { init_contexts(&i915->gem.contexts); - drm_dbg(&i915->drm, "%s context support initialized\n", - DRIVER_CAPS(i915)->has_logical_contexts ? - "logical" : "fake"); -} - -void i915_gem_driver_release__contexts(struct drm_i915_private *i915) -{ - flush_work(&i915->gem.contexts.free_work); - rcu_barrier(); /* and flush the left over RCU frees */ } static int gem_context_register(struct i915_gem_context *ctx, @@ -988,7 +950,6 @@ err: void i915_gem_context_close(struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; - struct drm_i915_private *i915 = file_priv->dev_priv; struct i915_address_space *vm; struct i915_gem_context *ctx; unsigned long idx; @@ -1000,8 +961,6 @@ void i915_gem_context_close(struct drm_file *file) xa_for_each(&file_priv->vm_xa, idx, vm) i915_vm_put(vm); xa_destroy(&file_priv->vm_xa); - - contexts_flush_free(&i915->gem.contexts); } int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index a133f92bbedb..b5c908f3f4f2 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -110,7 +110,6 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx) /* i915_gem_context.c */ void i915_gem_init__contexts(struct drm_i915_private *i915); -void i915_gem_driver_release__contexts(struct drm_i915_private *i915); int i915_gem_context_open(struct drm_i915_private *i915, struct drm_file *file); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index ae14ca24a11f..1449f54924e0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -108,7 +108,6 @@ struct i915_gem_context { /** link: place with &drm_i915_private.context_list */ struct list_head link; - struct llist_node free_link; /** * @ref: reference count diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 0dd477e56573..04e9c04545ad 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -114,8 +114,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct * if (ret) return ret; - fput(vma->vm_file); - vma->vm_file = get_file(obj->base.filp); + vma_set_file(vma, obj->base.filp); return 0; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c 
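The dmabuf mmap hunk above replaces the open-coded fput()/get_file() pair with vma_set_file(). The point of the helper is the ordering of the reference counting; roughly sketched below (an illustration of the idea, not the exact mm implementation, and the _sketch name is made up):

#include <linux/file.h>
#include <linux/mm.h>

static void vma_set_file_sketch(struct vm_area_struct *vma, struct file *file)
{
	get_file(file);			/* take a reference on the new file first */
	swap(vma->vm_file, file);	/* 'file' now holds the old vm_file */
	fput(file);			/* drop the reference on the old file */
}

Because the new reference is taken before the old one is dropped, the sequence stays balanced even when old and new are the same file. And since the helper takes its own reference, a caller that created a file just for this purpose must still drop its creation reference afterwards, which is what the i915_gem_mmap() hunk below does with fput(anon).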
index 1904e6e5ea64..b91b32195dcf 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -15,6 +15,7 @@ #include "gem/i915_gem_ioctls.h" #include "gt/intel_context.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_buffer_pool.h" #include "gt/intel_gt_pm.h" @@ -382,7 +383,7 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry, return true; if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) && - (vma->node.start + vma->node.size - 1) >> 32) + (vma->node.start + vma->node.size + 4095) >> 32) return true; if (flags & __EXEC_OBJECT_NEEDS_MAP && @@ -534,8 +535,6 @@ eb_add_vma(struct i915_execbuffer *eb, struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; struct eb_vma *ev = &eb->vma[i]; - GEM_BUG_ON(i915_vma_is_closed(vma)); - ev->vma = vma; ev->exec = entry; ev->flags = entry->flags; @@ -1046,7 +1045,7 @@ static void reloc_gpu_flush(struct i915_execbuffer *eb, struct reloc_cache *cach GEM_BUG_ON(cache->rq_size >= obj->base.size / sizeof(u32)); cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END; - __i915_gem_object_flush_map(obj, 0, sizeof(u32) * (cache->rq_size + 1)); + i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); intel_gt_chipset_flush(cache->rq->engine->gt); @@ -1296,6 +1295,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, goto err_pool; } + memset32(cmd, 0, pool->obj->base.size / sizeof(u32)); + batch = i915_vma_instance(pool->obj, vma->vm, NULL); if (IS_ERR(batch)) { err = PTR_ERR(batch); @@ -2533,6 +2534,9 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) { int err; + if (intel_context_nopreempt(eb->context)) + __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags); + err = eb_move_to_gpu(eb); if (err) return err; @@ -2573,15 +2577,12 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch) return err; } - if (intel_context_nopreempt(eb->context)) - __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags); - return 0; } static int num_vcs_engines(const struct drm_i915_private *i915) { - return hweight64(VDBOX_MASK(&i915->gt)); + return hweight_long(VDBOX_MASK(&i915->gt)); } /* @@ -3097,7 +3098,7 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end) break; } -static void eb_request_add(struct i915_execbuffer *eb) +static int eb_request_add(struct i915_execbuffer *eb, int err) { struct i915_request *rq = eb->request; struct intel_timeline * const tl = i915_request_timeline(rq); @@ -3118,6 +3119,7 @@ static void eb_request_add(struct i915_execbuffer *eb) /* Serialise with context_close via the add_to_timeline */ i915_request_set_error_once(rq, -ENOENT); __i915_request_skip(rq); + err = -ENOENT; /* override any transient errors */ } __i915_request_queue(rq, &attr); @@ -3127,6 +3129,8 @@ static void eb_request_add(struct i915_execbuffer *eb) retire_requests(tl, prev); mutex_unlock(&tl->mutex); + + return err; } static const i915_user_extension_fn execbuf_extensions[] = { @@ -3332,7 +3336,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, err = eb_submit(&eb, batch); err_request: i915_request_get(eb.request); - eb_request_add(&eb); + err = eb_request_add(&eb, err); if (eb.fences) signal_fence_array(&eb); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 3d69e51f3e4d..ec28a6cde49b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -893,8 +893,9 @@ int 
i915_gem_mmap(struct file *filp, struct vm_area_struct *vma) * requires avoiding extraneous references to their filp, hence why * we prefer to use an anonymous file for their mmaps. */ - fput(vma->vm_file); - vma->vm_file = anon; + vma_set_file(vma, anon); + /* Drop the initial creation reference, the vma is now holding one. */ + fput(anon); switch (mmo->mmap_type) { case I915_MMAP_TYPE_WC: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index aee7ad3cc3c6..10cac9fac79b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "gt/intel_context.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_buffer_pool.h" #include "gt/intel_ring.h" diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index e2c7b2a7895f..3db3c667c486 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -238,7 +238,7 @@ unlock: /* The 'mapping' part of i915_gem_object_pin_map() below */ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj, - enum i915_map_type type) + enum i915_map_type type) { unsigned long n_pages = obj->base.size >> PAGE_SHIFT, i; struct page *stack[32], **pages = stack, *page; @@ -281,7 +281,7 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj, /* Too big for stack -- allocate temporary array instead */ pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL); if (!pages) - return NULL; + return ERR_PTR(-ENOMEM); } i = 0; @@ -290,11 +290,12 @@ static void *i915_gem_object_map_page(struct drm_i915_gem_object *obj, vaddr = vmap(pages, n_pages, 0, pgprot); if (pages != stack) kvfree(pages); - return vaddr; + + return vaddr ?: ERR_PTR(-ENOMEM); } static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj, - enum i915_map_type type) + enum i915_map_type type) { resource_size_t iomap = obj->mm.region->iomap.base - obj->mm.region->region.start; @@ -305,13 +306,13 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj, void *vaddr; if (type != I915_MAP_WC) - return NULL; + return ERR_PTR(-ENODEV); if (n_pfn > ARRAY_SIZE(stack)) { /* Too big for stack -- allocate temporary array instead */ pfns = kvmalloc_array(n_pfn, sizeof(*pfns), GFP_KERNEL); if (!pfns) - return NULL; + return ERR_PTR(-ENOMEM); } i = 0; @@ -320,7 +321,8 @@ static void *i915_gem_object_map_pfn(struct drm_i915_gem_object *obj, vaddr = vmap_pfn(pfns, n_pfn, pgprot_writecombine(PAGE_KERNEL_IO)); if (pfns != stack) kvfree(pfns); - return vaddr; + + return vaddr ?: ERR_PTR(-ENOMEM); } /* get, pin, and map the pages of the object into kernel space */ @@ -349,8 +351,10 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj)); err = ____i915_gem_object_get_pages(obj); - if (err) - goto err_unlock; + if (err) { + ptr = ERR_PTR(err); + goto out_unlock; + } smp_mb__before_atomic(); } @@ -362,7 +366,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, ptr = page_unpack_bits(obj->mm.mapping, &has_type); if (ptr && has_type != type) { if (pinned) { - err = -EBUSY; + ptr = ERR_PTR(-EBUSY); goto err_unpin; } @@ -374,15 +378,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, if (!ptr) { if (GEM_WARN_ON(type == I915_MAP_WC && !static_cpu_has(X86_FEATURE_PAT))) - ptr = NULL; + 
ptr = ERR_PTR(-ENODEV); else if (i915_gem_object_has_struct_page(obj)) ptr = i915_gem_object_map_page(obj, type); else ptr = i915_gem_object_map_pfn(obj, type); - if (!ptr) { - err = -ENOMEM; + if (IS_ERR(ptr)) goto err_unpin; - } obj->mm.mapping = page_pack_bits(ptr, type); } @@ -393,8 +395,6 @@ out_unlock: err_unpin: atomic_dec(&obj->mm.pages_pin_count); -err_unlock: - ptr = ERR_PTR(err); goto out_unlock; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c index 1515384d7e0e..835bd01f2e5d 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_region.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -22,6 +22,7 @@ i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj, int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) { + const u64 max_segment = i915_sg_segment_size(); struct intel_memory_region *mem = obj->mm.region; struct list_head *blocks = &obj->mm.blocks; resource_size_t size = obj->base.size; @@ -37,7 +38,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) if (!st) return -ENOMEM; - if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) { + if (sg_alloc_table(st, size >> PAGE_SHIFT, GFP_KERNEL)) { kfree(st); return -ENOMEM; } @@ -64,27 +65,30 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) i915_buddy_block_size(&mem->mm, block)); offset = i915_buddy_block_offset(block); - GEM_BUG_ON(overflows_type(block_size, sg->length)); + while (block_size) { + u64 len; - if (offset != prev_end || - add_overflows_t(typeof(sg->length), sg->length, block_size)) { - if (st->nents) { - sg_page_sizes |= sg->length; - sg = __sg_next(sg); + if (offset != prev_end || sg->length >= max_segment) { + if (st->nents) { + sg_page_sizes |= sg->length; + sg = __sg_next(sg); + } + + sg_dma_address(sg) = mem->region.start + offset; + sg_dma_len(sg) = 0; + sg->length = 0; + st->nents++; } - sg_dma_address(sg) = mem->region.start + offset; - sg_dma_len(sg) = block_size; + len = min(block_size, max_segment - sg->length); + sg->length += len; + sg_dma_len(sg) += len; - sg->length = block_size; + offset += len; + block_size -= len; - st->nents++; - } else { - sg->length += block_size; - sg_dma_len(sg) += block_size; + prev_end = offset; } - - prev_end = offset + block_size; } sg_page_sizes |= sg->length; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index dc8f052a0ffe..c2dba1cd9532 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -15,6 +15,7 @@ #include "gt/intel_gt_requests.h" +#include "dma_resv_utils.h" #include "i915_trace.h" static bool swap_available(void) @@ -209,6 +210,8 @@ i915_gem_shrink(struct drm_i915_private *i915, mutex_unlock(&obj->mm.lock); } + dma_resv_prune(obj->base.resv); + scanned += obj->base.size >> PAGE_SHIFT; i915_gem_object_put(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index 29bffc6afcc1..41b9fbf4dbcc 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -608,11 +608,10 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen); GEM_BUG_ON(!stolen); - - i915_gem_object_release_memory_region(obj); - i915_gem_stolen_remove_node(i915, stolen); kfree(stolen); + + i915_gem_object_release_memory_region(obj); } static const struct drm_i915_gem_object_ops 
i915_gem_object_stolen_ops = { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c index 8af55cd3e690..c1b13ac50d0f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c @@ -9,6 +9,7 @@ #include "gt/intel_engine.h" +#include "dma_resv_utils.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" @@ -84,11 +85,8 @@ i915_gem_object_wait_reservation(struct dma_resv *resv, * Opportunistically prune the fences iff we know they have *all* been * signaled. */ - if (prune_fences && dma_resv_trylock(resv)) { - if (dma_resv_test_signaled_rcu(resv, true)) - dma_resv_add_excl_fence(resv, NULL); - dma_resv_unlock(resv); - } + if (prune_fences) + dma_resv_prune(resv); return timeout; } diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c index a768ec61e966..2fb501a78a85 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c @@ -27,7 +27,7 @@ static void huge_free_pages(struct drm_i915_gem_object *obj, static int huge_get_pages(struct drm_i915_gem_object *obj) { -#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY) +#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL) const unsigned long nreal = obj->scratch / PAGE_SIZE; const unsigned long npages = obj->base.size / PAGE_SIZE; struct scatterlist *sg, *src, *end; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 1f35e71429b4..aacf4856ccb4 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -368,6 +368,27 @@ static int igt_check_page_sizes(struct i915_vma *vma) err = -EINVAL; } + /* + * The dma-api is like a box of chocolates when it comes to the + * alignment of dma addresses, however for LMEM we have total control + * and so can guarantee alignment, likewise when we allocate our blocks + * they should appear in descending order, and if we know that we align + * to the largest page size for the GTT address, we should be able to + * assert that if we see 2M physical pages then we should also get 2M + * GTT pages. If we don't then something might be wrong in our + * construction of the backing pages. + * + * Maintaining alignment is required to utilise huge pages in the ppGGT. 
+ */ + if (i915_gem_object_is_lmem(obj) && + IS_ALIGNED(vma->node.start, SZ_2M) && + vma->page_sizes.sg & SZ_2M && + vma->page_sizes.gtt < SZ_2M) { + pr_err("gtt pages mismatch for LMEM, expected 2M GTT pages, sg(%u), gtt(%u)\n", + vma->page_sizes.sg, vma->page_sizes.gtt); + err = -EINVAL; + } + if (obj->mm.page_sizes.gtt) { pr_err("obj->page_sizes.gtt(%u) should never be set\n", obj->mm.page_sizes.gtt); @@ -1333,6 +1354,7 @@ static int igt_ppgtt_sanity_check(void *arg) unsigned int flags; } backends[] = { { igt_create_system, 0, }, + { igt_create_local, 0, }, { igt_create_local, I915_BO_ALLOC_CONTIGUOUS, }, }; struct { diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index 4e36d4897ea6..6a674a7994df 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -20,13 +20,11 @@ static int __igt_client_fill(struct intel_engine_cs *engine) { struct intel_context *ce = engine->kernel_context; struct drm_i915_gem_object *obj; - struct rnd_state prng; + I915_RND_STATE(prng); IGT_TIMEOUT(end); u32 *vaddr; int err = 0; - prandom_seed_state(&prng, i915_selftest.random_seed); - intel_engine_pm_get(engine); do { const u32 max_block_size = S16_MAX * PAGE_SIZE; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 7049a6bbc03d..1117d2a44518 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -7,6 +7,7 @@ #include <linux/prime_numbers.h> #include "gt/intel_engine_pm.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gt/intel_ring.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index d27d87a678c8..d429c7643ff2 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -7,6 +7,7 @@ #include <linux/prime_numbers.h> #include "gt/intel_engine_pm.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include "gem/i915_gem_region.h" diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c index e21b5023ca7d..d6783061bc72 100644 --- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c @@ -9,6 +9,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "i915_vma.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c index 174a24553322..d4f4452ce5ed 100644 --- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c @@ -11,6 +11,7 @@ #include "i915_drv.h" #include "intel_gt.h" #include "intel_gt_clock_utils.h" +#include "intel_gt_pm.h" #include "intel_llc.h" #include "intel_rc6.h" #include "intel_rps.h" @@ -403,34 +404,34 @@ static int frequency_show(struct seq_file *m, void *unused) seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); seq_printf(m, "CAGF: %dMHz\n", cagf); - seq_printf(m, "RP CUR UP EI: %d (%dns)\n", + seq_printf(m, "RP CUR UP EI: %d (%lldns)\n", rpcurupei, intel_gt_pm_interval_to_ns(gt, rpcurupei)); - 
seq_printf(m, "RP CUR UP: %d (%dns)\n", + seq_printf(m, "RP CUR UP: %d (%lldns)\n", rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup)); - seq_printf(m, "RP PREV UP: %d (%dns)\n", + seq_printf(m, "RP PREV UP: %d (%lldns)\n", rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup)); seq_printf(m, "Up threshold: %d%%\n", rps->power.up_threshold); - seq_printf(m, "RP UP EI: %d (%dns)\n", + seq_printf(m, "RP UP EI: %d (%lldns)\n", rpupei, intel_gt_pm_interval_to_ns(gt, rpupei)); - seq_printf(m, "RP UP THRESHOLD: %d (%dns)\n", + seq_printf(m, "RP UP THRESHOLD: %d (%lldns)\n", rpupt, intel_gt_pm_interval_to_ns(gt, rpupt)); - seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n", + seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n", rpcurdownei, intel_gt_pm_interval_to_ns(gt, rpcurdownei)); - seq_printf(m, "RP CUR DOWN: %d (%dns)\n", + seq_printf(m, "RP CUR DOWN: %d (%lldns)\n", rpcurdown, intel_gt_pm_interval_to_ns(gt, rpcurdown)); - seq_printf(m, "RP PREV DOWN: %d (%dns)\n", + seq_printf(m, "RP PREV DOWN: %d (%lldns)\n", rpprevdown, intel_gt_pm_interval_to_ns(gt, rpprevdown)); seq_printf(m, "Down threshold: %d%%\n", rps->power.down_threshold); - seq_printf(m, "RP DOWN EI: %d (%dns)\n", + seq_printf(m, "RP DOWN EI: %d (%lldns)\n", rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei)); - seq_printf(m, "RP DOWN THRESHOLD: %d (%dns)\n", + seq_printf(m, "RP DOWN THRESHOLD: %d (%lldns)\n", rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt)); max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 : @@ -558,7 +559,9 @@ static int rps_boost_show(struct seq_file *m, void *data) seq_printf(m, "RPS enabled? %s\n", yesno(intel_rps_is_enabled(rps))); seq_printf(m, "RPS active? %s\n", yesno(intel_rps_is_active(rps))); - seq_printf(m, "GPU busy? %s\n", yesno(gt->awake)); + seq_printf(m, "GPU busy? %s, %llums\n", + yesno(gt->awake), + ktime_to_ms(intel_gt_get_awake_time(gt))); seq_printf(m, "Boosts outstanding? %d\n", atomic_read(&rps->num_waiters)); seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); @@ -575,7 +578,7 @@ static int rps_boost_show(struct seq_file *m, void *data) intel_gpu_freq(rps, rps->efficient_freq), intel_gpu_freq(rps, rps->boost_freq)); - seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); + seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts)); if (INTEL_GEN(i915) >= 6 && intel_rps_is_active(rps)) { struct intel_uncore *uncore = gt->uncore; diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c index d93d85cd3027..94465374ca2f 100644 --- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c +++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c @@ -7,8 +7,6 @@ #include "i915_drv.h" #include "intel_gpu_commands.h" -#define MAX_URB_ENTRIES 64 -#define STATE_SIZE (4 * 1024) #define GT3_INLINE_DATA_DELAYS 0x1E00 #define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS)) @@ -34,38 +32,59 @@ struct batch_chunk { }; struct batch_vals { - u32 max_primitives; - u32 max_urb_entries; - u32 cmd_size; - u32 state_size; + u32 max_threads; u32 state_start; - u32 batch_size; + u32 surface_start; u32 surface_height; u32 surface_width; - u32 scratch_size; - u32 max_size; + u32 size; }; +static inline int num_primitives(const struct batch_vals *bv) +{ + /* + * We need to saturate the GPU with work in order to dispatch + * a shader on every HW thread, and clear the thread-local registers. + * In short, we have to dispatch work faster than the shaders can + * run in order to fill the EU and occupy each HW thread. 
+ */ + return bv->max_threads; +} + static void batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv) { if (IS_HASWELL(i915)) { - bv->max_primitives = 280; - bv->max_urb_entries = MAX_URB_ENTRIES; + switch (INTEL_INFO(i915)->gt) { + default: + case 1: + bv->max_threads = 70; + break; + case 2: + bv->max_threads = 140; + break; + case 3: + bv->max_threads = 280; + break; + } bv->surface_height = 16 * 16; bv->surface_width = 32 * 2 * 16; } else { - bv->max_primitives = 128; - bv->max_urb_entries = MAX_URB_ENTRIES / 2; + switch (INTEL_INFO(i915)->gt) { + default: + case 1: /* including vlv */ + bv->max_threads = 36; + break; + case 2: + bv->max_threads = 128; + break; + } bv->surface_height = 16 * 8; bv->surface_width = 32 * 16; } - bv->cmd_size = bv->max_primitives * 4096; - bv->state_size = STATE_SIZE; - bv->state_start = bv->cmd_size; - bv->batch_size = bv->cmd_size + bv->state_size; - bv->scratch_size = bv->surface_height * bv->surface_width; - bv->max_size = bv->batch_size + bv->scratch_size; + bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K); + bv->surface_start = bv->state_start + SZ_4K; + bv->size = bv->surface_start + bv->surface_height * bv->surface_width; } static void batch_init(struct batch_chunk *bc, @@ -155,7 +174,8 @@ static u32 gen7_fill_binding_table(struct batch_chunk *state, const struct batch_vals *bv) { - u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv); + u32 surface_start = + gen7_fill_surface_state(state, bv->surface_start, bv); u32 *cs = batch_alloc_items(state, 32, 8); u32 offset = batch_offset(state, cs); @@ -214,9 +234,9 @@ static void gen7_emit_state_base_address(struct batch_chunk *batch, u32 surface_state_base) { - u32 *cs = batch_alloc_items(batch, 0, 12); + u32 *cs = batch_alloc_items(batch, 0, 10); - *cs++ = STATE_BASE_ADDRESS | (12 - 2); + *cs++ = STATE_BASE_ADDRESS | (10 - 2); /* general */ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; /* surface */ @@ -233,8 +253,6 @@ gen7_emit_state_base_address(struct batch_chunk *batch, *cs++ = BASE_ADDRESS_MODIFY; *cs++ = 0; *cs++ = BASE_ADDRESS_MODIFY; - *cs++ = 0; - *cs++ = 0; batch_advance(batch, cs); } @@ -244,8 +262,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch, u32 urb_size, u32 curbe_size, u32 mode) { - u32 urb_entries = bv->max_urb_entries; - u32 threads = bv->max_primitives - 1; + u32 threads = bv->max_threads - 1; u32 *cs = batch_alloc_items(batch, 32, 8); *cs++ = MEDIA_VFE_STATE | (8 - 2); @@ -254,7 +271,7 @@ gen7_emit_vfe_state(struct batch_chunk *batch, *cs++ = 0; /* number of threads & urb entries for GPGPU vs Media Mode */ - *cs++ = threads << 16 | urb_entries << 8 | mode << 2; + *cs++ = threads << 16 | 1 << 8 | mode << 2; *cs++ = 0; @@ -293,17 +310,12 @@ gen7_emit_media_object(struct batch_chunk *batch, { unsigned int x_offset = (media_object_index % 16) * 64; unsigned int y_offset = (media_object_index / 16) * 16; - unsigned int inline_data_size; - unsigned int media_batch_size; - unsigned int i; + unsigned int pkt = 6 + 3; u32 *cs; - inline_data_size = 112 * 8; - media_batch_size = inline_data_size + 6; - - cs = batch_alloc_items(batch, 8, media_batch_size); + cs = batch_alloc_items(batch, 8, pkt); - *cs++ = MEDIA_OBJECT | (media_batch_size - 2); + *cs++ = MEDIA_OBJECT | (pkt - 2); /* interface descriptor offset */ *cs++ = 0; @@ -317,25 +329,44 @@ gen7_emit_media_object(struct batch_chunk *batch, *cs++ = 0; /* inline */ - *cs++ = (y_offset << 16) | (x_offset); + *cs++ = y_offset << 16 | x_offset; *cs++ = 0; *cs++ = 
GT3_INLINE_DATA_DELAYS; - for (i = 3; i < inline_data_size; i++) - *cs++ = 0; batch_advance(batch, cs); } static void gen7_emit_pipeline_flush(struct batch_chunk *batch) { - u32 *cs = batch_alloc_items(batch, 0, 5); + u32 *cs = batch_alloc_items(batch, 0, 4); - *cs++ = GFX_OP_PIPE_CONTROL(5); - *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE | - PIPE_CONTROL_GLOBAL_GTT_IVB; + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_CS_STALL; *cs++ = 0; *cs++ = 0; + + batch_advance(batch, cs); +} + +static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch) +{ + u32 *cs = batch_alloc_items(batch, 0, 8); + + /* ivb: Stall before STATE_CACHE_INVALIDATE */ + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD | + PIPE_CONTROL_CS_STALL; + *cs++ = 0; + *cs++ = 0; + + *cs++ = GFX_OP_PIPE_CONTROL(4); + *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE; *cs++ = 0; + *cs++ = 0; + batch_advance(batch, cs); } @@ -344,34 +375,34 @@ static void emit_batch(struct i915_vma * const vma, const struct batch_vals *bv) { struct drm_i915_private *i915 = vma->vm->i915; - unsigned int desc_count = 64; - const u32 urb_size = 112; + const unsigned int desc_count = 1; + const unsigned int urb_size = 1; struct batch_chunk cmds, state; - u32 interface_descriptor; + u32 descriptors; unsigned int i; - batch_init(&cmds, vma, start, 0, bv->cmd_size); - batch_init(&state, vma, start, bv->state_start, bv->state_size); + batch_init(&cmds, vma, start, 0, bv->state_start); + batch_init(&state, vma, start, bv->state_start, SZ_4K); - interface_descriptor = - gen7_fill_interface_descriptor(&state, bv, - IS_HASWELL(i915) ? - &cb_kernel_hsw : - &cb_kernel_ivb, - desc_count); - gen7_emit_pipeline_flush(&cmds); + descriptors = gen7_fill_interface_descriptor(&state, bv, + IS_HASWELL(i915) ? 
+ &cb_kernel_hsw : + &cb_kernel_ivb, + desc_count); + + gen7_emit_pipeline_invalidate(&cmds); batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA); batch_add(&cmds, MI_NOOP); - gen7_emit_state_base_address(&cmds, interface_descriptor); + gen7_emit_pipeline_invalidate(&cmds); + gen7_emit_pipeline_flush(&cmds); + gen7_emit_state_base_address(&cmds, descriptors); + gen7_emit_pipeline_invalidate(&cmds); gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0); + gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count); - gen7_emit_interface_descriptor_load(&cmds, - interface_descriptor, - desc_count); - - for (i = 0; i < bv->max_primitives; i++) + for (i = 0; i < num_primitives(bv); i++) gen7_emit_media_object(&cmds, i); batch_add(&cmds, MI_BATCH_BUFFER_END); @@ -385,15 +416,15 @@ int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine, batch_get_defaults(engine->i915, &bv); if (!vma) - return bv.max_size; + return bv.size; - GEM_BUG_ON(vma->obj->base.size < bv.max_size); + GEM_BUG_ON(vma->obj->base.size < bv.size); batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC); if (IS_ERR(batch)) return PTR_ERR(batch); - emit_batch(vma, memset(batch, 0, bv.max_size), &bv); + emit_batch(vma, memset(batch, 0, bv.size), &bv); i915_gem_object_flush_map(vma->obj); __i915_gem_object_release_map(vma->obj); diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.c b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c new file mode 100644 index 000000000000..8066b93e6dc4 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.c @@ -0,0 +1,633 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2014 Intel Corporation + */ + +#include "gen8_engine_cs.h" +#include "i915_drv.h" +#include "intel_lrc.h" +#include "intel_gpu_commands.h" +#include "intel_ring.h" + +int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode) +{ + bool vf_flush_wa = false, dc_flush_wa = false; + u32 *cs, flags = 0; + int len; + + flags |= PIPE_CONTROL_CS_STALL; + + if (mode & EMIT_FLUSH) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; + flags |= PIPE_CONTROL_FLUSH_ENABLE; + } + + if (mode & EMIT_INVALIDATE) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + + /* + * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL + * pipe control. 
+ */ + if (IS_GEN(rq->engine->i915, 9)) + vf_flush_wa = true; + + /* WaForGAMHang:kbl */ + if (IS_KBL_GT_REVID(rq->engine->i915, 0, KBL_REVID_B0)) + dc_flush_wa = true; + } + + len = 6; + + if (vf_flush_wa) + len += 6; + + if (dc_flush_wa) + len += 12; + + cs = intel_ring_begin(rq, len); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + if (vf_flush_wa) + cs = gen8_emit_pipe_control(cs, 0, 0); + + if (dc_flush_wa) + cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE, + 0); + + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); + + if (dc_flush_wa) + cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0); + + intel_ring_advance(rq, cs); + + return 0; +} + +int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode) +{ + u32 cmd, *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cmd = MI_FLUSH_DW + 1; + + /* + * We always require a command barrier so that subsequent + * commands, such as breadcrumb interrupts, are strictly ordered + * wrt the contents of the write cache being flushed to memory + * (and thus being coherent from the CPU). + */ + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + + if (mode & EMIT_INVALIDATE) { + cmd |= MI_INVALIDATE_TLB; + if (rq->engine->class == VIDEO_DECODE_CLASS) + cmd |= MI_INVALIDATE_BSD; + } + + *cs++ = cmd; + *cs++ = LRC_PPHWSP_SCRATCH_ADDR; + *cs++ = 0; /* upper addr */ + *cs++ = 0; /* value */ + intel_ring_advance(rq, cs); + + return 0; +} + +int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode) +{ + if (mode & EMIT_FLUSH) { + u32 *cs; + u32 flags = 0; + + flags |= PIPE_CONTROL_CS_STALL; + + flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; + flags |= PIPE_CONTROL_FLUSH_ENABLE; + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); + intel_ring_advance(rq, cs); + } + + if (mode & EMIT_INVALIDATE) { + u32 *cs; + u32 flags = 0; + + flags |= PIPE_CONTROL_CS_STALL; + + flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_QW_WRITE; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); + intel_ring_advance(rq, cs); + } + + return 0; +} + +static u32 preparser_disable(bool state) +{ + return MI_ARB_CHECK | 1 << 8 | state; +} + +static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine) +{ + static const i915_reg_t vd[] = { + GEN12_VD0_AUX_NV, + GEN12_VD1_AUX_NV, + GEN12_VD2_AUX_NV, + GEN12_VD3_AUX_NV, + }; + + static const i915_reg_t ve[] = { + GEN12_VE0_AUX_NV, + GEN12_VE1_AUX_NV, + }; + + if (engine->class == VIDEO_DECODE_CLASS) + return vd[engine->instance]; + + if (engine->class == VIDEO_ENHANCEMENT_CLASS) + return ve[engine->instance]; + + GEM_BUG_ON("unknown aux_inv reg\n"); + return INVALID_MMIO_REG; +} + +static u32 *gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs) +{ + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(inv_reg); + *cs++ = 
AUX_INV; + *cs++ = MI_NOOP; + + return cs; +} + +int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode) +{ + if (mode & EMIT_FLUSH) { + u32 flags = 0; + u32 *cs; + + flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; + flags |= PIPE_CONTROL_FLUSH_L3; + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + /* Wa_1409600907:tgl */ + flags |= PIPE_CONTROL_DEPTH_STALL; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; + flags |= PIPE_CONTROL_FLUSH_ENABLE; + + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + flags |= PIPE_CONTROL_QW_WRITE; + + flags |= PIPE_CONTROL_CS_STALL; + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cs = gen12_emit_pipe_control(cs, + PIPE_CONTROL0_HDC_PIPELINE_FLUSH, + flags, LRC_PPHWSP_SCRATCH_ADDR); + intel_ring_advance(rq, cs); + } + + if (mode & EMIT_INVALIDATE) { + u32 flags = 0; + u32 *cs; + + flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + flags |= PIPE_CONTROL_QW_WRITE; + + flags |= PIPE_CONTROL_CS_STALL; + + cs = intel_ring_begin(rq, 8 + 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + * Prevent the pre-parser from skipping past the TLB + * invalidate and loading a stale page for the batch + * buffer / request payload. + */ + *cs++ = preparser_disable(true); + + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); + + /* hsdes: 1809175790 */ + cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs); + + *cs++ = preparser_disable(false); + intel_ring_advance(rq, cs); + } + + return 0; +} + +int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode) +{ + intel_engine_mask_t aux_inv = 0; + u32 cmd, *cs; + + cmd = 4; + if (mode & EMIT_INVALIDATE) + cmd += 2; + if (mode & EMIT_INVALIDATE) + aux_inv = rq->engine->mask & ~BIT(BCS0); + if (aux_inv) + cmd += 2 * hweight8(aux_inv) + 2; + + cs = intel_ring_begin(rq, cmd); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + if (mode & EMIT_INVALIDATE) + *cs++ = preparser_disable(true); + + cmd = MI_FLUSH_DW + 1; + + /* + * We always require a command barrier so that subsequent + * commands, such as breadcrumb interrupts, are strictly ordered + * wrt the contents of the write cache being flushed to memory + * (and thus being coherent from the CPU). 
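The ring-space arithmetic in gen12_emit_flush_xcs is easy to misread in patch form: the base MI_FLUSH_DW sequence is 4 dwords, an invalidate adds the two pre-parser toggles, and the aux-table invalidation adds one register/value pair per engine to a single MI_LOAD_REGISTER_IMM, plus its header and a trailing MI_NOOP. A small self-contained sketch of that budget; the engine-mask bit positions are invented for the example:

#include <stdint.h>
#include <stdio.h>

/* Example engine-mask bits, analogous to the driver's intel_engine_mask_t. */
#define BCS0  (1u << 0)
#define VCS0  (1u << 1)
#define VCS1  (1u << 2)
#define VECS0 (1u << 3)

static unsigned int flush_xcs_dwords(uint8_t aux_inv, int invalidate)
{
	unsigned int n = 4;		/* MI_FLUSH_DW + address lo/hi + value */

	if (invalidate)
		n += 2;			/* pre-parser disable + re-enable */
	if (invalidate && aux_inv)
		/* LRI header + one reg/value pair per engine + MI_NOOP */
		n += 2 * __builtin_popcount(aux_inv) + 2;
	return n;
}

int main(void)
{
	uint8_t aux_inv = (VCS0 | VCS1 | VECS0) & ~BCS0;

	printf("flush only: %u dwords\n", flush_xcs_dwords(0, 0));
	printf("flush + invalidate: %u dwords\n", flush_xcs_dwords(aux_inv, 1));
	return 0;
}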
+ */ + cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; + + if (mode & EMIT_INVALIDATE) { + cmd |= MI_INVALIDATE_TLB; + if (rq->engine->class == VIDEO_DECODE_CLASS) + cmd |= MI_INVALIDATE_BSD; + } + + *cs++ = cmd; + *cs++ = LRC_PPHWSP_SCRATCH_ADDR; + *cs++ = 0; /* upper addr */ + *cs++ = 0; /* value */ + + if (aux_inv) { /* hsdes: 1809175790 */ + struct intel_engine_cs *engine; + unsigned int tmp; + + *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv)); + for_each_engine_masked(engine, rq->engine->gt, + aux_inv, tmp) { + *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine)); + *cs++ = AUX_INV; + } + *cs++ = MI_NOOP; + } + + if (mode & EMIT_INVALIDATE) + *cs++ = preparser_disable(false); + + intel_ring_advance(rq, cs); + + return 0; +} + +static inline u32 preempt_address(struct intel_engine_cs *engine) +{ + return (i915_ggtt_offset(engine->status_page.vma) + + I915_GEM_HWS_PREEMPT_ADDR); +} + +static u32 hwsp_offset(const struct i915_request *rq) +{ + const struct intel_timeline_cacheline *cl; + + /* Before the request is executed, the timeline/cacheline is fixed */ + + cl = rcu_dereference_protected(rq->hwsp_cacheline, 1); + if (cl) + return cl->ggtt_offset; + + return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset; +} + +int gen8_emit_init_breadcrumb(struct i915_request *rq) +{ + u32 *cs; + + GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq)); + if (!i915_request_timeline(rq)->has_initial_breadcrumb) + return 0; + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = hwsp_offset(rq); + *cs++ = 0; + *cs++ = rq->fence.seqno - 1; + + /* + * Check if we have been preempted before we even get started. + * + * After this point i915_request_started() reports true, even if + * we get preempted and so are no longer running. + * + * i915_request_started() is used during preemption processing + * to decide if the request is currently inside the user payload + * or spinning on a kernel semaphore (or earlier). For no-preemption + * requests, we do allow preemption on the semaphore before the user + * payload, but do not allow preemption once the request is started. + * + * i915_request_started() is similarly used during GPU hangs to + * determine if the user's payload was guilty, and if so, the + * request is banned. Before the request is started, it is assumed + * to be unharmed and an innocent victim of another's hang. + */ + *cs++ = MI_NOOP; + *cs++ = MI_ARB_CHECK; + + intel_ring_advance(rq, cs); + + /* Record the updated position of the request's payload */ + rq->infix = intel_ring_offset(rq, cs); + + __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags); + + return 0; +} + +int gen8_emit_bb_start_noarb(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + * WaDisableCtxRestoreArbitration:bdw,chv + * + * We don't need to perform MI_ARB_ENABLE as often as we do (in + * particular all the gen that do not need the w/a at all!), if we + * took care to make sure that on every switch into this context + * (both ordinary and for preemption) that arbitration was enabled + * we would be fine. However, for gen8 there is another w/a that + * requires us to not preempt inside GPGPU execution, so we keep + * arbitration disabled for gen8 batches. Arbitration will be + * re-enabled before we close the request + * (engine->emit_fini_breadcrumb).
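The initial breadcrumb written above is rq->fence.seqno - 1, so i915_request_started() reduces to a wrap-safe comparison of the HWSP value against seqno - 1. A sketch of that idiom; seqno_passed() mirrors what the driver's i915_seqno_passed() does, assuming u32 sequence numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a has reached b" comparison, same idiom as i915_seqno_passed(). */
static bool seqno_passed(uint32_t hwsp, uint32_t seqno)
{
	return (int32_t)(hwsp - seqno) >= 0;
}

int main(void)
{
	uint32_t seqno = 0x80000000;

	/* The init breadcrumb stores seqno - 1, so "started" flips as soon as
	 * the CS executes it, before the user payload runs. */
	printf("started before HW write: %d\n", seqno_passed(seqno - 2, seqno - 1));
	printf("started after HW write:  %d\n", seqno_passed(seqno - 1, seqno - 1));

	/* The comparison stays correct across the u32 wrap. */
	printf("across wrap: %d\n", seqno_passed(3, 0xfffffffe));
	return 0;
}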
+ */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + + /* FIXME(BDW+): Address space and security selectors. */ + *cs++ = MI_BATCH_BUFFER_START_GEN8 | + (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + + intel_ring_advance(rq, cs); + + return 0; +} + +int gen8_emit_bb_start(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags) +{ + u32 *cs; + + if (unlikely(i915_request_has_nopreempt(rq))) + return gen8_emit_bb_start_noarb(rq, offset, len, flags); + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + + *cs++ = MI_BATCH_BUFFER_START_GEN8 | + (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + + return 0; +} + +static void assert_request_valid(struct i915_request *rq) +{ + struct intel_ring *ring __maybe_unused = rq->ring; + + /* Can we unwind this request without appearing to go forwards? */ + GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0); +} + +/* + * Reserve space for 2 NOOPs at the end of each request to be + * used as a workaround for not being allowed to do lite + * restore with HEAD==TAIL (WaIdleLiteRestore). + */ +static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs) +{ + /* Ensure there's always at least one preemption point per-request. */ + *cs++ = MI_ARB_CHECK; + *cs++ = MI_NOOP; + rq->wa_tail = intel_ring_offset(rq, cs); + + /* Check that entire request is less than half the ring */ + assert_request_valid(rq); + + return cs; +} + +static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs) +{ + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = preempt_address(rq->engine); + *cs++ = 0; + + return cs; +} + +static __always_inline u32* +gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs) +{ + *cs++ = MI_USER_INTERRUPT; + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + if (intel_engine_has_semaphores(rq->engine)) + cs = emit_preempt_busywait(rq, cs); + + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); + + return gen8_emit_wa_tail(rq, cs); +} + +static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs) +{ + return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0); +} + +u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs) +{ + return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs)); +} + +u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) +{ + cs = gen8_emit_pipe_control(cs, + PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE, + 0); + + /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ + cs = gen8_emit_ggtt_write_rcs(cs, + rq->fence.seqno, + hwsp_offset(rq), + PIPE_CONTROL_FLUSH_ENABLE | + PIPE_CONTROL_CS_STALL); + + return gen8_emit_fini_breadcrumb_tail(rq, cs); +} + +u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) +{ + cs = gen8_emit_ggtt_write_rcs(cs, + rq->fence.seqno, + hwsp_offset(rq), + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_TILE_CACHE_FLUSH | + PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_FLUSH_ENABLE); + + return gen8_emit_fini_breadcrumb_tail(rq, cs); +} + +/* + * Note that 
the CS instruction pre-parser will not stall on the breadcrumb + * flush and will continue pre-fetching the instructions after it before the + * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at + * BB_START/END instructions, so, even though we might pre-fetch the pre-amble + * of the next request before the memory has been flushed, we're guaranteed that + * we won't access the batch itself too early. + * However, on gen12+ the parser can pre-fetch across the BB_START/END commands, + * so, if the current request is modifying an instruction in the next request on + * the same intel_context, we might pre-fetch and then execute the pre-update + * instruction. To avoid this, the users of self-modifying code should either + * disable the parser around the code emitting the memory writes, via a new flag + * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For + * the in-kernel use-cases we've opted to use a separate context, see + * reloc_gpu() as an example. + * All the above applies only to the instructions themselves. Non-inline data + * used by the instructions is not pre-fetched. + */ + +static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs) +{ + *cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */ + *cs++ = MI_SEMAPHORE_WAIT_TOKEN | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = preempt_address(rq->engine); + *cs++ = 0; + *cs++ = 0; + + return cs; +} + +static __always_inline u32* +gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs) +{ + *cs++ = MI_USER_INTERRUPT; + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + if (intel_engine_has_semaphores(rq->engine)) + cs = gen12_emit_preempt_busywait(rq, cs); + + rq->tail = intel_ring_offset(rq, cs); + assert_ring_tail_valid(rq->ring, rq->tail); + + return gen8_emit_wa_tail(rq, cs); +} + +u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs) +{ + /* XXX Stalling flush before seqno write; post-sync not */ + cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0)); + return gen12_emit_fini_breadcrumb_tail(rq, cs); +} + +u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs) +{ + cs = gen12_emit_ggtt_write_rcs(cs, + rq->fence.seqno, + hwsp_offset(rq), + PIPE_CONTROL0_HDC_PIPELINE_FLUSH, + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_TILE_CACHE_FLUSH | + PIPE_CONTROL_FLUSH_L3 | + PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + /* Wa_1409600907:tgl */ + PIPE_CONTROL_DEPTH_STALL | + PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_FLUSH_ENABLE); + + return gen12_emit_fini_breadcrumb_tail(rq, cs); +} diff --git a/drivers/gpu/drm/i915/gt/gen8_engine_cs.h b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h new file mode 100644 index 000000000000..cc6e21d3662a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/gen8_engine_cs.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014 Intel Corporation + */ + +#ifndef __GEN8_ENGINE_CS_H__ +#define __GEN8_ENGINE_CS_H__ + +#include <linux/string.h> +#include <linux/types.h> + +#include "i915_gem.h" /* GEM_BUG_ON */ + +#include "intel_gpu_commands.h" + +struct i915_request; + +int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode); +int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode); +int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode); + +int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode); +int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode); + +int gen8_emit_init_breadcrumb(struct 
i915_request *rq); + +int gen8_emit_bb_start_noarb(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags); +int gen8_emit_bb_start(struct i915_request *rq, + u64 offset, u32 len, + const unsigned int flags); + +u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs); +u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs); + +u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); +u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); +u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs); + +static inline u32 * +__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) +{ + memset(batch, 0, 6 * sizeof(u32)); + + batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0; + batch[1] = flags1; + batch[2] = offset; + + return batch + 6; +} + +static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset) +{ + return __gen8_emit_pipe_control(batch, 0, flags, offset); +} + +static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) +{ + return __gen8_emit_pipe_control(batch, flags0, flags1, offset); +} + +static inline u32 * +__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1) +{ + *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0; + *cs++ = flags1 | PIPE_CONTROL_QW_WRITE; + *cs++ = offset; + *cs++ = 0; + *cs++ = value; + *cs++ = 0; /* We're thrashing one extra dword. */ + + return cs; +} + +static inline u32* +gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags) +{ + /* We're using qword write, offset should be aligned to 8 bytes. */ + GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); + + return __gen8_emit_write_rcs(cs, + value, + gtt_offset, + 0, + flags | PIPE_CONTROL_GLOBAL_GTT_IVB); +} + +static inline u32* +gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1) +{ + /* We're using qword write, offset should be aligned to 8 bytes. */ + GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); + + return __gen8_emit_write_rcs(cs, + value, + gtt_offset, + flags0, + flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB); +} + +static inline u32 * +__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags) +{ + *cs++ = (MI_FLUSH_DW + 1) | flags; + *cs++ = gtt_offset; + *cs++ = 0; + *cs++ = value; + + return cs; +} + +static inline u32 * +gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags) +{ + /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. 
*/ + GEM_BUG_ON(gtt_offset & (1 << 5)); + /* Offset should be aligned to 8 bytes for both (QW/DW) write types */ + GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); + + return __gen8_emit_flush_dw(cs, + value, + gtt_offset | MI_FLUSH_DW_USE_GTT, + flags | MI_FLUSH_DW_OP_STOREDW); +} + +#endif /* __GEN8_ENGINE_CS_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index d8b206e53660..be2c285a0ac7 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -30,18 +30,21 @@ #include "i915_trace.h" #include "intel_breadcrumbs.h" #include "intel_context.h" +#include "intel_engine_pm.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" -static void irq_enable(struct intel_engine_cs *engine) +static bool irq_enable(struct intel_engine_cs *engine) { if (!engine->irq_enable) - return; + return false; /* Caller disables interrupts */ spin_lock(&engine->gt->irq_lock); engine->irq_enable(engine); spin_unlock(&engine->gt->irq_lock); + + return true; } static void irq_disable(struct intel_engine_cs *engine) @@ -57,12 +60,11 @@ static void irq_disable(struct intel_engine_cs *engine) static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) { - lockdep_assert_held(&b->irq_lock); - - if (!b->irq_engine || b->irq_armed) - return; - - if (!intel_gt_pm_get_if_awake(b->irq_engine->gt)) + /* + * Since we are waiting on a request, the GPU should be busy + * and should have its own rpm reference. + */ + if (GEM_WARN_ON(!intel_gt_pm_get_if_awake(b->irq_engine->gt))) return; /* @@ -73,25 +75,24 @@ static void __intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) */ WRITE_ONCE(b->irq_armed, true); - /* - * Since we are waiting on a request, the GPU should be busy - * and should have its own rpm reference. This is tracked - * by i915->gt.awake, we can forgo holding our own wakref - * for the interrupt as before i915->gt.awake is released (when - * the driver is idle) we disarm the breadcrumbs. - */ - - if (!b->irq_enabled++) - irq_enable(b->irq_engine); + /* Requests may have completed before we could enable the interrupt. 
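irq_signal_request() funnels already-completed requests onto b->signaled_requests with llist_add(), and signal_irq_work() later detaches the whole list at once with llist_del_all(); the return value of the first push is what gates irq_work_queue(). A minimal userspace sketch of the same lock-free pattern using C11 atomics; the node layout and helper names are invented for the example:

#include <stdatomic.h>
#include <stdio.h>

struct node {
	struct node *next;
	int seqno;
};

/* Lock-free push; returns nonzero if the list was empty (the first waker),
 * matching the llist_add() convention used to gate irq_work_queue(). */
static int llpush(struct node *n, struct node *_Atomic *head)
{
	struct node *first = atomic_load(head);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(head, &first, n));
	return first == NULL;
}

/* Detach the entire list in one shot, like llist_del_all(). */
static struct node *lltake(struct node *_Atomic *head)
{
	return atomic_exchange(head, NULL);
}

int main(void)
{
	static struct node *_Atomic head;
	struct node a = { .seqno = 1 }, b = { .seqno = 2 };

	if (llpush(&a, &head))
		printf("first signaler: would irq_work_queue()\n");
	llpush(&b, &head);

	for (struct node *n = lltake(&head); n; n = n->next)
		printf("signal seqno %d\n", n->seqno);
	return 0;
}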
*/ + if (!b->irq_enabled++ && irq_enable(b->irq_engine)) + irq_work_queue(&b->irq_work); } -static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) +static void intel_breadcrumbs_arm_irq(struct intel_breadcrumbs *b) { - lockdep_assert_held(&b->irq_lock); - - if (!b->irq_engine || !b->irq_armed) + if (!b->irq_engine) return; + spin_lock(&b->irq_lock); + if (!b->irq_armed) + __intel_breadcrumbs_arm_irq(b); + spin_unlock(&b->irq_lock); +} + +static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) +{ GEM_BUG_ON(!b->irq_enabled); if (!--b->irq_enabled) irq_disable(b->irq_engine); @@ -100,25 +101,37 @@ static void __intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) intel_gt_pm_put_async(b->irq_engine->gt); } +static void intel_breadcrumbs_disarm_irq(struct intel_breadcrumbs *b) +{ + spin_lock(&b->irq_lock); + if (b->irq_armed) + __intel_breadcrumbs_disarm_irq(b); + spin_unlock(&b->irq_lock); +} + static void add_signaling_context(struct intel_breadcrumbs *b, struct intel_context *ce) { - intel_context_get(ce); - list_add_tail(&ce->signal_link, &b->signalers); - if (list_is_first(&ce->signal_link, &b->signalers)) - __intel_breadcrumbs_arm_irq(b); + lockdep_assert_held(&ce->signal_lock); + + spin_lock(&b->signalers_lock); + list_add_rcu(&ce->signal_link, &b->signalers); + spin_unlock(&b->signalers_lock); } -static void remove_signaling_context(struct intel_breadcrumbs *b, +static bool remove_signaling_context(struct intel_breadcrumbs *b, struct intel_context *ce) { - list_del(&ce->signal_link); - intel_context_put(ce); -} + lockdep_assert_held(&ce->signal_lock); -static inline bool __request_completed(const struct i915_request *rq) -{ - return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno); + if (!list_empty(&ce->signals)) + return false; + + spin_lock(&b->signalers_lock); + list_del_rcu(&ce->signal_link); + spin_unlock(&b->signalers_lock); + + return true; } __maybe_unused static bool @@ -174,43 +187,65 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl) intel_engine_add_retire(b->irq_engine, tl); } -static bool __signal_request(struct i915_request *rq, struct list_head *signals) +static struct llist_node * +slist_add(struct llist_node *node, struct llist_node *head) { - clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); - - if (!__dma_fence_signal(&rq->fence)) { - i915_request_put(rq); - return false; - } - - list_add_tail(&rq->signal_link, signals); - return true; + node->next = head; + return node; } static void signal_irq_work(struct irq_work *work) { struct intel_breadcrumbs *b = container_of(work, typeof(*b), irq_work); const ktime_t timestamp = ktime_get(); - struct intel_context *ce, *cn; - struct list_head *pos, *next; - LIST_HEAD(signal); + struct llist_node *signal, *sn; + struct intel_context *ce; - spin_lock(&b->irq_lock); + signal = NULL; + if (unlikely(!llist_empty(&b->signaled_requests))) + signal = llist_del_all(&b->signaled_requests); - if (list_empty(&b->signalers)) - __intel_breadcrumbs_disarm_irq(b); + /* + * Keep the irq armed until the interrupt after all listeners are gone. + * + * Enabling/disabling the interrupt is rather costly, roughly a couple + * of hundred microseconds. If we are proactive and enable/disable + * the interrupt around every request that wants a breadcrumb, we + * quickly drown in the extra orders of magnitude of latency imposed + * on request submission. 
+ * + * So we try to be lazy, and keep the interrupts enabled until no + * more listeners appear within a breadcrumb interrupt interval (that + * is until a request completes that no one cares about). The + * observation is that listeners come in batches, and will often + * listen to a bunch of requests in succession. Though note on icl+, + * interrupts are always enabled due to concerns with rc6 being + * dysfunctional with per-engine interrupt masking. + * + * We also try to avoid raising too many interrupts, as they may + * be generated by userspace batches and it is unfortunately rather + * too easy to drown the CPU under a flood of GPU interrupts. Thus + * whenever no one appears to be listening, we turn off the interrupts. + * Fewer interrupts should conserve power -- at the very least, fewer + * interrupts draw less ire from other users of the system and tools + * like powertop. + */ + if (!signal && READ_ONCE(b->irq_armed) && list_empty(&b->signalers)) + intel_breadcrumbs_disarm_irq(b); - list_splice_init(&b->signaled_requests, &signal); + rcu_read_lock(); + atomic_inc(&b->signaler_active); + list_for_each_entry_rcu(ce, &b->signalers, signal_link) { + struct i915_request *rq; - list_for_each_entry_safe(ce, cn, &b->signalers, signal_link) { - GEM_BUG_ON(list_empty(&ce->signals)); + list_for_each_entry_rcu(rq, &ce->signals, signal_link) { + bool release; - list_for_each_safe(pos, next, &ce->signals) { - struct i915_request *rq = - list_entry(pos, typeof(*rq), signal_link); + if (!__i915_request_is_complete(rq)) + break; - GEM_BUG_ON(!check_signal_order(ce, rq)); - if (!__request_completed(rq)) + if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, + &rq->fence.flags)) break; /* @@ -218,29 +253,29 @@ static void signal_irq_work(struct irq_work *work) * spinlock as the callback chain may end up adding * more signalers to the same context or engine. */ - __signal_request(rq, &signal); - } - - /* - * We process the list deletion in bulk, only using a list_add - * (not list_move) above but keeping the status of - * rq->signal_link known with the I915_FENCE_FLAG_SIGNAL bit.
- */ - if (!list_is_first(pos, &ce->signals)) { - /* Advance the list to the first incomplete request */ - __list_del_many(&ce->signals, pos); - if (&ce->signals == pos) { /* now empty */ - add_retire(b, ce->timeline); - remove_signaling_context(b, ce); + spin_lock(&ce->signal_lock); + list_del_rcu(&rq->signal_link); + release = remove_signaling_context(b, ce); + spin_unlock(&ce->signal_lock); + if (release) { + if (intel_timeline_is_last(ce->timeline, rq)) + add_retire(b, ce->timeline); + intel_context_put(ce); } + + if (__dma_fence_signal(&rq->fence)) + /* We own signal_node now, xfer to local list */ + signal = slist_add(&rq->signal_node, signal); + else + i915_request_put(rq); } } + atomic_dec(&b->signaler_active); + rcu_read_unlock(); - spin_unlock(&b->irq_lock); - - list_for_each_safe(pos, next, &signal) { + llist_for_each_safe(signal, sn, signal) { struct i915_request *rq = - list_entry(pos, typeof(*rq), signal_link); + llist_entry(signal, typeof(*rq), signal_node); struct list_head cb_list; spin_lock(&rq->lock); @@ -251,6 +286,9 @@ static void signal_irq_work(struct irq_work *work) i915_request_put(rq); } + + if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers)) + intel_breadcrumbs_arm_irq(b); } struct intel_breadcrumbs * @@ -262,14 +300,15 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine) if (!b) return NULL; - spin_lock_init(&b->irq_lock); + b->irq_engine = irq_engine; + + spin_lock_init(&b->signalers_lock); INIT_LIST_HEAD(&b->signalers); - INIT_LIST_HEAD(&b->signaled_requests); + init_llist_head(&b->signaled_requests); + spin_lock_init(&b->irq_lock); init_irq_work(&b->irq_work, signal_irq_work); - b->irq_engine = irq_engine; - return b; } @@ -290,49 +329,61 @@ void intel_breadcrumbs_reset(struct intel_breadcrumbs *b) spin_unlock_irqrestore(&b->irq_lock, flags); } -void intel_breadcrumbs_park(struct intel_breadcrumbs *b) +void __intel_breadcrumbs_park(struct intel_breadcrumbs *b) { - unsigned long flags; - if (!READ_ONCE(b->irq_armed)) return; - spin_lock_irqsave(&b->irq_lock, flags); - __intel_breadcrumbs_disarm_irq(b); - spin_unlock_irqrestore(&b->irq_lock, flags); - - if (!list_empty(&b->signalers)) - irq_work_queue(&b->irq_work); + /* Kick the work once more to drain the signalers, and disarm the irq */ + irq_work_sync(&b->irq_work); + while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) { + local_irq_disable(); + signal_irq_work(&b->irq_work); + local_irq_enable(); + cond_resched(); + } } void intel_breadcrumbs_free(struct intel_breadcrumbs *b) { + irq_work_sync(&b->irq_work); + GEM_BUG_ON(!list_empty(&b->signalers)); + GEM_BUG_ON(b->irq_armed); kfree(b); } -static void insert_breadcrumb(struct i915_request *rq, - struct intel_breadcrumbs *b) +static void irq_signal_request(struct i915_request *rq, + struct intel_breadcrumbs *b) { + if (!__dma_fence_signal(&rq->fence)) + return; + + i915_request_get(rq); + if (llist_add(&rq->signal_node, &b->signaled_requests)) + irq_work_queue(&b->irq_work); +} + +static void insert_breadcrumb(struct i915_request *rq) +{ + struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs; struct intel_context *ce = rq->context; struct list_head *pos; if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) return; - i915_request_get(rq); - /* * If the request is already completed, we can transfer it * straight onto a signaled list, and queue the irq worker for * its signal completion. 
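When the request is still in flight, insert_breadcrumb() keeps ce->signals ordered by seqno so the irq worker can stop at the first incomplete request; the driver searches from the tail since requests usually arrive in order. A forward-walking sketch of the same invariant with the wrap-safe comparison; types and names are invented:

#include <stdint.h>
#include <stdio.h>

struct req {
	uint32_t seqno;
	struct req *next;
};

/* Keep the per-context signal list ordered by seqno so a walker can stop
 * at the first incomplete request; wrap-safe compare as before. */
static void insert_sorted(struct req **head, struct req *rq)
{
	struct req **pos = head;

	while (*pos && (int32_t)(rq->seqno - (*pos)->seqno) >= 0)
		pos = &(*pos)->next;
	rq->next = *pos;
	*pos = rq;
}

int main(void)
{
	struct req r1 = { .seqno = 10 }, r2 = { .seqno = 12 }, r3 = { .seqno = 11 };
	struct req *head = NULL;

	insert_sorted(&head, &r2);
	insert_sorted(&head, &r1);
	insert_sorted(&head, &r3);	/* lands between 10 and 12 */

	for (struct req *r = head; r; r = r->next)
		printf("seqno %u\n", r->seqno);
	return 0;
}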
*/ - if (__request_completed(rq)) { - if (__signal_request(rq, &b->signaled_requests)) - irq_work_queue(&b->irq_work); + if (__i915_request_is_complete(rq)) { + irq_signal_request(rq, b); return; } if (list_empty(&ce->signals)) { + intel_context_get(ce); add_signaling_context(b, ce); pos = &ce->signals; } else { @@ -358,18 +409,24 @@ static void insert_breadcrumb(struct i915_request *rq, break; } } - list_add(&rq->signal_link, pos); + + i915_request_get(rq); + list_add_rcu(&rq->signal_link, pos); GEM_BUG_ON(!check_signal_order(ce, rq)); + GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)); set_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); - /* Check after attaching to irq, interrupt may have already fired. */ - if (__request_completed(rq)) - irq_work_queue(&b->irq_work); + /* + * Defer enabling the interrupt to after HW submission and recheck + * the request as it may have completed and raised the interrupt as + * we were attaching it into the lists. + */ + irq_work_queue(&b->irq_work); } bool i915_request_enable_breadcrumb(struct i915_request *rq) { - struct intel_breadcrumbs *b; + struct intel_context *ce = rq->context; /* Serialises with i915_request_retire() using rq->lock */ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) @@ -384,67 +441,68 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq) if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) return true; - /* - * rq->engine is locked by rq->engine->active.lock. That however - * is not known until after rq->engine has been dereferenced and - * the lock acquired. Hence we acquire the lock and then validate - * that rq->engine still matches the lock we hold for it. - * - * Here, we are using the breadcrumb lock as a proxy for the - * rq->engine->active.lock, and we know that since the breadcrumb - * will be serialised within i915_request_submit/i915_request_unsubmit, - * the engine cannot change while active as long as we hold the - * breadcrumb lock on that engine. - * - * From the dma_fence_enable_signaling() path, we are outside of the - * request submit/unsubmit path, and so we must be more careful to - * acquire the right lock. - */ - b = READ_ONCE(rq->engine)->breadcrumbs; - spin_lock(&b->irq_lock); - while (unlikely(b != READ_ONCE(rq->engine)->breadcrumbs)) { - spin_unlock(&b->irq_lock); - b = READ_ONCE(rq->engine)->breadcrumbs; - spin_lock(&b->irq_lock); - } - - /* - * Now that we are finally serialised with request submit/unsubmit, - * [with b->irq_lock] and with i915_request_retire() [via checking - * SIGNALED with rq->lock] confirm the request is indeed active. If - * it is no longer active, the breadcrumb will be attached upon - * i915_request_submit(). - */ + spin_lock(&ce->signal_lock); if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) - insert_breadcrumb(rq, b); - - spin_unlock(&b->irq_lock); + insert_breadcrumb(rq); + spin_unlock(&ce->signal_lock); return true; } void i915_request_cancel_breadcrumb(struct i915_request *rq) { - struct intel_breadcrumbs *b = rq->engine->breadcrumbs; + struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs; + struct intel_context *ce = rq->context; + unsigned long flags; + bool release; - /* - * We must wait for b->irq_lock so that we know the interrupt handler - * has released its reference to the intel_context and has completed - * the DMA_FENCE_FLAG_SIGNALED_BIT/I915_FENCE_FLAG_SIGNAL dance (if - * required). 
- */ - spin_lock(&b->irq_lock); - if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) { - struct intel_context *ce = rq->context; + if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) + return; + + spin_lock_irqsave(&ce->signal_lock, flags); + list_del_rcu(&rq->signal_link); + release = remove_signaling_context(b, ce); + spin_unlock_irqrestore(&ce->signal_lock, flags); + if (release) + intel_context_put(ce); + + if (__i915_request_is_complete(rq)) + irq_signal_request(rq, b); - list_del(&rq->signal_link); - if (list_empty(&ce->signals)) - remove_signaling_context(b, ce); + i915_request_put(rq); +} + +void intel_context_remove_breadcrumbs(struct intel_context *ce, + struct intel_breadcrumbs *b) +{ + struct i915_request *rq, *rn; + bool release = false; + unsigned long flags; - clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags); + spin_lock_irqsave(&ce->signal_lock, flags); + + if (list_empty(&ce->signals)) + goto unlock; + + list_for_each_entry_safe(rq, rn, &ce->signals, signal_link) { + GEM_BUG_ON(!__i915_request_is_complete(rq)); + if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, + &rq->fence.flags)) + continue; + + list_del_rcu(&rq->signal_link); + irq_signal_request(rq, b); i915_request_put(rq); } - spin_unlock(&b->irq_lock); + release = remove_signaling_context(b, ce); + +unlock: + spin_unlock_irqrestore(&ce->signal_lock, flags); + if (release) + intel_context_put(ce); + + while (atomic_read(&b->signaler_active)) + cpu_relax(); } static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p) @@ -454,18 +512,17 @@ static void print_signals(struct intel_breadcrumbs *b, struct drm_printer *p) drm_printf(p, "Signals:\n"); - spin_lock_irq(&b->irq_lock); - list_for_each_entry(ce, &b->signalers, signal_link) { - list_for_each_entry(rq, &ce->signals, signal_link) { + rcu_read_lock(); + list_for_each_entry_rcu(ce, &b->signalers, signal_link) { + list_for_each_entry_rcu(rq, &ce->signals, signal_link) drm_printf(p, "\t[%llx:%llx%s] @ %dms\n", rq->fence.context, rq->fence.seqno, i915_request_completed(rq) ? "!" : i915_request_started(rq) ? 
"*" : "", jiffies_to_msecs(jiffies - rq->emitted_jiffies)); - } } - spin_unlock_irq(&b->irq_lock); + rcu_read_unlock(); } void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h index ed3d1deabfbd..3ce5ce270b04 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.h @@ -6,6 +6,7 @@ #ifndef __INTEL_BREADCRUMBS__ #define __INTEL_BREADCRUMBS__ +#include <linux/atomic.h> #include <linux/irq_work.h> #include "intel_engine_types.h" @@ -19,7 +20,18 @@ intel_breadcrumbs_create(struct intel_engine_cs *irq_engine); void intel_breadcrumbs_free(struct intel_breadcrumbs *b); void intel_breadcrumbs_reset(struct intel_breadcrumbs *b); -void intel_breadcrumbs_park(struct intel_breadcrumbs *b); +void __intel_breadcrumbs_park(struct intel_breadcrumbs *b); + +static inline void intel_breadcrumbs_unpark(struct intel_breadcrumbs *b) +{ + atomic_inc(&b->active); +} + +static inline void intel_breadcrumbs_park(struct intel_breadcrumbs *b) +{ + if (atomic_dec_and_test(&b->active)) + __intel_breadcrumbs_park(b); +} static inline void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine) @@ -33,4 +45,7 @@ void intel_engine_print_breadcrumbs(struct intel_engine_cs *engine, bool i915_request_enable_breadcrumb(struct i915_request *request); void i915_request_cancel_breadcrumb(struct i915_request *request); +void intel_context_remove_breadcrumbs(struct intel_context *ce, + struct intel_breadcrumbs *b); + #endif /* __INTEL_BREADCRUMBS__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h index 8e53b9942695..3a084ce8ff5e 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs_types.h @@ -29,19 +29,20 @@ * the overhead of waking that client is much preferred. 
*/ struct intel_breadcrumbs { - spinlock_t irq_lock; /* protects the lists used in hardirq context */ - - /* Not all breadcrumbs are attached to physical HW */ - struct intel_engine_cs *irq_engine; + atomic_t active; + spinlock_t signalers_lock; /* protects the list of signalers */ struct list_head signalers; - struct list_head signaled_requests; + struct llist_head signaled_requests; + atomic_t signaler_active; + spinlock_t irq_lock; /* protects the interrupt from hardirq context */ struct irq_work irq_work; /* for use from inside irq_lock */ - unsigned int irq_enabled; - bool irq_armed; + + /* Not all breadcrumbs are attached to physical HW */ + struct intel_engine_cs *irq_engine; }; #endif /* __INTEL_BREADCRUMBS_TYPES__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index 92a3f25c4006..349e7fa1488d 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -25,11 +25,18 @@ static struct intel_context *intel_context_alloc(void) return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL); } -void intel_context_free(struct intel_context *ce) +static void rcu_context_free(struct rcu_head *rcu) { + struct intel_context *ce = container_of(rcu, typeof(*ce), rcu); + kmem_cache_free(global.slab_ce, ce); } +void intel_context_free(struct intel_context *ce) +{ + call_rcu(&ce->rcu, rcu_context_free); +} + struct intel_context * intel_context_create(struct intel_engine_cs *engine) { @@ -356,8 +363,7 @@ static int __intel_context_active(struct i915_active *active) } void -intel_context_init(struct intel_context *ce, - struct intel_engine_cs *engine) +intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine) { GEM_BUG_ON(!engine->cops); GEM_BUG_ON(!engine->gt->vm); @@ -373,7 +379,8 @@ intel_context_init(struct intel_context *ce, ce->vm = i915_vm_get(engine->gt->vm); - INIT_LIST_HEAD(&ce->signal_link); + /* NB ce->signal_link/lock is used under RCU */ + spin_lock_init(&ce->signal_lock); INIT_LIST_HEAD(&ce->signals); mutex_init(&ce->pin_mutex); diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index fda2eba81e22..d24ab6fa0ee5 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -191,6 +191,11 @@ static inline bool intel_context_is_closed(const struct intel_context *ce) return test_bit(CONTEXT_CLOSED_BIT, &ce->flags); } +static inline bool intel_context_has_inflight(const struct intel_context *ce) +{ + return test_bit(COPS_HAS_INFLIGHT_BIT, &ce->ops->flags); +} + static inline bool intel_context_use_semaphores(const struct intel_context *ce) { return test_bit(CONTEXT_USE_SEMAPHORES, &ce->flags); @@ -248,16 +253,14 @@ intel_context_clear_nopreempt(struct intel_context *ce) static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce) { - const u32 period = - RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns; + const u32 period = ce->engine->gt->clock_period_ns; return READ_ONCE(ce->runtime.total) * period; } static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce) { - const u32 period = - RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns; + const u32 period = ce->engine->gt->clock_period_ns; return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period); } diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c index b9c8163978a3..8dfd8f656aaa 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_sseu.c +++ 
b/drivers/gpu/drm/i915/gt/intel_context_sseu.c @@ -9,7 +9,6 @@ #include "intel_engine_pm.h" #include "intel_gpu_commands.h" #include "intel_lrc.h" -#include "intel_lrc_reg.h" #include "intel_ring.h" #include "intel_sseu.h" diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index 552cb57a2e8c..e10d78601bbd 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -25,10 +25,15 @@ DECLARE_EWMA(runtime, 3, 8); struct i915_gem_context; struct i915_gem_ww_ctx; struct i915_vma; +struct intel_breadcrumbs; struct intel_context; struct intel_ring; struct intel_context_ops { + unsigned long flags; +#define COPS_HAS_INFLIGHT_BIT 0 +#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT) + int (*alloc)(struct intel_context *ce); int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr); @@ -44,18 +49,38 @@ struct intel_context_ops { }; struct intel_context { - struct kref ref; + /* + * Note: Some fields may be accessed under RCU. + * + * Unless otherwise noted a field can safely be assumed to be protected + * by strong reference counting. + */ + union { + struct kref ref; /* no kref_get_unless_zero()! */ + struct rcu_head rcu; + }; struct intel_engine_cs *engine; struct intel_engine_cs *inflight; -#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2) -#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2) +#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3) +#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3) +#define intel_context_inflight(ce) \ + __intel_context_inflight(READ_ONCE((ce)->inflight)) +#define intel_context_inflight_count(ce) \ + __intel_context_inflight_count(READ_ONCE((ce)->inflight)) struct i915_address_space *vm; struct i915_gem_context __rcu *gem_context; - struct list_head signal_link; - struct list_head signals; + /* + * @signal_lock protects the list of requests that need signaling, + * @signals. While there are any requests that need signaling, + * we add the context to the breadcrumbs worker, and remove it + * upon completion/cancellation of the last request. 
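The widened intel_context_inflight() macros above pack a small in-flight count into the low bits of the engine pointer, now three bits instead of two. A self-contained sketch of that pointer-tagging idiom, in the spirit of the driver's ptr_mask_bits()/ptr_unmask_bits() helpers; the names here are invented:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack a small count into the low bits of a sufficiently aligned pointer. */
#define TAG_BITS 3
#define TAG_MASK ((1ul << TAG_BITS) - 1)

static void *ptr_pack(void *ptr, unsigned long count)
{
	uintptr_t v = (uintptr_t)ptr;

	assert(!(v & TAG_MASK));	/* needs at least 8-byte alignment */
	assert(count <= TAG_MASK);
	return (void *)(v | count);
}

static void *ptr_mask(void *packed)		/* ~ptr_mask_bits() */
{
	return (void *)((uintptr_t)packed & ~(uintptr_t)TAG_MASK);
}

static unsigned long ptr_unmask(void *packed)	/* ~ptr_unmask_bits() */
{
	return (unsigned long)((uintptr_t)packed & TAG_MASK);
}

int main(void)
{
	static _Alignas(8) long engine;	/* stand-in for struct intel_engine_cs */
	void *inflight = ptr_pack(&engine, 2);

	printf("engine %p, inflight count %lu\n",
	       ptr_mask(inflight), ptr_unmask(inflight));
	return 0;
}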
+ */ + struct list_head signal_link; /* Accessed under RCU */ + struct list_head signals; /* Guarded by signal_lock */ + spinlock_t signal_lock; /* protects signals, the list of requests */ struct i915_vma *state; struct intel_ring *ring; @@ -64,12 +89,13 @@ struct intel_context { unsigned long flags; #define CONTEXT_BARRIER_BIT 0 #define CONTEXT_ALLOC_BIT 1 -#define CONTEXT_VALID_BIT 2 -#define CONTEXT_CLOSED_BIT 3 -#define CONTEXT_USE_SEMAPHORES 4 -#define CONTEXT_BANNED 5 -#define CONTEXT_FORCE_SINGLE_SUBMISSION 6 -#define CONTEXT_NOPREEMPT 7 +#define CONTEXT_INIT_BIT 2 +#define CONTEXT_VALID_BIT 3 +#define CONTEXT_CLOSED_BIT 4 +#define CONTEXT_USE_SEMAPHORES 5 +#define CONTEXT_BANNED 6 +#define CONTEXT_FORCE_SINGLE_SUBMISSION 7 +#define CONTEXT_NOPREEMPT 8 u32 *lrc_reg_state; union { diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 760fefdfe392..47ee8578e511 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -15,7 +15,6 @@ #include "i915_selftest.h" #include "gt/intel_timeline.h" #include "intel_engine_types.h" -#include "intel_gpu_commands.h" #include "intel_workarounds.h" struct drm_printer; @@ -223,91 +222,6 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine, void intel_engine_init_execlists(struct intel_engine_cs *engine); -static inline u32 *__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) -{ - memset(batch, 0, 6 * sizeof(u32)); - - batch[0] = GFX_OP_PIPE_CONTROL(6) | flags0; - batch[1] = flags1; - batch[2] = offset; - - return batch + 6; -} - -static inline u32 *gen8_emit_pipe_control(u32 *batch, u32 flags, u32 offset) -{ - return __gen8_emit_pipe_control(batch, 0, flags, offset); -} - -static inline u32 *gen12_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset) -{ - return __gen8_emit_pipe_control(batch, flags0, flags1, offset); -} - -static inline u32 * -__gen8_emit_write_rcs(u32 *cs, u32 value, u32 offset, u32 flags0, u32 flags1) -{ - *cs++ = GFX_OP_PIPE_CONTROL(6) | flags0; - *cs++ = flags1 | PIPE_CONTROL_QW_WRITE; - *cs++ = offset; - *cs++ = 0; - *cs++ = value; - *cs++ = 0; /* We're thrashing one extra dword. */ - - return cs; -} - -static inline u32* -gen8_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags) -{ - /* We're using qword write, offset should be aligned to 8 bytes. */ - GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); - - return __gen8_emit_write_rcs(cs, - value, - gtt_offset, - 0, - flags | PIPE_CONTROL_GLOBAL_GTT_IVB); -} - -static inline u32* -gen12_emit_ggtt_write_rcs(u32 *cs, u32 value, u32 gtt_offset, u32 flags0, u32 flags1) -{ - /* We're using qword write, offset should be aligned to 8 bytes. */ - GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); - - return __gen8_emit_write_rcs(cs, - value, - gtt_offset, - flags0, - flags1 | PIPE_CONTROL_GLOBAL_GTT_IVB); -} - -static inline u32 * -__gen8_emit_flush_dw(u32 *cs, u32 value, u32 gtt_offset, u32 flags) -{ - *cs++ = (MI_FLUSH_DW + 1) | flags; - *cs++ = gtt_offset; - *cs++ = 0; - *cs++ = value; - - return cs; -} - -static inline u32 * -gen8_emit_ggtt_write(u32 *cs, u32 value, u32 gtt_offset, u32 flags) -{ - /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. 
*/ - GEM_BUG_ON(gtt_offset & (1 << 5)); - /* Offset should be aligned to 8 bytes for both (QW/DW) write types */ - GEM_BUG_ON(!IS_ALIGNED(gtt_offset, 8)); - - return __gen8_emit_flush_dw(cs, - value, - gtt_offset | MI_FLUSH_DW_USE_GTT, - flags | MI_FLUSH_DW_OP_STOREDW); -} - static inline void __intel_engine_reset(struct intel_engine_cs *engine, bool stalled) { @@ -318,7 +232,12 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine, bool intel_engines_are_idle(struct intel_gt *gt); bool intel_engine_is_idle(struct intel_engine_cs *engine); -void intel_engine_flush_submission(struct intel_engine_cs *engine); + +void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync); +static inline void intel_engine_flush_submission(struct intel_engine_cs *engine) +{ + __intel_engine_flush_submission(engine, true); +} void intel_engines_reset_default_submission(struct intel_gt *gt); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 0b31670343f5..fa76602f9852 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -33,12 +33,14 @@ #include "intel_engine.h" #include "intel_engine_pm.h" #include "intel_engine_user.h" +#include "intel_execlists_submission.h" #include "intel_gt.h" #include "intel_gt_requests.h" #include "intel_gt_pm.h" -#include "intel_lrc.h" +#include "intel_lrc_reg.h" #include "intel_reset.h" #include "intel_ring.h" +#include "uc/intel_guc_submission.h" /* Haswell does have the CXT_SIZE register however it does not appear to be * valid. Now, docs explain in dwords what is in the context object. The full @@ -647,6 +649,8 @@ static int init_status_page(struct intel_engine_cs *engine) void *vaddr; int ret; + INIT_LIST_HEAD(&engine->status_page.timelines); + /* * Though the HWS register does support 36bit addresses, historically * we have had hangs and corruption reported due to wild writes if @@ -723,6 +727,9 @@ static int engine_setup_common(struct intel_engine_cs *engine) intel_engine_init_whitelist(engine); intel_engine_init_ctx_wa(engine); + if (INTEL_GEN(engine->i915) >= 12) + engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; + return 0; err_status: @@ -829,6 +836,21 @@ create_pinned_context(struct intel_engine_cs *engine, return ce; } +static void destroy_pinned_context(struct intel_context *ce) +{ + struct intel_engine_cs *engine = ce->engine; + struct i915_vma *hwsp = engine->status_page.vma; + + GEM_BUG_ON(ce->timeline->hwsp_ggtt != hwsp); + + mutex_lock(&hwsp->vm->mutex); + list_del(&ce->timeline->engine_link); + mutex_unlock(&hwsp->vm->mutex); + + intel_context_unpin(ce); + intel_context_put(ce); +} + static struct intel_context * create_kernel_context(struct intel_engine_cs *engine) { @@ -889,7 +911,9 @@ int intel_engines_init(struct intel_gt *gt) enum intel_engine_id id; int err; - if (HAS_EXECLISTS(gt->i915)) + if (intel_uc_uses_guc_submission(>->uc)) + setup = intel_guc_submission_setup; + else if (HAS_EXECLISTS(gt->i915)) setup = intel_execlists_submission_setup; else setup = intel_ring_submission_setup; @@ -925,7 +949,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) GEM_BUG_ON(!list_empty(&engine->active.requests)); tasklet_kill(&engine->execlists.tasklet); /* flush the callback */ - cleanup_status_page(engine); intel_breadcrumbs_free(engine->breadcrumbs); intel_engine_fini_retire(engine); @@ -934,11 +957,11 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->default_state) 
fput(engine->default_state); - if (engine->kernel_context) { - intel_context_unpin(engine->kernel_context); - intel_context_put(engine->kernel_context); - } + if (engine->kernel_context) + destroy_pinned_context(engine->kernel_context); + GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); + cleanup_status_page(engine); intel_wa_list_free(&engine->ctx_wa_list); intel_wa_list_free(&engine->wa_list); @@ -1002,32 +1025,50 @@ static unsigned long stop_timeout(const struct intel_engine_cs *engine) return READ_ONCE(engine->props.stop_timeout_ms); } -int intel_engine_stop_cs(struct intel_engine_cs *engine) +static int __intel_engine_stop_cs(struct intel_engine_cs *engine, + int fast_timeout_us, + int slow_timeout_ms) { struct intel_uncore *uncore = engine->uncore; - const u32 base = engine->mmio_base; - const i915_reg_t mode = RING_MI_MODE(base); + const i915_reg_t mode = RING_MI_MODE(engine->mmio_base); int err; + intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING)); + err = __intel_wait_for_register_fw(engine->uncore, mode, + MODE_IDLE, MODE_IDLE, + fast_timeout_us, + slow_timeout_ms, + NULL); + + /* A final mmio read to let GPU writes be hopefully flushed to memory */ + intel_uncore_posting_read_fw(uncore, mode); + return err; +} + +int intel_engine_stop_cs(struct intel_engine_cs *engine) +{ + int err = 0; + if (INTEL_GEN(engine->i915) < 3) return -ENODEV; ENGINE_TRACE(engine, "\n"); + if (__intel_engine_stop_cs(engine, 1000, stop_timeout(engine))) { + ENGINE_TRACE(engine, + "timed out on STOP_RING -> IDLE; HEAD:%04x, TAIL:%04x\n", + ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR, + ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR); - intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING)); - - err = 0; - if (__intel_wait_for_register_fw(uncore, - mode, MODE_IDLE, MODE_IDLE, - 1000, stop_timeout(engine), - NULL)) { - ENGINE_TRACE(engine, "timed out on STOP_RING -> IDLE\n"); - err = -ETIMEDOUT; + /* + * Sometimes we observe that the idle flag is not + * set even though the ring is empty. So double + * check before giving up. + */ + if ((ENGINE_READ_FW(engine, RING_HEAD) & HEAD_ADDR) != + (ENGINE_READ_FW(engine, RING_TAIL) & TAIL_ADDR)) + err = -ETIMEDOUT; } - /* A final mmio read to let GPU writes be hopefully flushed to memory */ - intel_uncore_posting_read_fw(uncore, mode); - return err; } @@ -1189,17 +1230,13 @@ static bool ring_is_idle(struct intel_engine_cs *engine) return idle; } -void intel_engine_flush_submission(struct intel_engine_cs *engine) +void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync) { struct tasklet_struct *t = &engine->execlists.tasklet; if (!t->func) return; - /* Synchronise and wait for the tasklet on another CPU */ - tasklet_kill(t); - - /* Having cancelled the tasklet, ensure that is run */ local_bh_disable(); if (tasklet_trylock(t)) { /* Must wait for any GPU reset in progress. 
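__intel_engine_stop_cs() above delegates the MODE_IDLE wait to __intel_wait_for_register_fw() with a fast (microsecond, busy-wait) and a slow (millisecond, sleeping) timeout. A userspace sketch of that two-phase polling shape; the fake register read stands in for the MMIO access, and busy-loop iterations stand in for a calibrated microsecond delay:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MODE_IDLE (1u << 9)	/* bit position taken as illustrative */

/* Fake MMIO read: reports idle after some number of polls. */
static int reads;
static uint32_t read_mode_reg(void)
{
	return ++reads > 1000 ? MODE_IDLE : 0;
}

static int wait_for_idle(unsigned int fast_timeout_us, unsigned int slow_timeout_ms)
{
	/* Phase 1: busy-poll; iterations approximate fast_timeout_us. */
	for (unsigned int us = 0; us < fast_timeout_us; us++)
		if (read_mode_reg() & MODE_IDLE)
			return 0;

	/* Phase 2: sleeping poll, ~1ms per iteration, up to slow_timeout_ms. */
	for (unsigned int ms = 0; ms < slow_timeout_ms; ms++) {
		struct timespec ts = { .tv_nsec = 1000000 };

		if (read_mode_reg() & MODE_IDLE)
			return 0;
		nanosleep(&ts, NULL);
	}
	return -1;	/* the driver returns -ETIMEDOUT here */
}

int main(void)
{
	printf("stop_cs: %s\n", wait_for_idle(1000, 100) ? "timed out" : "idle");
	return 0;
}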
*/ @@ -1208,6 +1245,10 @@ void intel_engine_flush_submission(struct intel_engine_cs *engine) tasklet_unlock(t); } local_bh_enable(); + + /* Synchronise and wait for the tasklet on another CPU */ + if (sync) + tasklet_unlock_wait(t); } /** @@ -1273,8 +1314,12 @@ void intel_engines_reset_default_submission(struct intel_gt *gt) struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, gt, id) + for_each_engine(engine, gt, id) { + if (engine->sanitize) + engine->sanitize(engine); + engine->set_default_submission(engine); + } } bool intel_engine_can_store_dword(struct intel_engine_cs *engine) @@ -1294,44 +1339,6 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine) } } -static int print_sched_attr(const struct i915_sched_attr *attr, - char *buf, int x, int len) -{ - if (attr->priority == I915_PRIORITY_INVALID) - return x; - - x += snprintf(buf + x, len - x, - " prio=%d", attr->priority); - - return x; -} - -static void print_request(struct drm_printer *m, - struct i915_request *rq, - const char *prefix) -{ - const char *name = rq->fence.ops->get_timeline_name(&rq->fence); - char buf[80] = ""; - int x = 0; - - x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf)); - - drm_printf(m, "%s %llx:%llx%s%s %s @ %dms: %s\n", - prefix, - rq->fence.context, rq->fence.seqno, - i915_request_completed(rq) ? "!" : - i915_request_started(rq) ? "*" : - "", - test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, - &rq->fence.flags) ? "+" : - test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, - &rq->fence.flags) ? "-" : - "", - buf, - jiffies_to_msecs(jiffies - rq->emitted_jiffies), - name); -} - static struct intel_timeline *get_timeline(struct i915_request *rq) { struct intel_timeline *tl; @@ -1480,7 +1487,9 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, drm_printf(m, "\tIPEHR: 0x%08x\n", ENGINE_READ(engine, IPEHR)); } - if (HAS_EXECLISTS(dev_priv)) { + if (intel_engine_in_guc_submission_mode(engine)) { + /* nothing to print yet */ + } else if (HAS_EXECLISTS(dev_priv)) { struct i915_request * const *port, *rq; const u32 *hws = &engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; @@ -1529,7 +1538,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, intel_context_is_banned(rq->context) ? "*" : ""); len += print_ring(hdr + len, sizeof(hdr) - len, rq); scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); - print_request(m, rq, hdr); + i915_request_show(m, rq, hdr, 0); } for (port = execlists->pending; (rq = *port); port++) { char hdr[160]; @@ -1543,7 +1552,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, intel_context_is_banned(rq->context) ? 
"*" : ""); len += print_ring(hdr + len, sizeof(hdr) - len, rq); scnprintf(hdr + len, sizeof(hdr) - len, "rq: "); - print_request(m, rq, hdr); + i915_request_show(m, rq, hdr, 0); } rcu_read_unlock(); execlists_active_unlock_bh(execlists); @@ -1687,7 +1696,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, if (rq) { struct intel_timeline *tl = get_timeline(rq); - print_request(m, rq, "\t\tactive "); + i915_request_show(m, rq, "\t\tactive ", 0); drm_printf(m, "\t\tring->start: 0x%08x\n", i915_ggtt_offset(rq->ring->vma)); @@ -1725,7 +1734,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, drm_printf(m, "\tDevice is asleep; skipping register dump\n"); } - intel_execlists_show_requests(engine, m, print_request, 8); + intel_execlists_show_requests(engine, m, i915_request_show, 8); drm_printf(m, "HWSP:\n"); hexdump(m, engine->status_page.addr, PAGE_SIZE); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c index 9060385cd69e..d7be2b9339f9 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -37,6 +37,18 @@ static bool next_heartbeat(struct intel_engine_cs *engine) return true; } +static struct i915_request * +heartbeat_create(struct intel_context *ce, gfp_t gfp) +{ + struct i915_request *rq; + + intel_context_enter(ce); + rq = __i915_request_create(ce, gfp); + intel_context_exit(ce); + + return rq; +} + static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq) { engine->wakeref_serial = READ_ONCE(engine->serial) + 1; @@ -45,6 +57,15 @@ static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq) engine->heartbeat.systole = i915_request_get(rq); } +static void heartbeat_commit(struct i915_request *rq, + const struct i915_sched_attr *attr) +{ + idle_pulse(rq->engine, rq); + + __i915_request_commit(rq); + __i915_request_queue(rq, attr); +} + static void show_heartbeat(const struct i915_request *rq, struct intel_engine_cs *engine) { @@ -139,16 +160,11 @@ static void heartbeat(struct work_struct *wrk) goto out; } - intel_context_enter(ce); - rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); - intel_context_exit(ce); + rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN); if (IS_ERR(rq)) goto unlock; - idle_pulse(engine, rq); - - __i915_request_commit(rq); - __i915_request_queue(rq, &attr); + heartbeat_commit(rq, &attr); unlock: mutex_unlock(&ce->timeline->mutex); @@ -187,17 +203,13 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine) GEM_BUG_ON(!intel_engine_has_preemption(engine)); GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); - intel_context_enter(ce); - rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); - intel_context_exit(ce); + rq = heartbeat_create(ce, GFP_NOWAIT | __GFP_NOWARN); if (IS_ERR(rq)) return PTR_ERR(rq); __set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags); - idle_pulse(engine, rq); - __i915_request_commit(rq); - __i915_request_queue(rq, &attr); + heartbeat_commit(rq, &attr); GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER); return 0; @@ -273,8 +285,12 @@ int intel_engine_pulse(struct intel_engine_cs *engine) int intel_engine_flush_barriers(struct intel_engine_cs *engine) { + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN), + }; + struct intel_context *ce = engine->kernel_context; struct i915_request *rq; - int err = 0; + int err; if (llist_empty(&engine->barrier_tasks)) return 0; @@ -282,15 +298,22 @@ int 
intel_engine_flush_barriers(struct intel_engine_cs *engine) if (!intel_engine_pm_get_if_awake(engine)) return 0; - rq = i915_request_create(engine->kernel_context); + if (mutex_lock_interruptible(&ce->timeline->mutex)) { + err = -EINTR; + goto out_rpm; + } + + rq = heartbeat_create(ce, GFP_KERNEL); if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto out_rpm; + goto out_unlock; } - idle_pulse(engine, rq); - i915_request_add(rq); + heartbeat_commit(rq, &attr); + err = 0; +out_unlock: + mutex_unlock(&ce->timeline->mutex); out_rpm: intel_engine_pm_put(engine); return err; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 499b09cb4acf..2843db731b7d 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -60,11 +60,19 @@ static int __engine_unpark(struct intel_wakeref *wf) /* Scrub the context image after our loss of control */ ce->ops->reset(ce); + + CE_TRACE(ce, "reset { seqno:%x, *hwsp:%x, ring:%x }\n", + ce->timeline->seqno, + READ_ONCE(*ce->timeline->hwsp_seqno), + ce->ring->emit); + GEM_BUG_ON(ce->timeline->seqno != + READ_ONCE(*ce->timeline->hwsp_seqno)); } if (engine->unpark) engine->unpark(engine); + intel_breadcrumbs_unpark(engine->breadcrumbs); intel_engine_unpark_heartbeat(engine); return 0; } @@ -136,7 +144,7 @@ __queue_and_release_pm(struct i915_request *rq, list_add_tail(&tl->link, &timelines->active_list); /* Hand the request over to HW and so engine_retire() */ - __i915_request_queue(rq, NULL); + __i915_request_queue_bh(rq); /* Let new submissions commence (and maybe retire this timeline) */ __intel_wakeref_defer_park(&engine->wakeref); diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index ee6312601c56..df62e793e747 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -68,6 +68,7 @@ typedef u8 intel_engine_mask_t; #define ALL_ENGINES ((intel_engine_mask_t)~0ul) struct intel_hw_status_page { + struct list_head timelines; struct i915_vma *vma; u32 *addr; }; @@ -183,7 +184,8 @@ struct intel_engine_execlists { * Reserve the upper 16b for tracking internal errors. */ u32 error_interrupt; -#define ERROR_CSB BIT(31) +#define ERROR_CSB BIT(31) +#define ERROR_PREEMPT BIT(30) /** * @reset_ccid: Active CCID [EXECLISTS_STATUS_HI] at the time of reset @@ -237,16 +239,6 @@ struct intel_engine_execlists { unsigned int port_mask; /** - * @switch_priority_hint: Second context priority. - * - * We submit multiple contexts to the HW simultaneously and would - * like to occasionally switch between them to emulate timeslicing. - * To know when timeslicing is suitable, we track the priority of - * the context submitted second. - */ - int switch_priority_hint; - - /** * @queue_priority_hint: Highest pending priority. 
* * When we add requests into the queue, or adjust the priority of @@ -559,6 +551,8 @@ struct intel_engine_cs { unsigned long stop_timeout_ms; unsigned long timeslice_duration_ms; } props, defaults; + + I915_SELFTEST_DECLARE(struct fault_attr reset_timeout); }; static inline bool diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c new file mode 100644 index 000000000000..d7d5a58990bb --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -0,0 +1,3929 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2014 Intel Corporation + */ + +/** + * DOC: Logical Rings, Logical Ring Contexts and Execlists + * + * Motivation: + * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts". + * These expanded contexts enable a number of new abilities, especially + * "Execlists" (also implemented in this file). + * + * One of the main differences with the legacy HW contexts is that logical + * ring contexts incorporate many more things into the context's state, like + * PDPs or ringbuffer control registers: + * + * The reason why PDPs are included in the context is straightforward: as + * PPGTTs (per-process GTTs) are actually per-context, having the PDPs + * contained there means you don't need to do a ppgtt->switch_mm yourself, + * instead, the GPU will do it for you on the context switch. + * + * But, what about the ringbuffer control registers (head, tail, etc.)? + * Shouldn't we just need a set of those per engine command streamer? This is + * where the name "Logical Rings" starts to make sense: by virtualizing the + * rings, the engine cs shifts to a new "ring buffer" with every context + * switch. When you want to submit a workload to the GPU you: A) choose your + * context, B) find its appropriate virtualized ring, C) write commands to it + * and then, finally, D) tell the GPU to switch to that context. + * + * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch + * to a context is via a context execution list, ergo "Execlists". + * + * LRC implementation: + * Regarding the creation of contexts, we have: + * + * - One global default context. + * - One local default context for each opened fd. + * - One local extra context for each context create ioctl call. + * + * Now that ringbuffers belong per-context (and not per-engine, like before) + * and that contexts are uniquely tied to a given engine (and not reusable, + * like before) we need: + * + * - One ringbuffer per-engine inside each context. + * - One backing object per-engine inside each context. + * + * The global default context starts its life with these new objects fully + * allocated and populated. The local default context for each opened fd is + * more complex, because we don't know at creation time which engine is going + * to use them. To handle this, we have implemented a deferred creation of LR + * contexts: + * + * The local context starts its life as a hollow or blank holder, that only + * gets populated for a given engine once we receive an execbuffer. If later + * on we receive another execbuffer ioctl for the same context but a different + * engine, we allocate/populate a new ringbuffer and context backing object and + * so on. + * + * Finally, regarding local contexts created using the ioctl call: as they are + * only allowed with the render ring, we can allocate & populate them right + * away (no need to defer anything, at least for now).
+ * + * Execlists implementation: + * Execlists are the new method by which, on gen8+ hardware, workloads are + * submitted for execution (as opposed to the legacy, ringbuffer-based, method). + * This method works as follows: + * + * When a request is committed, its commands (the BB start and any leading or + * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer + * for the appropriate context. The tail pointer in the hardware context is not + * updated at this time, but instead, kept by the driver in the ringbuffer + * structure. A structure representing this request is added to a request queue + * for the appropriate engine: this structure contains a copy of the context's + * tail after the request was written to the ring buffer and a pointer to the + * context itself. + * + * If the engine's request queue was empty before the request was added, the + * queue is processed immediately. Otherwise the queue will be processed during + * a context switch interrupt. In any case, elements on the queue will get sent + * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a + * globally unique 20-bit submission ID. + * + * When execution of a request completes, the GPU updates the context status + * buffer with a context complete event and generates a context switch interrupt. + * During the interrupt handling, the driver examines the events in the buffer: + * for each context complete event, if the announced ID matches that on the head + * of the request queue, then that request is retired and removed from the queue. + * + * After processing, if any requests were retired and the queue is not empty + * then a new execution list can be submitted. The two requests at the front of + * the queue are next to be submitted but since a context may not occur twice in + * an execution list, if subsequent requests have the same ID as the first then + * the two requests must be combined. This is done simply by discarding requests + * at the head of the queue until either only one request is left (in which case + * we use a NULL second context) or the first two requests have unique IDs. + * + * By always executing the first two requests in the queue the driver ensures + * that the GPU is kept as busy as possible. In the case where a single context + * completes but a second context is still executing, the request for this second + * context will be at the head of the queue when we remove the first one. This + * request will then be resubmitted along with a new request for a different context, + * which will cause the hardware to continue executing the second request and queue + * the new request (the GPU detects the condition of a context getting preempted + * with the same context and optimizes the context switch flow by not doing + * preemption, but just sampling the new tail pointer).
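The pairing-and-merging rule described in the DOC text above can be restated in a few lines of self-contained C. This is an editor's illustrative sketch only, not driver code: struct sketch_request, ctx_id and pick_elsp_pair() are invented stand-ins, assuming only that the queue is in execution order and that a context may occupy at most one port.

#include <stddef.h>

struct sketch_request {
	int ctx_id;                  /* stand-in for the request's context */
	struct sketch_request *next; /* queue link, in execution order */
};

/*
 * Walk the head of the queue and fill at most two ports, merging
 * consecutive requests from the same context into one port so that a
 * context never appears twice in the execution list.
 */
static size_t pick_elsp_pair(struct sketch_request *head,
			     struct sketch_request *ports[2])
{
	size_t n = 0;

	for (; head && n < 2; head = head->next) {
		if (n && ports[n - 1]->ctx_id == head->ctx_id) {
			ports[n - 1] = head; /* merged: only the tail advances */
			continue;
		}
		ports[n++] = head; /* a new context takes the next port */
	}

	return n;
}

For a queue A1, A2, B this selects {A2, B}, the two requests from context A collapsing into one port; for A, B, C it stops at {A, B} and leaves C queued, matching the "first two unique IDs" rule above.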
+ * + */ +#include <linux/interrupt.h> + +#include "i915_drv.h" +#include "i915_trace.h" +#include "i915_vgpu.h" +#include "gen8_engine_cs.h" +#include "intel_breadcrumbs.h" +#include "intel_context.h" +#include "intel_engine_pm.h" +#include "intel_execlists_submission.h" +#include "intel_gt.h" +#include "intel_gt_pm.h" +#include "intel_gt_requests.h" +#include "intel_lrc.h" +#include "intel_lrc_reg.h" +#include "intel_mocs.h" +#include "intel_reset.h" +#include "intel_ring.h" +#include "intel_workarounds.h" +#include "shmem_utils.h" + +#define RING_EXECLIST_QFULL (1 << 0x2) +#define RING_EXECLIST1_VALID (1 << 0x3) +#define RING_EXECLIST0_VALID (1 << 0x4) +#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE) +#define RING_EXECLIST1_ACTIVE (1 << 0x11) +#define RING_EXECLIST0_ACTIVE (1 << 0x12) + +#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0) +#define GEN8_CTX_STATUS_PREEMPTED (1 << 1) +#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2) +#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3) +#define GEN8_CTX_STATUS_COMPLETE (1 << 4) +#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15) + +#define GEN8_CTX_STATUS_COMPLETED_MASK \ + (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED) + +#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */ +#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */ +#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15) +#define GEN12_IDLE_CTX_ID 0x7FF +#define GEN12_CSB_CTX_VALID(csb_dw) \ + (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID) + +/* Typical size of the average request (2 pipecontrols and a MI_BB) */ +#define EXECLISTS_REQUEST_SIZE 64 /* bytes */ + +struct virtual_engine { + struct intel_engine_cs base; + struct intel_context context; + struct rcu_work rcu; + + /* + * We allow only a single request through the virtual engine at a time + * (each request in the timeline waits for the completion fence of + * the previous before being submitted). By restricting ourselves to + * only submitting a single request, each request is placed on to a + * physical engine to maximise load spreading (by virtue of the late greedy + * scheduling -- each real engine takes the next available request + * upon idling). + */ + struct i915_request *request; + + /* + * We keep a rbtree of available virtual engines inside each physical + * engine, sorted by priority. Here we preallocate the nodes we need + * for the virtual engine, indexed by physical_engine->id. + */ + struct ve_node { + struct rb_node rb; + int prio; + } nodes[I915_NUM_ENGINES]; + + /* + * Keep track of bonded pairs -- restrictions upon our selection + * of physical engines any particular request may be submitted to. + * If we receive a submit-fence from a master engine, we will only + * use one of sibling_mask physical engines. + */ + struct ve_bond { + const struct intel_engine_cs *master; + intel_engine_mask_t sibling_mask; + } *bonds; + unsigned int num_bonds; + + /* And finally, which physical engines this virtual engine maps onto.
*/ + unsigned int num_siblings; + struct intel_engine_cs *siblings[]; +}; + +static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine) +{ + GEM_BUG_ON(!intel_engine_is_virtual(engine)); + return container_of(engine, struct virtual_engine, base); +} + +static struct i915_request * +__active_request(const struct intel_timeline * const tl, + struct i915_request *rq, + int error) +{ + struct i915_request *active = rq; + + list_for_each_entry_from_reverse(rq, &tl->requests, link) { + if (__i915_request_is_complete(rq)) + break; + + if (error) { + i915_request_set_error_once(rq, error); + __i915_request_skip(rq); + } + active = rq; + } + + return active; +} + +static struct i915_request * +active_request(const struct intel_timeline * const tl, struct i915_request *rq) +{ + return __active_request(tl, rq, 0); +} + +static inline void +ring_set_paused(const struct intel_engine_cs *engine, int state) +{ + /* + * We inspect HWS_PREEMPT with a semaphore inside + * engine->emit_fini_breadcrumb. If the dword is true, + * the ring is paused as the semaphore will busywait + * until the dword is false. + */ + engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state; + if (state) + wmb(); +} + +static inline struct i915_priolist *to_priolist(struct rb_node *rb) +{ + return rb_entry(rb, struct i915_priolist, node); +} + +static inline int rq_prio(const struct i915_request *rq) +{ + return READ_ONCE(rq->sched.attr.priority); +} + +static int effective_prio(const struct i915_request *rq) +{ + int prio = rq_prio(rq); + + /* + * If this request is special and must not be interrupted at any + * cost, so be it. Note we are only checking the most recent request + * in the context and so may be masking an earlier vip request. It + * is hoped that under the conditions where nopreempt is used, this + * will not matter (i.e. all requests to that context will be + * nopreempt for as long as desired). + */ + if (i915_request_has_nopreempt(rq)) + prio = I915_PRIORITY_UNPREEMPTABLE; + + return prio; +} + +static int queue_prio(const struct intel_engine_execlists *execlists) +{ + struct i915_priolist *p; + struct rb_node *rb; + + rb = rb_first_cached(&execlists->queue); + if (!rb) + return INT_MIN; + + /* + * As the priolist[] are inverted, with the highest priority in [0], + * we have to flip the index value to become priority. + */ + p = to_priolist(rb); + if (!I915_USER_PRIORITY_SHIFT) + return p->priority; + + return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used); +} + +static int virtual_prio(const struct intel_engine_execlists *el) +{ + struct rb_node *rb = rb_first_cached(&el->virtual); + + return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN; +} + +static inline bool need_preempt(const struct intel_engine_cs *engine, + const struct i915_request *rq) +{ + int last_prio; + + if (!intel_engine_has_semaphores(engine)) + return false; + + /* + * Check if the current priority hint merits a preemption attempt. + * + * We record the highest value priority we saw during rescheduling + * prior to this dequeue, therefore we know that if it is strictly + * less than the current tail of ELSP[0], we do not need to force + * a preempt-to-idle cycle. + * + * However, the priority hint is a mere hint that we may need to + * preempt. If that hint is stale or we may be trying to preempt + * ourselves, ignore the request.
+ * + * More naturally we would write + * prio >= max(0, last); + * except that we wish to prevent triggering preemption at the same + * priority level: the task that is running should remain running + * to preserve FIFO ordering of dependencies. + */ + last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1); + if (engine->execlists.queue_priority_hint <= last_prio) + return false; + + /* + * Check against the first request in ELSP[1], it will, thanks to the + * power of PI, be the highest priority of that context. + */ + if (!list_is_last(&rq->sched.link, &engine->active.requests) && + rq_prio(list_next_entry(rq, sched.link)) > last_prio) + return true; + + /* + * If the inflight context did not trigger the preemption, then maybe + * it was the set of queued requests? Pick the highest priority in + * the queue (the first active priolist) and see if it deserves to be + * running instead of ELSP[0]. + * + * The highest priority request in the queue cannot be either + * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same + * context, its priority would not exceed ELSP[0] aka last_prio. + */ + return max(virtual_prio(&engine->execlists), + queue_prio(&engine->execlists)) > last_prio; +} + +__maybe_unused static inline bool +assert_priority_queue(const struct i915_request *prev, + const struct i915_request *next) +{ + /* + * Without preemption, the prev may refer to the still active element + * which we refuse to let go. + * + * Even with preemption, there are times when we think it is better not + * to preempt and leave an ostensibly lower priority request in flight. + */ + if (i915_request_is_active(prev)) + return true; + + return rq_prio(prev) >= rq_prio(next); +} + +static struct i915_request * +__unwind_incomplete_requests(struct intel_engine_cs *engine) +{ + struct i915_request *rq, *rn, *active = NULL; + struct list_head *pl; + int prio = I915_PRIORITY_INVALID; + + lockdep_assert_held(&engine->active.lock); + + list_for_each_entry_safe_reverse(rq, rn, + &engine->active.requests, + sched.link) { + if (__i915_request_is_complete(rq)) { + list_del_init(&rq->sched.link); + continue; + } + + __i915_request_unsubmit(rq); + + GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); + if (rq_prio(rq) != prio) { + prio = rq_prio(rq); + pl = i915_sched_lookup_priolist(engine, prio); + } + GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); + + list_move(&rq->sched.link, pl); + set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); + + /* Check in case we rollback so far we wrap [size/2] */ + if (intel_ring_direction(rq->ring, + rq->tail, + rq->ring->tail + 8) > 0) + rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE; + + active = rq; + } + + return active; +} + +struct i915_request * +execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists) +{ + struct intel_engine_cs *engine = + container_of(execlists, typeof(*engine), execlists); + + return __unwind_incomplete_requests(engine); +} + +static inline void +execlists_context_status_change(struct i915_request *rq, unsigned long status) +{ + /* + * Only used when GVT-g is enabled now. When GVT-g is disabled, + * the compiler should eliminate this function as dead-code.
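Stripped of the driver's types, the strict-inequality rule in the need_preempt() comment above reduces to a tiny pure function. A hedged sketch with invented names and illustrative constants (the real I915_PRIORITY_* values differ):

#include <stdbool.h>

/* Illustrative priority values only, not the driver's constants. */
#define SKETCH_PRIO_NORMAL        0
#define SKETCH_PRIO_UNPREEMPTABLE 0x7fffffff

static bool sketch_need_preempt(int queue_hint, int running_prio,
				bool running_is_nopreempt)
{
	/* A nopreempt request can never be displaced... */
	int last = running_is_nopreempt ? SKETCH_PRIO_UNPREEMPTABLE
					: running_prio;

	/* ...and equal-priority work must not preempt: clamp to NORMAL - 1 */
	if (last < SKETCH_PRIO_NORMAL - 1)
		last = SKETCH_PRIO_NORMAL - 1;

	return queue_hint > last; /* strict '>' preserves FIFO ordering */
}

The clamp to NORMAL - 1 plus the strict '>' is what keeps equal-priority work running in submission order instead of ping-ponging through preempt-to-idle cycles.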
+ */ + if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) + return; + + atomic_notifier_call_chain(&rq->engine->context_status_notifier, + status, rq); +} + +static void intel_engine_context_in(struct intel_engine_cs *engine) +{ + unsigned long flags; + + if (atomic_add_unless(&engine->stats.active, 1, 0)) + return; + + write_seqlock_irqsave(&engine->stats.lock, flags); + if (!atomic_add_unless(&engine->stats.active, 1, 0)) { + engine->stats.start = ktime_get(); + atomic_inc(&engine->stats.active); + } + write_sequnlock_irqrestore(&engine->stats.lock, flags); +} + +static void intel_engine_context_out(struct intel_engine_cs *engine) +{ + unsigned long flags; + + GEM_BUG_ON(!atomic_read(&engine->stats.active)); + + if (atomic_add_unless(&engine->stats.active, -1, 1)) + return; + + write_seqlock_irqsave(&engine->stats.lock, flags); + if (atomic_dec_and_test(&engine->stats.active)) { + engine->stats.total = + ktime_add(engine->stats.total, + ktime_sub(ktime_get(), engine->stats.start)); + } + write_sequnlock_irqrestore(&engine->stats.lock, flags); +} + +static void reset_active(struct i915_request *rq, + struct intel_engine_cs *engine) +{ + struct intel_context * const ce = rq->context; + u32 head; + + /* + * The executing context has been cancelled. We want to prevent + * further execution along this context and propagate the error on + * to anything depending on its results. + * + * In __i915_request_submit(), we apply the -EIO and remove the + * requests' payloads for any banned requests. But first, we must + * rewind the context back to the start of the incomplete request so + * that we do not jump back into the middle of the batch. + * + * We preserve the breadcrumbs and semaphores of the incomplete + * requests so that inter-timeline dependencies (i.e. other timelines) + * remain correctly ordered. And we defer to __i915_request_submit() + * so that all asynchronous waits are correctly handled.
+ */ + ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n", + rq->fence.context, rq->fence.seqno); + + /* On resubmission of the active request, payload will be scrubbed */ + if (__i915_request_is_complete(rq)) + head = rq->tail; + else + head = __active_request(ce->timeline, rq, -EIO)->head; + head = intel_ring_wrap(ce->ring, head); + + /* Scrub the context image to prevent replaying the previous batch */ + lrc_init_regs(ce, engine, true); + + /* We've switched away, so this should be a no-op, but intent matters */ + ce->lrc.lrca = lrc_update_regs(ce, engine, head); +} + +static inline struct intel_engine_cs * +__execlists_schedule_in(struct i915_request *rq) +{ + struct intel_engine_cs * const engine = rq->engine; + struct intel_context * const ce = rq->context; + + intel_context_get(ce); + + if (unlikely(intel_context_is_closed(ce) && + !intel_engine_has_heartbeat(engine))) + intel_context_set_banned(ce); + + if (unlikely(intel_context_is_banned(ce))) + reset_active(rq, engine); + + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + lrc_check_regs(ce, engine, "before"); + + if (ce->tag) { + /* Use a fixed tag for OA and friends */ + GEM_BUG_ON(ce->tag <= BITS_PER_LONG); + ce->lrc.ccid = ce->tag; + } else { + /* We don't need a strict matching tag, just different values */ + unsigned int tag = __ffs(engine->context_tag); + + GEM_BUG_ON(tag >= BITS_PER_LONG); + __clear_bit(tag, &engine->context_tag); + ce->lrc.ccid = (1 + tag) << (GEN11_SW_CTX_ID_SHIFT - 32); + + BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID); + } + + ce->lrc.ccid |= engine->execlists.ccid; + + __intel_gt_pm_get(engine->gt); + if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active)) + intel_uncore_forcewake_get(engine->uncore, engine->fw_domain); + execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); + intel_engine_context_in(engine); + + CE_TRACE(ce, "schedule-in, ccid:%x\n", ce->lrc.ccid); + + return engine; +} + +static inline void execlists_schedule_in(struct i915_request *rq, int idx) +{ + struct intel_context * const ce = rq->context; + struct intel_engine_cs *old; + + GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine)); + trace_i915_request_in(rq, idx); + + old = ce->inflight; + if (!old) + old = __execlists_schedule_in(rq); + WRITE_ONCE(ce->inflight, ptr_inc(old)); + + GEM_BUG_ON(intel_context_inflight(ce) != rq->engine); +} + +static void +resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve) +{ + struct intel_engine_cs *engine = rq->engine; + + spin_lock_irq(&engine->active.lock); + + clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); + WRITE_ONCE(rq->engine, &ve->base); + ve->base.submit_request(rq); + + spin_unlock_irq(&engine->active.lock); +} + +static void kick_siblings(struct i915_request *rq, struct intel_context *ce) +{ + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); + struct intel_engine_cs *engine = rq->engine; + + /* + * After this point, the rq may be transferred to a new sibling, so + * before we clear ce->inflight make sure that the context has been + * removed from the b->signalers and furthermore we need to make sure + * that the concurrent iterator in signal_irq_work is no longer + * following ce->signal_link. + */ + if (!list_empty(&ce->signals)) + intel_context_remove_breadcrumbs(ce, engine->breadcrumbs); + + /* + * This engine is now too busy to run this virtual request, so + * see if we can find an alternative engine for it to execute on. 
+ * Once a request has become bonded to this engine, we treat it the + * same as any other native request. + */ + if (i915_request_in_priority_queue(rq) && + rq->execution_mask != engine->mask) + resubmit_virtual_request(rq, ve); + + if (READ_ONCE(ve->request)) + tasklet_hi_schedule(&ve->base.execlists.tasklet); +} + +static inline void __execlists_schedule_out(struct i915_request *rq) +{ + struct intel_context * const ce = rq->context; + struct intel_engine_cs * const engine = rq->engine; + unsigned int ccid; + + /* + * NB process_csb() is not under the engine->active.lock and hence + * schedule_out can race with schedule_in meaning that we should + * refrain from doing non-trivial work here. + */ + + CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid); + + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + lrc_check_regs(ce, engine, "after"); + + /* + * If we have just completed this context, the engine may now be + * idle and we want to re-enter powersaving. + */ + if (intel_timeline_is_last(ce->timeline, rq) && + __i915_request_is_complete(rq)) + intel_engine_add_retire(engine, ce->timeline); + + ccid = ce->lrc.ccid; + ccid >>= GEN11_SW_CTX_ID_SHIFT - 32; + ccid &= GEN12_MAX_CONTEXT_HW_ID; + if (ccid < BITS_PER_LONG) { + GEM_BUG_ON(ccid == 0); + GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag)); + __set_bit(ccid - 1, &engine->context_tag); + } + + lrc_update_runtime(ce); + intel_engine_context_out(engine); + execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); + if (engine->fw_domain && !atomic_dec_return(&engine->fw_active)) + intel_uncore_forcewake_put(engine->uncore, engine->fw_domain); + intel_gt_pm_put_async(engine->gt); + + /* + * If this is part of a virtual engine, its next request may + * have been blocked waiting for access to the active context. + * We have to kick all the siblings again in case we need to + * switch (e.g. the next request is not runnable on this + * engine). Hopefully, we will already have submitted the next + * request before the tasklet runs and do not need to rebuild + * each virtual tree and kick everyone again. + */ + if (ce->engine != engine) + kick_siblings(rq, ce); +} + +static inline void +execlists_schedule_out(struct i915_request *rq) +{ + struct intel_context * const ce = rq->context; + + trace_i915_request_out(rq); + + GEM_BUG_ON(!ce->inflight); + ce->inflight = ptr_dec(ce->inflight); + if (!__intel_context_inflight_count(ce->inflight)) { + GEM_BUG_ON(ce->inflight != rq->engine); + __execlists_schedule_out(rq); + WRITE_ONCE(ce->inflight, NULL); + intel_context_put(ce); + } + + i915_request_put(rq); +} + +static u64 execlists_update_context(struct i915_request *rq) +{ + struct intel_context *ce = rq->context; + u64 desc = ce->lrc.desc; + u32 tail, prev; + + /* + * WaIdleLiteRestore:bdw,skl + * + * We should never submit the context with the same RING_TAIL twice + * just in case we submit an empty ring, which confuses the HW. + * + * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of + * the normal request to be able to always advance the RING_TAIL on + * subsequent resubmissions (for lite restore). Should that fail us, + * and we try and submit the same tail again, force the context + * reload. + * + * If we need to return to a preempted context, we need to skip the + * lite-restore and force it to reload the RING_TAIL. Otherwise, the + * HW has a tendency to ignore us rewinding the TAIL to the end of + * an earlier request.
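The ce->inflight bookkeeping above (ptr_dec() together with __intel_context_inflight_count()) relies on pointer alignment to pack a small submission count into the low bits of an engine pointer. A minimal standalone sketch of that technique, with invented names and an assumed two spare alignment bits:

#include <stdint.h>

/*
 * An engine pointer is several-byte aligned, so its low bits are free
 * to hold a small in-flight count; a 2-bit count is an illustrative
 * choice here, not the driver's actual encoding.
 */
#define COUNT_MASK ((uintptr_t)0x3)

static inline void *pack(void *engine, uintptr_t count)
{
	return (void *)((uintptr_t)engine | (count & COUNT_MASK));
}

static inline uintptr_t packed_count(void *p)
{
	return (uintptr_t)p & COUNT_MASK; /* submissions in flight */
}

static inline void *packed_engine(void *p)
{
	return (void *)((uintptr_t)p & ~COUNT_MASK); /* strip the count */
}

Incrementing or decrementing the packed value as a whole, as ptr_inc()/ptr_dec() do, then only touches the count bits while the aligned pointer part stays intact.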
+ */ + GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail); + prev = rq->ring->tail; + tail = intel_ring_set_tail(rq->ring, rq->tail); + if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0)) + desc |= CTX_DESC_FORCE_RESTORE; + ce->lrc_reg_state[CTX_RING_TAIL] = tail; + rq->tail = rq->wa_tail; + + /* + * Make sure the context image is complete before we submit it to HW. + * + * Ostensibly, writes (including the WCB) should be flushed prior to + * an uncached write such as our mmio register access, the empirical + * evidence (esp. on Braswell) suggests that the WC write into memory + * may not be visible to the HW prior to the completion of the UC + * register write and that we may begin execution from the context + * before its image is complete leading to invalid PD chasing. + */ + wmb(); + + ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE; + return desc; +} + +static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) +{ + if (execlists->ctrl_reg) { + writel(lower_32_bits(desc), execlists->submit_reg + port * 2); + writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1); + } else { + writel(upper_32_bits(desc), execlists->submit_reg); + writel(lower_32_bits(desc), execlists->submit_reg); + } +} + +static __maybe_unused char * +dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq) +{ + if (!rq) + return ""; + + snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d", + prefix, + rq->context->lrc.ccid, + rq->fence.context, rq->fence.seqno, + __i915_request_is_complete(rq) ? "!" : + __i915_request_has_started(rq) ? "*" : + "", + rq_prio(rq)); + + return buf; +} + +static __maybe_unused void +trace_ports(const struct intel_engine_execlists *execlists, + const char *msg, + struct i915_request * const *ports) +{ + const struct intel_engine_cs *engine = + container_of(execlists, typeof(*engine), execlists); + char __maybe_unused p0[40], p1[40]; + + if (!ports[0]) + return; + + ENGINE_TRACE(engine, "%s { %s%s }\n", msg, + dump_port(p0, sizeof(p0), "", ports[0]), + dump_port(p1, sizeof(p1), ", ", ports[1])); +} + +static inline bool +reset_in_progress(const struct intel_engine_execlists *execlists) +{ + return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); +} + +static __maybe_unused bool +assert_pending_valid(const struct intel_engine_execlists *execlists, + const char *msg) +{ + struct intel_engine_cs *engine = + container_of(execlists, typeof(*engine), execlists); + struct i915_request * const *port, *rq; + struct intel_context *ce = NULL; + bool sentinel = false; + u32 ccid = -1; + + trace_ports(execlists, msg, execlists->pending); + + /* We may be messing around with the lists during reset, lalala */ + if (reset_in_progress(execlists)) + return true; + + if (!execlists->pending[0]) { + GEM_TRACE_ERR("%s: Nothing pending for promotion!\n", + engine->name); + return false; + } + + if (execlists->pending[execlists_num_ports(execlists)]) { + GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n", + engine->name, execlists_num_ports(execlists)); + return false; + } + + for (port = execlists->pending; (rq = *port); port++) { + unsigned long flags; + bool ok = true; + + GEM_BUG_ON(!kref_read(&rq->fence.refcount)); + GEM_BUG_ON(!i915_request_is_active(rq)); + + if (ce == rq->context) { + GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n", + engine->name, + ce->timeline->fence_context, + port - execlists->pending); + return false; + } + ce = rq->context; + + if (ccid == ce->lrc.ccid) { + GEM_TRACE_ERR("%s: Dup 
ccid:%x context:%llx in pending[%zd]\n", + engine->name, + ccid, ce->timeline->fence_context, + port - execlists->pending); + return false; + } + ccid = ce->lrc.ccid; + + /* + * Sentinels are supposed to be the last request so they flush + * the current execution off the HW. Check that they are the only + * request in the pending submission. + */ + if (sentinel) { + GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n", + engine->name, + ce->timeline->fence_context, + port - execlists->pending); + return false; + } + sentinel = i915_request_has_sentinel(rq); + + /* + * We want virtual requests to only be in the first slot so + * that they are never stuck behind a hog and can be immediately + * transferred onto the next idle engine. + */ + if (rq->execution_mask != engine->mask && + port != execlists->pending) { + GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n", + engine->name, + ce->timeline->fence_context, + port - execlists->pending); + return false; + } + + /* Hold tightly onto the lock to prevent concurrent retires! */ + if (!spin_trylock_irqsave(&rq->lock, flags)) + continue; + + if (__i915_request_is_complete(rq)) + goto unlock; + + if (i915_active_is_idle(&ce->active) && + !intel_context_is_barrier(ce)) { + GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n", + engine->name, + ce->timeline->fence_context, + port - execlists->pending); + ok = false; + goto unlock; + } + + if (!i915_vma_is_pinned(ce->state)) { + GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n", + engine->name, + ce->timeline->fence_context, + port - execlists->pending); + ok = false; + goto unlock; + } + + if (!i915_vma_is_pinned(ce->ring->vma)) { + GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n", + engine->name, + ce->timeline->fence_context, + port - execlists->pending); + ok = false; + goto unlock; + } + +unlock: + spin_unlock_irqrestore(&rq->lock, flags); + if (!ok) + return false; + } + + return ce; +} + +static void execlists_submit_ports(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists *execlists = &engine->execlists; + unsigned int n; + + GEM_BUG_ON(!assert_pending_valid(execlists, "submit")); + + /* + * We can skip acquiring intel_runtime_pm_get() here as it was taken + * on our behalf by the request (see i915_gem_mark_busy()) and it will + * not be relinquished until the device is idle (see + * i915_gem_idle_work_handler()). As a precaution, we make sure + * that all ELSP are drained i.e. we have processed the CSB, + * before allowing ourselves to idle and calling intel_runtime_pm_put(). + */ + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); + + /* + * ELSQ note: the submit queue is not cleared after being submitted + * to the HW so we need to make sure we always clean it up. This is + * currently ensured by the fact that we always write the same number + * of elsq entries, keep this in mind before changing the loop below. + */ + for (n = execlists_num_ports(execlists); n--; ) { + struct i915_request *rq = execlists->pending[n]; + + write_desc(execlists, + rq ? 
execlists_update_context(rq) : 0, + n); + } + + /* we need to manually load the submit queue */ + if (execlists->ctrl_reg) + writel(EL_CTRL_LOAD, execlists->ctrl_reg); +} + +static bool ctx_single_port_submission(const struct intel_context *ce) +{ + return (IS_ENABLED(CONFIG_DRM_I915_GVT) && + intel_context_force_single_submission(ce)); +} + +static bool can_merge_ctx(const struct intel_context *prev, + const struct intel_context *next) +{ + if (prev != next) + return false; + + if (ctx_single_port_submission(prev)) + return false; + + return true; +} + +static unsigned long i915_request_flags(const struct i915_request *rq) +{ + return READ_ONCE(rq->fence.flags); +} + +static bool can_merge_rq(const struct i915_request *prev, + const struct i915_request *next) +{ + GEM_BUG_ON(prev == next); + GEM_BUG_ON(!assert_priority_queue(prev, next)); + + /* + * We do not submit known completed requests. Therefore if the next + * request is already completed, we can pretend to merge it in + * with the previous context (and we will skip updating the ELSP + * and tracking). Thus hopefully keeping the ELSP full with active + * contexts, despite the best efforts of preempt-to-busy to confuse + * us. + */ + if (__i915_request_is_complete(next)) + return true; + + if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) & + (BIT(I915_FENCE_FLAG_NOPREEMPT) | + BIT(I915_FENCE_FLAG_SENTINEL)))) + return false; + + if (!can_merge_ctx(prev->context, next->context)) + return false; + + GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno)); + return true; +} + +static bool virtual_matches(const struct virtual_engine *ve, + const struct i915_request *rq, + const struct intel_engine_cs *engine) +{ + const struct intel_engine_cs *inflight; + + if (!rq) + return false; + + if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */ + return false; + + /* + * We track when the HW has completed saving the context image + * (i.e. when we have seen the final CS event switching out of + * the context) and must not overwrite the context image before + * then. This restricts us to only using the active engine + * while the previous virtualized request is inflight (so + * we reuse the register offsets). This is a very small + * hysteresis on the greedy selection algorithm. + */ + inflight = intel_context_inflight(&ve->context); + if (inflight && inflight != engine) + return false; + + return true; +} + +static struct virtual_engine * +first_virtual_engine(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists *el = &engine->execlists; + struct rb_node *rb = rb_first_cached(&el->virtual); + + while (rb) { + struct virtual_engine *ve = + rb_entry(rb, typeof(*ve), nodes[engine->id].rb); + struct i915_request *rq = READ_ONCE(ve->request); + + /* lazily cleanup after another engine handled rq */ + if (!rq || !virtual_matches(ve, rq, engine)) { + rb_erase_cached(rb, &el->virtual); + RB_CLEAR_NODE(rb); + rb = rb_first_cached(&el->virtual); + continue; + } + + return ve; + } + + return NULL; +} + +static void virtual_xfer_context(struct virtual_engine *ve, + struct intel_engine_cs *engine) +{ + unsigned int n; + + if (likely(engine == ve->siblings[0])) + return; + + GEM_BUG_ON(READ_ONCE(ve->context.inflight)); + if (!intel_engine_has_relative_mmio(engine)) + lrc_update_offsets(&ve->context, engine); + + /* + * Move the bound engine to the top of the list for
We then kick this tasklet first + * before checking others, so that we preferentially + * reuse this set of bound registers. + */ + for (n = 1; n < ve->num_siblings; n++) { + if (ve->siblings[n] == engine) { + swap(ve->siblings[n], ve->siblings[0]); + break; + } + } +} + +static void defer_request(struct i915_request *rq, struct list_head * const pl) +{ + LIST_HEAD(list); + + /* + * We want to move the interrupted request to the back of + * the round-robin list (i.e. its priority level), but + * in doing so, we must then move all requests that were in + * flight and were waiting for the interrupted request to + * be run after it again. + */ + do { + struct i915_dependency *p; + + GEM_BUG_ON(i915_request_is_active(rq)); + list_move_tail(&rq->sched.link, pl); + + for_each_waiter(p, rq) { + struct i915_request *w = + container_of(p->waiter, typeof(*w), sched); + + if (p->flags & I915_DEPENDENCY_WEAK) + continue; + + /* Leave semaphores spinning on the other engines */ + if (w->engine != rq->engine) + continue; + + /* No waiter should start before its signaler */ + GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) && + __i915_request_has_started(w) && + !__i915_request_is_complete(rq)); + + GEM_BUG_ON(i915_request_is_active(w)); + if (!i915_request_is_ready(w)) + continue; + + if (rq_prio(w) < rq_prio(rq)) + continue; + + GEM_BUG_ON(rq_prio(w) > rq_prio(rq)); + list_move_tail(&w->sched.link, &list); + } + + rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); + } while (rq); +} + +static void defer_active(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = __unwind_incomplete_requests(engine); + if (!rq) + return; + + defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq))); +} + +static bool +timeslice_yield(const struct intel_engine_execlists *el, + const struct i915_request *rq) +{ + /* + * Once bitten, forever smitten! + * + * If the active context ever busy-waited on a semaphore, + * it will be treated as a hog until the end of its timeslice (i.e. + * until it is scheduled out and replaced by a new submission, + * possibly even its own lite-restore). The HW only sends an interrupt + * on the first miss, and we do know if that semaphore has been + * signaled, or even if it is now stuck on another semaphore. Play + * safe, yield if it might be stuck -- it will be given a fresh + * timeslice in the near future. 
+ */ + return rq->context->lrc.ccid == READ_ONCE(el->yield); +} + +static bool needs_timeslice(const struct intel_engine_cs *engine, + const struct i915_request *rq) +{ + if (!intel_engine_has_timeslices(engine)) + return false; + + /* If not currently active, or about to switch, wait for next event */ + if (!rq || __i915_request_is_complete(rq)) + return false; + + /* We do not need to start the timeslice until after the ACK */ + if (READ_ONCE(engine->execlists.pending[0])) + return false; + + /* If ELSP[1] is occupied, always check to see if worth slicing */ + if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests)) { + ENGINE_TRACE(engine, "timeslice required for second inflight context\n"); + return true; + } + + /* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */ + if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) { + ENGINE_TRACE(engine, "timeslice required for queue\n"); + return true; + } + + if (!RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root)) { + ENGINE_TRACE(engine, "timeslice required for virtual\n"); + return true; + } + + return false; +} + +static bool +timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq) +{ + const struct intel_engine_execlists *el = &engine->execlists; + + if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq)) + return false; + + if (!needs_timeslice(engine, rq)) + return false; + + return timer_expired(&el->timer) || timeslice_yield(el, rq); +} + +static unsigned long timeslice(const struct intel_engine_cs *engine) +{ + return READ_ONCE(engine->props.timeslice_duration_ms); +} + +static void start_timeslice(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists *el = &engine->execlists; + unsigned long duration; + + /* Disable the timer if there is nothing to switch to */ + duration = 0; + if (needs_timeslice(engine, *el->active)) { + /* Avoid continually prolonging an active timeslice */ + if (timer_active(&el->timer)) { + /* + * If we just submitted a new ELSP after an old + * context, that context may have already consumed + * its timeslice, so recheck. + */ + if (!timer_pending(&el->timer)) + tasklet_hi_schedule(&el->tasklet); + return; + } + + duration = timeslice(engine); + } + + set_timer_ms(&el->timer, duration); +} + +static void record_preemption(struct intel_engine_execlists *execlists) +{ + (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); +} + +static unsigned long active_preempt_timeout(struct intel_engine_cs *engine, + const struct i915_request *rq) +{ + if (!rq) + return 0; + + /* Force a fast reset for terminated contexts (ignoring sysfs!) */ + if (unlikely(intel_context_is_banned(rq->context))) + return 1; + + return READ_ONCE(engine->props.preempt_timeout_ms); +} + +static void set_preempt_timeout(struct intel_engine_cs *engine, + const struct i915_request *rq) +{ + if (!intel_engine_has_preempt_reset(engine)) + return; + + set_timer_ms(&engine->execlists.preempt, + active_preempt_timeout(engine, rq)); +} + +static void execlists_dequeue(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request **port = execlists->pending; + struct i915_request ** const last_port = port + execlists->port_mask; + struct i915_request *last = *execlists->active; + struct virtual_engine *ve; + struct rb_node *rb; + bool submit = false; + + /* + * Hardware submission is through 2 ports. Conceptually each port + * has a (RING_START, RING_HEAD, RING_TAIL) tuple. 
RING_START is + * static for a context, and unique to each, so we only execute + * requests belonging to a single context from each ring. RING_HEAD + * is maintained by the CS in the context image, it marks the place + * where it got up to last time, and through RING_TAIL we tell the CS + * where we want to execute up to this time. + * + * In this list the requests are in order of execution. Consecutive + * requests from the same context are adjacent in the ringbuffer. We + * can combine these requests into a single RING_TAIL update: + * + * RING_HEAD...req1...req2 + * ^- RING_TAIL + * since to execute req2 the CS must first execute req1. + * + * Our goal then is to point each port to the end of a consecutive + * sequence of requests as being the most optimal (fewest wake ups + * and context switches) submission. + */ + + spin_lock(&engine->active.lock); + + /* + * If the queue is higher priority than the last + * request in the currently active context, submit afresh. + * We will resubmit again afterwards in case we need to split + * the active context to interject the preemption request, + * i.e. we will retrigger preemption following the ack in case + * of trouble. + * + * In theory we can skip over completed contexts that have not + * yet been processed by events (as those events are in flight): + * + * while ((last = *active) && i915_request_completed(last)) + * active++; + * + * However, the GPU cannot handle this as it will ultimately + * find itself trying to jump back into a context it has just + * completed and barf. + */ + + if (last) { + if (__i915_request_is_complete(last)) { + goto check_secondary; + } else if (need_preempt(engine, last)) { + ENGINE_TRACE(engine, + "preempting last=%llx:%lld, prio=%d, hint=%d\n", + last->fence.context, + last->fence.seqno, + last->sched.attr.priority, + execlists->queue_priority_hint); + record_preemption(execlists); + + /* + * Don't let the RING_HEAD advance past the breadcrumb + * as we unwind (and until we resubmit) so that we do + * not accidentally tell it to go backwards. + */ + ring_set_paused(engine, 1); + + /* + * Note that we have not stopped the GPU at this point, + * so we are unwinding the incomplete requests as they + * remain inflight and so by the time we do complete + * the preemption, some of the unwound requests may + * complete! + */ + __unwind_incomplete_requests(engine); + + last = NULL; + } else if (timeslice_expired(engine, last)) { + ENGINE_TRACE(engine, + "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n", + yesno(timer_expired(&execlists->timer)), + last->fence.context, last->fence.seqno, + rq_prio(last), + execlists->queue_priority_hint, + yesno(timeslice_yield(execlists, last))); + + /* + * Consume this timeslice; ensure we start a new one. + * + * The timeslice expired, and we will unwind the + * running contexts and recompute the next ELSP. + * If that submit will be the same pair of contexts + * (due to dependency ordering), we will skip the + * submission. If we don't cancel the timer now, + * we will see that the timer has expired and + * reschedule the tasklet; continually until the + * next context switch or other preemption event. + * + * Since we have decided to reschedule based on + * consumption of this timeslice, if we submit the + * same context again, grant it a full timeslice.
+ */ + cancel_timer(&execlists->timer); + ring_set_paused(engine, 1); + defer_active(engine); + + /* + * Unlike for preemption, if we rewind and continue + * executing the same context as previously active, + * the order of execution will remain the same and + * the tail will only advance. We do not need to + * force a full context restore, as a lite-restore + * is sufficient to resample the monotonic TAIL. + * + * If we switch to any other context, similarly we + * will not rewind TAIL of current context, and + * normal save/restore will preserve state and allow + * us to later continue executing the same request. + */ + last = NULL; + } else { + /* + * Otherwise if we already have a request pending + * for execution after the current one, we can + * just wait until the next CS event before + * queuing more. In either case we will force a + * lite-restore preemption event, but if we wait + * we hopefully coalesce several updates into a single + * submission. + */ +check_secondary: + if (!list_is_last(&last->sched.link, + &engine->active.requests)) { + /* + * Even if ELSP[1] is occupied and not worthy + * of timeslices, our queue might be. + */ + spin_unlock(&engine->active.lock); + return; + } + } + } + + /* XXX virtual is always taking precedence */ + while ((ve = first_virtual_engine(engine))) { + struct i915_request *rq; + + spin_lock(&ve->base.active.lock); + + rq = ve->request; + if (unlikely(!virtual_matches(ve, rq, engine))) + goto unlock; /* lost the race to a sibling */ + + GEM_BUG_ON(rq->engine != &ve->base); + GEM_BUG_ON(rq->context != &ve->context); + + if (unlikely(rq_prio(rq) < queue_prio(execlists))) { + spin_unlock(&ve->base.active.lock); + break; + } + + if (last && !can_merge_rq(last, rq)) { + spin_unlock(&ve->base.active.lock); + spin_unlock(&engine->active.lock); + return; /* leave this for another sibling */ + } + + ENGINE_TRACE(engine, + "virtual rq=%llx:%lld%s, new engine? %s\n", + rq->fence.context, + rq->fence.seqno, + __i915_request_is_complete(rq) ? "!" : + __i915_request_has_started(rq) ? "*" : + "", + yesno(engine != ve->siblings[0])); + + WRITE_ONCE(ve->request, NULL); + WRITE_ONCE(ve->base.execlists.queue_priority_hint, INT_MIN); + + rb = &ve->nodes[engine->id].rb; + rb_erase_cached(rb, &execlists->virtual); + RB_CLEAR_NODE(rb); + + GEM_BUG_ON(!(rq->execution_mask & engine->mask)); + WRITE_ONCE(rq->engine, engine); + + if (__i915_request_submit(rq)) { + /* + * Only after we confirm that we will submit + * this request (i.e. it has not already + * completed), do we want to update the context. + * + * This serves two purposes. It avoids + * unnecessary work if we are resubmitting an + * already completed request after timeslicing. + * But more importantly, it prevents us altering + * ve->siblings[] on an idle context, where + * we may be using ve->siblings[] in + * virtual_context_enter / virtual_context_exit. + */ + virtual_xfer_context(ve, engine); + GEM_BUG_ON(ve->siblings[0] != engine); + + submit = true; + last = rq; + } + + i915_request_put(rq); +unlock: + spin_unlock(&ve->base.active.lock); + + /* + * Hmm, we have a bunch of virtual engine requests, + * but the first one was already completed (thanks + * preempt-to-busy!). Keep looking at the veng queue + * until we have no more relevant requests (i.e. + * the normal submit queue has higher priority). 
+ */ + if (submit) + break; + } + + while ((rb = rb_first_cached(&execlists->queue))) { + struct i915_priolist *p = to_priolist(rb); + struct i915_request *rq, *rn; + int i; + + priolist_for_each_request_consume(rq, rn, p, i) { + bool merge = true; + + /* + * Can we combine this request with the current port? + * It has to be the same context/ringbuffer and not + * have any exceptions (e.g. GVT saying never to + * combine contexts). + * + * If we can combine the requests, we can execute both + * by updating the RING_TAIL to point to the end of the + * second request, and so we never need to tell the + * hardware about the first. + */ + if (last && !can_merge_rq(last, rq)) { + /* + * If we are on the second port and cannot + * combine this request with the last, then we + * are done. + */ + if (port == last_port) + goto done; + + /* + * We must not populate both ELSP[] with the + * same LRCA, i.e. we must submit 2 different + * contexts if we submit 2 ELSP. + */ + if (last->context == rq->context) + goto done; + + if (i915_request_has_sentinel(last)) + goto done; + + /* + * We avoid submitting virtual requests into + * the secondary ports so that we can migrate + * the request immediately to another engine + * rather than wait for the primary request. + */ + if (rq->execution_mask != engine->mask) + goto done; + + /* + * If GVT overrides us we only ever submit + * port[0], leaving port[1] empty. Note that we + * also have to be careful that we don't queue + * the same context (even though a different + * request) to the second port. + */ + if (ctx_single_port_submission(last->context) || + ctx_single_port_submission(rq->context)) + goto done; + + merge = false; + } + + if (__i915_request_submit(rq)) { + if (!merge) { + *port++ = i915_request_get(last); + last = NULL; + } + + GEM_BUG_ON(last && + !can_merge_ctx(last->context, + rq->context)); + GEM_BUG_ON(last && + i915_seqno_passed(last->fence.seqno, + rq->fence.seqno)); + + submit = true; + last = rq; + } + } + + rb_erase_cached(&p->node, &execlists->queue); + i915_priolist_free(p); + } +done: + *port++ = i915_request_get(last); + + /* + * Here be a bit of magic! Or sleight-of-hand, whichever you prefer. + * + * We choose the priority hint such that if we add a request of greater + * priority than this, we kick the submission tasklet to decide on + * the right order of submitting the requests to hardware. We must + * also be prepared to reorder requests as they are in-flight on the + * HW. We derive the priority hint then as the first "hole" in + * the HW submission ports and if there are no available slots, + * the priority of the lowest executing request, i.e. last. + * + * When we do receive a higher priority request ready to run from the + * user, see queue_request(), the priority hint is bumped to that + * request triggering preemption on the next dequeue (or subsequent + * interrupt for secondary ports). + */ + execlists->queue_priority_hint = queue_prio(execlists); + spin_unlock(&engine->active.lock); + + /* + * We can skip poking the HW if we ended up with exactly the same set + * of requests as currently running, e.g. trying to timeslice a pair + * of ordered contexts. 
+ */ + if (submit && + memcmp(execlists->active, + execlists->pending, + (port - execlists->pending) * sizeof(*port))) { + *port = NULL; + while (port-- != execlists->pending) + execlists_schedule_in(*port, port - execlists->pending); + + WRITE_ONCE(execlists->yield, -1); + set_preempt_timeout(engine, *execlists->active); + execlists_submit_ports(engine); + } else { + ring_set_paused(engine, 0); + while (port-- != execlists->pending) + i915_request_put(*port); + *execlists->pending = NULL; + } +} + +static void execlists_dequeue_irq(struct intel_engine_cs *engine) +{ + local_irq_disable(); /* Suspend interrupts across request submission */ + execlists_dequeue(engine); + local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */ +} + +static inline void clear_ports(struct i915_request **ports, int count) +{ + memset_p((void **)ports, NULL, count); +} + +static inline void +copy_ports(struct i915_request **dst, struct i915_request **src, int count) +{ + /* A memcpy_p() would be very useful here! */ + while (count--) + WRITE_ONCE(*dst++, *src++); /* avoid write tearing */ +} + +static struct i915_request ** +cancel_port_requests(struct intel_engine_execlists * const execlists, + struct i915_request **inactive) +{ + struct i915_request * const *port; + + for (port = execlists->pending; *port; port++) + *inactive++ = *port; + clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending)); + + /* Mark the end of active before we overwrite *active */ + for (port = xchg(&execlists->active, execlists->pending); *port; port++) + *inactive++ = *port; + clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight)); + + smp_wmb(); /* complete the seqlock for execlists_active() */ + WRITE_ONCE(execlists->active, execlists->inflight); + + /* Having cancelled all outstanding process_csb(), stop their timers */ + GEM_BUG_ON(execlists->pending[0]); + cancel_timer(&execlists->timer); + cancel_timer(&execlists->preempt); + + return inactive; +} + +static inline void +invalidate_csb_entries(const u64 *first, const u64 *last) +{ + clflush((void *)first); + clflush((void *)last); +} + +/* + * Starting with Gen12, the status has a new format: + * + * bit 0: switched to new queue + * bit 1: reserved + * bit 2: semaphore wait mode (poll or signal), only valid when + * switch detail is set to "wait on semaphore" + * bits 3-5: engine class + * bits 6-11: engine instance + * bits 12-14: reserved + * bits 15-25: sw context id of the lrc the GT switched to + * bits 26-31: sw counter of the lrc the GT switched to + * bits 32-35: context switch detail + * - 0: ctx complete + * - 1: wait on sync flip + * - 2: wait on vblank + * - 3: wait on scanline + * - 4: wait on semaphore + * - 5: context preempted (not on SEMAPHORE_WAIT or + * WAIT_FOR_EVENT) + * bit 36: reserved + * bits 37-43: wait detail (for switch detail 1 to 4) + * bits 44-46: reserved + * bits 47-57: sw context id of the lrc the GT switched away from + * bits 58-63: sw counter of the lrc the GT switched away from + */ +static inline bool gen12_csb_parse(const u64 csb) +{ + bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb)); + bool new_queue = + lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE; + + /* + * The context switch detail is not guaranteed to be 5 when a preemption + * occurs, so we can't just check for that. The check below works for + * all the cases we care about, including preemptions of WAIT + * instructions and lite-restore. 
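To make the Gen12 CSB layout documented above concrete, here is a small standalone decode of an invented entry (userspace C; the masks restate the GEN12_* macros above and the sample value is fabricated purely for illustration):

#include <stdint.h>
#include <stdio.h>

#define SW_CTX_ID(dw) (((dw) >> 15) & 0x7ff) /* bits 15-25 of either dword */
#define IDLE_CTX_ID   0x7ffu
#define NEW_QUEUE     0x1u                   /* bit 0 of the lower dword */

int main(void)
{
	/* Invented entry: switched to context 5 on a new queue; the
	 * outgoing slot carries the idle context id. */
	uint32_t lo = (5u << 15) | NEW_QUEUE;
	uint32_t hi = IDLE_CTX_ID << 15;

	int away_valid = SW_CTX_ID(hi) != IDLE_CTX_ID;
	int new_queue = lo & NEW_QUEUE;

	/* Same promotion decision as gen12_csb_parse() above */
	printf("promote = %d\n", !away_valid || new_queue);
	return 0;
}

For this sample the outgoing context id is idle, so the entry is treated as a promotion, mirroring the "!ctx_away_valid || new_queue" branch in the parser.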
Preempt-to-idle via the CTRL register + * would require some extra handling, but we don't support that. + */ + if (!ctx_away_valid || new_queue) { + GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb))); + return true; + } + + /* + * switch detail = 5 is covered by the case above and we do not expect a + * context switch on an unsuccessful wait instruction since we always + * use polling mode. + */ + GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb))); + return false; +} + +static inline bool gen8_csb_parse(const u64 csb) +{ + return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); +} + +static noinline u64 +wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb) +{ + u64 entry; + + /* + * Reading from the HWSP has one particular advantage: we can detect + * a stale entry. Since the write into HWSP is broken, we have no reason + * to trust the HW at all, the mmio entry may equally be unordered, so + * we prefer the path that is self-checking and as a last resort, + * return the mmio value. + * + * tgl,dg1:HSDES#22011327657 + */ + preempt_disable(); + if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) { + int idx = csb - engine->execlists.csb_status; + int status; + + status = GEN8_EXECLISTS_STATUS_BUF; + if (idx >= 6) { + status = GEN11_EXECLISTS_STATUS_BUF2; + idx -= 6; + } + status += sizeof(u64) * idx; + + entry = intel_uncore_read64(engine->uncore, + _MMIO(engine->mmio_base + status)); + } + preempt_enable(); + + return entry; +} + +static inline u64 +csb_read(const struct intel_engine_cs *engine, u64 * const csb) +{ + u64 entry = READ_ONCE(*csb); + + /* + * Unfortunately, the GPU does not always serialise its write + * of the CSB entries before its write of the CSB pointer, at least + * from the perspective of the CPU, using what is known as a Global + * Observation Point. We may read a new CSB tail pointer, but then + * read the stale CSB entries, causing us to misinterpret the + * context-switch events, and eventually declare the GPU hung. + * + * icl:HSDES#1806554093 + * tgl:HSDES#22011248461 + */ + if (unlikely(entry == -1)) + entry = wa_csb_read(engine, csb); + + /* Consume this entry so that we can spot its future reuse. */ + WRITE_ONCE(*csb, -1); + + /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */ + return entry; +} + +static void new_timeslice(struct intel_engine_execlists *el) +{ + /* By cancelling, we will start afresh in start_timeslice() */ + cancel_timer(&el->timer); +} + +static struct i915_request ** +process_csb(struct intel_engine_cs *engine, struct i915_request **inactive) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + u64 * const buf = execlists->csb_status; + const u8 num_entries = execlists->csb_size; + struct i915_request **prev; + u8 head, tail; + + /* + * As we modify our execlists state tracking we require exclusive + * access. Either we are inside the tasklet, or the tasklet is disabled + * and we assume that is only inside the reset paths and so serialised. + */ + GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) && + !reset_in_progress(execlists)); + GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine)); + + /* + * Note that csb_write, csb_status may be either in HWSP or mmio. + * When reading from the csb_write mmio register, we have to be + * careful to only use the GEN8_CSB_WRITE_PTR portion, which is + * the low 4bits. 
As it happens we know the next 4bits are always
+	 * zero and so we can simply mask off the low u8 of the register
+	 * and treat it identically to reading from the HWSP (without having
+	 * to use explicit shifting and masking, and probably bifurcating
+	 * the code to handle the legacy mmio read).
+	 */
+	head = execlists->csb_head;
+	tail = READ_ONCE(*execlists->csb_write);
+	if (unlikely(head == tail))
+		return inactive;
+
+	/*
+	 * We will consume all events from HW, or at least pretend to.
+	 *
+	 * The sequence of events from the HW is deterministic, and derived
+	 * from our writes to the ELSP, with a smidgen of variability for
+	 * the arrival of the asynchronous requests wrt the inflight
+	 * execution. If the HW sends an event that does not correspond with
+	 * the one we are expecting, we have to abandon all hope as we lose
+	 * all tracking of what the engine is actually executing. We will
+	 * only detect we are out of sequence with the HW when we get an
+	 * 'impossible' event because we have already drained our own
+	 * preemption/promotion queue. If this occurs, we know that we likely
+	 * lost track of execution earlier and must unwind and restart; the
+	 * simplest way is to stop processing the event queue and force the
+	 * engine to reset.
+	 */
+	execlists->csb_head = tail;
+	ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
+
+	/*
+	 * Hopefully paired with a wmb() in HW!
+	 *
+	 * We must complete the read of the write pointer before any reads
+	 * from the CSB, so that we do not see stale values. Without an rmb
+	 * (lfence) the HW may speculatively perform the CSB[] reads *before*
+	 * we perform the READ_ONCE(*csb_write).
+	 */
+	rmb();
+
+	/* Remember who was last running under the timer */
+	prev = inactive;
+	*prev = NULL;
+
+	do {
+		bool promote;
+		u64 csb;
+
+		if (++head == num_entries)
+			head = 0;
+
+		/*
+		 * We are flying near dragons again.
+		 *
+		 * We hold a reference to the request in execlist_port[]
+		 * but no more than that. We are operating in softirq
+		 * context and so cannot hold any mutex or sleep. That
+		 * prevents us stopping the requests we are processing
+		 * in port[] from being retired simultaneously (the
+		 * breadcrumb will be complete before we see the
+		 * context-switch). As we only hold the reference to the
+		 * request, any pointer chasing underneath the request
+		 * is subject to a potential use-after-free. Thus we
+		 * store all of the bookkeeping within port[] as
+		 * required, and avoid using unguarded pointers beneath
+		 * request itself. The same applies to the atomic
+		 * status notifier.
+		 */
+
+		csb = csb_read(engine, buf + head);
+		ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n",
+			     head, upper_32_bits(csb), lower_32_bits(csb));
+
+		if (INTEL_GEN(engine->i915) >= 12)
+			promote = gen12_csb_parse(csb);
+		else
+			promote = gen8_csb_parse(csb);
+		if (promote) {
+			struct i915_request * const *old = execlists->active;
+
+			if (GEM_WARN_ON(!*execlists->pending)) {
+				execlists->error_interrupt |= ERROR_CSB;
+				break;
+			}
+
+			ring_set_paused(engine, 0);
+
+			/* Point active to the new ELSP; prevent overwriting */
+			WRITE_ONCE(execlists->active, execlists->pending);
+			smp_wmb(); /* notify execlists_active() */
+
+			/* cancel old inflight, prepare for switch */
+			trace_ports(execlists, "preempted", old);
+			while (*old)
+				*inactive++ = *old++;
+
+			/* switch pending to inflight */
+			GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+			copy_ports(execlists->inflight,
+				   execlists->pending,
+				   execlists_num_ports(execlists));
+			smp_wmb(); /* complete the seqlock */
+			WRITE_ONCE(execlists->active, execlists->inflight);
+
+			/* XXX Magic delay for tgl */
+			ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+			WRITE_ONCE(execlists->pending[0], NULL);
+		} else {
+			if (GEM_WARN_ON(!*execlists->active)) {
+				execlists->error_interrupt |= ERROR_CSB;
+				break;
+			}
+
+			/* port0 completed, advanced to port1 */
+			trace_ports(execlists, "completed", execlists->active);
+
+			/*
+			 * We rely on the hardware being strongly
+			 * ordered, that the breadcrumb write is
+			 * coherent (visible from the CPU) before the
+			 * user interrupt is processed. One might assume
+			 * that the breadcrumb write being before the
+			 * user interrupt and the CS event for the context
+			 * switch would therefore be before the CS event
+			 * itself...
+			 */
+			if (GEM_SHOW_DEBUG() &&
+			    !__i915_request_is_complete(*execlists->active)) {
+				struct i915_request *rq = *execlists->active;
+				const u32 *regs __maybe_unused =
+					rq->context->lrc_reg_state;
+
+				ENGINE_TRACE(engine,
+					     "context completed before request!\n");
+				ENGINE_TRACE(engine,
+					     "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
+					     ENGINE_READ(engine, RING_START),
+					     ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
+					     ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
+					     ENGINE_READ(engine, RING_CTL),
+					     ENGINE_READ(engine, RING_MI_MODE));
+				ENGINE_TRACE(engine,
+					     "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
+					     i915_ggtt_offset(rq->ring->vma),
+					     rq->head, rq->tail,
+					     rq->fence.context,
+					     lower_32_bits(rq->fence.seqno),
+					     hwsp_seqno(rq));
+				ENGINE_TRACE(engine,
+					     "ctx:{start:%08x, head:%04x, tail:%04x}, ",
+					     regs[CTX_RING_START],
+					     regs[CTX_RING_HEAD],
+					     regs[CTX_RING_TAIL]);
+			}
+
+			*inactive++ = *execlists->active++;
+
+			GEM_BUG_ON(execlists->active - execlists->inflight >
+				   execlists_num_ports(execlists));
+		}
+	} while (head != tail);
+
+	/*
+	 * Gen11 has proven to fail wrt global observation point between
+	 * entry and tail update, failing on the ordering and thus
+	 * we see an old entry in the context status buffer.
+	 *
+	 * Forcibly evict out entries for the next gpu csb update,
+	 * to increase the odds that we get fresh entries even with
+	 * non-working hardware. The cost of doing so comes out mostly
+	 * in the wash, as hardware, working or not, will need to do the
+	 * invalidation regardless.
+	 */
+	invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
+
+	/*
+	 * We assume that any event reflects a change in context flow
+	 * and merits a fresh timeslice. We reinstall the timer after
+	 * inspecting the queue to see if we need to resubmit.
+	 */
+	if (*prev != *execlists->active) /* elide lite-restores */
+		new_timeslice(execlists);
+
+	return inactive;
+}
+
+static void post_process_csb(struct i915_request **port,
+			     struct i915_request **last)
+{
+	while (port != last)
+		execlists_schedule_out(*port++);
+}
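For readers unfamiliar with the head/tail discipline used by process_csb() above, the following stand-alone sketch models the consumer side of such a single-writer status ring in user-space C. It is illustrative only: the ring layout, names and sizes are hypothetical, and C11 acquire/release ordering stands in for the rmb() that pairs with the hardware's write of the tail pointer.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_ENTRIES 12

struct status_ring {
	uint64_t entry[NUM_ENTRIES];
	_Atomic uint8_t tail;	/* advanced by the producer */
	uint8_t head;		/* owned by the consumer */
};

static int consume(struct status_ring *ring)
{
	/* acquire pairs with the producer's release store of tail */
	uint8_t tail = atomic_load_explicit(&ring->tail, memory_order_acquire);
	uint8_t head = ring->head;
	int seen = 0;

	while (head != tail) {
		if (++head == NUM_ENTRIES)
			head = 0;	/* wrap, like the CSB head above */
		printf("event[%u] = %#llx\n", (unsigned int)head,
		       (unsigned long long)ring->entry[head]);
		seen++;
	}

	ring->head = head;	/* remember where we stopped */
	return seen;
}

int main(void)
{
	/* head parked on the last slot so the first event lands in [0] */
	struct status_ring ring = { .head = NUM_ENTRIES - 1 };

	/* pretend the producer posted two events and advanced the tail */
	ring.entry[0] = 0x8002;
	ring.entry[1] = 0x8001;
	atomic_store_explicit(&ring.tail, 1, memory_order_release);

	return consume(&ring) == 2 ? 0 : 1;
}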
+
+static void __execlists_hold(struct i915_request *rq)
+{
+	LIST_HEAD(list);
+
+	do {
+		struct i915_dependency *p;
+
+		if (i915_request_is_active(rq))
+			__i915_request_unsubmit(rq);
+
+		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+		list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+		i915_request_set_hold(rq);
+		RQ_TRACE(rq, "on hold\n");
+
+		for_each_waiter(p, rq) {
+			struct i915_request *w =
+				container_of(p->waiter, typeof(*w), sched);
+
+			/* Leave semaphores spinning on the other engines */
+			if (w->engine != rq->engine)
+				continue;
+
+			if (!i915_request_is_ready(w))
+				continue;
+
+			if (__i915_request_is_complete(w))
+				continue;
+
+			if (i915_request_on_hold(w))
+				continue;
+
+			list_move_tail(&w->sched.link, &list);
+		}
+
+		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+	} while (rq);
+}
+
+static bool execlists_hold(struct intel_engine_cs *engine,
+			   struct i915_request *rq)
+{
+	if (i915_request_on_hold(rq))
+		return false;
+
+	spin_lock_irq(&engine->active.lock);
+
+	if (__i915_request_is_complete(rq)) { /* too late! */
+		rq = NULL;
+		goto unlock;
+	}
+
+	/*
+	 * Transfer this request onto the hold queue to prevent it
+	 * being resubmitted to HW (and potentially completed) before we have
+	 * released it. Since we may have already submitted following
+	 * requests, we need to remove those as well.
+	 */
+	GEM_BUG_ON(i915_request_on_hold(rq));
+	GEM_BUG_ON(rq->engine != engine);
+	__execlists_hold(rq);
+	GEM_BUG_ON(list_empty(&engine->active.hold));
+
+unlock:
+	spin_unlock_irq(&engine->active.lock);
+	return rq;
+}
+
+static bool hold_request(const struct i915_request *rq)
+{
+	struct i915_dependency *p;
+	bool result = false;
+
+	/*
+	 * If one of our ancestors is on hold, we must also be on hold,
+	 * otherwise we will bypass it and execute before it.
+ */ + rcu_read_lock(); + for_each_signaler(p, rq) { + const struct i915_request *s = + container_of(p->signaler, typeof(*s), sched); + + if (s->engine != rq->engine) + continue; + + result = i915_request_on_hold(s); + if (result) + break; + } + rcu_read_unlock(); + + return result; +} + +static void __execlists_unhold(struct i915_request *rq) +{ + LIST_HEAD(list); + + do { + struct i915_dependency *p; + + RQ_TRACE(rq, "hold release\n"); + + GEM_BUG_ON(!i915_request_on_hold(rq)); + GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); + + i915_request_clear_hold(rq); + list_move_tail(&rq->sched.link, + i915_sched_lookup_priolist(rq->engine, + rq_prio(rq))); + set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); + + /* Also release any children on this engine that are ready */ + for_each_waiter(p, rq) { + struct i915_request *w = + container_of(p->waiter, typeof(*w), sched); + + /* Propagate any change in error status */ + if (rq->fence.error) + i915_request_set_error_once(w, rq->fence.error); + + if (w->engine != rq->engine) + continue; + + if (!i915_request_on_hold(w)) + continue; + + /* Check that no other parents are also on hold */ + if (hold_request(w)) + continue; + + list_move_tail(&w->sched.link, &list); + } + + rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); + } while (rq); +} + +static void execlists_unhold(struct intel_engine_cs *engine, + struct i915_request *rq) +{ + spin_lock_irq(&engine->active.lock); + + /* + * Move this request back to the priority queue, and all of its + * children and grandchildren that were suspended along with it. + */ + __execlists_unhold(rq); + + if (rq_prio(rq) > engine->execlists.queue_priority_hint) { + engine->execlists.queue_priority_hint = rq_prio(rq); + tasklet_hi_schedule(&engine->execlists.tasklet); + } + + spin_unlock_irq(&engine->active.lock); +} + +struct execlists_capture { + struct work_struct work; + struct i915_request *rq; + struct i915_gpu_coredump *error; +}; + +static void execlists_capture_work(struct work_struct *work) +{ + struct execlists_capture *cap = container_of(work, typeof(*cap), work); + const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; + struct intel_engine_cs *engine = cap->rq->engine; + struct intel_gt_coredump *gt = cap->error->gt; + struct intel_engine_capture_vma *vma; + + /* Compress all the objects attached to the request, slow! 
*/ + vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp); + if (vma) { + struct i915_vma_compress *compress = + i915_vma_capture_prepare(gt); + + intel_engine_coredump_add_vma(gt->engine, vma, compress); + i915_vma_capture_finish(gt, compress); + } + + gt->simulated = gt->engine->simulated; + cap->error->simulated = gt->simulated; + + /* Publish the error state, and announce it to the world */ + i915_error_state_store(cap->error); + i915_gpu_coredump_put(cap->error); + + /* Return this request and all that depend upon it for signaling */ + execlists_unhold(engine, cap->rq); + i915_request_put(cap->rq); + + kfree(cap); +} + +static struct execlists_capture *capture_regs(struct intel_engine_cs *engine) +{ + const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN; + struct execlists_capture *cap; + + cap = kmalloc(sizeof(*cap), gfp); + if (!cap) + return NULL; + + cap->error = i915_gpu_coredump_alloc(engine->i915, gfp); + if (!cap->error) + goto err_cap; + + cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp); + if (!cap->error->gt) + goto err_gpu; + + cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp); + if (!cap->error->gt->engine) + goto err_gt; + + cap->error->gt->engine->hung = true; + + return cap; + +err_gt: + kfree(cap->error->gt); +err_gpu: + kfree(cap->error); +err_cap: + kfree(cap); + return NULL; +} + +static struct i915_request * +active_context(struct intel_engine_cs *engine, u32 ccid) +{ + const struct intel_engine_execlists * const el = &engine->execlists; + struct i915_request * const *port, *rq; + + /* + * Use the most recent result from process_csb(), but just in case + * we trigger an error (via interrupt) before the first CS event has + * been written, peek at the next submission. + */ + + for (port = el->active; (rq = *port); port++) { + if (rq->context->lrc.ccid == ccid) { + ENGINE_TRACE(engine, + "ccid:%x found at active:%zd\n", + ccid, port - el->active); + return rq; + } + } + + for (port = el->pending; (rq = *port); port++) { + if (rq->context->lrc.ccid == ccid) { + ENGINE_TRACE(engine, + "ccid:%x found at pending:%zd\n", + ccid, port - el->pending); + return rq; + } + } + + ENGINE_TRACE(engine, "ccid:%x not found\n", ccid); + return NULL; +} + +static u32 active_ccid(struct intel_engine_cs *engine) +{ + return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI); +} + +static void execlists_capture(struct intel_engine_cs *engine) +{ + struct execlists_capture *cap; + + if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)) + return; + + /* + * We need to _quickly_ capture the engine state before we reset. + * We are inside an atomic section (softirq) here and we are delaying + * the forced preemption event. + */ + cap = capture_regs(engine); + if (!cap) + return; + + spin_lock_irq(&engine->active.lock); + cap->rq = active_context(engine, active_ccid(engine)); + if (cap->rq) { + cap->rq = active_request(cap->rq->context->timeline, cap->rq); + cap->rq = i915_request_get_rcu(cap->rq); + } + spin_unlock_irq(&engine->active.lock); + if (!cap->rq) + goto err_free; + + /* + * Remove the request from the execlists queue, and take ownership + * of the request. We pass it to our worker who will _slowly_ compress + * all the pages the _user_ requested for debugging their batch, after + * which we return it to the queue for signaling. + * + * By removing them from the execlists queue, we also remove the + * requests from being processed by __unwind_incomplete_requests() + * during the intel_engine_reset(), and so they will *not* be replayed + * afterwards. 
+	 *
+	 * Note that because we have not yet reset the engine at this point,
+	 * it is possible that the request we have identified as guilty did
+	 * in fact complete, and we will then hit an arbitration point
+	 * allowing the outstanding preemption to succeed. The likelihood
+	 * of that is very low (as capturing of the engine registers should be
+	 * fast enough to run inside an irq-off atomic section!), so we will
+	 * simply hold that request accountable for being non-preemptible
+	 * long enough to force the reset.
+	 */
+	if (!execlists_hold(engine, cap->rq))
+		goto err_rq;
+
+	INIT_WORK(&cap->work, execlists_capture_work);
+	schedule_work(&cap->work);
+	return;
+
+err_rq:
+	i915_request_put(cap->rq);
+err_free:
+	i915_gpu_coredump_put(cap->error);
+	kfree(cap);
+}
+
+static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
+{
+	const unsigned int bit = I915_RESET_ENGINE + engine->id;
+	unsigned long *lock = &engine->gt->reset.flags;
+
+	if (!intel_has_reset_engine(engine->gt))
+		return;
+
+	if (test_and_set_bit(bit, lock))
+		return;
+
+	ENGINE_TRACE(engine, "reset for %s\n", msg);
+
+	/* Mark this tasklet as disabled to avoid waiting for it to complete */
+	tasklet_disable_nosync(&engine->execlists.tasklet);
+
+	ring_set_paused(engine, 1); /* Freeze the current request in place */
+	execlists_capture(engine);
+	intel_engine_reset(engine, msg);
+
+	tasklet_enable(&engine->execlists.tasklet);
+	clear_and_wake_up_bit(bit, lock);
+}
+
+static bool preempt_timeout(const struct intel_engine_cs *const engine)
+{
+	const struct timer_list *t = &engine->execlists.preempt;
+
+	if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+		return false;
+
+	if (!timer_expired(t))
+		return false;
+
+	return engine->execlists.pending[0];
+}
+
+/*
+ * Check the unread Context Status Buffers and manage the submission of new
+ * contexts to the ELSP accordingly.
+ */
+static void execlists_submission_tasklet(unsigned long data)
+{
+	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+	struct i915_request **inactive;
+
+	rcu_read_lock();
+	inactive = process_csb(engine, post);
+	GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
+
+	if (unlikely(preempt_timeout(engine))) {
+		cancel_timer(&engine->execlists.preempt);
+		engine->execlists.error_interrupt |= ERROR_PREEMPT;
+	}
+
+	if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+		const char *msg;
+
+		/* Generate the error message in priority order wrt the user! */
+		if (engine->execlists.error_interrupt & GENMASK(15, 0))
+			msg = "CS error"; /* thrown by a user payload */
+		else if (engine->execlists.error_interrupt & ERROR_CSB)
+			msg = "invalid CSB event";
+		else if (engine->execlists.error_interrupt & ERROR_PREEMPT)
+			msg = "preemption time out";
+		else
+			msg = "internal error";
+
+		engine->execlists.error_interrupt = 0;
+		execlists_reset(engine, msg);
+	}
+
+	if (!engine->execlists.pending[0]) {
+		execlists_dequeue_irq(engine);
+		start_timeslice(engine);
+	}
+
+	post_process_csb(post, inactive);
+	rcu_read_unlock();
+}
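The error triage at the top of the tasklet above is a simple priority cascade over one accumulated error word: user-visible CS errors outrank driver-internal conditions. A stand-alone sketch of the same pattern follows; the bit positions here are illustrative and not the i915 layout, only the message strings mirror the code above.

#include <stdint.h>
#include <stdio.h>

#define ERROR_USER_MASK	0x0000ffffu	/* cf. GENMASK(15, 0) above */
#define ERROR_CSB	(1u << 16)	/* hypothetical bit position */
#define ERROR_PREEMPT	(1u << 17)	/* hypothetical bit position */

static const char *classify(uint32_t error_interrupt)
{
	if (error_interrupt & ERROR_USER_MASK)
		return "CS error";		/* thrown by a user payload */
	if (error_interrupt & ERROR_CSB)
		return "invalid CSB event";
	if (error_interrupt & ERROR_PREEMPT)
		return "preemption time out";
	return "internal error";
}

int main(void)
{
	/* when both are set, the CSB error dominates the message */
	printf("%s\n", classify(ERROR_CSB | ERROR_PREEMPT));
	return 0;
}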
+
+static void __execlists_kick(struct intel_engine_execlists *execlists)
+{
+	/* Kick the tasklet for some interrupt coalescing and reset handling */
+	tasklet_hi_schedule(&execlists->tasklet);
+}
+
+#define execlists_kick(t, member) \
+	__execlists_kick(container_of(t, struct intel_engine_execlists, member))
+
+static void execlists_timeslice(struct timer_list *timer)
+{
+	execlists_kick(timer, timer);
+}
+
+static void execlists_preempt(struct timer_list *timer)
+{
+	execlists_kick(timer, preempt);
+}
+
+static void queue_request(struct intel_engine_cs *engine,
+			  struct i915_request *rq)
+{
+	GEM_BUG_ON(!list_empty(&rq->sched.link));
+	list_add_tail(&rq->sched.link,
+		      i915_sched_lookup_priolist(engine, rq_prio(rq)));
+	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
+static bool submit_queue(struct intel_engine_cs *engine,
+			 const struct i915_request *rq)
+{
+	struct intel_engine_execlists *execlists = &engine->execlists;
+
+	if (rq_prio(rq) <= execlists->queue_priority_hint)
+		return false;
+
+	execlists->queue_priority_hint = rq_prio(rq);
+	return true;
+}
+
+static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+			     const struct i915_request *rq)
+{
+	GEM_BUG_ON(i915_request_on_hold(rq));
+	return !list_empty(&engine->active.hold) && hold_request(rq);
+}
+
+static void execlists_submit_request(struct i915_request *request)
+{
+	struct intel_engine_cs *engine = request->engine;
+	unsigned long flags;
+
+	/* Will be called from irq-context when using foreign fences. */
+	spin_lock_irqsave(&engine->active.lock, flags);
+
+	if (unlikely(ancestor_on_hold(engine, request))) {
+		RQ_TRACE(request, "ancestor on hold\n");
+		list_add_tail(&request->sched.link, &engine->active.hold);
+		i915_request_set_hold(request);
+	} else {
+		queue_request(engine, request);
+
+		GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+		GEM_BUG_ON(list_empty(&request->sched.link));
+
+		if (submit_queue(engine, request))
+			__execlists_kick(&engine->execlists);
+	}
+
+	spin_unlock_irqrestore(&engine->active.lock, flags);
+}
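submit_queue() above only kicks the tasklet when the incoming request outranks the current queue_priority_hint, coalescing wakeups for work that the scheduler will reach anyway. A minimal user-space sketch of that gating, with hypothetical types standing in for the engine and tasklet:

#include <stdbool.h>
#include <stdio.h>
#include <limits.h>

struct engine_sketch {
	int queue_priority_hint;
	int kicks;	/* stands in for tasklet_hi_schedule() */
};

static bool submit_queue_sketch(struct engine_sketch *e, int prio)
{
	if (prio <= e->queue_priority_hint)
		return false;	/* an equal/higher hint is already queued */

	e->queue_priority_hint = prio;
	return true;
}

static void submit(struct engine_sketch *e, int prio)
{
	if (submit_queue_sketch(e, prio))
		e->kicks++;	/* kick the submission tasklet */
}

int main(void)
{
	struct engine_sketch e = { .queue_priority_hint = INT_MIN };

	submit(&e, 0);	/* first request always kicks */
	submit(&e, -5);	/* lower priority: no kick needed */
	submit(&e, 10);	/* preempting priority: kick again */

	printf("kicks=%d hint=%d\n", e.kicks, e.queue_priority_hint);
	return e.kicks == 2 ? 0 : 1;
}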
+
+static int execlists_context_pre_pin(struct intel_context *ce,
+				     struct i915_gem_ww_ctx *ww,
+				     void **vaddr)
+{
+	return lrc_pre_pin(ce, ce->engine, ww, vaddr);
+}
+
+static int execlists_context_pin(struct intel_context *ce, void *vaddr)
+{
+	return lrc_pin(ce, ce->engine, vaddr);
+}
+
+static int execlists_context_alloc(struct intel_context *ce)
+{
+	return lrc_alloc(ce, ce->engine);
+}
+
+static const struct intel_context_ops execlists_context_ops = {
+	.flags = COPS_HAS_INFLIGHT,
+
+	.alloc = execlists_context_alloc,
+
+	.pre_pin = execlists_context_pre_pin,
+	.pin = execlists_context_pin,
+	.unpin = lrc_unpin,
+	.post_unpin = lrc_post_unpin,
+
+	.enter = intel_context_enter_engine,
+	.exit = intel_context_exit_engine,
+
+	.reset = lrc_reset,
+	.destroy = lrc_destroy,
+};
+
+static int emit_pdps(struct i915_request *rq)
+{
+	const struct intel_engine_cs * const engine = rq->engine;
+	struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm);
+	int err, i;
+	u32 *cs;
+
+	GEM_BUG_ON(intel_vgpu_active(rq->engine->i915));
+
+	/*
+	 * Beware ye of the dragons, this sequence is magic!
+	 *
+	 * Small changes to this sequence can cause anything from
+	 * GPU hangs to forcewake errors and machine lockups!
+	 */
+
+	cs = intel_ring_begin(rq, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(rq, cs);
+
+	/* Flush any residual operations from the context load */
+	err = engine->emit_flush(rq, EMIT_FLUSH);
+	if (err)
+		return err;
+
+	/* Magic required to prevent forcewake errors! */
+	err = engine->emit_flush(rq, EMIT_INVALIDATE);
+	if (err)
+		return err;
+
+	cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	/* Ensure the LRI have landed before we invalidate & continue */
+	*cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED;
+	for (i = GEN8_3LVL_PDPES; i--; ) {
+		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
+		u32 base = engine->mmio_base;
+
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i));
+		*cs++ = upper_32_bits(pd_daddr);
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i));
+		*cs++ = lower_32_bits(pd_daddr);
+	}
+	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+	intel_ring_advance(rq, cs);
+
+	return 0;
+}
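emit_pdps() relies on the intel_ring_begin()/intel_ring_advance() contract: reserve a fixed number of dwords, fill every one of them, then publish the new tail. A toy model of that contract is sketched below; the ring layout, sizes and opcode values are made up, only the reserve/fill/advance shape mirrors the code above.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define RING_DWORDS 64
#define MI_NOOP_SKETCH 0x00000000u	/* placeholder no-op encoding */

struct ring_sketch {
	uint32_t dw[RING_DWORDS];
	unsigned int tail;
};

static uint32_t *ring_begin(struct ring_sketch *ring, unsigned int n)
{
	/* real rings handle wrap and waiting for space; elided here */
	assert(ring->tail + n <= RING_DWORDS);
	return &ring->dw[ring->tail];
}

static void ring_advance(struct ring_sketch *ring, const uint32_t *cs)
{
	/* publish: everything before the new tail is now valid */
	ring->tail = (unsigned int)(cs - ring->dw);
}

int main(void)
{
	struct ring_sketch ring = { 0 };
	uint32_t *cs = ring_begin(&ring, 2);

	*cs++ = 0x11000001;		/* made-up "disable arbitration" */
	*cs++ = MI_NOOP_SKETCH;		/* pad to the reserved count */
	ring_advance(&ring, cs);

	printf("tail=%u\n", ring.tail);
	return ring.tail == 2 ? 0 : 1;
}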
+
+static int execlists_request_alloc(struct i915_request *request)
+{
+	int ret;
+
+	GEM_BUG_ON(!intel_context_is_pinned(request->context));
+
+	/*
+	 * Flush enough space to reduce the likelihood of waiting after
+	 * we start building the request - in which case we will just
+	 * have to repeat work.
+	 */
+	request->reserved_space += EXECLISTS_REQUEST_SIZE;
+
+	/*
+	 * Note that after this point, we have committed to using
+	 * this request as it is being used to both track the
+	 * state of engine initialisation and liveness of the
+	 * golden renderstate above. Think twice before you try
+	 * to cancel/unwind this request now.
+	 */
+
+	if (!i915_vm_is_4lvl(request->context->vm)) {
+		ret = emit_pdps(request);
+		if (ret)
+			return ret;
+	}
+
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
+	if (ret)
+		return ret;
+
+	request->reserved_space -= EXECLISTS_REQUEST_SIZE;
+	return 0;
+}
+
+static void reset_csb_pointers(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	const unsigned int reset_value = execlists->csb_size - 1;
+
+	ring_set_paused(engine, 0);
+
+	/*
+	 * Sometimes Icelake forgets to reset its pointers on a GPU reset.
+	 * Bludgeon them with a mmio update to be sure.
+	 */
+	ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+		     0xffff << 16 | reset_value << 8 | reset_value);
+	ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+	/*
+	 * After a reset, the HW starts writing into CSB entry [0]. We
+	 * therefore have to set our HEAD pointer back one entry so that
+	 * the *first* entry we check is entry 0. To complicate this further,
+	 * as we don't wait for the first interrupt after reset, we have to
+	 * fake the HW write to point back to the last entry so that our
+	 * inline comparison of our cached head position against the last HW
+	 * write works even before the first interrupt.
+	 */
+	execlists->csb_head = reset_value;
+	WRITE_ONCE(*execlists->csb_write, reset_value);
+	wmb(); /* Make sure this is visible to HW (paranoia?) */
+
+	/* Check that the GPU does indeed update the CSB entries! */
+	memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64));
+	invalidate_csb_entries(&execlists->csb_status[0],
+			       &execlists->csb_status[reset_value]);
+
+	/* Once more for luck and our trusty paranoia */
+	ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR,
+		     0xffff << 16 | reset_value << 8 | reset_value);
+	ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR);
+
+	GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value);
+}
+
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+	struct intel_timeline *tl;
+
+	list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+		intel_timeline_reset_seqno(tl);
+}
+
+static void execlists_sanitize(struct intel_engine_cs *engine)
+{
+	GEM_BUG_ON(execlists_active(&engine->execlists));
+
+	/*
+	 * Poison residual state on resume, in case the suspend didn't!
+	 *
+	 * We have to assume that across suspend/resume (or other loss
+	 * of control) the contents of our pinned buffers have been
+	 * lost, replaced by garbage. Since this doesn't always happen,
+	 * let's poison such state so that we more quickly spot when
+	 * we falsely assume it has been preserved.
+	 */
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+	reset_csb_pointers(engine);
+
+	/*
+	 * The kernel_context HWSP is stored in the status_page. As above,
+	 * that may be lost on resume/initialisation, and so we need to
+	 * reset the value in the HWSP.
+	 */
+	sanitize_hwsp(engine);
+
+	/* And scrub the dirty cachelines for the HWSP */
+	clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+}
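execlists_sanitize() poisons the status page so stale state is caught loudly rather than silently trusted after suspend/resume. The same idea in a stand-alone sketch; the poison byte and page size here are arbitrary, not the kernel's POISON_INUSE value.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define POISON_BYTE 0x5a	/* hypothetical poison pattern */
#define PAGE_SZ 4096

static unsigned char status_page[PAGE_SZ];

static void sanitize(void)
{
	/* assume everything was lost; make that assumption visible */
	memset(status_page, POISON_BYTE, sizeof(status_page));
}

static int looks_poisoned(const void *p, size_t len)
{
	const unsigned char *b = p;

	while (len--)
		if (*b++ != POISON_BYTE)
			return 0;
	return 1;
}

int main(void)
{
	sanitize();

	/* a reader that forgot to reinitialise now sees the poison */
	if (looks_poisoned(status_page, 64))
		printf("stale state detected, reinitialise before use\n");

	return 0;
}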
+
+static void enable_error_interrupt(struct intel_engine_cs *engine)
+{
+	u32 status;
+
+	engine->execlists.error_interrupt = 0;
+	ENGINE_WRITE(engine, RING_EMR, ~0u);
+	ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
+
+	status = ENGINE_READ(engine, RING_ESR);
+	if (unlikely(status)) {
+		drm_err(&engine->i915->drm,
+			"engine '%s' resumed still in error: %08x\n",
+			engine->name, status);
+		__intel_gt_reset(engine->gt, engine->mask);
+	}
+
+	/*
+	 * On current gen8+, we have 2 signals to play with
+	 *
+	 * - I915_ERROR_INSTRUCTION (bit 0)
+	 *
+	 *    Generate an error if the command parser encounters an invalid
+	 *    instruction
+	 *
+	 *    This is a fatal error.
+	 *
+	 * - CP_PRIV (bit 2)
+	 *
+	 *    Generate an error on privilege violation (where the CP replaces
+	 *    the instruction with a no-op). This also fires for writes into
+	 *    read-only scratch pages.
+	 *
+	 *    This is a non-fatal error, parsing continues.
+	 *
+	 * * there are a few others defined for odd HW that we do not use
+	 *
+	 * Since CP_PRIV fires for cases where we have chosen to ignore the
+	 * error (as the HW is validating and suppressing the mistakes), we
+	 * only unmask the instruction error bit.
+	 */
+	ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
+}
+
+static void enable_execlists(struct intel_engine_cs *engine)
+{
+	u32 mode;
+
+	assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
+
+	intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
+
+	if (INTEL_GEN(engine->i915) >= 11)
+		mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
+	else
+		mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+	ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
+
+	ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+
+	ENGINE_WRITE_FW(engine,
+			RING_HWS_PGA,
+			i915_ggtt_offset(engine->status_page.vma));
+	ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+
+	enable_error_interrupt(engine);
+}
+
+static bool unexpected_starting_state(struct intel_engine_cs *engine)
+{
+	bool unexpected = false;
+
+	if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) {
+		drm_dbg(&engine->i915->drm,
+			"STOP_RING still set in RING_MI_MODE\n");
+		unexpected = true;
+	}
+
+	return unexpected;
+}
+
+static int execlists_resume(struct intel_engine_cs *engine)
+{
+	intel_mocs_init_engine(engine);
+
+	intel_breadcrumbs_reset(engine->breadcrumbs);
+
+	if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) {
+		struct drm_printer p = drm_debug_printer(__func__);
+
+		intel_engine_dump(engine, &p, NULL);
+	}
+
+	enable_execlists(engine);
+
+	return 0;
+}
+
+static void execlists_reset_prepare(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+
+	ENGINE_TRACE(engine, "depth<-%d\n",
+		     atomic_read(&execlists->tasklet.count));
+
+	/*
+	 * Prevent request submission to the hardware until we have
+	 * completed the reset in i915_gem_reset_finish(). If a request
+	 * is completed by one engine, it may then queue a request
+	 * to a second via its execlists->tasklet *just* as we are
+	 * calling engine->resume() and also writing the ELSP.
+	 * Turning off the execlists->tasklet until the reset is over
+	 * prevents the race.
+	 */
+	__tasklet_disable_sync_once(&execlists->tasklet);
+	GEM_BUG_ON(!reset_in_progress(execlists));
+
+	/*
+	 * We stop engines, otherwise we might get failed reset and a
+	 * dead gpu (on elk). Also, a modern gpu such as kbl can suffer
+	 * a system hang if a batchbuffer is progressing when the reset
+	 * is issued, regardless of READY_TO_RESET ack. Thus assume it
+	 * is best to stop engines on all gens where we have a gpu reset.
+	 *
+	 * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES)
+	 *
+	 * FIXME: Wa for more modern gens needs to be validated
+	 */
+	ring_set_paused(engine, 1);
+	intel_engine_stop_cs(engine);
+
+	engine->execlists.reset_ccid = active_ccid(engine);
+}
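execlists_reset_prepare() and execlists_reset_finish() bracket the reset with a tasklet disable count, so submissions raced against the reset become no-ops until the final enable kicks processing again. A sketch of that counting discipline, with hypothetical names standing in for the kernel's tasklet API:

#include <stdbool.h>
#include <stdio.h>

struct tasklet_sketch {
	int disable_count;
	int runs;
};

static void tasklet_disable_sketch(struct tasklet_sketch *t)
{
	t->disable_count++;	/* new schedules become no-ops */
}

static bool tasklet_enable_sketch(struct tasklet_sketch *t)
{
	return --t->disable_count == 0;	/* true: safe to run again */
}

static void tasklet_schedule_sketch(struct tasklet_sketch *t)
{
	if (t->disable_count == 0)
		t->runs++;
}

int main(void)
{
	struct tasklet_sketch t = { 0 };

	tasklet_disable_sketch(&t);	/* cf. reset_prepare */
	tasklet_schedule_sketch(&t);	/* irq during reset: suppressed */

	if (tasklet_enable_sketch(&t))	/* cf. reset_finish */
		tasklet_schedule_sketch(&t);	/* kick for missed work */

	printf("runs=%d\n", t.runs);
	return t.runs == 1 ? 0 : 1;
}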
+
+static struct i915_request **
+reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+
+	mb(); /* paranoia: read the CSB pointers from after the reset */
+	clflush(execlists->csb_write);
+	mb();
+
+	inactive = process_csb(engine, inactive); /* drain preemption events */
+
+	/* Following the reset, we need to reload the CSB read/write pointers */
+	reset_csb_pointers(engine);
+
+	return inactive;
+}
+
+static void
+execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
+{
+	struct intel_context *ce;
+	struct i915_request *rq;
+	u32 head;
+
+	/*
+	 * Save the currently executing context, even if we completed
+	 * its request, it was still running at the time of the
+	 * reset and will have been clobbered.
+	 */
+	rq = active_context(engine, engine->execlists.reset_ccid);
+	if (!rq)
+		return;
+
+	ce = rq->context;
+	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
+
+	if (__i915_request_is_complete(rq)) {
+		/* Idle context; tidy up the ring so we can restart afresh */
+		head = intel_ring_wrap(ce->ring, rq->tail);
+		goto out_replay;
+	}
+
+	/* We still have requests in-flight; the engine should be active */
+	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
+	/* Context has requests still in-flight; it should not be idle! */
+	GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
+	rq = active_request(ce->timeline, rq);
+	head = intel_ring_wrap(ce->ring, rq->head);
+	GEM_BUG_ON(head == ce->ring->tail);
+
+	/*
+	 * If this request hasn't started yet, e.g. it is waiting on a
+	 * semaphore, we need to avoid skipping the request or else we
+	 * break the signaling chain. However, if the context is corrupt
+	 * the request will not restart and we will be stuck with a wedged
+	 * device. It is quite often the case that if we issue a reset
+	 * while the GPU is loading the context image, that the context
+	 * image becomes corrupt.
+	 *
+	 * Otherwise, if we have not started yet, the request should replay
+	 * perfectly and we do not need to flag the result as being erroneous.
+	 */
+	if (!__i915_request_has_started(rq))
+		goto out_replay;
+
+	/*
+	 * If the request was innocent, we leave the request in the ELSP
+	 * and will try to replay it on restarting. The context image may
+	 * have been corrupted by the reset, in which case we may have
+	 * to service a new GPU hang, but more likely we can continue on
+	 * without impact.
+	 *
+	 * If the request was guilty, we presume the context is corrupt
+	 * and have to at least restore the RING register in the context
+	 * image back to the expected values to skip over the guilty request.
+	 */
+	__i915_request_reset(rq, stalled);
+
+	/*
+	 * We want a simple context + ring to execute the breadcrumb update.
+	 * We cannot rely on the context being intact across the GPU hang,
+	 * so clear it and rebuild just what we need for the breadcrumb.
+	 * All pending requests for this context will be zapped, and any
+	 * future request will be after userspace has had the opportunity
+	 * to recreate its own state.
+ */ +out_replay: + ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n", + head, ce->ring->tail); + lrc_reset_regs(ce, engine); + ce->lrc.lrca = lrc_update_regs(ce, engine, head); +} + +static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request *post[2 * EXECLIST_MAX_PORTS]; + struct i915_request **inactive; + + rcu_read_lock(); + inactive = reset_csb(engine, post); + + execlists_reset_active(engine, true); + + inactive = cancel_port_requests(execlists, inactive); + post_process_csb(post, inactive); + rcu_read_unlock(); +} + +static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled) +{ + unsigned long flags; + + ENGINE_TRACE(engine, "\n"); + + /* Process the csb, find the guilty context and throw away */ + execlists_reset_csb(engine, stalled); + + /* Push back any incomplete requests for replay after the reset. */ + rcu_read_lock(); + spin_lock_irqsave(&engine->active.lock, flags); + __unwind_incomplete_requests(engine); + spin_unlock_irqrestore(&engine->active.lock, flags); + rcu_read_unlock(); +} + +static void nop_submission_tasklet(unsigned long data) +{ + struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; + + /* The driver is wedged; don't process any more events. */ + WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN); +} + +static void execlists_reset_cancel(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct i915_request *rq, *rn; + struct rb_node *rb; + unsigned long flags; + + ENGINE_TRACE(engine, "\n"); + + /* + * Before we call engine->cancel_requests(), we should have exclusive + * access to the submission state. This is arranged for us by the + * caller disabling the interrupt generation, the tasklet and other + * threads that may then access the same state, giving us a free hand + * to reset state. However, we still need to let lockdep be aware that + * we know this state may be accessed in hardirq context, so we + * disable the irq around this manipulation and we want to keep + * the spinlock focused on its duties and not accidentally conflate + * coverage to the submission's irq state. (Similarly, although we + * shouldn't need to disable irq around the manipulation of the + * submission's irq state, we also wish to remind ourselves that + * it is irq state.) + */ + execlists_reset_csb(engine, true); + + rcu_read_lock(); + spin_lock_irqsave(&engine->active.lock, flags); + + /* Mark all executing requests as skipped. */ + list_for_each_entry(rq, &engine->active.requests, sched.link) + i915_request_mark_eio(rq); + intel_engine_signal_breadcrumbs(engine); + + /* Flush the queued requests to the timeline list (for retiring). 
*/ + while ((rb = rb_first_cached(&execlists->queue))) { + struct i915_priolist *p = to_priolist(rb); + int i; + + priolist_for_each_request_consume(rq, rn, p, i) { + i915_request_mark_eio(rq); + __i915_request_submit(rq); + } + + rb_erase_cached(&p->node, &execlists->queue); + i915_priolist_free(p); + } + + /* On-hold requests will be flushed to timeline upon their release */ + list_for_each_entry(rq, &engine->active.hold, sched.link) + i915_request_mark_eio(rq); + + /* Cancel all attached virtual engines */ + while ((rb = rb_first_cached(&execlists->virtual))) { + struct virtual_engine *ve = + rb_entry(rb, typeof(*ve), nodes[engine->id].rb); + + rb_erase_cached(rb, &execlists->virtual); + RB_CLEAR_NODE(rb); + + spin_lock(&ve->base.active.lock); + rq = fetch_and_zero(&ve->request); + if (rq) { + i915_request_mark_eio(rq); + + rq->engine = engine; + __i915_request_submit(rq); + i915_request_put(rq); + + ve->base.execlists.queue_priority_hint = INT_MIN; + } + spin_unlock(&ve->base.active.lock); + } + + /* Remaining _unready_ requests will be nop'ed when submitted */ + + execlists->queue_priority_hint = INT_MIN; + execlists->queue = RB_ROOT_CACHED; + + GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); + execlists->tasklet.func = nop_submission_tasklet; + + spin_unlock_irqrestore(&engine->active.lock, flags); + rcu_read_unlock(); +} + +static void execlists_reset_finish(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + + /* + * After a GPU reset, we may have requests to replay. Do so now while + * we still have the forcewake to be sure that the GPU is not allowed + * to sleep before we restart and reload a context. + * + * If the GPU reset fails, the engine may still be alive with requests + * inflight. We expect those to complete, or for the device to be + * reset as the next level of recovery, and as a final resort we + * will declare the device wedged. + */ + GEM_BUG_ON(!reset_in_progress(execlists)); + + /* And kick in case we missed a new request submission. 
 */
+	if (__tasklet_enable(&execlists->tasklet))
+		__execlists_kick(execlists);
+
+	ENGINE_TRACE(engine, "depth->%d\n",
+		     atomic_read(&execlists->tasklet.count));
+}
+
+static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
+{
+	ENGINE_WRITE(engine, RING_IMR,
+		     ~(engine->irq_enable_mask | engine->irq_keep_mask));
+	ENGINE_POSTING_READ(engine, RING_IMR);
+}
+
+static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
+{
+	ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask);
+}
+
+static void execlists_park(struct intel_engine_cs *engine)
+{
+	cancel_timer(&engine->execlists.timer);
+	cancel_timer(&engine->execlists.preempt);
+}
+
+static bool can_preempt(struct intel_engine_cs *engine)
+{
+	if (INTEL_GEN(engine->i915) > 8)
+		return true;
+
+	/* GPGPU on bdw requires extra w/a; not implemented */
+	return engine->class != RENDER_CLASS;
+}
+
+static void execlists_set_default_submission(struct intel_engine_cs *engine)
+{
+	engine->submit_request = execlists_submit_request;
+	engine->schedule = i915_schedule;
+	engine->execlists.tasklet.func = execlists_submission_tasklet;
+
+	engine->reset.prepare = execlists_reset_prepare;
+	engine->reset.rewind = execlists_reset_rewind;
+	engine->reset.cancel = execlists_reset_cancel;
+	engine->reset.finish = execlists_reset_finish;
+
+	engine->park = execlists_park;
+	engine->unpark = NULL;
+
+	engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+	if (!intel_vgpu_active(engine->i915)) {
+		engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+		if (can_preempt(engine)) {
+			engine->flags |= I915_ENGINE_HAS_PREEMPTION;
+			if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+				engine->flags |= I915_ENGINE_HAS_TIMESLICES;
+		}
+	}
+
+	if (intel_engine_has_preemption(engine))
+		engine->emit_bb_start = gen8_emit_bb_start;
+	else
+		engine->emit_bb_start = gen8_emit_bb_start_noarb;
+}
+
+static void execlists_shutdown(struct intel_engine_cs *engine)
+{
+	/* Synchronise with residual timers and any softirq they raise */
+	del_timer_sync(&engine->execlists.timer);
+	del_timer_sync(&engine->execlists.preempt);
+	tasklet_kill(&engine->execlists.tasklet);
+}
+
+static void execlists_release(struct intel_engine_cs *engine)
+{
+	engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
+
+	execlists_shutdown(engine);
+
+	intel_engine_cleanup_common(engine);
+	lrc_fini_wa_ctx(engine);
+}
+
+static void
+logical_ring_default_vfuncs(struct intel_engine_cs *engine)
+{
+	/* Default vfuncs which can be overridden by each engine. */
+
+	engine->resume = execlists_resume;
+
+	engine->cops = &execlists_context_ops;
+	engine->request_alloc = execlists_request_alloc;
+
+	engine->emit_flush = gen8_emit_flush_xcs;
+	engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb;
+	engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs;
+	if (INTEL_GEN(engine->i915) >= 12) {
+		engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs;
+		engine->emit_flush = gen12_emit_flush_xcs;
+	}
+	engine->set_default_submission = execlists_set_default_submission;
+
+	if (INTEL_GEN(engine->i915) < 11) {
+		engine->irq_enable = gen8_logical_ring_enable_irq;
+		engine->irq_disable = gen8_logical_ring_disable_irq;
+	} else {
+		/*
+		 * TODO: On Gen11 interrupt masks need to be clear
+		 * to allow C6 entry. Keep interrupts enabled and
+		 * take the hit of generating extra interrupts
+		 * until a more refined solution exists.
+ */ + } +} + +static inline void +logical_ring_default_irqs(struct intel_engine_cs *engine) +{ + unsigned int shift = 0; + + if (INTEL_GEN(engine->i915) < 11) { + const u8 irq_shifts[] = { + [RCS0] = GEN8_RCS_IRQ_SHIFT, + [BCS0] = GEN8_BCS_IRQ_SHIFT, + [VCS0] = GEN8_VCS0_IRQ_SHIFT, + [VCS1] = GEN8_VCS1_IRQ_SHIFT, + [VECS0] = GEN8_VECS_IRQ_SHIFT, + }; + + shift = irq_shifts[engine->id]; + } + + engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; + engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; + engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift; + engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift; +} + +static void rcs_submission_override(struct intel_engine_cs *engine) +{ + switch (INTEL_GEN(engine->i915)) { + case 12: + engine->emit_flush = gen12_emit_flush_rcs; + engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; + break; + case 11: + engine->emit_flush = gen11_emit_flush_rcs; + engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs; + break; + default: + engine->emit_flush = gen8_emit_flush_rcs; + engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; + break; + } +} + +int intel_execlists_submission_setup(struct intel_engine_cs *engine) +{ + struct intel_engine_execlists * const execlists = &engine->execlists; + struct drm_i915_private *i915 = engine->i915; + struct intel_uncore *uncore = engine->uncore; + u32 base = engine->mmio_base; + + tasklet_init(&engine->execlists.tasklet, + execlists_submission_tasklet, (unsigned long)engine); + timer_setup(&engine->execlists.timer, execlists_timeslice, 0); + timer_setup(&engine->execlists.preempt, execlists_preempt, 0); + + logical_ring_default_vfuncs(engine); + logical_ring_default_irqs(engine); + + if (engine->class == RENDER_CLASS) + rcs_submission_override(engine); + + lrc_init_wa_ctx(engine); + + if (HAS_LOGICAL_RING_ELSQ(i915)) { + execlists->submit_reg = uncore->regs + + i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base)); + execlists->ctrl_reg = uncore->regs + + i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base)); + } else { + execlists->submit_reg = uncore->regs + + i915_mmio_reg_offset(RING_ELSP(base)); + } + + execlists->csb_status = + (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; + + execlists->csb_write = + &engine->status_page.addr[intel_hws_csb_write_index(i915)]; + + if (INTEL_GEN(i915) < 11) + execlists->csb_size = GEN8_CSB_ENTRIES; + else + execlists->csb_size = GEN11_CSB_ENTRIES; + + engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0); + if (INTEL_GEN(engine->i915) >= 11) { + execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32); + execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32); + } + + /* Finally, take ownership and responsibility for cleanup! */ + engine->sanitize = execlists_sanitize; + engine->release = execlists_release; + + return 0; +} + +static struct list_head *virtual_queue(struct virtual_engine *ve) +{ + return &ve->base.execlists.default_priolist.requests[0]; +} + +static void rcu_virtual_context_destroy(struct work_struct *wrk) +{ + struct virtual_engine *ve = + container_of(wrk, typeof(*ve), rcu.work); + unsigned int n; + + GEM_BUG_ON(ve->context.inflight); + + /* Preempt-to-busy may leave a stale request behind. 
 */
+	if (unlikely(ve->request)) {
+		struct i915_request *old;
+
+		spin_lock_irq(&ve->base.active.lock);
+
+		old = fetch_and_zero(&ve->request);
+		if (old) {
+			GEM_BUG_ON(!i915_request_completed(old));
+			__i915_request_submit(old);
+			i915_request_put(old);
+		}
+
+		spin_unlock_irq(&ve->base.active.lock);
+	}
+
+	/*
+	 * Flush the tasklet in case it is still running on another core.
+	 *
+	 * This needs to be done before we remove ourselves from the siblings'
+	 * rbtrees as in the case it is running in parallel, it may reinsert
+	 * the rb_node into a sibling.
+	 */
+	tasklet_kill(&ve->base.execlists.tasklet);
+
+	/* Decouple ourselves from the siblings, no more access allowed. */
+	for (n = 0; n < ve->num_siblings; n++) {
+		struct intel_engine_cs *sibling = ve->siblings[n];
+		struct rb_node *node = &ve->nodes[sibling->id].rb;
+
+		if (RB_EMPTY_NODE(node))
+			continue;
+
+		spin_lock_irq(&sibling->active.lock);
+
+		/* Detachment is lazily performed in the execlists tasklet */
+		if (!RB_EMPTY_NODE(node))
+			rb_erase_cached(node, &sibling->execlists.virtual);
+
+		spin_unlock_irq(&sibling->active.lock);
+	}
+	GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+
+	lrc_fini(&ve->context);
+	intel_context_fini(&ve->context);
+
+	intel_breadcrumbs_free(ve->base.breadcrumbs);
+	intel_engine_free_request_pool(&ve->base);
+
+	kfree(ve->bonds);
+	kfree(ve);
+}
+
+static void virtual_context_destroy(struct kref *kref)
+{
+	struct virtual_engine *ve =
+		container_of(kref, typeof(*ve), context.ref);
+
+	GEM_BUG_ON(!list_empty(&ve->context.signals));
+
+	/*
+	 * When destroying the virtual engine, we have to be aware that
+	 * it may still be in use from a hardirq/softirq context causing
+	 * the resubmission of a completed request (background completion
+	 * due to preempt-to-busy). Before we can free the engine, we need
+	 * to flush the submission code and tasklets that are still potentially
+	 * accessing the engine. Flushing the tasklets requires process context,
+	 * and since we can guard the resubmit onto the engine with an RCU read
+	 * lock, we can delegate the free of the engine to an RCU worker.
+	 */
+	INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy);
+	queue_rcu_work(system_wq, &ve->rcu);
+}
+
+static void virtual_engine_initial_hint(struct virtual_engine *ve)
+{
+	int swp;
+
+	/*
+	 * Pick a random sibling on starting to help spread the load around.
+	 *
+	 * New contexts are typically created with exactly the same order
+	 * of siblings, and often started in batches. Due to the way we iterate
+	 * the array of siblings when submitting requests, sibling[0] is
+	 * prioritised for dequeuing. If we make sure that sibling[0] is fairly
+	 * randomised across the system, we also help spread the load by the
+	 * first engine we inspect being different each time.
+	 *
+	 * NB This does not force us to execute on this engine, it will just
+	 * typically be the first we inspect for submission.
+ */ + swp = prandom_u32_max(ve->num_siblings); + if (swp) + swap(ve->siblings[swp], ve->siblings[0]); +} + +static int virtual_context_alloc(struct intel_context *ce) +{ + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); + + return lrc_alloc(ce, ve->siblings[0]); +} + +static int virtual_context_pre_pin(struct intel_context *ce, + struct i915_gem_ww_ctx *ww, + void **vaddr) +{ + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); + + /* Note: we must use a real engine class for setting up reg state */ + return lrc_pre_pin(ce, ve->siblings[0], ww, vaddr); +} + +static int virtual_context_pin(struct intel_context *ce, void *vaddr) +{ + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); + + return lrc_pin(ce, ve->siblings[0], vaddr); +} + +static void virtual_context_enter(struct intel_context *ce) +{ + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); + unsigned int n; + + for (n = 0; n < ve->num_siblings; n++) + intel_engine_pm_get(ve->siblings[n]); + + intel_timeline_enter(ce->timeline); +} + +static void virtual_context_exit(struct intel_context *ce) +{ + struct virtual_engine *ve = container_of(ce, typeof(*ve), context); + unsigned int n; + + intel_timeline_exit(ce->timeline); + + for (n = 0; n < ve->num_siblings; n++) + intel_engine_pm_put(ve->siblings[n]); +} + +static const struct intel_context_ops virtual_context_ops = { + .flags = COPS_HAS_INFLIGHT, + + .alloc = virtual_context_alloc, + + .pre_pin = virtual_context_pre_pin, + .pin = virtual_context_pin, + .unpin = lrc_unpin, + .post_unpin = lrc_post_unpin, + + .enter = virtual_context_enter, + .exit = virtual_context_exit, + + .destroy = virtual_context_destroy, +}; + +static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve) +{ + struct i915_request *rq; + intel_engine_mask_t mask; + + rq = READ_ONCE(ve->request); + if (!rq) + return 0; + + /* The rq is ready for submission; rq->execution_mask is now stable. */ + mask = rq->execution_mask; + if (unlikely(!mask)) { + /* Invalid selection, submit to a random engine in error */ + i915_request_set_error_once(rq, -ENODEV); + mask = ve->siblings[0]->mask; + } + + ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n", + rq->fence.context, rq->fence.seqno, + mask, ve->base.execlists.queue_priority_hint); + + return mask; +} + +static void virtual_submission_tasklet(unsigned long data) +{ + struct virtual_engine * const ve = (struct virtual_engine *)data; + const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint); + intel_engine_mask_t mask; + unsigned int n; + + rcu_read_lock(); + mask = virtual_submission_mask(ve); + rcu_read_unlock(); + if (unlikely(!mask)) + return; + + for (n = 0; n < ve->num_siblings; n++) { + struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]); + struct ve_node * const node = &ve->nodes[sibling->id]; + struct rb_node **parent, *rb; + bool first; + + if (!READ_ONCE(ve->request)) + break; /* already handled by a sibling's tasklet */ + + spin_lock_irq(&sibling->active.lock); + + if (unlikely(!(mask & sibling->mask))) { + if (!RB_EMPTY_NODE(&node->rb)) { + rb_erase_cached(&node->rb, + &sibling->execlists.virtual); + RB_CLEAR_NODE(&node->rb); + } + + goto unlock_engine; + } + + if (unlikely(!RB_EMPTY_NODE(&node->rb))) { + /* + * Cheat and avoid rebalancing the tree if we can + * reuse this node in situ. 
+ */ + first = rb_first_cached(&sibling->execlists.virtual) == + &node->rb; + if (prio == node->prio || (prio > node->prio && first)) + goto submit_engine; + + rb_erase_cached(&node->rb, &sibling->execlists.virtual); + } + + rb = NULL; + first = true; + parent = &sibling->execlists.virtual.rb_root.rb_node; + while (*parent) { + struct ve_node *other; + + rb = *parent; + other = rb_entry(rb, typeof(*other), rb); + if (prio > other->prio) { + parent = &rb->rb_left; + } else { + parent = &rb->rb_right; + first = false; + } + } + + rb_link_node(&node->rb, rb, parent); + rb_insert_color_cached(&node->rb, + &sibling->execlists.virtual, + first); + +submit_engine: + GEM_BUG_ON(RB_EMPTY_NODE(&node->rb)); + node->prio = prio; + if (first && prio > sibling->execlists.queue_priority_hint) + tasklet_hi_schedule(&sibling->execlists.tasklet); + +unlock_engine: + spin_unlock_irq(&sibling->active.lock); + + if (intel_context_inflight(&ve->context)) + break; + } +} + +static void virtual_submit_request(struct i915_request *rq) +{ + struct virtual_engine *ve = to_virtual_engine(rq->engine); + unsigned long flags; + + ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n", + rq->fence.context, + rq->fence.seqno); + + GEM_BUG_ON(ve->base.submit_request != virtual_submit_request); + + spin_lock_irqsave(&ve->base.active.lock, flags); + + /* By the time we resubmit a request, it may be completed */ + if (__i915_request_is_complete(rq)) { + __i915_request_submit(rq); + goto unlock; + } + + if (ve->request) { /* background completion from preempt-to-busy */ + GEM_BUG_ON(!i915_request_completed(ve->request)); + __i915_request_submit(ve->request); + i915_request_put(ve->request); + } + + ve->base.execlists.queue_priority_hint = rq_prio(rq); + ve->request = i915_request_get(rq); + + GEM_BUG_ON(!list_empty(virtual_queue(ve))); + list_move_tail(&rq->sched.link, virtual_queue(ve)); + + tasklet_hi_schedule(&ve->base.execlists.tasklet); + +unlock: + spin_unlock_irqrestore(&ve->base.active.lock, flags); +} + +static struct ve_bond * +virtual_find_bond(struct virtual_engine *ve, + const struct intel_engine_cs *master) +{ + int i; + + for (i = 0; i < ve->num_bonds; i++) { + if (ve->bonds[i].master == master) + return &ve->bonds[i]; + } + + return NULL; +} + +static void +virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal) +{ + struct virtual_engine *ve = to_virtual_engine(rq->engine); + intel_engine_mask_t allowed, exec; + struct ve_bond *bond; + + allowed = ~to_request(signal)->engine->mask; + + bond = virtual_find_bond(ve, to_request(signal)->engine); + if (bond) + allowed &= bond->sibling_mask; + + /* Restrict the bonded request to run on only the available engines */ + exec = READ_ONCE(rq->execution_mask); + while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed)) + ; + + /* Prevent the master from being re-run on the bonded engines */ + to_request(signal)->execution_mask &= ~allowed; +} + +struct intel_context * +intel_execlists_create_virtual(struct intel_engine_cs **siblings, + unsigned int count) +{ + struct virtual_engine *ve; + unsigned int n; + int err; + + if (count == 0) + return ERR_PTR(-EINVAL); + + if (count == 1) + return intel_context_create(siblings[0]); + + ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL); + if (!ve) + return ERR_PTR(-ENOMEM); + + ve->base.i915 = siblings[0]->i915; + ve->base.gt = siblings[0]->gt; + ve->base.uncore = siblings[0]->uncore; + ve->base.id = -1; + + ve->base.class = OTHER_CLASS; + ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; + ve->base.instance = 
I915_ENGINE_CLASS_INVALID_VIRTUAL; + ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; + + /* + * The decision on whether to submit a request using semaphores + * depends on the saturated state of the engine. We only compute + * this during HW submission of the request, and we need for this + * state to be globally applied to all requests being submitted + * to this engine. Virtual engines encompass more than one physical + * engine and so we cannot accurately tell in advance if one of those + * engines is already saturated and so cannot afford to use a semaphore + * and be pessimized in priority for doing so -- if we are the only + * context using semaphores after all other clients have stopped, we + * will be starved on the saturated system. Such a global switch for + * semaphores is less than ideal, but alas is the current compromise. + */ + ve->base.saturated = ALL_ENGINES; + + snprintf(ve->base.name, sizeof(ve->base.name), "virtual"); + + intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); + intel_engine_init_execlists(&ve->base); + + ve->base.cops = &virtual_context_ops; + ve->base.request_alloc = execlists_request_alloc; + + ve->base.schedule = i915_schedule; + ve->base.submit_request = virtual_submit_request; + ve->base.bond_execute = virtual_bond_execute; + + INIT_LIST_HEAD(virtual_queue(ve)); + ve->base.execlists.queue_priority_hint = INT_MIN; + tasklet_init(&ve->base.execlists.tasklet, + virtual_submission_tasklet, + (unsigned long)ve); + + intel_context_init(&ve->context, &ve->base); + + ve->base.breadcrumbs = intel_breadcrumbs_create(NULL); + if (!ve->base.breadcrumbs) { + err = -ENOMEM; + goto err_put; + } + + for (n = 0; n < count; n++) { + struct intel_engine_cs *sibling = siblings[n]; + + GEM_BUG_ON(!is_power_of_2(sibling->mask)); + if (sibling->mask & ve->base.mask) { + DRM_DEBUG("duplicate %s entry in load balancer\n", + sibling->name); + err = -EINVAL; + goto err_put; + } + + /* + * The virtual engine implementation is tightly coupled to + * the execlists backend -- we push out request directly + * into a tree inside each physical engine. We could support + * layering if we handle cloning of the requests and + * submitting a copy into each backend. + */ + if (sibling->execlists.tasklet.func != + execlists_submission_tasklet) { + err = -ENODEV; + goto err_put; + } + + GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb)); + RB_CLEAR_NODE(&ve->nodes[sibling->id].rb); + + ve->siblings[ve->num_siblings++] = sibling; + ve->base.mask |= sibling->mask; + + /* + * All physical engines must be compatible for their emission + * functions (as we build the instructions during request + * construction and do not alter them before submission + * on the physical engine). We use the engine class as a guide + * here, although that could be refined. 
+ */ + if (ve->base.class != OTHER_CLASS) { + if (ve->base.class != sibling->class) { + DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n", + sibling->class, ve->base.class); + err = -EINVAL; + goto err_put; + } + continue; + } + + ve->base.class = sibling->class; + ve->base.uabi_class = sibling->uabi_class; + snprintf(ve->base.name, sizeof(ve->base.name), + "v%dx%d", ve->base.class, count); + ve->base.context_size = sibling->context_size; + + ve->base.emit_bb_start = sibling->emit_bb_start; + ve->base.emit_flush = sibling->emit_flush; + ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb; + ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb; + ve->base.emit_fini_breadcrumb_dw = + sibling->emit_fini_breadcrumb_dw; + + ve->base.flags = sibling->flags; + } + + ve->base.flags |= I915_ENGINE_IS_VIRTUAL; + + virtual_engine_initial_hint(ve); + return &ve->context; + +err_put: + intel_context_put(&ve->context); + return ERR_PTR(err); +} + +struct intel_context * +intel_execlists_clone_virtual(struct intel_engine_cs *src) +{ + struct virtual_engine *se = to_virtual_engine(src); + struct intel_context *dst; + + dst = intel_execlists_create_virtual(se->siblings, + se->num_siblings); + if (IS_ERR(dst)) + return dst; + + if (se->num_bonds) { + struct virtual_engine *de = to_virtual_engine(dst->engine); + + de->bonds = kmemdup(se->bonds, + sizeof(*se->bonds) * se->num_bonds, + GFP_KERNEL); + if (!de->bonds) { + intel_context_put(dst); + return ERR_PTR(-ENOMEM); + } + + de->num_bonds = se->num_bonds; + } + + return dst; +} + +int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, + const struct intel_engine_cs *master, + const struct intel_engine_cs *sibling) +{ + struct virtual_engine *ve = to_virtual_engine(engine); + struct ve_bond *bond; + int n; + + /* Sanity check the sibling is part of the virtual engine */ + for (n = 0; n < ve->num_siblings; n++) + if (sibling == ve->siblings[n]) + break; + if (n == ve->num_siblings) + return -EINVAL; + + bond = virtual_find_bond(ve, master); + if (bond) { + bond->sibling_mask |= sibling->mask; + return 0; + } + + bond = krealloc(ve->bonds, + sizeof(*bond) * (ve->num_bonds + 1), + GFP_KERNEL); + if (!bond) + return -ENOMEM; + + bond[ve->num_bonds].master = master; + bond[ve->num_bonds].sibling_mask = sibling->mask; + + ve->bonds = bond; + ve->num_bonds++; + + return 0; +} + +void intel_execlists_show_requests(struct intel_engine_cs *engine, + struct drm_printer *m, + void (*show_request)(struct drm_printer *m, + const struct i915_request *rq, + const char *prefix, + int indent), + unsigned int max) +{ + const struct intel_engine_execlists *execlists = &engine->execlists; + struct i915_request *rq, *last; + unsigned long flags; + unsigned int count; + struct rb_node *rb; + + spin_lock_irqsave(&engine->active.lock, flags); + + last = NULL; + count = 0; + list_for_each_entry(rq, &engine->active.requests, sched.link) { + if (count++ < max - 1) + show_request(m, rq, "\t\t", 0); + else + last = rq; + } + if (last) { + if (count > max) { + drm_printf(m, + "\t\t...skipping %d executing requests...\n", + count - max); + } + show_request(m, last, "\t\t", 0); + } + + if (execlists->queue_priority_hint != INT_MIN) + drm_printf(m, "\t\tQueue priority hint: %d\n", + READ_ONCE(execlists->queue_priority_hint)); + + last = NULL; + count = 0; + for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) { + struct i915_priolist *p = rb_entry(rb, typeof(*p), node); + int i; + + priolist_for_each_request(rq, p, i) { + if 
(count++ < max - 1) + show_request(m, rq, "\t\t", 0); + else + last = rq; + } + } + if (last) { + if (count > max) { + drm_printf(m, + "\t\t...skipping %d queued requests...\n", + count - max); + } + show_request(m, last, "\t\t", 0); + } + + last = NULL; + count = 0; + for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) { + struct virtual_engine *ve = + rb_entry(rb, typeof(*ve), nodes[engine->id].rb); + struct i915_request *rq = READ_ONCE(ve->request); + + if (rq) { + if (count++ < max - 1) + show_request(m, rq, "\t\t", 0); + else + last = rq; + } + } + if (last) { + if (count > max) { + drm_printf(m, + "\t\t...skipping %d virtual requests...\n", + count - max); + } + show_request(m, last, "\t\t", 0); + } + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +bool +intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine) +{ + return engine->set_default_submission == + execlists_set_default_submission; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_execlists.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.h b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h new file mode 100644 index 000000000000..a8fd7adefd82 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2014 Intel Corporation + */ + +#ifndef __INTEL_EXECLISTS_SUBMISSION_H__ +#define __INTEL_EXECLISTS_SUBMISSION_H__ + +#include <linux/types.h> + +struct drm_printer; + +struct i915_request; +struct intel_context; +struct intel_engine_cs; + +enum { + INTEL_CONTEXT_SCHEDULE_IN = 0, + INTEL_CONTEXT_SCHEDULE_OUT, + INTEL_CONTEXT_SCHEDULE_PREEMPTED, +}; + +int intel_execlists_submission_setup(struct intel_engine_cs *engine); + +void intel_execlists_show_requests(struct intel_engine_cs *engine, + struct drm_printer *m, + void (*show_request)(struct drm_printer *m, + const struct i915_request *rq, + const char *prefix, + int indent), + unsigned int max); + +struct intel_context * +intel_execlists_create_virtual(struct intel_engine_cs **siblings, + unsigned int count); + +struct intel_context * +intel_execlists_clone_virtual(struct intel_engine_cs *src); + +int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, + const struct intel_engine_cs *master, + const struct intel_engine_cs *sibling); + +bool +intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine); + +#endif /* __INTEL_EXECLISTS_SUBMISSION_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index cf94525be2c1..eece0844fbe9 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -101,7 +101,16 @@ static bool needs_idle_maps(struct drm_i915_private *i915) * Query intel_iommu to see if we need the workaround. Presumably that * was loaded first. 
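The request dumpers in intel_execlists_show_requests() above all use the same capped-printing idiom: emit the first max - 1 entries, remember only the most recent one, then report how many were skipped before printing the tail. A minimal stand-alone sketch of that idiom (illustrative types and names, not the driver's):

#include <stdio.h>

static void show_capped(const int *items, int n, unsigned int max)
{
	unsigned int count = 0;
	int last = -1;
	int i;

	for (i = 0; i < n; i++) {
		if (count++ < max - 1)
			printf("\t\titem %d\n", items[i]);
		else
			last = items[i]; /* remember only the tail entry */
	}
	if (last != -1) {
		if (count > max)
			printf("\t\t...skipping %u items...\n", count - max);
		printf("\t\titem %d\n", last);
	}
}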
*/ - return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active(); + if (!intel_vtd_active()) + return false; + + if (IS_GEN(i915, 5) && IS_MOBILE(i915)) + return true; + + if (IS_GEN(i915, 12)) + return true; /* XXX DMAR fault reason 7 */ + + return false; } void i915_ggtt_suspend(struct i915_ggtt *ggtt) @@ -1050,7 +1059,12 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.alloc_pt_dma = alloc_pt_dma; - ggtt->do_idle_maps = needs_idle_maps(i915); + if (needs_idle_maps(i915)) { + drm_notice(&i915->drm, + "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n"); + ggtt->do_idle_maps = true; + } + ggtt->vm.insert_page = i915_ggtt_insert_page; ggtt->vm.insert_entries = i915_ggtt_insert_entries; ggtt->vm.clear_range = i915_ggtt_clear_range; diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index 7fb36b12fe7a..a357bb431815 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -320,13 +320,31 @@ void i915_vma_revoke_fence(struct i915_vma *vma) fence_write(fence); } +static bool fence_is_active(const struct i915_fence_reg *fence) +{ + return fence->vma && i915_vma_is_active(fence->vma); +} + static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt) { - struct i915_fence_reg *fence; + struct i915_fence_reg *active = NULL; + struct i915_fence_reg *fence, *fn; - list_for_each_entry(fence, &ggtt->fence_list, link) { + list_for_each_entry_safe(fence, fn, &ggtt->fence_list, link) { GEM_BUG_ON(fence->vma && fence->vma->fence != fence); + if (fence == active) /* now seen this fence twice */ + active = ERR_PTR(-EAGAIN); + + /* Prefer idle fences so we do not have to wait on the GPU */ + if (active != ERR_PTR(-EAGAIN) && fence_is_active(fence)) { + if (!active) + active = fence; + + list_move_tail(&fence->link, &ggtt->fence_list); + continue; + } + if (atomic_read(&fence->pin_count)) continue; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 44f1d51e5ae5..d8e1ab412634 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -46,6 +46,8 @@ void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt) int intel_gt_init_mmio(struct intel_gt *gt) { + intel_gt_init_clock_frequency(gt); + intel_uc_init_mmio(>->uc); intel_sseu_info_init(gt); @@ -546,8 +548,6 @@ int intel_gt_init(struct intel_gt *gt) */ intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); - intel_gt_init_clock_frequency(gt); - err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? 
SZ_256K : SZ_4K); if (err) goto out_fw; diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c index 999079686846..a4242ca8dcd7 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c @@ -7,34 +7,146 @@ #include "intel_gt.h" #include "intel_gt_clock_utils.h" -#define MHZ_12 12000000 /* 12MHz (24MHz/2), 83.333ns */ -#define MHZ_12_5 12500000 /* 12.5MHz (25MHz/2), 80ns */ -#define MHZ_19_2 19200000 /* 19.2MHz, 52.083ns */ +static u32 read_reference_ts_freq(struct intel_uncore *uncore) +{ + u32 ts_override = intel_uncore_read(uncore, GEN9_TIMESTAMP_OVERRIDE); + u32 base_freq, frac_freq; + + base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >> + GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1; + base_freq *= 1000000; -static u32 read_clock_frequency(const struct intel_gt *gt) + frac_freq = ((ts_override & + GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >> + GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT); + frac_freq = 1000000 / (frac_freq + 1); + + return base_freq + frac_freq; +} + +static u32 gen10_get_crystal_clock_freq(struct intel_uncore *uncore, + u32 rpm_config_reg) { - if (INTEL_GEN(gt->i915) >= 11) { - u32 config; + u32 f19_2_mhz = 19200000; + u32 f24_mhz = 24000000; + u32 crystal_clock = + (rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >> + GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; - config = intel_uncore_read(gt->uncore, RPM_CONFIG0); - config &= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK; - config >>= GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; + switch (crystal_clock) { + case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: + return f19_2_mhz; + case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: + return f24_mhz; + default: + MISSING_CASE(crystal_clock); + return 0; + } +} - switch (config) { - case 0: return MHZ_12; - case 1: - case 2: return MHZ_19_2; - default: - case 3: return MHZ_12_5; +static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore, + u32 rpm_config_reg) +{ + u32 f19_2_mhz = 19200000; + u32 f24_mhz = 24000000; + u32 f25_mhz = 25000000; + u32 f38_4_mhz = 38400000; + u32 crystal_clock = + (rpm_config_reg & GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >> + GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; + + switch (crystal_clock) { + case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: + return f24_mhz; + case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: + return f19_2_mhz; + case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ: + return f38_4_mhz; + case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ: + return f25_mhz; + default: + MISSING_CASE(crystal_clock); + return 0; + } +} + +static u32 read_clock_frequency(struct intel_uncore *uncore) +{ + u32 f12_5_mhz = 12500000; + u32 f19_2_mhz = 19200000; + u32 f24_mhz = 24000000; + + if (INTEL_GEN(uncore->i915) <= 4) { + /* + * PRMs say: + * + * "The value in this register increments once every 16 + * hclks." (through the “Clocking Configuration” + * (“CLKCFG”) MCHBAR register) + */ + return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16; + } else if (INTEL_GEN(uncore->i915) <= 8) { + /* + * PRMs say: + * + * "The PCU TSC counts 10ns increments; this timestamp + * reflects bits 38:3 of the TSC (i.e. 80ns granularity, + * rolling over every 1.5 hours). 
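read_reference_ts_freq() above assembles the timestamp frequency from two register fields: an integer divider in MHz and a fractional denominator. A self-contained sketch of that arithmetic (the field positions below are placeholders, not the real GEN9_TIMESTAMP_OVERRIDE layout):

#include <stdint.h>

#define DIVIDER_MASK 0x0000000fu /* placeholder: integer MHz, minus one */
#define DENOM_MASK   0x000000f0u /* placeholder: fractional denominator, minus one */
#define DENOM_SHIFT  4

static uint32_t ts_freq_from_override(uint32_t ts_override)
{
	uint32_t base = ((ts_override & DIVIDER_MASK) + 1) * 1000000;
	uint32_t denom = ((ts_override & DENOM_MASK) >> DENOM_SHIFT) + 1;

	/* e.g. divider field 18, denominator field 1: 19 MHz + 0.5 MHz */
	return base + 1000000 / denom;
}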
+ */ + return f12_5_mhz; + } else if (INTEL_GEN(uncore->i915) <= 9) { + u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); + u32 freq = 0; + + if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { + freq = read_reference_ts_freq(uncore); + } else { + freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz; + + /* + * Now figure out how the command stream's timestamp + * register increments from this frequency (it might + * increment only every few clock cycles). + */ + freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >> + CTC_SHIFT_PARAMETER_SHIFT); + } + + return freq; + } else if (INTEL_GEN(uncore->i915) <= 12) { + u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); + u32 freq = 0; + + /* + * First figure out the reference frequency. There are 2 ways + * we can compute the frequency, either through the + * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE + * tells us which one we should use. + */ + if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { + freq = read_reference_ts_freq(uncore); + } else { + u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0); + + if (INTEL_GEN(uncore->i915) <= 10) + freq = gen10_get_crystal_clock_freq(uncore, c0); + else + freq = gen11_get_crystal_clock_freq(uncore, c0); + + /* + * Now figure out how the command stream's timestamp + * register increments from this frequency (it might + * increment only every few clock cycles). + */ + freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >> + GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT); } - } else if (INTEL_GEN(gt->i915) >= 9) { - if (IS_GEN9_LP(gt->i915)) - return MHZ_19_2; - else - return MHZ_12; - } else { - return MHZ_12_5; + + return freq; } + + MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n"); + return 0; } void intel_gt_init_clock_frequency(struct intel_gt *gt) @@ -43,20 +155,27 @@ void intel_gt_init_clock_frequency(struct intel_gt *gt) * Note that on gen11+, the clock frequency may be reconfigured. * We do not, and we assume nobody else does.
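In both CTC_MODE branches above, the final shift converts the crystal frequency into the rate at which the command streamer's timestamp actually ticks: the register encodes a power-of-two divider as 3 - shift. A small worked sketch, assuming a 38.4 MHz crystal:

#include <stdint.h>

static uint32_t cs_timestamp_hz(uint32_t crystal_hz, uint32_t shift_field)
{
	/* shift_field 0 divides by 8, 1 by 4, 2 by 2, 3 by 1 */
	return crystal_hz >> (3 - shift_field);
}

/* cs_timestamp_hz(38400000, 1) == 9600000: one tick every ~104 ns */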
*/ - gt->clock_frequency = read_clock_frequency(gt); + gt->clock_frequency = read_clock_frequency(gt->uncore); + if (gt->clock_frequency) + gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1); + GT_TRACE(gt, - "Using clock frequency: %dkHz\n", - gt->clock_frequency / 1000); + "Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n", + gt->clock_frequency / 1000, + gt->clock_period_ns, + div_u64(mul_u32_u32(gt->clock_period_ns, S32_MAX), + USEC_PER_SEC)); + } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) void intel_gt_check_clock_frequency(const struct intel_gt *gt) { - if (gt->clock_frequency != read_clock_frequency(gt)) { + if (gt->clock_frequency != read_clock_frequency(gt->uncore)) { dev_err(gt->i915->drm.dev, "GT clock frequency changed, was %uHz, now %uHz!\n", gt->clock_frequency, - read_clock_frequency(gt)); + read_clock_frequency(gt->uncore)); } } #endif @@ -66,26 +185,24 @@ static u64 div_u64_roundup(u64 nom, u32 den) return div_u64(nom + den - 1, den); } -u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count) +u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count) { - return div_u64_roundup(mul_u32_u32(count, 1000 * 1000 * 1000), - gt->clock_frequency); + return div_u64_roundup(count * NSEC_PER_SEC, gt->clock_frequency); } -u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count) +u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count) { return intel_gt_clock_interval_to_ns(gt, 16 * count); } -u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns) +u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns) { - return div_u64_roundup(mul_u32_u32(gt->clock_frequency, ns), - 1000 * 1000 * 1000); + return div_u64_roundup(gt->clock_frequency * ns, NSEC_PER_SEC); } -u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns) +u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns) { - u32 val; + u64 val; /* * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS @@ -94,9 +211,9 @@ u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns) * EI/thresholds are "bad", leading to a very sluggish or even * frozen machine. 
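Widening these helpers from u32 to u64 is not cosmetic: the intermediate product count * NSEC_PER_SEC overflows 32 bits for any count above four, and even the final nanosecond result outgrows a u32 after about 4.3 seconds of GT clock. A plain-C sketch of the round-up conversion with 64-bit intermediates (stand-ins for the kernel helpers):

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ull

static uint64_t div_round_up(uint64_t nom, uint32_t den)
{
	return (nom + den - 1) / den; /* div_u64_roundup() equivalent */
}

static uint64_t clock_interval_to_ns(uint64_t count, uint32_t clock_hz)
{
	return div_round_up(count * NSEC_PER_SEC, clock_hz);
}

/* At 19.2 MHz, 2^32 ns is reached after only ~82.5 million ticks (~4.3 s). */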
*/ - val = DIV_ROUND_UP(intel_gt_ns_to_clock_interval(gt, ns), 16); + val = div_u64_roundup(intel_gt_ns_to_clock_interval(gt, ns), 16); if (IS_GEN(gt->i915, 6)) - val = roundup(val, 25); + val = div_u64_roundup(val, 25) * 25; return val; } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h index f793c89f2cbd..8b03e97a85df 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.h @@ -18,10 +18,10 @@ void intel_gt_check_clock_frequency(const struct intel_gt *gt); static inline void intel_gt_check_clock_frequency(const struct intel_gt *gt) {} #endif -u32 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u32 count); -u32 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u32 count); +u64 intel_gt_clock_interval_to_ns(const struct intel_gt *gt, u64 count); +u64 intel_gt_pm_interval_to_ns(const struct intel_gt *gt, u64 count); -u32 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u32 ns); -u32 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u32 ns); +u64 intel_gt_ns_to_clock_interval(const struct intel_gt *gt, u64 ns); +u64 intel_gt_ns_to_pm_interval(const struct intel_gt *gt, u64 ns); #endif /* __INTEL_GT_CLOCK_UTILS_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index 257063a57101..9830342aa6f4 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -11,6 +11,7 @@ #include "intel_breadcrumbs.h" #include "intel_gt.h" #include "intel_gt_irq.h" +#include "intel_lrc_reg.h" #include "intel_uncore.h" #include "intel_rps.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index 274aa0dd7050..c94e8ac884eb 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -39,6 +39,28 @@ static void user_forcewake(struct intel_gt *gt, bool suspend) intel_gt_pm_put(gt); } +static void runtime_begin(struct intel_gt *gt) +{ + local_irq_disable(); + write_seqcount_begin(>->stats.lock); + gt->stats.start = ktime_get(); + gt->stats.active = true; + write_seqcount_end(>->stats.lock); + local_irq_enable(); +} + +static void runtime_end(struct intel_gt *gt) +{ + local_irq_disable(); + write_seqcount_begin(>->stats.lock); + gt->stats.active = false; + gt->stats.total = + ktime_add(gt->stats.total, + ktime_sub(ktime_get(), gt->stats.start)); + write_seqcount_end(>->stats.lock); + local_irq_enable(); +} + static int __gt_unpark(struct intel_wakeref *wf) { struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref); @@ -67,6 +89,7 @@ static int __gt_unpark(struct intel_wakeref *wf) i915_pmu_gt_unparked(i915); intel_gt_unpark_requests(gt); + runtime_begin(gt); return 0; } @@ -79,6 +102,7 @@ static int __gt_park(struct intel_wakeref *wf) GT_TRACE(gt, "\n"); + runtime_end(gt); intel_gt_park_requests(gt); i915_vma_parked(gt); @@ -106,6 +130,7 @@ static const struct intel_wakeref_ops wf_ops = { void intel_gt_pm_init_early(struct intel_gt *gt) { intel_wakeref_init(>->wakeref, gt->uncore->rpm, &wf_ops); + seqcount_mutex_init(>->stats.lock, >->wakeref.mutex); } void intel_gt_pm_init(struct intel_gt *gt) @@ -339,6 +364,30 @@ int intel_gt_runtime_resume(struct intel_gt *gt) return intel_uc_runtime_resume(>->uc); } +static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt) +{ + ktime_t total = gt->stats.total; + + if (gt->stats.active) + total = ktime_add(total, + ktime_sub(ktime_get(), gt->stats.start)); + + 
return total; +} + +ktime_t intel_gt_get_awake_time(const struct intel_gt *gt) +{ + unsigned int seq; + ktime_t total; + + do { + seq = read_seqcount_begin(>->stats.lock); + total = __intel_gt_get_awake_time(gt); + } while (read_seqcount_retry(>->stats.lock, seq)); + + return total; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftest_gt_pm.c" #endif diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index 60f0e2fbe55c..63846a856e7e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -58,6 +58,8 @@ int intel_gt_resume(struct intel_gt *gt); void intel_gt_runtime_suspend(struct intel_gt *gt); int intel_gt_runtime_resume(struct intel_gt *gt); +ktime_t intel_gt_get_awake_time(const struct intel_gt *gt); + static inline bool is_mock_gt(const struct intel_gt *gt) { return I915_SELFTEST_ONLY(gt->awake == -ENODEV); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c index 66fcbf9d0fdd..dc06c78c9eeb 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -135,13 +135,8 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) struct intel_gt_timelines *timelines = >->timelines; struct intel_timeline *tl, *tn; unsigned long active_count = 0; - bool interruptible; LIST_HEAD(free); - interruptible = true; - if (unlikely(timeout < 0)) - timeout = -timeout, interruptible = false; - flush_submission(gt, timeout); /* kick the ksoftirqd tasklets */ spin_lock(&timelines->lock); list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { @@ -163,7 +158,7 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) mutex_unlock(&tl->mutex); timeout = dma_fence_wait_timeout(fence, - interruptible, + true, timeout); dma_fence_put(fence); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index 6d39a4a11bf3..a83d3e18254d 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -75,6 +75,7 @@ struct intel_gt { intel_wakeref_t awake; u32 clock_frequency; + u32 clock_period_ns; struct intel_llc llc; struct intel_rc6 rc6; @@ -87,6 +88,30 @@ struct intel_gt { u32 pm_guc_events; + struct { + bool active; + + /** + * @lock: Lock protecting the below fields. + */ + seqcount_mutex_t lock; + + /** + * @total: Total time this engine was busy. + * + * Accumulated time not counting the most recent block in cases + * where engine is currently busy (active > 0). + */ + ktime_t total; + + /** + * @start: Timestamp of the last idle to active transition. + * + * Idle is defined as active == 0, active is active > 0. 
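intel_gt_get_awake_time() above is the read side of the seqcount protecting this stats block: snapshot the sequence, compute, and retry if a writer raced in. A minimal userspace analogue with C11 atomics (even sequence = stable; the kernel's seqcount also enforces memory ordering, which is elided here for brevity):

#include <stdatomic.h>
#include <stdint.h>

struct busy_stats {
	atomic_uint seq;  /* even: stable, odd: write in progress */
	uint64_t total;
	uint64_t start;
	int active;
};

static uint64_t read_awake_time(const struct busy_stats *s, uint64_t now)
{
	unsigned int seq;
	uint64_t total;

	do {
		while ((seq = atomic_load(&s->seq)) & 1)
			; /* writer active: wait for a stable snapshot */
		total = s->total;
		if (s->active)
			total += now - s->start;
	} while (atomic_load(&s->seq) != seq); /* retry if a writer slipped in */

	return total;
}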
+ */ + ktime_t start; + } stats; + struct intel_engine_cs *engine[I915_NUM_ENGINES]; struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1] [MAX_ENGINE_INSTANCE + 1]; diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c index 7bfe9072be9a..04aa6601e984 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.c +++ b/drivers/gpu/drm/i915/gt/intel_gtt.c @@ -422,6 +422,35 @@ void setup_private_pat(struct intel_uncore *uncore) bdw_setup_private_ppat(uncore); } +struct i915_vma * +__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size)); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; + } + + err = i915_vma_pin(vma, 0, 0, + i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER); + if (err) { + i915_vma_put(vma); + return ERR_PTR(err); + } + + return vma; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_gtt.c" #endif diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h index 8a33940a71f3..29c10fde8ce3 100644 --- a/drivers/gpu/drm/i915/gt/intel_gtt.h +++ b/drivers/gpu/drm/i915/gt/intel_gtt.h @@ -573,6 +573,9 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm, void i915_vm_free_pt_stash(struct i915_address_space *vm, struct i915_vm_pt_stash *stash); +struct i915_vma * +__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size); + static inline struct sgt_dma { struct scatterlist *sg; dma_addr_t dma, max; diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 8a51c1c3a091..a0fc78c89b61 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -1,598 +1,23 @@ +// SPDX-License-Identifier: MIT /* * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - * Authors: - * Ben Widawsky <[email protected]> - * Michel Thierry <[email protected]> - * Thomas Daniel <[email protected]> - * Oscar Mateo <[email protected]> - * */ -/** - * DOC: Logical Rings, Logical Ring Contexts and Execlists - * - * Motivation: - * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts". 
- * These expanded contexts enable a number of new abilities, especially - * "Execlists" (also implemented in this file). - * - * One of the main differences with the legacy HW contexts is that logical - * ring contexts incorporate many more things into the context's state, like - * PDPs or ringbuffer control registers: - * - * The reason why PDPs are included in the context is straightforward: as - * PPGTTs (per-process GTTs) are actually per-context, having the PDPs - * contained there means you don't need to do a ppgtt->switch_mm yourself, - * instead, the GPU will do it for you on the context switch. - * - * But, what about the ringbuffer control registers (head, tail, etc.)? - * Shouldn't we just need a set of those per engine command streamer? This is - * where the name "Logical Rings" starts to make sense: by virtualizing the - * rings, the engine cs shifts to a new "ring buffer" with every context - * switch. When you want to submit a workload to the GPU you: A) choose your - * context, B) find its appropriate virtualized ring, C) write commands to it - * and then, finally, D) tell the GPU to switch to that context. - * - * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch - * to a context is via a context execution list, ergo "Execlists". - * - * LRC implementation: - * Regarding the creation of contexts, we have: - * - * - One global default context. - * - One local default context for each opened fd. - * - One local extra context for each context create ioctl call. - * - * Now that ringbuffers belong per-context (and not per-engine, like before) - * and that contexts are uniquely tied to a given engine (and not reusable, - * like before) we need: - * - * - One ringbuffer per-engine inside each context. - * - One backing object per-engine inside each context. - * - * The global default context starts its life with these new objects fully - * allocated and populated. The local default context for each opened fd is - * more complex, because we don't know at creation time which engine is going - * to use them. To handle this, we have implemented a deferred creation of LR - * contexts: - * - * The local context starts its life as a hollow or blank holder, that only - * gets populated for a given engine once we receive an execbuffer. If later - * on we receive another execbuffer ioctl for the same context but a different - * engine, we allocate/populate a new ringbuffer and context backing object and - * so on. - * - * Finally, regarding local contexts created using the ioctl call: as they are - * only allowed with the render ring, we can allocate & populate them right - * away (no need to defer anything, at least for now). - * - * Execlists implementation: - * Execlists are the new method by which, on gen8+ hardware, workloads are - * submitted for execution (as opposed to the legacy, ringbuffer-based, method). - * This method works as follows: - * - * When a request is committed, its commands (the BB start and any leading or - * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer - * for the appropriate context. The tail pointer in the hardware context is not - * updated at this time, but instead, kept by the driver in the ringbuffer - * structure. A structure representing this request is added to a request queue - * for the appropriate engine: this structure contains a copy of the context's - * tail after the request was written to the ring buffer and a pointer to the - * context itself.
- * - * If the engine's request queue was empty before the request was added, the - * queue is processed immediately. Otherwise the queue will be processed during - * a context switch interrupt. In any case, elements on the queue will get sent - * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a - * globally unique 20-bit submission ID. - * - * When execution of a request completes, the GPU updates the context status - * buffer with a context complete event and generates a context switch interrupt. - * During the interrupt handling, the driver examines the events in the buffer: - * for each context complete event, if the announced ID matches that on the head - * of the request queue, then that request is retired and removed from the queue. - * - * After processing, if any requests were retired and the queue is not empty - * then a new execution list can be submitted. The two requests at the front of - * the queue are next to be submitted but since a context may not occur twice in - * an execution list, if subsequent requests have the same ID as the first then - * the two requests must be combined. This is done simply by discarding requests - * at the head of the queue until either only one request is left (in which case - * we use a NULL second context) or the first two requests have unique IDs. - * - * By always executing the first two requests in the queue the driver ensures - * that the GPU is kept as busy as possible. In the case where a single context - * completes but a second context is still executing, the request for this second - * context will be at the head of the queue when we remove the first one. This - * request will then be resubmitted along with a new request for a different context, - * which will cause the hardware to continue executing the second request and queue - * the new request (the GPU detects the condition of a context getting preempted - * with the same context and optimizes the context switch flow by not doing - * preemption, but just sampling the new tail pointer).
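The coalescing rule described above — a context may not occupy both ELSP ports, so same-ID requests at the head collapse into one submission — can be sketched as a small dequeue step (illustrative singly-linked queue, not the driver's structures):

#include <stddef.h>

struct req {
	unsigned int ctx_id;
	struct req *next;
};

static void pick_elsp_pair(struct req **queue,
			   struct req **port0, struct req **port1)
{
	struct req *rq = *queue;

	*port0 = rq; /* later same-context requests ride along via the tail */
	while (rq && rq->next && rq->next->ctx_id == rq->ctx_id)
		rq = rq->next;

	*port1 = rq ? rq->next : NULL; /* first distinct context, or NULL */
	*queue = *port1 ? (*port1)->next : NULL;
}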
- * - */ -#include <linux/interrupt.h> - +#include "gen8_engine_cs.h" #include "i915_drv.h" #include "i915_perf.h" -#include "i915_trace.h" -#include "i915_vgpu.h" -#include "intel_breadcrumbs.h" -#include "intel_context.h" -#include "intel_engine_pm.h" +#include "intel_engine.h" +#include "intel_gpu_commands.h" #include "intel_gt.h" -#include "intel_gt_pm.h" -#include "intel_gt_requests.h" +#include "intel_lrc.h" #include "intel_lrc_reg.h" -#include "intel_mocs.h" -#include "intel_reset.h" #include "intel_ring.h" -#include "intel_workarounds.h" #include "shmem_utils.h" -#define RING_EXECLIST_QFULL (1 << 0x2) -#define RING_EXECLIST1_VALID (1 << 0x3) -#define RING_EXECLIST0_VALID (1 << 0x4) -#define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE) -#define RING_EXECLIST1_ACTIVE (1 << 0x11) -#define RING_EXECLIST0_ACTIVE (1 << 0x12) - -#define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0) -#define GEN8_CTX_STATUS_PREEMPTED (1 << 1) -#define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2) -#define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3) -#define GEN8_CTX_STATUS_COMPLETE (1 << 4) -#define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15) - -#define GEN8_CTX_STATUS_COMPLETED_MASK \ - (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED) - -#define CTX_DESC_FORCE_RESTORE BIT_ULL(2) - -#define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */ -#define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */ -#define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15) -#define GEN12_IDLE_CTX_ID 0x7FF -#define GEN12_CSB_CTX_VALID(csb_dw) \ - (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID) - -/* Typical size of the average request (2 pipecontrols and a MI_BB) */ -#define EXECLISTS_REQUEST_SIZE 64 /* bytes */ - -struct virtual_engine { - struct intel_engine_cs base; - struct intel_context context; - - /* - * We allow only a single request through the virtual engine at a time - * (each request in the timeline waits for the completion fence of - * the previous before being submitted). By restricting ourselves to - * only submitting a single request, each request is placed on to a - * physical engine to maximise load spreading (by virtue of the late greedy - * scheduling -- each real engine takes the next available request - * upon idling). - */ - struct i915_request *request; - - /* - * We keep an rbtree of available virtual engines inside each physical - * engine, sorted by priority. Here we preallocate the nodes we need - * for the virtual engine, indexed by physical_engine->id. - */ - struct ve_node { - struct rb_node rb; - int prio; - } nodes[I915_NUM_ENGINES]; - - /* - * Keep track of bonded pairs -- restrictions upon our selection - * of physical engines any particular request may be submitted to. - * If we receive a submit-fence from a master engine, we will only - * use one of sibling_mask physical engines. - */ - struct ve_bond { - const struct intel_engine_cs *master; - intel_engine_mask_t sibling_mask; - } *bonds; - unsigned int num_bonds; - - /* And finally, which physical engines this virtual engine maps onto.
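The struct being removed here shows two idioms worth noting: a flexible array member sized at allocation time for the sibling list, and per-master bitmasks for bonding. A compact sketch of the bond lookup (simplified stand-in types):

#include <stddef.h>

struct bond {
	const void *master;
	unsigned long sibling_mask;
};

struct vengine {
	unsigned int num_bonds;
	struct bond *bonds;
	unsigned int num_siblings;
	void *siblings[]; /* flexible array: allocate sizeof(struct) + n pointers */
};

/* No bond recorded for this master means any sibling may run the request. */
static unsigned long allowed_siblings(const struct vengine *ve,
				      const void *master, unsigned long all)
{
	unsigned int i;

	for (i = 0; i < ve->num_bonds; i++)
		if (ve->bonds[i].master == master)
			return ve->bonds[i].sibling_mask;
	return all;
}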
*/ - unsigned int num_siblings; - struct intel_engine_cs *siblings[]; -}; - -static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine) -{ - GEM_BUG_ON(!intel_engine_is_virtual(engine)); - return container_of(engine, struct virtual_engine, base); -} - -static int __execlists_context_alloc(struct intel_context *ce, - struct intel_engine_cs *engine); - -static void execlists_init_reg_state(u32 *reg_state, - const struct intel_context *ce, - const struct intel_engine_cs *engine, - const struct intel_ring *ring, - bool close); -static void -__execlists_update_reg_state(const struct intel_context *ce, - const struct intel_engine_cs *engine, - u32 head); - -static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) -{ - if (INTEL_GEN(engine->i915) >= 12) - return 0x60; - else if (INTEL_GEN(engine->i915) >= 9) - return 0x54; - else if (engine->class == RENDER_CLASS) - return 0x58; - else - return -1; -} - -static int lrc_ring_gpr0(const struct intel_engine_cs *engine) -{ - if (INTEL_GEN(engine->i915) >= 12) - return 0x74; - else if (INTEL_GEN(engine->i915) >= 9) - return 0x68; - else if (engine->class == RENDER_CLASS) - return 0xd8; - else - return -1; -} - -static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine) -{ - if (INTEL_GEN(engine->i915) >= 12) - return 0x12; - else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS) - return 0x18; - else - return -1; -} - -static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine) -{ - int x; - - x = lrc_ring_wa_bb_per_ctx(engine); - if (x < 0) - return x; - - return x + 2; -} - -static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine) -{ - int x; - - x = lrc_ring_indirect_ptr(engine); - if (x < 0) - return x; - - return x + 2; -} - -static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine) -{ - if (engine->class != RENDER_CLASS) - return -1; - - if (INTEL_GEN(engine->i915) >= 12) - return 0xb6; - else if (INTEL_GEN(engine->i915) >= 11) - return 0xaa; - else - return -1; -} - -static u32 -lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine) -{ - switch (INTEL_GEN(engine->i915)) { - default: - MISSING_CASE(INTEL_GEN(engine->i915)); - fallthrough; - case 12: - return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; - case 11: - return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; - case 10: - return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; - case 9: - return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; - case 8: - return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; - } -} - -static void -lrc_ring_setup_indirect_ctx(u32 *regs, - const struct intel_engine_cs *engine, - u32 ctx_bb_ggtt_addr, - u32 size) -{ - GEM_BUG_ON(!size); - GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES)); - GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1); - regs[lrc_ring_indirect_ptr(engine) + 1] = - ctx_bb_ggtt_addr | (size / CACHELINE_BYTES); - - GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1); - regs[lrc_ring_indirect_offset(engine) + 1] = - lrc_ring_indirect_offset_default(engine) << 6; -} - -static u32 intel_context_get_runtime(const struct intel_context *ce) -{ - /* - * We can use either ppHWSP[16] which is recorded before the context - * switch (and so excludes the cost of context switches) or use the - * value from the context image itself, which is saved/restored earlier - * and so includes the cost of the save. 
- */ - return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]); -} - -static void mark_eio(struct i915_request *rq) -{ - if (i915_request_completed(rq)) - return; - - GEM_BUG_ON(i915_request_signaled(rq)); - - i915_request_set_error_once(rq, -EIO); - i915_request_mark_complete(rq); -} - -static struct i915_request * -active_request(const struct intel_timeline * const tl, struct i915_request *rq) -{ - struct i915_request *active = rq; - - rcu_read_lock(); - list_for_each_entry_continue_reverse(rq, &tl->requests, link) { - if (i915_request_completed(rq)) - break; - - active = rq; - } - rcu_read_unlock(); - - return active; -} - -static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine) -{ - return (i915_ggtt_offset(engine->status_page.vma) + - I915_GEM_HWS_PREEMPT_ADDR); -} - -static inline void -ring_set_paused(const struct intel_engine_cs *engine, int state) -{ - /* - * We inspect HWS_PREEMPT with a semaphore inside - * engine->emit_fini_breadcrumb. If the dword is true, - * the ring is paused as the semaphore will busywait - * until the dword is false. - */ - engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state; - if (state) - wmb(); -} - -static inline struct i915_priolist *to_priolist(struct rb_node *rb) -{ - return rb_entry(rb, struct i915_priolist, node); -} - -static inline int rq_prio(const struct i915_request *rq) -{ - return READ_ONCE(rq->sched.attr.priority); -} - -static int effective_prio(const struct i915_request *rq) -{ - int prio = rq_prio(rq); - - /* - * If this request is special and must not be interrupted at any - * cost, so be it. Note we are only checking the most recent request - * in the context and so may be masking an earlier vip request. It - * is hoped that under the conditions where nopreempt is used, this - * will not matter (i.e. all requests to that context will be - * nopreempt for as long as desired). - */ - if (i915_request_has_nopreempt(rq)) - prio = I915_PRIORITY_UNPREEMPTABLE; - - return prio; -} - -static int queue_prio(const struct intel_engine_execlists *execlists) -{ - struct i915_priolist *p; - struct rb_node *rb; - - rb = rb_first_cached(&execlists->queue); - if (!rb) - return INT_MIN; - - /* - * As the priolist[] are inverted, with the highest priority in [0], - * we have to flip the index value to become priority. - */ - p = to_priolist(rb); - if (!I915_USER_PRIORITY_SHIFT) - return p->priority; - - return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used); -} - -static inline bool need_preempt(const struct intel_engine_cs *engine, - const struct i915_request *rq, - struct rb_node *rb) -{ - int last_prio; - - if (!intel_engine_has_semaphores(engine)) - return false; - - /* - * Check if the current priority hint merits a preemption attempt. - * - * We record the highest value priority we saw during rescheduling - * prior to this dequeue, therefore we know that if it is strictly - * less than the current tail of ESLP[0], we do not need to force - * a preempt-to-idle cycle. - * - * However, the priority hint is a mere hint that we may need to - * preempt. If that hint is stale or we may be trying to preempt - * ourselves, ignore the request. - * - * More naturally we would write - * prio >= max(0, last); - * except that we wish to prevent triggering preemption at the same - * priority level: the task that is running should remain running - * to preserve FIFO ordering of dependencies. 
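All of the reasoning above reduces to one comparison: the queue's priority hint must strictly exceed the effective priority of what is running, with nopreempt acting as an infinite ceiling and NORMAL - 1 as the floor. A sketch of that decision (illustrative constants and types):

#include <limits.h>
#include <stdbool.h>

#define PRIO_UNPREEMPTABLE INT_MAX
#define PRIO_NORMAL 0

struct request { int prio; bool nopreempt; };

static int effective_prio(const struct request *rq)
{
	return rq->nopreempt ? PRIO_UNPREEMPTABLE : rq->prio;
}

static bool should_preempt(int queue_hint, const struct request *running)
{
	int last = effective_prio(running);

	if (last < PRIO_NORMAL - 1)
		last = PRIO_NORMAL - 1;
	/* strict '>' preserves FIFO ordering among equal priorities */
	return queue_hint > last;
}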
- */ - last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1); - if (engine->execlists.queue_priority_hint <= last_prio) - return false; - - /* - * Check against the first request in ELSP[1]; it will, thanks to the - * power of PI, be the highest priority of that context. - */ - if (!list_is_last(&rq->sched.link, &engine->active.requests) && - rq_prio(list_next_entry(rq, sched.link)) > last_prio) - return true; - - if (rb) { - struct virtual_engine *ve = - rb_entry(rb, typeof(*ve), nodes[engine->id].rb); - bool preempt = false; - - if (engine == ve->siblings[0]) { /* only preempt one sibling */ - struct i915_request *next; - - rcu_read_lock(); - next = READ_ONCE(ve->request); - if (next) - preempt = rq_prio(next) > last_prio; - rcu_read_unlock(); - } - - if (preempt) - return preempt; - } - - /* - * If the inflight context did not trigger the preemption, then maybe - * it was the set of queued requests? Pick the highest priority in - * the queue (the first active priolist) and see if it deserves to be - * running instead of ELSP[0]. - * - * The highest priority request in the queue cannot be either - * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same - * context, its priority would not exceed ELSP[0] aka last_prio. - */ - return queue_prio(&engine->execlists) > last_prio; -} - -__maybe_unused static inline bool -assert_priority_queue(const struct i915_request *prev, - const struct i915_request *next) -{ - /* - * Without preemption, the prev may refer to the still active element - * which we refuse to let go. - * - * Even with preemption, there are times when we think it is better not - * to preempt and leave an ostensibly lower priority request in flight. - */ - if (i915_request_is_active(prev)) - return true; - - return rq_prio(prev) >= rq_prio(next); -} - -/* - * The context descriptor encodes various attributes of a context, - * including its GTT address and some flags. Because it's fairly - * expensive to calculate, we'll just do it once and cache the result, - * which remains valid until the context is unpinned. - * - * This is what a descriptor looks like, from LSB to MSB:: - * - * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template) - * bits 12-31: LRCA, GTT address of (the HWSP of) this context - * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC) - * bits 53-54: mbz, reserved for use by hardware - * bits 55-63: group ID, currently unused and set to 0 - * - * Starting from Gen11, the upper dword of the descriptor has a new format: - * - * bits 32-36: reserved - * bits 37-47: SW context ID - * bits 48-53: engine instance - * bit 54: mbz, reserved for use by hardware - * bits 55-60: SW counter - * bits 61-63: engine class - * - * Engine info, SW context ID and SW counter need to form a unique number - * (Context ID) per lrc.
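For the gen11+ layout just described, composing the upper dword is plain shift-and-or; a sketch using the bit positions from the comment (field-width validation omitted, shift names are illustrative):

#include <stdint.h>

#define SW_CTX_ID_SHIFT       37 /* bits 37-47 */
#define ENGINE_INSTANCE_SHIFT 48 /* bits 48-53 */
#define SW_COUNTER_SHIFT      55 /* bits 55-60; bit 54 must be zero */
#define ENGINE_CLASS_SHIFT    61 /* bits 61-63 */

static uint64_t gen11_desc_upper(uint64_t sw_ctx_id, uint64_t instance,
				 uint64_t sw_counter, uint64_t engine_class)
{
	return (sw_ctx_id << SW_CTX_ID_SHIFT) |
	       (instance << ENGINE_INSTANCE_SHIFT) |
	       (sw_counter << SW_COUNTER_SHIFT) |
	       (engine_class << ENGINE_CLASS_SHIFT);
}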
- */ -static u32 -lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) -{ - u32 desc; - - desc = INTEL_LEGACY_32B_CONTEXT; - if (i915_vm_is_4lvl(ce->vm)) - desc = INTEL_LEGACY_64B_CONTEXT; - desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT; - - desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; - if (IS_GEN(engine->i915, 8)) - desc |= GEN8_CTX_L3LLC_COHERENT; - - return i915_ggtt_offset(ce->state) | desc; -} - -static inline unsigned int dword_in_page(void *addr) -{ - return offset_in_page(addr) / sizeof(u32); -} - static void set_offsets(u32 *regs, const u8 *data, const struct intel_engine_cs *engine, - bool clear) + bool close) #define NOP(x) (BIT(7) | (x)) #define LRI(count, flags) ((flags) << 6 | (count) | BUILD_BUG_ON_ZERO(count >= BIT(6))) #define POSTED BIT(0) @@ -600,7 +25,7 @@ static void set_offsets(u32 *regs, #define REG16(x) \ (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \ (((x) >> 2) & 0x7f) -#define END(total_state_size) 0, (total_state_size) +#define END 0 { const u32 base = engine->mmio_base; @@ -609,8 +34,6 @@ static void set_offsets(u32 *regs, if (*data & BIT(7)) { /* skip */ count = *data++ & ~BIT(7); - if (clear) - memset32(regs, MI_NOOP, count); regs += count; continue; } @@ -638,19 +61,11 @@ static void set_offsets(u32 *regs, } while (v & BIT(7)); regs[0] = base + (offset << 2); - if (clear) - regs[1] = 0; regs += 2; } while (--count); } - if (clear) { - u8 count = *++data; - - /* Clear past the tail for HW access */ - GEM_BUG_ON(dword_in_page(regs) > count); - memset32(regs, MI_NOOP, count - dword_in_page(regs)); - + if (close) { /* Close the batch; used mainly by live_lrc_layout() */ *regs = MI_BATCH_BUFFER_END; if (INTEL_GEN(engine->i915) >= 10) @@ -690,7 +105,7 @@ static const u8 gen8_xcs_offsets[] = { REG16(0x200), REG(0x028), - END(80) + END }; static const u8 gen9_xcs_offsets[] = { @@ -774,7 +189,7 @@ static const u8 gen9_xcs_offsets[] = { REG16(0x67c), REG(0x068), - END(176) + END }; static const u8 gen12_xcs_offsets[] = { @@ -806,7 +221,7 @@ static const u8 gen12_xcs_offsets[] = { REG16(0x274), REG16(0x270), - END(80) + END }; static const u8 gen8_rcs_offsets[] = { @@ -843,7 +258,7 @@ static const u8 gen8_rcs_offsets[] = { LRI(1, 0), REG(0x0c8), - END(80) + END }; static const u8 gen9_rcs_offsets[] = { @@ -927,7 +342,7 @@ static const u8 gen9_rcs_offsets[] = { REG16(0x67c), REG(0x68), - END(176) + END }; static const u8 gen11_rcs_offsets[] = { @@ -968,7 +383,7 @@ static const u8 gen11_rcs_offsets[] = { LRI(1, 0), REG(0x0c8), - END(80) + END }; static const u8 gen12_rcs_offsets[] = { @@ -1064,7 +479,7 @@ static const u8 gen12_rcs_offsets[] = { REG(0x084), NOP(1), - END(192) + END }; #undef END @@ -1103,2279 +518,440 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine) } } -static struct i915_request * -__unwind_incomplete_requests(struct intel_engine_cs *engine) -{ - struct i915_request *rq, *rn, *active = NULL; - struct list_head *pl; - int prio = I915_PRIORITY_INVALID; - - lockdep_assert_held(&engine->active.lock); - - list_for_each_entry_safe_reverse(rq, rn, - &engine->active.requests, - sched.link) { - if (i915_request_completed(rq)) - continue; /* XXX */ - - __i915_request_unsubmit(rq); - - /* - * Push the request back into the queue for later resubmission. - * If this request is not native to this physical engine (i.e. - * it came from a virtual source), push it back onto the virtual - * engine so that it can be moved across onto another physical - * engine as load dictates. 
- */ - if (likely(rq->execution_mask == engine->mask)) { - GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); - if (rq_prio(rq) != prio) { - prio = rq_prio(rq); - pl = i915_sched_lookup_priolist(engine, prio); - } - GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); - - list_move(&rq->sched.link, pl); - set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); - - /* Check in case we rollback so far we wrap [size/2] */ - if (intel_ring_direction(rq->ring, - rq->tail, - rq->ring->tail + 8) > 0) - rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE; - - active = rq; - } else { - struct intel_engine_cs *owner = rq->context->engine; - - WRITE_ONCE(rq->engine, owner); - owner->submit_request(rq); - active = NULL; - } - } - - return active; -} - -struct i915_request * -execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists) -{ - struct intel_engine_cs *engine = - container_of(execlists, typeof(*engine), execlists); - - return __unwind_incomplete_requests(engine); -} - -static inline void -execlists_context_status_change(struct i915_request *rq, unsigned long status) +static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) { - /* - * Only used when GVT-g is enabled now. When GVT-g is disabled, - * The compiler should eliminate this function as dead-code. - */ - if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) - return; - - atomic_notifier_call_chain(&rq->engine->context_status_notifier, - status, rq); + if (INTEL_GEN(engine->i915) >= 12) + return 0x60; + else if (INTEL_GEN(engine->i915) >= 9) + return 0x54; + else if (engine->class == RENDER_CLASS) + return 0x58; + else + return -1; } -static void intel_engine_context_in(struct intel_engine_cs *engine) +static int lrc_ring_gpr0(const struct intel_engine_cs *engine) { - unsigned long flags; - - if (atomic_add_unless(&engine->stats.active, 1, 0)) - return; - - write_seqlock_irqsave(&engine->stats.lock, flags); - if (!atomic_add_unless(&engine->stats.active, 1, 0)) { - engine->stats.start = ktime_get(); - atomic_inc(&engine->stats.active); - } - write_sequnlock_irqrestore(&engine->stats.lock, flags); + if (INTEL_GEN(engine->i915) >= 12) + return 0x74; + else if (INTEL_GEN(engine->i915) >= 9) + return 0x68; + else if (engine->class == RENDER_CLASS) + return 0xd8; + else + return -1; } -static void intel_engine_context_out(struct intel_engine_cs *engine) +static int lrc_ring_wa_bb_per_ctx(const struct intel_engine_cs *engine) { - unsigned long flags; - - GEM_BUG_ON(!atomic_read(&engine->stats.active)); - - if (atomic_add_unless(&engine->stats.active, -1, 1)) - return; - - write_seqlock_irqsave(&engine->stats.lock, flags); - if (atomic_dec_and_test(&engine->stats.active)) { - engine->stats.total = - ktime_add(engine->stats.total, - ktime_sub(ktime_get(), engine->stats.start)); - } - write_sequnlock_irqrestore(&engine->stats.lock, flags); + if (INTEL_GEN(engine->i915) >= 12) + return 0x12; + else if (INTEL_GEN(engine->i915) >= 9 || engine->class == RENDER_CLASS) + return 0x18; + else + return -1; } -static void -execlists_check_context(const struct intel_context *ce, - const struct intel_engine_cs *engine, - const char *when) +static int lrc_ring_indirect_ptr(const struct intel_engine_cs *engine) { - const struct intel_ring *ring = ce->ring; - u32 *regs = ce->lrc_reg_state; - bool valid = true; int x; - if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) { - pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n", - engine->name, - regs[CTX_RING_START], - i915_ggtt_offset(ring->vma)); - 
regs[CTX_RING_START] = i915_ggtt_offset(ring->vma); - valid = false; - } - - if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) != - (RING_CTL_SIZE(ring->size) | RING_VALID)) { - pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n", - engine->name, - regs[CTX_RING_CTL], - (u32)(RING_CTL_SIZE(ring->size) | RING_VALID)); - regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; - valid = false; - } - - x = lrc_ring_mi_mode(engine); - if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) { - pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n", - engine->name, regs[x + 1]); - regs[x + 1] &= ~STOP_RING; - regs[x + 1] |= STOP_RING << 16; - valid = false; - } + x = lrc_ring_wa_bb_per_ctx(engine); + if (x < 0) + return x; - WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when); + return x + 2; } -static void restore_default_state(struct intel_context *ce, - struct intel_engine_cs *engine) +static int lrc_ring_indirect_offset(const struct intel_engine_cs *engine) { - u32 *regs; + int x; - regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE); - execlists_init_reg_state(regs, ce, engine, ce->ring, true); + x = lrc_ring_indirect_ptr(engine); + if (x < 0) + return x; - ce->runtime.last = intel_context_get_runtime(ce); + return x + 2; } -static void reset_active(struct i915_request *rq, - struct intel_engine_cs *engine) +static int lrc_ring_cmd_buf_cctl(const struct intel_engine_cs *engine) { - struct intel_context * const ce = rq->context; - u32 head; - - /* - * The executing context has been cancelled. We want to prevent - * further execution along this context and propagate the error on - * to anything depending on its results. - * - * In __i915_request_submit(), we apply the -EIO and remove the - * requests' payloads for any banned requests. But first, we must - * rewind the context back to the start of the incomplete request so - * that we do not jump back into the middle of the batch. - * - * We preserve the breadcrumbs and semaphores of the incomplete - * requests so that inter-timeline dependencies (i.e other timelines) - * remain correctly ordered. And we defer to __i915_request_submit() - * so that all asynchronous waits are correctly handled. 
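reset_active() below rewinds the saved head to the start of the first incomplete request; because ring sizes are powers of two, wrapping that offset back into the ring is a single mask rather than a modulo. A one-line sketch of the wrap (a common implementation of what intel_ring_wrap() is expected to do; assumption, not taken from this patch):

#include <stdint.h>

static uint32_t ring_wrap(uint32_t ring_size, uint32_t offset)
{
	return offset & (ring_size - 1); /* ring_size must be a power of two */
}

/* ring_wrap(4096, 4100) == 4: an offset past the end rewinds to the start */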
- */ - ENGINE_TRACE(engine, "{ rq=%llx:%lld }\n", - rq->fence.context, rq->fence.seqno); + if (engine->class != RENDER_CLASS) + return -1; - /* On resubmission of the active request, payload will be scrubbed */ - if (i915_request_completed(rq)) - head = rq->tail; + if (INTEL_GEN(engine->i915) >= 12) + return 0xb6; + else if (INTEL_GEN(engine->i915) >= 11) + return 0xaa; else - head = active_request(ce->timeline, rq)->head; - head = intel_ring_wrap(ce->ring, head); - - /* Scrub the context image to prevent replaying the previous batch */ - restore_default_state(ce, engine); - __execlists_update_reg_state(ce, engine, head); - - /* We've switched away, so this should be a no-op, but intent matters */ - ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; -} - -static void st_update_runtime_underflow(struct intel_context *ce, s32 dt) -{ -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) - ce->runtime.num_underflow += dt < 0; - ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt); -#endif + return -1; } -static void intel_context_update_runtime(struct intel_context *ce) +static u32 +lrc_ring_indirect_offset_default(const struct intel_engine_cs *engine) { - u32 old; - s32 dt; - - if (intel_context_is_barrier(ce)) - return; - - old = ce->runtime.last; - ce->runtime.last = intel_context_get_runtime(ce); - dt = ce->runtime.last - old; - - if (unlikely(dt <= 0)) { - CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n", - old, ce->runtime.last, dt); - st_update_runtime_underflow(ce, dt); - return; + switch (INTEL_GEN(engine->i915)) { + default: + MISSING_CASE(INTEL_GEN(engine->i915)); + fallthrough; + case 12: + return GEN12_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + case 11: + return GEN11_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + case 10: + return GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + case 9: + return GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; + case 8: + return GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT; } - - ewma_runtime_add(&ce->runtime.avg, dt); - ce->runtime.total += dt; } -static inline struct intel_engine_cs * -__execlists_schedule_in(struct i915_request *rq) +static void +lrc_setup_indirect_ctx(u32 *regs, + const struct intel_engine_cs *engine, + u32 ctx_bb_ggtt_addr, + u32 size) { - struct intel_engine_cs * const engine = rq->engine; - struct intel_context * const ce = rq->context; - - intel_context_get(ce); - - if (unlikely(intel_context_is_banned(ce))) - reset_active(rq, engine); - - if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - execlists_check_context(ce, engine, "before"); - - if (ce->tag) { - /* Use a fixed tag for OA and friends */ - GEM_BUG_ON(ce->tag <= BITS_PER_LONG); - ce->lrc.ccid = ce->tag; - } else { - /* We don't need a strict matching tag, just different values */ - unsigned int tag = ffs(READ_ONCE(engine->context_tag)); - - GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG); - clear_bit(tag - 1, &engine->context_tag); - ce->lrc.ccid = tag << (GEN11_SW_CTX_ID_SHIFT - 32); - - BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID); - } - - ce->lrc.ccid |= engine->execlists.ccid; - - __intel_gt_pm_get(engine->gt); - if (engine->fw_domain && !atomic_fetch_inc(&engine->fw_active)) - intel_uncore_forcewake_get(engine->uncore, engine->fw_domain); - execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); - intel_engine_context_in(engine); + GEM_BUG_ON(!size); + GEM_BUG_ON(!IS_ALIGNED(size, CACHELINE_BYTES)); + GEM_BUG_ON(lrc_ring_indirect_ptr(engine) == -1); + regs[lrc_ring_indirect_ptr(engine) + 1] = + ctx_bb_ggtt_addr | (size / CACHELINE_BYTES); - return engine; + 
GEM_BUG_ON(lrc_ring_indirect_offset(engine) == -1); + regs[lrc_ring_indirect_offset(engine) + 1] = + lrc_ring_indirect_offset_default(engine) << 6; } -static inline struct i915_request * -execlists_schedule_in(struct i915_request *rq, int idx) +static void init_common_regs(u32 * const regs, + const struct intel_context *ce, + const struct intel_engine_cs *engine, + bool inhibit) { - struct intel_context * const ce = rq->context; - struct intel_engine_cs *old; - - GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine)); - trace_i915_request_in(rq, idx); - - old = READ_ONCE(ce->inflight); - do { - if (!old) { - WRITE_ONCE(ce->inflight, __execlists_schedule_in(rq)); - break; - } - } while (!try_cmpxchg(&ce->inflight, &old, ptr_inc(old))); - - GEM_BUG_ON(intel_context_inflight(ce) != rq->engine); - return i915_request_get(rq); -} + u32 ctl; -static void kick_siblings(struct i915_request *rq, struct intel_context *ce) -{ - struct virtual_engine *ve = container_of(ce, typeof(*ve), context); - struct i915_request *next = READ_ONCE(ve->request); + ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); + ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); + if (inhibit) + ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT; + if (INTEL_GEN(engine->i915) < 11) + ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | + CTX_CTRL_RS_CTX_ENABLE); + regs[CTX_CONTEXT_CONTROL] = ctl; - if (next == rq || (next && next->execution_mask & ~rq->execution_mask)) - tasklet_hi_schedule(&ve->base.execlists.tasklet); + regs[CTX_TIMESTAMP] = ce->runtime.last; } -static inline void -__execlists_schedule_out(struct i915_request *rq, - struct intel_engine_cs * const engine, - unsigned int ccid) +static void init_wa_bb_regs(u32 * const regs, + const struct intel_engine_cs *engine) { - struct intel_context * const ce = rq->context; - - /* - * NB process_csb() is not under the engine->active.lock and hence - * schedule_out can race with schedule_in meaning that we should - * refrain from doing non-trivial work here. - */ - - if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - execlists_check_context(ce, engine, "after"); + const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx; - /* - * If we have just completed this context, the engine may now be - * idle and we want to re-enter powersaving. - */ - if (list_is_last_rcu(&rq->link, &ce->timeline->requests) && - i915_request_completed(rq)) - intel_engine_add_retire(engine, ce->timeline); + if (wa_ctx->per_ctx.size) { + const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); - ccid >>= GEN11_SW_CTX_ID_SHIFT - 32; - ccid &= GEN12_MAX_CONTEXT_HW_ID; - if (ccid < BITS_PER_LONG) { - GEM_BUG_ON(ccid == 0); - GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag)); - set_bit(ccid - 1, &engine->context_tag); + GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1); + regs[lrc_ring_wa_bb_per_ctx(engine) + 1] = + (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; } - intel_context_update_runtime(ce); - intel_engine_context_out(engine); - execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); - if (engine->fw_domain && !atomic_dec_return(&engine->fw_active)) - intel_uncore_forcewake_put(engine->uncore, engine->fw_domain); - intel_gt_pm_put_async(engine->gt); - - /* - * If this is part of a virtual engine, its next request may - * have been blocked waiting for access to the active context. - * We have to kick all the siblings again in case we need to - * switch (e.g. the next request is not runnable on this - * engine). 
Hopefully, we will already have submitted the next - * request before the tasklet runs and do not need to rebuild - * each virtual tree and kick everyone again. - */ - if (ce->engine != engine) - kick_siblings(rq, ce); - - intel_context_put(ce); -} - -static inline void -execlists_schedule_out(struct i915_request *rq) -{ - struct intel_context * const ce = rq->context; - struct intel_engine_cs *cur, *old; - u32 ccid; - - trace_i915_request_out(rq); - - ccid = rq->context->lrc.ccid; - old = READ_ONCE(ce->inflight); - do - cur = ptr_unmask_bits(old, 2) ? ptr_dec(old) : NULL; - while (!try_cmpxchg(&ce->inflight, &old, cur)); - if (!cur) - __execlists_schedule_out(rq, old, ccid); - - i915_request_put(rq); -} - -static u64 execlists_update_context(struct i915_request *rq) -{ - struct intel_context *ce = rq->context; - u64 desc = ce->lrc.desc; - u32 tail, prev; - - /* - * WaIdleLiteRestore:bdw,skl - * - * We should never submit the context with the same RING_TAIL twice - * just in case we submit an empty ring, which confuses the HW. - * - * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of - * the normal request to be able to always advance the RING_TAIL on - * subsequent resubmissions (for lite restore). Should that fail us, - * and we try and submit the same tail again, force the context - * reload. - * - * If we need to return to a preempted context, we need to skip the - * lite-restore and force it to reload the RING_TAIL. Otherwise, the - * HW has a tendency to ignore us rewinding the TAIL to the end of - * an earlier request. - */ - GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail); - prev = rq->ring->tail; - tail = intel_ring_set_tail(rq->ring, rq->tail); - if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0)) - desc |= CTX_DESC_FORCE_RESTORE; - ce->lrc_reg_state[CTX_RING_TAIL] = tail; - rq->tail = rq->wa_tail; - - /* - * Make sure the context image is complete before we submit it to HW. - * - * Ostensibly, writes (including the WCB) should be flushed prior to - * an uncached write such as our mmio register access, the empirical - * evidence (esp. on Braswell) suggests that the WC write into memory - * may not be visible to the HW prior to the completion of the UC - * register write and that we may begin execution from the context - * before its image is complete leading to invalid PD chasing. - */ - wmb(); - - ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE; - return desc; -} - -static inline void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) -{ - if (execlists->ctrl_reg) { - writel(lower_32_bits(desc), execlists->submit_reg + port * 2); - writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1); - } else { - writel(upper_32_bits(desc), execlists->submit_reg); - writel(lower_32_bits(desc), execlists->submit_reg); + if (wa_ctx->indirect_ctx.size) { + lrc_setup_indirect_ctx(regs, engine, + i915_ggtt_offset(wa_ctx->vma) + + wa_ctx->indirect_ctx.offset, + wa_ctx->indirect_ctx.size); } } -static __maybe_unused char * -dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq) -{ - if (!rq) - return ""; - - snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d", - prefix, - rq->context->lrc.ccid, - rq->fence.context, rq->fence.seqno, - i915_request_completed(rq) ? "!" : - i915_request_started(rq) ? 
"*" : - "", - rq_prio(rq)); - - return buf; -} - -static __maybe_unused void -trace_ports(const struct intel_engine_execlists *execlists, - const char *msg, - struct i915_request * const *ports) -{ - const struct intel_engine_cs *engine = - container_of(execlists, typeof(*engine), execlists); - char __maybe_unused p0[40], p1[40]; - - if (!ports[0]) - return; - - ENGINE_TRACE(engine, "%s { %s%s }\n", msg, - dump_port(p0, sizeof(p0), "", ports[0]), - dump_port(p1, sizeof(p1), ", ", ports[1])); -} - -static inline bool -reset_in_progress(const struct intel_engine_execlists *execlists) +static void init_ppgtt_regs(u32 *regs, const struct i915_ppgtt *ppgtt) { - return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); -} - -static __maybe_unused bool -assert_pending_valid(const struct intel_engine_execlists *execlists, - const char *msg) -{ - struct intel_engine_cs *engine = - container_of(execlists, typeof(*engine), execlists); - struct i915_request * const *port, *rq; - struct intel_context *ce = NULL; - bool sentinel = false; - u32 ccid = -1; - - trace_ports(execlists, msg, execlists->pending); - - /* We may be messing around with the lists during reset, lalala */ - if (reset_in_progress(execlists)) - return true; - - if (!execlists->pending[0]) { - GEM_TRACE_ERR("%s: Nothing pending for promotion!\n", - engine->name); - return false; - } - - if (execlists->pending[execlists_num_ports(execlists)]) { - GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n", - engine->name, execlists_num_ports(execlists)); - return false; - } - - for (port = execlists->pending; (rq = *port); port++) { - unsigned long flags; - bool ok = true; - - GEM_BUG_ON(!kref_read(&rq->fence.refcount)); - GEM_BUG_ON(!i915_request_is_active(rq)); - - if (ce == rq->context) { - GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n", - engine->name, - ce->timeline->fence_context, - port - execlists->pending); - return false; - } - ce = rq->context; - - if (ccid == ce->lrc.ccid) { - GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n", - engine->name, - ccid, ce->timeline->fence_context, - port - execlists->pending); - return false; - } - ccid = ce->lrc.ccid; - - /* - * Sentinels are supposed to be the last request so they flush - * the current execution off the HW. Check that they are the only - * request in the pending submission. + if (i915_vm_is_4lvl(&ppgtt->vm)) { + /* 64b PPGTT (48bit canonical) + * PDP0_DESCRIPTOR contains the base address to PML4 and + * other PDP Descriptors are ignored. */ - if (sentinel) { - GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n", - engine->name, - ce->timeline->fence_context, - port - execlists->pending); - return false; - } - sentinel = i915_request_has_sentinel(rq); - - /* Hold tightly onto the lock to prevent concurrent retires! 
*/ - if (!spin_trylock_irqsave(&rq->lock, flags)) - continue; - - if (i915_request_completed(rq)) - goto unlock; - - if (i915_active_is_idle(&ce->active) && - !intel_context_is_barrier(ce)) { - GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n", - engine->name, - ce->timeline->fence_context, - port - execlists->pending); - ok = false; - goto unlock; - } - - if (!i915_vma_is_pinned(ce->state)) { - GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n", - engine->name, - ce->timeline->fence_context, - port - execlists->pending); - ok = false; - goto unlock; - } - - if (!i915_vma_is_pinned(ce->ring->vma)) { - GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n", - engine->name, - ce->timeline->fence_context, - port - execlists->pending); - ok = false; - goto unlock; - } - -unlock: - spin_unlock_irqrestore(&rq->lock, flags); - if (!ok) - return false; - } - - return ce; -} - -static void execlists_submit_ports(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists *execlists = &engine->execlists; - unsigned int n; - - GEM_BUG_ON(!assert_pending_valid(execlists, "submit")); - - /* - * We can skip acquiring intel_runtime_pm_get() here as it was taken - * on our behalf by the request (see i915_gem_mark_busy()) and it will - * not be relinquished until the device is idle (see - * i915_gem_idle_work_handler()). As a precaution, we make sure - * that all ELSP are drained i.e. we have processed the CSB, - * before allowing ourselves to idle and calling intel_runtime_pm_put(). - */ - GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); - - /* - * ELSQ note: the submit queue is not cleared after being submitted - * to the HW so we need to make sure we always clean it up. This is - * currently ensured by the fact that we always write the same number - * of elsq entries, keep this in mind before changing the loop below. - */ - for (n = execlists_num_ports(execlists); n--; ) { - struct i915_request *rq = execlists->pending[n]; - - write_desc(execlists, - rq ? execlists_update_context(rq) : 0, - n); - } - - /* we need to manually load the submit queue */ - if (execlists->ctrl_reg) - writel(EL_CTRL_LOAD, execlists->ctrl_reg); -} - -static bool ctx_single_port_submission(const struct intel_context *ce) -{ - return (IS_ENABLED(CONFIG_DRM_I915_GVT) && - intel_context_force_single_submission(ce)); -} - -static bool can_merge_ctx(const struct intel_context *prev, - const struct intel_context *next) -{ - if (prev != next) - return false; - - if (ctx_single_port_submission(prev)) - return false; - - return true; -} - -static unsigned long i915_request_flags(const struct i915_request *rq) -{ - return READ_ONCE(rq->fence.flags); -} - -static bool can_merge_rq(const struct i915_request *prev, - const struct i915_request *next) -{ - GEM_BUG_ON(prev == next); - GEM_BUG_ON(!assert_priority_queue(prev, next)); - - /* - * We do not submit known completed requests. Therefore if the next - * request is already completed, we can pretend to merge it in - * with the previous context (and we will skip updating the ELSP - * and tracking). Thus hopefully keeping the ELSP full with active - * contexts, despite the best efforts of preempt-to-busy to confuse - * us. 
- */ - if (i915_request_completed(next)) - return true; - - if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) & - (BIT(I915_FENCE_FLAG_NOPREEMPT) | - BIT(I915_FENCE_FLAG_SENTINEL)))) - return false; - - if (!can_merge_ctx(prev->context, next->context)) - return false; - - GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno)); - return true; -} - -static void virtual_update_register_offsets(u32 *regs, - struct intel_engine_cs *engine) -{ - set_offsets(regs, reg_offsets(engine), engine, false); -} - -static bool virtual_matches(const struct virtual_engine *ve, - const struct i915_request *rq, - const struct intel_engine_cs *engine) -{ - const struct intel_engine_cs *inflight; - - if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */ - return false; - - /* - * We track when the HW has completed saving the context image - * (i.e. when we have seen the final CS event switching out of - * the context) and must not overwrite the context image before - * then. This restricts us to only using the active engine - * while the previous virtualized request is inflight (so - * we reuse the register offsets). This is a very small - * hysteresis on the greedy selection algorithm. - */ - inflight = intel_context_inflight(&ve->context); - if (inflight && inflight != engine) - return false; - - return true; -} - -static void virtual_xfer_context(struct virtual_engine *ve, - struct intel_engine_cs *engine) -{ - unsigned int n; - - if (likely(engine == ve->siblings[0])) - return; - - GEM_BUG_ON(READ_ONCE(ve->context.inflight)); - if (!intel_engine_has_relative_mmio(engine)) - virtual_update_register_offsets(ve->context.lrc_reg_state, - engine); - - /* - * Move the bound engine to the top of the list for - * future execution. We then kick this tasklet first - * before checking others, so that we preferentially - * reuse this set of bound registers. - */ - for (n = 1; n < ve->num_siblings; n++) { - if (ve->siblings[n] == engine) { - swap(ve->siblings[n], ve->siblings[0]); - break; - } + ASSIGN_CTX_PML4(ppgtt, regs); + } else { + ASSIGN_CTX_PDP(ppgtt, regs, 3); + ASSIGN_CTX_PDP(ppgtt, regs, 2); + ASSIGN_CTX_PDP(ppgtt, regs, 1); + ASSIGN_CTX_PDP(ppgtt, regs, 0); } } -#define for_each_waiter(p__, rq__) \ - list_for_each_entry_lockless(p__, \ - &(rq__)->sched.waiters_list, \ - wait_link) - -#define for_each_signaler(p__, rq__) \ - list_for_each_entry_rcu(p__, \ - &(rq__)->sched.signalers_list, \ - signal_link) - -static void defer_request(struct i915_request *rq, struct list_head * const pl) -{ - LIST_HEAD(list); - - /* - * We want to move the interrupted request to the back of - * the round-robin list (i.e. its priority level), but - * in doing so, we must then move all requests that were in - * flight and were waiting for the interrupted request to - * be run after it again.
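
[Editor's sketch] To make the defer_request() walk above concrete, here is a minimal userspace C model of the idea: the preempted request moves to the back of its priority level, and every ready waiter that depends on it is queued behind it, transitively. The struct and helper names are invented for this sketch, and the kernel's weak-dependency, cross-engine and priority checks are reduced to a single ready flag.

#include <stdio.h>

#define MAX_WAITERS 4

struct req {
	const char *name;
	int ready;                          /* stand-in for is_ready() */
	struct req *waiters[MAX_WAITERS];   /* requests waiting on us */
};

/* Append rq to the tail of pl[], then queue its ready waiters after it. */
static void defer(struct req *rq, struct req **pl, int *n)
{
	struct req *todo[16];
	int head = 0, tail = 0;

	todo[tail++] = rq;
	while (head < tail) {
		struct req *r = todo[head++];

		pl[(*n)++] = r; /* back of the round-robin list */
		for (int i = 0; i < MAX_WAITERS && r->waiters[i]; i++)
			if (r->waiters[i]->ready)
				todo[tail++] = r->waiters[i]; /* must run after r */
	}
}

int main(void)
{
	struct req c = { "C", 1, { 0 } };
	struct req b = { "B", 0, { 0 } };      /* not ready: left alone */
	struct req a = { "A", 1, { &b, &c } }; /* the interrupted request */
	struct req *pl[8];
	int n = 0;

	defer(&a, pl, &n);
	for (int i = 0; i < n; i++)
		printf("%s ", pl[i]->name); /* prints: A C */
	printf("\n");
	return 0;
}
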
- */ - do { - struct i915_dependency *p; - - GEM_BUG_ON(i915_request_is_active(rq)); - list_move_tail(&rq->sched.link, pl); - - for_each_waiter(p, rq) { - struct i915_request *w = - container_of(p->waiter, typeof(*w), sched); - - if (p->flags & I915_DEPENDENCY_WEAK) - continue; - - /* Leave semaphores spinning on the other engines */ - if (w->engine != rq->engine) - continue; - - /* No waiter should start before its signaler */ - GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) && - i915_request_started(w) && - !i915_request_completed(rq)); - - GEM_BUG_ON(i915_request_is_active(w)); - if (!i915_request_is_ready(w)) - continue; - - if (rq_prio(w) < rq_prio(rq)) - continue; - - GEM_BUG_ON(rq_prio(w) > rq_prio(rq)); - list_move_tail(&w->sched.link, &list); - } - - rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); - } while (rq); -} - -static void defer_active(struct intel_engine_cs *engine) +static struct i915_ppgtt *vm_alias(struct i915_address_space *vm) { - struct i915_request *rq; - - rq = __unwind_incomplete_requests(engine); - if (!rq) - return; - - defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq))); + if (i915_is_ggtt(vm)) + return i915_vm_to_ggtt(vm)->alias; + else + return i915_vm_to_ppgtt(vm); } -static bool -need_timeslice(const struct intel_engine_cs *engine, - const struct i915_request *rq, - const struct rb_node *rb) +static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine) { - int hint; - - if (!intel_engine_has_timeslices(engine)) - return false; - - hint = engine->execlists.queue_priority_hint; - - if (rb) { - const struct virtual_engine *ve = - rb_entry(rb, typeof(*ve), nodes[engine->id].rb); - const struct intel_engine_cs *inflight = - intel_context_inflight(&ve->context); - - if (!inflight || inflight == engine) { - struct i915_request *next; + int x; - rcu_read_lock(); - next = READ_ONCE(ve->request); - if (next) - hint = max(hint, rq_prio(next)); - rcu_read_unlock(); - } + x = lrc_ring_mi_mode(engine); + if (x != -1) { + regs[x + 1] &= ~STOP_RING; + regs[x + 1] |= STOP_RING << 16; } - - if (!list_is_last(&rq->sched.link, &engine->active.requests)) - hint = max(hint, rq_prio(list_next_entry(rq, sched.link))); - - GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE); - return hint >= effective_prio(rq); } -static bool -timeslice_yield(const struct intel_engine_execlists *el, - const struct i915_request *rq) +static void __lrc_init_regs(u32 *regs, + const struct intel_context *ce, + const struct intel_engine_cs *engine, + bool inhibit) { /* - * Once bitten, forever smitten! + * A context is actually a big batch buffer with several + * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The + * values we are setting here are only for the first context restore: + * on a subsequent save, the GPU will recreate this batchbuffer with new + * values (including all the missing MI_LOAD_REGISTER_IMM commands that + * we are not initializing here). * - * If the active context ever busy-waited on a semaphore, - * it will be treated as a hog until the end of its timeslice (i.e. - * until it is scheduled out and replaced by a new submission, - * possibly even its own lite-restore). The HW only sends an interrupt - * on the first miss, and we do know if that semaphore has been - * signaled, or even if it is now stuck on another semaphore. Play - * safe, yield if it might be stuck -- it will be given a fresh - * timeslice in the near future. + * Must keep consistent with virtual_update_register_offsets(). 
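
[Editor's sketch] As a rough illustration of the register-state layout described in the comment above, this standalone C sketch emits an MI_LOAD_REGISTER_IMM(n) header followed by n (offset, value) pairs into a u32 stream. The header encoding mirrors the MI_LOAD_REGISTER_IMM form used by the driver (opcode 0x22, dword length field 2n - 1), but the register offsets and values below are placeholders, not real hardware registers.

#include <stdint.h>
#include <stdio.h>

/* MI_LOAD_REGISTER_IMM(n): opcode 0x22 in bits 23-28, length = 2n - 1 */
#define MI_LOAD_REGISTER_IMM(n) ((0x22u << 23) | (2u * (n) - 1))

static uint32_t *emit_lri(uint32_t *cs, const uint32_t (*pairs)[2],
			  unsigned int n)
{
	*cs++ = MI_LOAD_REGISTER_IMM(n);
	for (unsigned int i = 0; i < n; i++) {
		*cs++ = pairs[i][0]; /* register offset */
		*cs++ = pairs[i][1]; /* immediate value to load */
	}
	return cs;
}

int main(void)
{
	static const uint32_t pairs[][2] = {
		{ 0x2244, 0x00090009 }, /* hypothetical context control */
		{ 0x2034, 0x00000000 }, /* hypothetical ring head */
	};
	uint32_t image[8];
	uint32_t *end = emit_lri(image, pairs, 2);

	for (uint32_t *cs = image; cs < end; cs++)
		printf("0x%08x\n", *cs); /* 0x11000003, then the pairs */
	return 0;
}

The context image is, as the comment says, just such a stream: the GPU replays it on restore and rewrites it on save.
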
*/ - return rq->context->lrc.ccid == READ_ONCE(el->yield); -} - -static bool -timeslice_expired(const struct intel_engine_execlists *el, - const struct i915_request *rq) -{ - return timer_expired(&el->timer) || timeslice_yield(el, rq); -} - -static int -switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq) -{ - if (list_is_last(&rq->sched.link, &engine->active.requests)) - return engine->execlists.queue_priority_hint; - return rq_prio(list_next_entry(rq, sched.link)); -} - -static inline unsigned long -timeslice(const struct intel_engine_cs *engine) -{ - return READ_ONCE(engine->props.timeslice_duration_ms); -} - -static unsigned long active_timeslice(const struct intel_engine_cs *engine) -{ - const struct intel_engine_execlists *execlists = &engine->execlists; - const struct i915_request *rq = *execlists->active; - - if (!rq || i915_request_completed(rq)) - return 0; - - if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq)) - return 0; - - return timeslice(engine); -} + if (inhibit) + memset(regs, 0, PAGE_SIZE); -static void set_timeslice(struct intel_engine_cs *engine) -{ - unsigned long duration; + set_offsets(regs, reg_offsets(engine), engine, inhibit); - if (!intel_engine_has_timeslices(engine)) - return; + init_common_regs(regs, ce, engine, inhibit); + init_ppgtt_regs(regs, vm_alias(ce->vm)); - duration = active_timeslice(engine); - ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration); + init_wa_bb_regs(regs, engine); - set_timer_ms(&engine->execlists.timer, duration); + __reset_stop_ring(regs, engine); } -static void start_timeslice(struct intel_engine_cs *engine, int prio) +void lrc_init_regs(const struct intel_context *ce, + const struct intel_engine_cs *engine, + bool inhibit) { - struct intel_engine_execlists *execlists = &engine->execlists; - unsigned long duration; - - if (!intel_engine_has_timeslices(engine)) - return; - - WRITE_ONCE(execlists->switch_priority_hint, prio); - if (prio == INT_MIN) - return; - - if (timer_pending(&execlists->timer)) - return; - - duration = timeslice(engine); - ENGINE_TRACE(engine, - "start timeslicing, prio:%d, interval:%lu", - prio, duration); - - set_timer_ms(&execlists->timer, duration); + __lrc_init_regs(ce->lrc_reg_state, ce, engine, inhibit); } -static void record_preemption(struct intel_engine_execlists *execlists) +void lrc_reset_regs(const struct intel_context *ce, + const struct intel_engine_cs *engine) { - (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); + __reset_stop_ring(ce->lrc_reg_state, engine); } -static unsigned long active_preempt_timeout(struct intel_engine_cs *engine, - const struct i915_request *rq) +static void +set_redzone(void *vaddr, const struct intel_engine_cs *engine) { - if (!rq) - return 0; + if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + return; - /* Force a fast reset for terminated contexts (ignoring sysfs!) 
*/ - if (unlikely(intel_context_is_banned(rq->context))) - return 1; + vaddr += engine->context_size; - return READ_ONCE(engine->props.preempt_timeout_ms); + memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE); } -static void set_preempt_timeout(struct intel_engine_cs *engine, - const struct i915_request *rq) +static void +check_redzone(const void *vaddr, const struct intel_engine_cs *engine) { - if (!intel_engine_has_preempt_reset(engine)) + if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) return; - set_timer_ms(&engine->execlists.preempt, - active_preempt_timeout(engine, rq)); -} - -static inline void clear_ports(struct i915_request **ports, int count) -{ - memset_p((void **)ports, NULL, count); -} + vaddr += engine->context_size; -static inline void -copy_ports(struct i915_request **dst, struct i915_request **src, int count) -{ - /* A memcpy_p() would be very useful here! */ - while (count--) - WRITE_ONCE(*dst++, *src++); /* avoid write tearing */ + if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE)) + drm_err_once(&engine->i915->drm, + "%s context redzone overwritten!\n", + engine->name); } -static void execlists_dequeue(struct intel_engine_cs *engine) +void lrc_init_state(struct intel_context *ce, + struct intel_engine_cs *engine, + void *state) { - struct intel_engine_execlists * const execlists = &engine->execlists; - struct i915_request **port = execlists->pending; - struct i915_request ** const last_port = port + execlists->port_mask; - struct i915_request * const *active; - struct i915_request *last; - struct rb_node *rb; - bool submit = false; - - /* - * Hardware submission is through 2 ports. Conceptually each port - * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is - * static for a context, and unique to each, so we only execute - * requests belonging to a single context from each ring. RING_HEAD - * is maintained by the CS in the context image, it marks the place - * where it got up to last time, and through RING_TAIL we tell the CS - * where we want to execute up to this time. - * - * In this list the requests are in order of execution. Consecutive - * requests from the same context are adjacent in the ringbuffer. We - * can combine these requests into a single RING_TAIL update: - * - * RING_HEAD...req1...req2 - * ^- RING_TAIL - * since to execute req2 the CS must first execute req1. - * - * Our goal then is to point each port to the end of a consecutive - * sequence of requests as being the most optimal (fewest wake ups - * and context switches) submission. - */ - - for (rb = rb_first_cached(&execlists->virtual); rb; ) { - struct virtual_engine *ve = - rb_entry(rb, typeof(*ve), nodes[engine->id].rb); - struct i915_request *rq = READ_ONCE(ve->request); - - if (!rq) { /* lazily cleanup after another engine handled rq */ - rb_erase_cached(rb, &execlists->virtual); - RB_CLEAR_NODE(rb); - rb = rb_first_cached(&execlists->virtual); - continue; - } - - if (!virtual_matches(ve, rq, engine)) { - rb = rb_next(rb); - continue; - } - - break; - } - - /* - * If the queue is higher priority than the last - * request in the currently active context, submit afresh. - * We will resubmit again afterwards in case we need to split - * the active context to interject the preemption request, - * i.e. we will retrigger preemption following the ack in case - * of trouble. 
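
[Editor's sketch] The RING_TAIL coalescing rule described above (consecutive requests of the same context collapse into a single tail update per ELSP port) can be modelled in a few lines of standalone C; the request queue and port count here are invented for the example.

#include <stdio.h>

struct rq {
	int ctx;           /* which context (ring) the request runs in */
	unsigned int tail; /* RING_TAIL after this request */
};

int main(void)
{
	const struct rq queue[] = {	/* in execution order */
		{ 1, 0x100 }, { 1, 0x180 }, { 2, 0x040 }, { 2, 0x090 },
	};
	const unsigned int count = sizeof(queue) / sizeof(queue[0]);
	unsigned int i = 0, port = 0, nports = 2;

	while (i < count && port < nports) {
		int ctx = queue[i].ctx;
		unsigned int tail = queue[i].tail;

		/* Consecutive same-context requests share one submission;
		 * only the tail of the last request in the run matters. */
		while (i + 1 < count && queue[i + 1].ctx == ctx)
			tail = queue[++i].tail;

		printf("ELSP[%u]: ctx=%d RING_TAIL=0x%x\n", port++, ctx, tail);
		i++;
	}
	return 0;
}

This prints one submission per context run (ctx 1 up to 0x180, ctx 2 up to 0x90): two requests executed per port, but only one tail write each.
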
- */ - active = READ_ONCE(execlists->active); - - /* - * In theory we can skip over completed contexts that have not - * yet been processed by events (as those events are in flight): - * - * while ((last = *active) && i915_request_completed(last)) - * active++; - * - * However, the GPU cannot handle this as it will ultimately - * find itself trying to jump back into a context it has just - * completed and barf. - */ - - if ((last = *active)) { - if (need_preempt(engine, last, rb)) { - if (i915_request_completed(last)) { - tasklet_hi_schedule(&execlists->tasklet); - return; - } - - ENGINE_TRACE(engine, - "preempting last=%llx:%lld, prio=%d, hint=%d\n", - last->fence.context, - last->fence.seqno, - last->sched.attr.priority, - execlists->queue_priority_hint); - record_preemption(execlists); - - /* - * Don't let the RING_HEAD advance past the breadcrumb - * as we unwind (and until we resubmit) so that we do - * not accidentally tell it to go backwards. - */ - ring_set_paused(engine, 1); - - /* - * Note that we have not stopped the GPU at this point, - * so we are unwinding the incomplete requests as they - * remain inflight and so by the time we do complete - * the preemption, some of the unwound requests may - * complete! - */ - __unwind_incomplete_requests(engine); - - last = NULL; - } else if (need_timeslice(engine, last, rb) && - timeslice_expired(execlists, last)) { - if (i915_request_completed(last)) { - tasklet_hi_schedule(&execlists->tasklet); - return; - } - - ENGINE_TRACE(engine, - "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n", - last->fence.context, - last->fence.seqno, - last->sched.attr.priority, - execlists->queue_priority_hint, - yesno(timeslice_yield(execlists, last))); - - ring_set_paused(engine, 1); - defer_active(engine); - - /* - * Unlike for preemption, if we rewind and continue - * executing the same context as previously active, - * the order of execution will remain the same and - * the tail will only advance. We do not need to - * force a full context restore, as a lite-restore - * is sufficient to resample the monotonic TAIL. - * - * If we switch to any other context, similarly we - * will not rewind TAIL of current context, and - * normal save/restore will preserve state and allow - * us to later continue executing the same request. - */ - last = NULL; - } else { - /* - * Otherwise if we already have a request pending - * for execution after the current one, we can - * just wait until the next CS event before - * queuing more. In either case we will force a - * lite-restore preemption event, but if we wait - * we hopefully coalesce several updates into a single - * submission. - */ - if (!list_is_last(&last->sched.link, - &engine->active.requests)) { - /* - * Even if ELSP[1] is occupied and not worthy - * of timeslices, our queue might be. 
- */ - start_timeslice(engine, queue_prio(execlists)); - return; - } - } - } - - while (rb) { /* XXX virtual is always taking precedence */ - struct virtual_engine *ve = - rb_entry(rb, typeof(*ve), nodes[engine->id].rb); - struct i915_request *rq; - - spin_lock(&ve->base.active.lock); - - rq = ve->request; - if (unlikely(!rq)) { /* lost the race to a sibling */ - spin_unlock(&ve->base.active.lock); - rb_erase_cached(rb, &execlists->virtual); - RB_CLEAR_NODE(rb); - rb = rb_first_cached(&execlists->virtual); - continue; - } - - GEM_BUG_ON(rq != ve->request); - GEM_BUG_ON(rq->engine != &ve->base); - GEM_BUG_ON(rq->context != &ve->context); - - if (rq_prio(rq) >= queue_prio(execlists)) { - if (!virtual_matches(ve, rq, engine)) { - spin_unlock(&ve->base.active.lock); - rb = rb_next(rb); - continue; - } - - if (last && !can_merge_rq(last, rq)) { - spin_unlock(&ve->base.active.lock); - start_timeslice(engine, rq_prio(rq)); - return; /* leave this for another sibling */ - } - - ENGINE_TRACE(engine, - "virtual rq=%llx:%lld%s, new engine? %s\n", - rq->fence.context, - rq->fence.seqno, - i915_request_completed(rq) ? "!" : - i915_request_started(rq) ? "*" : - "", - yesno(engine != ve->siblings[0])); - - WRITE_ONCE(ve->request, NULL); - WRITE_ONCE(ve->base.execlists.queue_priority_hint, - INT_MIN); - rb_erase_cached(rb, &execlists->virtual); - RB_CLEAR_NODE(rb); - - GEM_BUG_ON(!(rq->execution_mask & engine->mask)); - WRITE_ONCE(rq->engine, engine); - - if (__i915_request_submit(rq)) { - /* - * Only after we confirm that we will submit - * this request (i.e. it has not already - * completed), do we want to update the context. - * - * This serves two purposes. It avoids - * unnecessary work if we are resubmitting an - * already completed request after timeslicing. - * But more importantly, it prevents us altering - * ve->siblings[] on an idle context, where - * we may be using ve->siblings[] in - * virtual_context_enter / virtual_context_exit. - */ - virtual_xfer_context(ve, engine); - GEM_BUG_ON(ve->siblings[0] != engine); - - submit = true; - last = rq; - } - i915_request_put(rq); + bool inhibit = true; - /* - * Hmm, we have a bunch of virtual engine requests, - * but the first one was already completed (thanks - * preempt-to-busy!). Keep looking at the veng queue - * until we have no more relevant requests (i.e. - * the normal submit queue has higher priority). - */ - if (!submit) { - spin_unlock(&ve->base.active.lock); - rb = rb_first_cached(&execlists->virtual); - continue; - } - } + set_redzone(state, engine); - spin_unlock(&ve->base.active.lock); - break; + if (engine->default_state) { + shmem_read(engine->default_state, 0, + state, engine->context_size); + __set_bit(CONTEXT_VALID_BIT, &ce->flags); + inhibit = false; } - while ((rb = rb_first_cached(&execlists->queue))) { - struct i915_priolist *p = to_priolist(rb); - struct i915_request *rq, *rn; - int i; - - priolist_for_each_request_consume(rq, rn, p, i) { - bool merge = true; - - /* - * Can we combine this request with the current port? - * It has to be the same context/ringbuffer and not - * have any exceptions (e.g. GVT saying never to - * combine contexts). - * - * If we can combine the requests, we can execute both - * by updating the RING_TAIL to point to the end of the - * second request, and so we never need to tell the - * hardware about the first. - */ - if (last && !can_merge_rq(last, rq)) { - /* - * If we are on the second port and cannot - * combine this request with the last, then we - * are done. 
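
[Editor's sketch] Distilling the port-sharing rules from the loop above into a standalone predicate, under the simplifying assumption that a request is summarised by its context id, a sentinel flag and a forced single-port flag (the completed-request shortcut and priority checks are elided):

#include <stdbool.h>
#include <stdio.h>

struct rq {
	int ctx;
	bool sentinel;    /* must be the last request submitted */
	bool single_port; /* e.g. GVT forces single-context submission */
};

/* May we open the second ELSP port after 'last' and put 'next' there? */
static bool can_start_second_port(const struct rq *last,
				  const struct rq *next)
{
	if (last->ctx == next->ctx)
		return false; /* same LRCA may not occupy both ports */
	if (last->sentinel)
		return false; /* a sentinel flushes execution, submit alone */
	if (last->single_port || next->single_port)
		return false;
	return true;
}

int main(void)
{
	struct rq a = { 1, false, false }, b = { 2, false, false };

	printf("%d\n", can_start_second_port(&a, &b)); /* 1 */
	a.sentinel = true;
	printf("%d\n", can_start_second_port(&a, &b)); /* 0 */
	return 0;
}
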
- */ - if (port == last_port) - goto done; - - /* - * We must not populate both ELSP[] with the - * same LRCA, i.e. we must submit 2 different - * contexts if we submit 2 ELSP. - */ - if (last->context == rq->context) - goto done; - - if (i915_request_has_sentinel(last)) - goto done; - - /* - * If GVT overrides us we only ever submit - * port[0], leaving port[1] empty. Note that we - * also have to be careful that we don't queue - * the same context (even though a different - * request) to the second port. - */ - if (ctx_single_port_submission(last->context) || - ctx_single_port_submission(rq->context)) - goto done; - - merge = false; - } - - if (__i915_request_submit(rq)) { - if (!merge) { - *port = execlists_schedule_in(last, port - execlists->pending); - port++; - last = NULL; - } - - GEM_BUG_ON(last && - !can_merge_ctx(last->context, - rq->context)); - GEM_BUG_ON(last && - i915_seqno_passed(last->fence.seqno, - rq->fence.seqno)); - - submit = true; - last = rq; - } - } - - rb_erase_cached(&p->node, &execlists->queue); - i915_priolist_free(p); - } + /* Clear the ppHWSP (inc. per-context counters) */ + memset(state, 0, PAGE_SIZE); -done: /* - * Here be a bit of magic! Or sleight-of-hand, whichever you prefer. - * - * We choose the priority hint such that if we add a request of greater - * priority than this, we kick the submission tasklet to decide on - * the right order of submitting the requests to hardware. We must - * also be prepared to reorder requests as they are in-flight on the - * HW. We derive the priority hint then as the first "hole" in - * the HW submission ports and if there are no available slots, - * the priority of the lowest executing request, i.e. last. - * - * When we do receive a higher priority request ready to run from the - * user, see queue_request(), the priority hint is bumped to that - * request triggering preemption on the next dequeue (or subsequent - * interrupt for secondary ports). + * The second page of the context object contains some registers which + * must be set up prior to the first execution. */ - execlists->queue_priority_hint = queue_prio(execlists); - - if (submit) { - *port = execlists_schedule_in(last, port - execlists->pending); - execlists->switch_priority_hint = - switch_prio(engine, *execlists->pending); - - /* - * Skip if we ended up with exactly the same set of requests, - * e.g. 
trying to timeslice a pair of ordered contexts - */ - if (!memcmp(active, execlists->pending, - (port - execlists->pending + 1) * sizeof(*port))) { - do - execlists_schedule_out(fetch_and_zero(port)); - while (port-- != execlists->pending); - - goto skip_submit; - } - clear_ports(port + 1, last_port - port); - - WRITE_ONCE(execlists->yield, -1); - set_preempt_timeout(engine, *active); - execlists_submit_ports(engine); - } else { - start_timeslice(engine, execlists->queue_priority_hint); -skip_submit: - ring_set_paused(engine, 0); - } + __lrc_init_regs(state + LRC_STATE_OFFSET, ce, engine, inhibit); } -static void -cancel_port_requests(struct intel_engine_execlists * const execlists) +static struct i915_vma * +__lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine) { - struct i915_request * const *port; - - for (port = execlists->pending; *port; port++) - execlists_schedule_out(*port); - clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending)); - - /* Mark the end of active before we overwrite *active */ - for (port = xchg(&execlists->active, execlists->pending); *port; port++) - execlists_schedule_out(*port); - clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight)); - - smp_wmb(); /* complete the seqlock for execlists_active() */ - WRITE_ONCE(execlists->active, execlists->inflight); -} + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u32 context_size; -static inline void -invalidate_csb_entries(const u64 *first, const u64 *last) -{ - clflush((void *)first); - clflush((void *)last); -} + context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); -/* - * Starting with Gen12, the status has a new format: - * - * bit 0: switched to new queue - * bit 1: reserved - * bit 2: semaphore wait mode (poll or signal), only valid when - * switch detail is set to "wait on semaphore" - * bits 3-5: engine class - * bits 6-11: engine instance - * bits 12-14: reserved - * bits 15-25: sw context id of the lrc the GT switched to - * bits 26-31: sw counter of the lrc the GT switched to - * bits 32-35: context switch detail - * - 0: ctx complete - * - 1: wait on sync flip - * - 2: wait on vblank - * - 3: wait on scanline - * - 4: wait on semaphore - * - 5: context preempted (not on SEMAPHORE_WAIT or - * WAIT_FOR_EVENT) - * bit 36: reserved - * bits 37-43: wait detail (for switch detail 1 to 4) - * bits 44-46: reserved - * bits 47-57: sw context id of the lrc the GT switched away from - * bits 58-63: sw counter of the lrc the GT switched away from - */ -static inline bool gen12_csb_parse(const u64 csb) -{ - bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_32_bits(csb)); - bool new_queue = - lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE; + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + context_size += I915_GTT_PAGE_SIZE; /* for redzone */ - /* - * The context switch detail is not guaranteed to be 5 when a preemption - * occurs, so we can't just check for that. The check below works for - * all the cases we care about, including preemptions of WAIT - * instructions and lite-restore. Preempt-to-idle via the CTRL register - * would require some extra handling, but we don't support that. 
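
[Editor's sketch] Given the Gen12 CSB bit layout documented above, a standalone decoder is a straightforward exercise in bitfield extraction; the example value below is arbitrary and purely illustrative, only the field positions come from the comment.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t bits(uint64_t v, unsigned int lo, unsigned int hi)
{
	return (v >> lo) & ((1ull << (hi - lo + 1)) - 1);
}

int main(void)
{
	uint64_t csb = 0x0000001080214001ull;    /* fabricated event */

	bool new_queue = bits(csb, 0, 0);        /* switched to new queue */
	unsigned int engine_class = bits(csb, 3, 5);
	unsigned int instance = bits(csb, 6, 11);
	unsigned int to_ctx = bits(csb, 15, 25); /* sw ctx id switched to */
	unsigned int to_count = bits(csb, 26, 31);
	unsigned int detail = bits(csb, 32, 35); /* context switch detail */
	unsigned int away_ctx = bits(csb, 47, 57);
	unsigned int away_count = bits(csb, 58, 63);

	printf("detail=%u engine=%u.%u to=%u:%u away=%u:%u new_queue=%d\n",
	       detail, engine_class, instance, to_ctx, to_count,
	       away_ctx, away_count, new_queue);
	return 0;
}
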
- */ - if (!ctx_away_valid || new_queue) { - GEM_BUG_ON(!GEN12_CSB_CTX_VALID(lower_32_bits(csb))); - return true; + if (INTEL_GEN(engine->i915) == 12) { + ce->wa_bb_page = context_size / PAGE_SIZE; + context_size += PAGE_SIZE; } - /* - * switch detail = 5 is covered by the case above and we do not expect a - * context switch on an unsuccessful wait instruction since we always - * use polling mode. - */ - GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb))); - return false; -} - -static inline bool gen8_csb_parse(const u64 csb) -{ - return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); -} - -static noinline u64 -wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb) -{ - u64 entry; - - /* - * Reading from the HWSP has one particular advantage: we can detect - * a stale entry. Since the write into HWSP is broken, we have no reason - * to trust the HW at all, the mmio entry may equally be unordered, so - * we prefer the path that is self-checking and as a last resort, - * return the mmio value. - * - * tgl,dg1:HSDES#22011327657 - */ - preempt_disable(); - if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) { - int idx = csb - engine->execlists.csb_status; - int status; - - status = GEN8_EXECLISTS_STATUS_BUF; - if (idx >= 6) { - status = GEN11_EXECLISTS_STATUS_BUF2; - idx -= 6; - } - status += sizeof(u64) * idx; + obj = i915_gem_object_create_shmem(engine->i915, context_size); + if (IS_ERR(obj)) + return ERR_CAST(obj); - entry = intel_uncore_read64(engine->uncore, - _MMIO(engine->mmio_base + status)); + vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; } - preempt_enable(); - return entry; + return vma; } -static inline u64 -csb_read(const struct intel_engine_cs *engine, u64 * const csb) +static struct intel_timeline * +pinned_timeline(struct intel_context *ce, struct intel_engine_cs *engine) { - u64 entry = READ_ONCE(*csb); - - /* - * Unfortunately, the GPU does not always serialise its write - * of the CSB entries before its write of the CSB pointer, at least - * from the perspective of the CPU, using what is known as a Global - * Observation Point. We may read a new CSB tail pointer, but then - * read the stale CSB entries, causing us to misinterpret the - * context-switch events, and eventually declare the GPU hung. - * - * icl:HSDES#1806554093 - * tgl:HSDES#22011248461 - */ - if (unlikely(entry == -1)) - entry = wa_csb_read(engine, csb); - - /* Consume this entry so that we can spot its future reuse. */ - WRITE_ONCE(*csb, -1); - - /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */ - return entry; -} - -static void process_csb(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - u64 * const buf = execlists->csb_status; - const u8 num_entries = execlists->csb_size; - u8 head, tail; - - /* - * As we modify our execlists state tracking we require exclusive - * access. Either we are inside the tasklet, or the tasklet is disabled - * and we assume that is only inside the reset paths and so serialised. - */ - GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) && - !reset_in_progress(execlists)); - GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine)); - - /* - * Note that csb_write, csb_status may be either in HWSP or mmio. - * When reading from the csb_write mmio register, we have to be - * careful to only use the GEN8_CSB_WRITE_PTR portion, which is - * the low 4bits. 
As it happens we know the next 4bits are always - * zero and so we can simply mask off the low u8 of the register - * and treat it identically to reading from the HWSP (without having - * to use explicit shifting and masking, and probably bifurcating - * the code to handle the legacy mmio read). - */ - head = execlists->csb_head; - tail = READ_ONCE(*execlists->csb_write); - if (unlikely(head == tail)) - return; - - /* - * We will consume all events from HW, or at least pretend to. - * - * The sequence of events from the HW is deterministic, and derived - * from our writes to the ELSP, with a smidgen of variability for - * the arrival of the asynchronous requests wrt to the inflight - * execution. If the HW sends an event that does not correspond with - * the one we are expecting, we have to abandon all hope as we lose - * all tracking of what the engine is actually executing. We will - * only detect we are out of sequence with the HW when we get an - * 'impossible' event because we have already drained our own - * preemption/promotion queue. If this occurs, we know that we likely - * lost track of execution earlier and must unwind and restart, the - * simplest way is to stop processing the event queue and force the - * engine to reset. - */ - execlists->csb_head = tail; - ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail); - - /* - * Hopefully paired with a wmb() in HW! - * - * We must complete the read of the write pointer before any reads - * from the CSB, so that we do not see stale values. Without an rmb - * (lfence) the HW may speculatively perform the CSB[] reads *before* - * we perform the READ_ONCE(*csb_write). - */ - rmb(); - do { - bool promote; - u64 csb; - - if (++head == num_entries) - head = 0; - - /* - * We are flying near dragons again. - * - * We hold a reference to the request in execlist_port[] - * but no more than that. We are operating in softirq - * context and so cannot hold any mutex or sleep. That - * prevents us stopping the requests we are processing - * in port[] from being retired simultaneously (the - * breadcrumb will be complete before we see the - * context-switch). As we only hold the reference to the - * request, any pointer chasing underneath the request - * is subject to a potential use-after-free. Thus we - * store all of the bookkeeping within port[] as - * required, and avoid using unguarded pointers beneath - * request itself. The same applies to the atomic - * status notifier.
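
[Editor's sketch] The head/tail consumption that process_csb() performs on the circular status buffer can be modelled in isolation as below. The entry values are fabricated, and the 12-entry size is an assumption consistent with the GEN11_EXECLISTS_STATUS_BUF2 split shown earlier (Gen8 uses 6).

#include <stdint.h>
#include <stdio.h>

#define NUM_ENTRIES 12 /* assumed Gen11+ CSB size; Gen8 would be 6 */

int main(void)
{
	uint64_t buf[NUM_ENTRIES] = {
		[3] = 0x8002, [4] = 0x0001, [5] = 0x8002, /* fabricated */
	};
	unsigned int head = 2; /* index of the last entry we consumed */
	unsigned int tail = 5; /* snapshot of the HW write pointer */

	/* Walk (head, tail] with wraparound, as process_csb() does. */
	while (head != tail) {
		if (++head == NUM_ENTRIES)
			head = 0;
		printf("csb[%u] = 0x%llx\n", head,
		       (unsigned long long)buf[head]);
	}
	return 0;
}

In the driver the snapshot of the write pointer is taken once, an rmb() separates it from the entry reads, and consumed entries are poisoned so stale reuse can be detected; only the wraparound walk is modelled here.
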
- */ - - csb = csb_read(engine, buf + head); - ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n", - head, upper_32_bits(csb), lower_32_bits(csb)); - - if (INTEL_GEN(engine->i915) >= 12) - promote = gen12_csb_parse(csb); - else - promote = gen8_csb_parse(csb); - if (promote) { - struct i915_request * const *old = execlists->active; - - if (GEM_WARN_ON(!*execlists->pending)) { - execlists->error_interrupt |= ERROR_CSB; - break; - } - - ring_set_paused(engine, 0); - - /* Point active to the new ELSP; prevent overwriting */ - WRITE_ONCE(execlists->active, execlists->pending); - smp_wmb(); /* notify execlists_active() */ - - /* cancel old inflight, prepare for switch */ - trace_ports(execlists, "preempted", old); - while (*old) - execlists_schedule_out(*old++); - - /* switch pending to inflight */ - GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); - copy_ports(execlists->inflight, - execlists->pending, - execlists_num_ports(execlists)); - smp_wmb(); /* complete the seqlock */ - WRITE_ONCE(execlists->active, execlists->inflight); - - /* XXX Magic delay for tgl */ - ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); - - WRITE_ONCE(execlists->pending[0], NULL); - } else { - if (GEM_WARN_ON(!*execlists->active)) { - execlists->error_interrupt |= ERROR_CSB; - break; - } - - /* port0 completed, advanced to port1 */ - trace_ports(execlists, "completed", execlists->active); - - /* - * We rely on the hardware being strongly - * ordered, that the breadcrumb write is - * coherent (visible from the CPU) before the - * user interrupt is processed. One might assume - * that the breadcrumb write being before the - * user interrupt and the CS event for the context - * switch would therefore be before the CS event - * itself... - */ - if (GEM_SHOW_DEBUG() && - !i915_request_completed(*execlists->active)) { - struct i915_request *rq = *execlists->active; - const u32 *regs __maybe_unused = - rq->context->lrc_reg_state; - - ENGINE_TRACE(engine, - "context completed before request!\n"); - ENGINE_TRACE(engine, - "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n", - ENGINE_READ(engine, RING_START), - ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR, - ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR, - ENGINE_READ(engine, RING_CTL), - ENGINE_READ(engine, RING_MI_MODE)); - ENGINE_TRACE(engine, - "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ", - i915_ggtt_offset(rq->ring->vma), - rq->head, rq->tail, - rq->fence.context, - lower_32_bits(rq->fence.seqno), - hwsp_seqno(rq)); - ENGINE_TRACE(engine, - "ctx:{start:%08x, head:%04x, tail:%04x}, ", - regs[CTX_RING_START], - regs[CTX_RING_HEAD], - regs[CTX_RING_TAIL]); - } - - execlists_schedule_out(*execlists->active++); - - GEM_BUG_ON(execlists->active - execlists->inflight > - execlists_num_ports(execlists)); - } - } while (head != tail); - - set_timeslice(engine); - - /* - * Gen11 has proven to fail wrt global observation point between - * entry and tail update, failing on the ordering and thus - * we see an old entry in the context status buffer. - * - * Forcibly evict out entries for the next gpu csb update, - * to increase the odds that we get a fresh entries with non - * working hardware. The cost for doing so comes out mostly with - * the wash as hardware, working or not, will need to do the - * invalidation before. 
- */ - invalidate_csb_entries(&buf[0], &buf[num_entries - 1]); -} + struct intel_timeline *tl = fetch_and_zero(&ce->timeline); -static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) -{ - lockdep_assert_held(&engine->active.lock); - if (!READ_ONCE(engine->execlists.pending[0])) { - rcu_read_lock(); /* protect peeking at execlists->active */ - execlists_dequeue(engine); - rcu_read_unlock(); - } + return intel_timeline_create_from_engine(engine, page_unmask_bits(tl)); } -static void __execlists_hold(struct i915_request *rq) +int lrc_alloc(struct intel_context *ce, struct intel_engine_cs *engine) { - LIST_HEAD(list); - - do { - struct i915_dependency *p; - - if (i915_request_is_active(rq)) - __i915_request_unsubmit(rq); - - clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); - list_move_tail(&rq->sched.link, &rq->engine->active.hold); - i915_request_set_hold(rq); - RQ_TRACE(rq, "on hold\n"); - - for_each_waiter(p, rq) { - struct i915_request *w = - container_of(p->waiter, typeof(*w), sched); - - /* Leave semaphores spinning on the other engines */ - if (w->engine != rq->engine) - continue; - - if (!i915_request_is_ready(w)) - continue; - - if (i915_request_completed(w)) - continue; - - if (i915_request_on_hold(w)) - continue; - - list_move_tail(&w->sched.link, &list); - } + struct intel_ring *ring; + struct i915_vma *vma; + int err; - rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); - } while (rq); -} + GEM_BUG_ON(ce->state); -static bool execlists_hold(struct intel_engine_cs *engine, - struct i915_request *rq) -{ - spin_lock_irq(&engine->active.lock); + vma = __lrc_alloc_state(ce, engine); + if (IS_ERR(vma)) + return PTR_ERR(vma); - if (i915_request_completed(rq)) { /* too late! */ - rq = NULL; - goto unlock; + ring = intel_engine_create_ring(engine, (unsigned long)ce->ring); + if (IS_ERR(ring)) { + err = PTR_ERR(ring); + goto err_vma; } - if (rq->engine != engine) { /* preempted virtual engine */ - struct virtual_engine *ve = to_virtual_engine(rq->engine); - - /* - * intel_context_inflight() is only protected by virtue - * of process_csb() being called only by the tasklet (or - * directly from inside reset while the tasklet is suspended). - * Assert that neither of those are allowed to run while we - * poke at the request queues. - */ - GEM_BUG_ON(!reset_in_progress(&engine->execlists)); + if (!page_mask_bits(ce->timeline)) { + struct intel_timeline *tl; /* - * An unsubmitted request along a virtual engine will - * remain on the active (this) engine until we are able - * to process the context switch away (and so mark the - * context as no longer in flight). That cannot have happened - * yet, otherwise we would not be hanging! + * Use the static global HWSP for the kernel context, and + * a dynamically allocated cacheline for everyone else. */ - spin_lock(&ve->base.active.lock); - GEM_BUG_ON(intel_context_inflight(rq->context) != engine); - GEM_BUG_ON(ve->request != rq); - ve->request = NULL; - spin_unlock(&ve->base.active.lock); - i915_request_put(rq); - - rq->engine = engine; - } - - /* - * Transfer this request onto the hold queue to prevent it - * being resumbitted to HW (and potentially completed) before we have - * released it. Since we may have already submitted following - * requests, we need to remove those as well. 
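
[Editor's sketch] The companion check on the submission side, hold_request() just below, asks whether a same-engine ancestor is already on hold, in which case the new request must go on hold too rather than bypass it. A transitive userspace rendering of that rule (the kernel version only needs one level, since holds are applied recursively when they happen):

#include <stdbool.h>
#include <stdio.h>

#define MAX_SIGNALERS 2

struct rq {
	const char *name;
	int engine;
	bool on_hold;
	const struct rq *signalers[MAX_SIGNALERS]; /* requests we wait on */
};

/* If any same-engine ancestor is on hold, we must also be held,
 * or we would execute ahead of it once it is released. */
static bool must_hold(const struct rq *rq)
{
	for (int i = 0; i < MAX_SIGNALERS && rq->signalers[i]; i++) {
		const struct rq *s = rq->signalers[i];

		if (s->engine != rq->engine)
			continue; /* semaphores keep spinning elsewhere */
		if (s->on_hold || must_hold(s))
			return true;
	}
	return false;
}

int main(void)
{
	struct rq a = { "A", 0, true,  { 0 } };
	struct rq b = { "B", 0, false, { &a } };
	struct rq c = { "C", 1, false, { &b } }; /* different engine */

	printf("hold B? %d\n", must_hold(&b)); /* 1: ancestor A is held */
	printf("hold C? %d\n", must_hold(&c)); /* 0: cross-engine waiter */
	return 0;
}
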
- */ - GEM_BUG_ON(i915_request_on_hold(rq)); - GEM_BUG_ON(rq->engine != engine); - __execlists_hold(rq); - GEM_BUG_ON(list_empty(&engine->active.hold)); - -unlock: - spin_unlock_irq(&engine->active.lock); - return rq; -} - -static bool hold_request(const struct i915_request *rq) -{ - struct i915_dependency *p; - bool result = false; - - /* - * If one of our ancestors is on hold, we must also be on hold, - * otherwise we will bypass it and execute before it. - */ - rcu_read_lock(); - for_each_signaler(p, rq) { - const struct i915_request *s = - container_of(p->signaler, typeof(*s), sched); - - if (s->engine != rq->engine) - continue; - - result = i915_request_on_hold(s); - if (result) - break; - } - rcu_read_unlock(); - - return result; -} - -static void __execlists_unhold(struct i915_request *rq) -{ - LIST_HEAD(list); - - do { - struct i915_dependency *p; - - RQ_TRACE(rq, "hold release\n"); - - GEM_BUG_ON(!i915_request_on_hold(rq)); - GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); - - i915_request_clear_hold(rq); - list_move_tail(&rq->sched.link, - i915_sched_lookup_priolist(rq->engine, - rq_prio(rq))); - set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); - - /* Also release any children on this engine that are ready */ - for_each_waiter(p, rq) { - struct i915_request *w = - container_of(p->waiter, typeof(*w), sched); - - /* Propagate any change in error status */ - if (rq->fence.error) - i915_request_set_error_once(w, rq->fence.error); - - if (w->engine != rq->engine) - continue; - - if (!i915_request_on_hold(w)) - continue; - - /* Check that no other parents are also on hold */ - if (hold_request(w)) - continue; - - list_move_tail(&w->sched.link, &list); - } - - rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); - } while (rq); -} - -static void execlists_unhold(struct intel_engine_cs *engine, - struct i915_request *rq) -{ - spin_lock_irq(&engine->active.lock); - - /* - * Move this request back to the priority queue, and all of its - * children and grandchildren that were suspended along with it. - */ - __execlists_unhold(rq); - - if (rq_prio(rq) > engine->execlists.queue_priority_hint) { - engine->execlists.queue_priority_hint = rq_prio(rq); - tasklet_hi_schedule(&engine->execlists.tasklet); - } - - spin_unlock_irq(&engine->active.lock); -} - -struct execlists_capture { - struct work_struct work; - struct i915_request *rq; - struct i915_gpu_coredump *error; -}; - -static void execlists_capture_work(struct work_struct *work) -{ - struct execlists_capture *cap = container_of(work, typeof(*cap), work); - const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; - struct intel_engine_cs *engine = cap->rq->engine; - struct intel_gt_coredump *gt = cap->error->gt; - struct intel_engine_capture_vma *vma; - - /* Compress all the objects attached to the request, slow! 
*/ - vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp); - if (vma) { - struct i915_vma_compress *compress = - i915_vma_capture_prepare(gt); - - intel_engine_coredump_add_vma(gt->engine, vma, compress); - i915_vma_capture_finish(gt, compress); - } - - gt->simulated = gt->engine->simulated; - cap->error->simulated = gt->simulated; - - /* Publish the error state, and announce it to the world */ - i915_error_state_store(cap->error); - i915_gpu_coredump_put(cap->error); - - /* Return this request and all that depend upon it for signaling */ - execlists_unhold(engine, cap->rq); - i915_request_put(cap->rq); - - kfree(cap); -} - -static struct execlists_capture *capture_regs(struct intel_engine_cs *engine) -{ - const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN; - struct execlists_capture *cap; - - cap = kmalloc(sizeof(*cap), gfp); - if (!cap) - return NULL; - - cap->error = i915_gpu_coredump_alloc(engine->i915, gfp); - if (!cap->error) - goto err_cap; - - cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp); - if (!cap->error->gt) - goto err_gpu; - - cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp); - if (!cap->error->gt->engine) - goto err_gt; - - cap->error->gt->engine->hung = true; - - return cap; - -err_gt: - kfree(cap->error->gt); -err_gpu: - kfree(cap->error); -err_cap: - kfree(cap); - return NULL; -} - -static struct i915_request * -active_context(struct intel_engine_cs *engine, u32 ccid) -{ - const struct intel_engine_execlists * const el = &engine->execlists; - struct i915_request * const *port, *rq; - - /* - * Use the most recent result from process_csb(), but just in case - * we trigger an error (via interrupt) before the first CS event has - * been written, peek at the next submission. - */ - - for (port = el->active; (rq = *port); port++) { - if (rq->context->lrc.ccid == ccid) { - ENGINE_TRACE(engine, - "ccid found at active:%zd\n", - port - el->active); - return rq; - } - } - - for (port = el->pending; (rq = *port); port++) { - if (rq->context->lrc.ccid == ccid) { - ENGINE_TRACE(engine, - "ccid found at pending:%zd\n", - port - el->pending); - return rq; + if (unlikely(ce->timeline)) + tl = pinned_timeline(ce, engine); + else + tl = intel_timeline_create(engine->gt); + if (IS_ERR(tl)) { + err = PTR_ERR(tl); + goto err_ring; } - } - - ENGINE_TRACE(engine, "ccid:%x not found\n", ccid); - return NULL; -} -static u32 active_ccid(struct intel_engine_cs *engine) -{ - return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI); -} - -static void execlists_capture(struct intel_engine_cs *engine) -{ - struct execlists_capture *cap; - - if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)) - return; - - /* - * We need to _quickly_ capture the engine state before we reset. - * We are inside an atomic section (softirq) here and we are delaying - * the forced preemption event. - */ - cap = capture_regs(engine); - if (!cap) - return; - - spin_lock_irq(&engine->active.lock); - cap->rq = active_context(engine, active_ccid(engine)); - if (cap->rq) { - cap->rq = active_request(cap->rq->context->timeline, cap->rq); - cap->rq = i915_request_get_rcu(cap->rq); + ce->timeline = tl; } - spin_unlock_irq(&engine->active.lock); - if (!cap->rq) - goto err_free; - - /* - * Remove the request from the execlists queue, and take ownership - * of the request. We pass it to our worker who will _slowly_ compress - * all the pages the _user_ requested for debugging their batch, after - * which we return it to the queue for signaling. 
- * - * By removing them from the execlists queue, we also remove the - * requests from being processed by __unwind_incomplete_requests() - * during the intel_engine_reset(), and so they will *not* be replayed - * afterwards. - * - * Note that because we have not yet reset the engine at this point, - * it is possible for the request that we have identified as being - * guilty, did in fact complete and we will then hit an arbitration - * point allowing the outstanding preemption to succeed. The likelihood - * of that is very low (as capturing of the engine registers should be - * fast enough to run inside an irq-off atomic section!), so we will - * simply hold that request accountable for being non-preemptible - * long enough to force the reset. - */ - if (!execlists_hold(engine, cap->rq)) - goto err_rq; - - INIT_WORK(&cap->work, execlists_capture_work); - schedule_work(&cap->work); - return; - -err_rq: - i915_request_put(cap->rq); -err_free: - i915_gpu_coredump_put(cap->error); - kfree(cap); -} -static void execlists_reset(struct intel_engine_cs *engine, const char *msg) -{ - const unsigned int bit = I915_RESET_ENGINE + engine->id; - unsigned long *lock = &engine->gt->reset.flags; - - if (!intel_has_reset_engine(engine->gt)) - return; - - if (test_and_set_bit(bit, lock)) - return; - - ENGINE_TRACE(engine, "reset for %s\n", msg); - - /* Mark this tasklet as disabled to avoid waiting for it to complete */ - tasklet_disable_nosync(&engine->execlists.tasklet); + ce->ring = ring; + ce->state = vma; - ring_set_paused(engine, 1); /* Freeze the current request in place */ - execlists_capture(engine); - intel_engine_reset(engine, msg); + return 0; - tasklet_enable(&engine->execlists.tasklet); - clear_and_wake_up_bit(bit, lock); +err_ring: + intel_ring_put(ring); +err_vma: + i915_vma_put(vma); + return err; } -static bool preempt_timeout(const struct intel_engine_cs *const engine) +void lrc_reset(struct intel_context *ce) { - const struct timer_list *t = &engine->execlists.preempt; - - if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) - return false; + GEM_BUG_ON(!intel_context_is_pinned(ce)); - if (!timer_expired(t)) - return false; + intel_ring_reset(ce->ring, ce->ring->emit); - return READ_ONCE(engine->execlists.pending[0]); + /* Scrub away the garbage */ + lrc_init_regs(ce, ce->engine, true); + ce->lrc.lrca = lrc_update_regs(ce, ce->engine, ce->ring->tail); } -/* - * Check the unread Context Status Buffers and manage the submission of new - * contexts to the ELSP accordingly. - */ -static void execlists_submission_tasklet(unsigned long data) +int +lrc_pre_pin(struct intel_context *ce, + struct intel_engine_cs *engine, + struct i915_gem_ww_ctx *ww, + void **vaddr) { - struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; - bool timeout = preempt_timeout(engine); - - process_csb(engine); - - if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) { - const char *msg; - - /* Generate the error message in priority wrt to the user! 
*/ - if (engine->execlists.error_interrupt & GENMASK(15, 0)) - msg = "CS error"; /* thrown by a user payload */ - else if (engine->execlists.error_interrupt & ERROR_CSB) - msg = "invalid CSB event"; - else - msg = "internal error"; - - engine->execlists.error_interrupt = 0; - execlists_reset(engine, msg); - } - - if (!READ_ONCE(engine->execlists.pending[0]) || timeout) { - unsigned long flags; + GEM_BUG_ON(!ce->state); + GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); - spin_lock_irqsave(&engine->active.lock, flags); - __execlists_submission_tasklet(engine); - spin_unlock_irqrestore(&engine->active.lock, flags); + *vaddr = i915_gem_object_pin_map(ce->state->obj, + i915_coherent_map_type(ce->engine->i915) | + I915_MAP_OVERRIDE); - /* Recheck after serialising with direct-submission */ - if (unlikely(timeout && preempt_timeout(engine))) - execlists_reset(engine, "preemption time out"); - } + return PTR_ERR_OR_ZERO(*vaddr); } -static void __execlists_kick(struct intel_engine_execlists *execlists) +int +lrc_pin(struct intel_context *ce, + struct intel_engine_cs *engine, + void *vaddr) { - /* Kick the tasklet for some interrupt coalescing and reset handling */ - tasklet_hi_schedule(&execlists->tasklet); -} - -#define execlists_kick(t, member) \ - __execlists_kick(container_of(t, struct intel_engine_execlists, member)) + ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET; -static void execlists_timeslice(struct timer_list *timer) -{ - execlists_kick(timer, timer); -} + if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags)) + lrc_init_state(ce, engine, vaddr); -static void execlists_preempt(struct timer_list *timer) -{ - execlists_kick(timer, preempt); + ce->lrc.lrca = lrc_update_regs(ce, engine, ce->ring->tail); + return 0; } -static void queue_request(struct intel_engine_cs *engine, - struct i915_request *rq) +void lrc_unpin(struct intel_context *ce) { - GEM_BUG_ON(!list_empty(&rq->sched.link)); - list_add_tail(&rq->sched.link, - i915_sched_lookup_priolist(engine, rq_prio(rq))); - set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); + check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET, + ce->engine); } -static void __submit_queue_imm(struct intel_engine_cs *engine) +void lrc_post_unpin(struct intel_context *ce) { - struct intel_engine_execlists * const execlists = &engine->execlists; - - if (reset_in_progress(execlists)) - return; /* defer until we restart the engine following reset */ - - __execlists_submission_tasklet(engine); + i915_gem_object_unpin_map(ce->state->obj); } -static void submit_queue(struct intel_engine_cs *engine, - const struct i915_request *rq) +void lrc_fini(struct intel_context *ce) { - struct intel_engine_execlists *execlists = &engine->execlists; - - if (rq_prio(rq) <= execlists->queue_priority_hint) + if (!ce->state) return; - execlists->queue_priority_hint = rq_prio(rq); - __submit_queue_imm(engine); -} - -static bool ancestor_on_hold(const struct intel_engine_cs *engine, - const struct i915_request *rq) -{ - GEM_BUG_ON(i915_request_on_hold(rq)); - return !list_empty(&engine->active.hold) && hold_request(rq); -} - -static void flush_csb(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists *el = &engine->execlists; - - if (READ_ONCE(el->pending[0]) && tasklet_trylock(&el->tasklet)) { - if (!reset_in_progress(el)) - process_csb(engine); - tasklet_unlock(&el->tasklet); - } -} - -static void execlists_submit_request(struct i915_request *request) -{ - struct intel_engine_cs *engine = request->engine; - unsigned long flags; - - /* Hopefully we clear 
execlists->pending[] to let us through */ - flush_csb(engine); - - /* Will be called from irq-context when using foreign fences. */ - spin_lock_irqsave(&engine->active.lock, flags); - - if (unlikely(ancestor_on_hold(engine, request))) { - RQ_TRACE(request, "ancestor on hold\n"); - list_add_tail(&request->sched.link, &engine->active.hold); - i915_request_set_hold(request); - } else { - queue_request(engine, request); - - GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); - GEM_BUG_ON(list_empty(&request->sched.link)); - - submit_queue(engine, request); - } - - spin_unlock_irqrestore(&engine->active.lock, flags); -} - -static void __execlists_context_fini(struct intel_context *ce) -{ - intel_ring_put(ce->ring); - i915_vma_put(ce->state); + intel_ring_put(fetch_and_zero(&ce->ring)); + i915_vma_put(fetch_and_zero(&ce->state)); } -static void execlists_context_destroy(struct kref *kref) +void lrc_destroy(struct kref *kref) { struct intel_context *ce = container_of(kref, typeof(*ce), ref); GEM_BUG_ON(!i915_active_is_idle(&ce->active)); GEM_BUG_ON(intel_context_is_pinned(ce)); - if (ce->state) - __execlists_context_fini(ce); + lrc_fini(ce); intel_context_fini(ce); intel_context_free(ce); } -static void -set_redzone(void *vaddr, const struct intel_engine_cs *engine) -{ - if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - return; - - vaddr += engine->context_size; - - memset(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE); -} - -static void -check_redzone(const void *vaddr, const struct intel_engine_cs *engine) -{ - if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - return; - - vaddr += engine->context_size; - - if (memchr_inv(vaddr, CONTEXT_REDZONE, I915_GTT_PAGE_SIZE)) - drm_err_once(&engine->i915->drm, - "%s context redzone overwritten!\n", - engine->name); -} - -static void execlists_context_unpin(struct intel_context *ce) -{ - check_redzone((void *)ce->lrc_reg_state - LRC_STATE_OFFSET, - ce->engine); -} - -static void execlists_context_post_unpin(struct intel_context *ce) -{ - i915_gem_object_unpin_map(ce->state->obj); -} - static u32 * gen12_emit_timestamp_wa(const struct intel_context *ce, u32 *cs) { @@ -3490,16 +1066,57 @@ setup_indirect_ctx_bb(const struct intel_context *ce, while ((unsigned long)cs % CACHELINE_BYTES) *cs++ = MI_NOOP; - lrc_ring_setup_indirect_ctx(ce->lrc_reg_state, engine, - i915_ggtt_offset(ce->state) + - context_wa_bb_offset(ce), - (cs - start) * sizeof(*cs)); + lrc_setup_indirect_ctx(ce->lrc_reg_state, engine, + i915_ggtt_offset(ce->state) + + context_wa_bb_offset(ce), + (cs - start) * sizeof(*cs)); } -static void -__execlists_update_reg_state(const struct intel_context *ce, - const struct intel_engine_cs *engine, - u32 head) +/* + * The context descriptor encodes various attributes of a context, + * including its GTT address and some flags. Because it's fairly + * expensive to calculate, we'll just do it once and cache the result, + * which remains valid until the context is unpinned. 
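The set_redzone()/check_redzone() pair above is a classic poison-page overflow check around the context image. A self-contained userspace version, assuming a 4 KiB redzone and an arbitrary poison byte where the driver uses I915_GTT_PAGE_SIZE and CONTEXT_REDZONE under CONFIG_DRM_I915_DEBUG_GEM:

#include <stddef.h>
#include <string.h>

#define REDZONE_BYTE    0x5a            /* arbitrary poison value */
#define REDZONE_SIZE    4096

/* Poison the page following the context image at allocation time... */
static void set_redzone(void *state, size_t context_size)
{
        memset((char *)state + context_size, REDZONE_BYTE, REDZONE_SIZE);
}

/* ...and verify on unpin that nothing wrote past the end; the driver
 * uses memchr_inv() for the scan. */
static int check_redzone(const void *state, size_t context_size)
{
        const unsigned char *p = (const unsigned char *)state + context_size;
        size_t i;

        for (i = 0; i < REDZONE_SIZE; i++)
                if (p[i] != REDZONE_BYTE)
                        return -1;      /* overflow detected */
        return 0;
}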
+ * + * This is what a descriptor looks like, from LSB to MSB:: + * + * bits 0-11: flags, GEN8_CTX_* (cached in ctx->desc_template) + * bits 12-31: LRCA, GTT address of (the HWSP of) this context + * bits 32-52: ctx ID, a globally unique tag (highest bit used by GuC) + * bits 53-54: mbz, reserved for use by hardware + * bits 55-63: group ID, currently unused and set to 0 + * + * Starting from Gen11, the upper dword of the descriptor has a new format: + * + * bits 32-36: reserved + * bits 37-47: SW context ID + * bits 48:53: engine instance + * bit 54: mbz, reserved for use by hardware + * bits 55-60: SW counter + * bits 61-63: engine class + * + * engine info, SW context ID and SW counter need to form a unique number + * (Context ID) per lrc. + */ +static inline u32 lrc_descriptor(const struct intel_context *ce) +{ + u32 desc; + + desc = INTEL_LEGACY_32B_CONTEXT; + if (i915_vm_is_4lvl(ce->vm)) + desc = INTEL_LEGACY_64B_CONTEXT; + desc <<= GEN8_CTX_ADDRESSING_MODE_SHIFT; + + desc |= GEN8_CTX_VALID | GEN8_CTX_PRIVILEGE; + if (IS_GEN(ce->vm->i915, 8)) + desc |= GEN8_CTX_L3LLC_COHERENT; + + return i915_ggtt_offset(ce->state) | desc; +} + +u32 lrc_update_regs(const struct intel_context *ce, + const struct intel_engine_cs *engine, + u32 head) { struct intel_ring *ring = ce->ring; u32 *regs = ce->lrc_reg_state; @@ -3531,205 +1148,54 @@ __execlists_update_reg_state(const struct intel_context *ce, GEM_BUG_ON(engine->wa_ctx.indirect_ctx.size); setup_indirect_ctx_bb(ce, engine, fn); } -} - -static int -execlists_context_pre_pin(struct intel_context *ce, - struct i915_gem_ww_ctx *ww, void **vaddr) -{ - GEM_BUG_ON(!ce->state); - GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); - - *vaddr = i915_gem_object_pin_map(ce->state->obj, - i915_coherent_map_type(ce->engine->i915) | - I915_MAP_OVERRIDE); - return PTR_ERR_OR_ZERO(*vaddr); -} - -static int -__execlists_context_pin(struct intel_context *ce, - struct intel_engine_cs *engine, - void *vaddr) -{ - ce->lrc.lrca = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE; - ce->lrc_reg_state = vaddr + LRC_STATE_OFFSET; - __execlists_update_reg_state(ce, engine, ce->ring->tail); - - return 0; -} - -static int execlists_context_pin(struct intel_context *ce, void *vaddr) -{ - return __execlists_context_pin(ce, ce->engine, vaddr); + return lrc_descriptor(ce) | CTX_DESC_FORCE_RESTORE; } -static int execlists_context_alloc(struct intel_context *ce) +void lrc_update_offsets(struct intel_context *ce, + struct intel_engine_cs *engine) { - return __execlists_context_alloc(ce, ce->engine); + set_offsets(ce->lrc_reg_state, reg_offsets(engine), engine, false); } -static void execlists_context_reset(struct intel_context *ce) +void lrc_check_regs(const struct intel_context *ce, + const struct intel_engine_cs *engine, + const char *when) { - CE_TRACE(ce, "reset\n"); - GEM_BUG_ON(!intel_context_is_pinned(ce)); - - intel_ring_reset(ce->ring, ce->ring->emit); - - /* Scrub away the garbage */ - execlists_init_reg_state(ce->lrc_reg_state, - ce, ce->engine, ce->ring, true); - __execlists_update_reg_state(ce, ce->engine, ce->ring->tail); - - ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; -} - -static const struct intel_context_ops execlists_context_ops = { - .alloc = execlists_context_alloc, - - .pre_pin = execlists_context_pre_pin, - .pin = execlists_context_pin, - .unpin = execlists_context_unpin, - .post_unpin = execlists_context_post_unpin, - - .enter = intel_context_enter_engine, - .exit = intel_context_exit_engine, - - .reset = execlists_context_reset, - .destroy = 
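The new lrc_descriptor() relies on the context image being page aligned, so the GGTT offset and the low flag bits can simply be OR'ed together. A sketch with abbreviated flag values (the real GEN8_CTX_* definitions live in the driver's headers):

#include <stdbool.h>
#include <stdint.h>

#define CTX_VALID               (1u << 0)       /* abbreviated flag bits */
#define CTX_PRIVILEGE           (1u << 8)
#define CTX_ADDRESSING_SHIFT    3
#define LEGACY_32B_CONTEXT      0u
#define LEGACY_64B_CONTEXT      3u

/* The context image is page aligned, so the GGTT offset leaves the low
 * 12 bits free for the flags. */
static uint32_t lrc_descriptor(uint32_t ggtt_offset, bool is_4lvl)
{
        uint32_t desc;

        desc = (is_4lvl ? LEGACY_64B_CONTEXT : LEGACY_32B_CONTEXT)
                << CTX_ADDRESSING_SHIFT;
        desc |= CTX_VALID | CTX_PRIVILEGE;
        return ggtt_offset | desc;
}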
execlists_context_destroy, -}; - -static u32 hwsp_offset(const struct i915_request *rq) -{ - const struct intel_timeline_cacheline *cl; - - /* Before the request is executed, the timeline/cachline is fixed */ - - cl = rcu_dereference_protected(rq->hwsp_cacheline, 1); - if (cl) - return cl->ggtt_offset; - - return rcu_dereference_protected(rq->timeline, 1)->hwsp_offset; -} - -static int gen8_emit_init_breadcrumb(struct i915_request *rq) -{ - u32 *cs; - - GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq)); - if (!i915_request_timeline(rq)->has_initial_breadcrumb) - return 0; - - cs = intel_ring_begin(rq, 6); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - /* - * Check if we have been preempted before we even get started. - * - * After this point i915_request_started() reports true, even if - * we get preempted and so are no longer running. - */ - *cs++ = MI_ARB_CHECK; - *cs++ = MI_NOOP; - - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = hwsp_offset(rq); - *cs++ = 0; - *cs++ = rq->fence.seqno - 1; - - intel_ring_advance(rq, cs); - - /* Record the updated position of the request's payload */ - rq->infix = intel_ring_offset(rq, cs); - - __set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags); - - return 0; -} - -static int emit_pdps(struct i915_request *rq) -{ - const struct intel_engine_cs * const engine = rq->engine; - struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm); - int err, i; - u32 *cs; - - GEM_BUG_ON(intel_vgpu_active(rq->engine->i915)); - - /* - * Beware ye of the dragons, this sequence is magic! - * - * Small changes to this sequence can cause anything from - * GPU hangs to forcewake errors and machine lockups! - */ - - /* Flush any residual operations from the context load */ - err = engine->emit_flush(rq, EMIT_FLUSH); - if (err) - return err; - - /* Magic required to prevent forcewake errors! */ - err = engine->emit_flush(rq, EMIT_INVALIDATE); - if (err) - return err; - - cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - /* Ensure the LRI have landed before we invalidate & continue */ - *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; - for (i = GEN8_3LVL_PDPES; i--; ) { - const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); - u32 base = engine->mmio_base; + const struct intel_ring *ring = ce->ring; + u32 *regs = ce->lrc_reg_state; + bool valid = true; + int x; - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); - *cs++ = upper_32_bits(pd_daddr); - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); - *cs++ = lower_32_bits(pd_daddr); + if (regs[CTX_RING_START] != i915_ggtt_offset(ring->vma)) { + pr_err("%s: context submitted with incorrect RING_START [%08x], expected %08x\n", + engine->name, + regs[CTX_RING_START], + i915_ggtt_offset(ring->vma)); + regs[CTX_RING_START] = i915_ggtt_offset(ring->vma); + valid = false; } - *cs++ = MI_NOOP; - - intel_ring_advance(rq, cs); - - return 0; -} -static int execlists_request_alloc(struct i915_request *request) -{ - int ret; - - GEM_BUG_ON(!intel_context_is_pinned(request->context)); - - /* - * Flush enough space to reduce the likelihood of waiting after - * we start building the request - in which case we will just - * have to repeat work. - */ - request->reserved_space += EXECLISTS_REQUEST_SIZE; - - /* - * Note that after this point, we have committed to using - * this request as it is being used to both track the - * state of engine initialisation and liveness of the - * golden renderstate above. 
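The deleted gen8_emit_init_breadcrumb() is a small command-stream builder: an arbitration point followed by a store of seqno - 1 into the HWSP, after which i915_request_started() reports true. A model of the emission pattern, with placeholder opcode values standing in for the MI_* encodings:

#include <stdint.h>

/* Placeholder opcodes; the real MI_* encodings are in the i915 headers. */
#define MI_NOOP_CMD             0x00000000u
#define MI_ARB_CHECK_CMD        0x02800000u
#define MI_STORE_DW_GGTT_CMD    0x10400002u

/* An arbitration point first (we may be preempted before starting),
 * then store seqno - 1 to the status page: "started, not completed". */
static uint32_t *emit_init_breadcrumb(uint32_t *cs, uint32_t hwsp_addr,
                                      uint32_t seqno)
{
        *cs++ = MI_ARB_CHECK_CMD;
        *cs++ = MI_NOOP_CMD;

        *cs++ = MI_STORE_DW_GGTT_CMD;
        *cs++ = hwsp_addr;              /* GGTT address of the dword */
        *cs++ = 0;                      /* upper address bits */
        *cs++ = seqno - 1;
        return cs;
}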
Think twice before you try - * to cancel/unwind this request now. - */ - - if (!i915_vm_is_4lvl(request->context->vm)) { - ret = emit_pdps(request); - if (ret) - return ret; + if ((regs[CTX_RING_CTL] & ~(RING_WAIT | RING_WAIT_SEMAPHORE)) != + (RING_CTL_SIZE(ring->size) | RING_VALID)) { + pr_err("%s: context submitted with incorrect RING_CTL [%08x], expected %08x\n", + engine->name, + regs[CTX_RING_CTL], + (u32)(RING_CTL_SIZE(ring->size) | RING_VALID)); + regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; + valid = false; } - /* Unconditionally invalidate GPU caches and TLBs. */ - ret = request->engine->emit_flush(request, EMIT_INVALIDATE); - if (ret) - return ret; + x = lrc_ring_mi_mode(engine); + if (x != -1 && regs[x + 1] & (regs[x + 1] >> 16) & STOP_RING) { + pr_err("%s: context submitted with STOP_RING [%08x] in RING_MI_MODE\n", + engine->name, regs[x + 1]); + regs[x + 1] &= ~STOP_RING; + regs[x + 1] |= STOP_RING << 16; + valid = false; + } - request->reserved_space -= EXECLISTS_REQUEST_SIZE; - return 0; + WARN_ONCE(!valid, "Invalid lrc state found %s submission\n", when); } /* @@ -3949,7 +1415,7 @@ gen10_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) return batch; } -#define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE) +#define CTX_WA_BB_SIZE (PAGE_SIZE) static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) { @@ -3957,7 +1423,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine) struct i915_vma *vma; int err; - obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_OBJ_SIZE); + obj = i915_gem_object_create_shmem(engine->i915, CTX_WA_BB_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -3979,30 +1445,34 @@ err: return err; } -static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine) +void lrc_fini_wa_ctx(struct intel_engine_cs *engine) { i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); + + /* Called on error unwind, clear all flags to prevent further use */ + memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx)); } typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch); -static int intel_init_workaround_bb(struct intel_engine_cs *engine) +void lrc_init_wa_ctx(struct intel_engine_cs *engine) { struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; - struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx, - &wa_ctx->per_ctx }; - wa_bb_func_t wa_bb_fn[2]; + struct i915_wa_ctx_bb *wa_bb[] = { + &wa_ctx->indirect_ctx, &wa_ctx->per_ctx + }; + wa_bb_func_t wa_bb_fn[ARRAY_SIZE(wa_bb)]; void *batch, *batch_ptr; unsigned int i; - int ret; + int err; if (engine->class != RENDER_CLASS) - return 0; + return; switch (INTEL_GEN(engine->i915)) { case 12: case 11: - return 0; + return; case 10: wa_bb_fn[0] = gen10_init_indirectctx_bb; wa_bb_fn[1] = NULL; @@ -4017,14 +1487,20 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) break; default: MISSING_CASE(INTEL_GEN(engine->i915)); - return 0; + return; } - ret = lrc_setup_wa_ctx(engine); - if (ret) { - drm_dbg(&engine->i915->drm, - "Failed to setup context WA page: %d\n", ret); - return ret; + err = lrc_setup_wa_ctx(engine); + if (err) { + /* + * We continue even if we fail to initialize WA batch + * because we only expect rare glitches but nothing + * critical to prevent us from using GPU + */ + drm_err(&engine->i915->drm, + "Ignoring context switch w/a allocation error:%d\n", + err); + return; } batch = i915_gem_object_pin_map(wa_ctx->vma->obj, I915_MAP_WB); @@ -4039,2058 +1515,52 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine) wa_bb[i]->offset = 
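The reserved_space dance in the removed execlists_request_alloc() deserves spelling out: the request temporarily grows its reservation so later intel_ring_begin() calls cannot consume the space needed by the closing breadcrumb, then gives it back once the up-front emission succeeds. A sketch under invented names (struct request, REQUEST_RESERVED):

#include <stddef.h>

#define REQUEST_RESERVED 256    /* stand-in for EXECLISTS_REQUEST_SIZE */

struct request {
        size_t reserved_space;  /* bytes held back for the breadcrumb */
};

/* Grow the reservation before emitting anything so ring-space checks
 * keep room for the closing breadcrumb, then return it on success. */
static int request_alloc(struct request *rq,
                         int (*emit_prefix)(struct request *))
{
        int err;

        rq->reserved_space += REQUEST_RESERVED;
        err = emit_prefix(rq);          /* flushes, PDP loads, ... */
        if (err)
                return err;             /* request will be unwound */
        rq->reserved_space -= REQUEST_RESERVED;
        return 0;
}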
batch_ptr - batch; if (GEM_DEBUG_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset, CACHELINE_BYTES))) { - ret = -EINVAL; + err = -EINVAL; break; } if (wa_bb_fn[i]) batch_ptr = wa_bb_fn[i](engine, batch_ptr); wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset); } - GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE); + GEM_BUG_ON(batch_ptr - batch > CTX_WA_BB_SIZE); __i915_gem_object_flush_map(wa_ctx->vma->obj, 0, batch_ptr - batch); __i915_gem_object_release_map(wa_ctx->vma->obj); - if (ret) - lrc_destroy_wa_ctx(engine); - - return ret; -} - -static void reset_csb_pointers(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - const unsigned int reset_value = execlists->csb_size - 1; - - ring_set_paused(engine, 0); - - /* - * Sometimes Icelake forgets to reset its pointers on a GPU reset. - * Bludgeon them with a mmio update to be sure. - */ - ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR, - 0xffff << 16 | reset_value << 8 | reset_value); - ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); - - /* - * After a reset, the HW starts writing into CSB entry [0]. We - * therefore have to set our HEAD pointer back one entry so that - * the *first* entry we check is entry 0. To complicate this further, - * as we don't wait for the first interrupt after reset, we have to - * fake the HW write to point back to the last entry so that our - * inline comparison of our cached head position against the last HW - * write works even before the first interrupt. - */ - execlists->csb_head = reset_value; - WRITE_ONCE(*execlists->csb_write, reset_value); - wmb(); /* Make sure this is visible to HW (paranoia?) */ - - /* Check that the GPU does indeed update the CSB entries! */ - memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64)); - invalidate_csb_entries(&execlists->csb_status[0], - &execlists->csb_status[reset_value]); - - /* Once more for luck and our trusty paranoia */ - ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR, - 0xffff << 16 | reset_value << 8 | reset_value); - ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); - - GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value); -} - -static void execlists_sanitize(struct intel_engine_cs *engine) -{ - GEM_BUG_ON(execlists_active(&engine->execlists)); - - /* - * Poison residual state on resume, in case the suspend didn't! - * - * We have to assume that across suspend/resume (or other loss - * of control) that the contents of our pinned buffers has been - * lost, replaced by garbage. Since this doesn't always happen, - * let's poison such state so that we more quickly spot when - * we falsely assume it has been preserved. - */ - if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE); - - reset_csb_pointers(engine); - - /* - * The kernel_context HWSP is stored in the status_page. As above, - * that may be lost on resume/initialisation, and so we need to - * reset the value in the HWSP. 
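reset_csb_pointers() below parks both the cached head and the shadow write pointer at the last entry because, after a reset, the hardware writes entry 0 first. A userspace model of the index arithmetic (struct csb is illustrative):

struct csb {
        unsigned int size;      /* entries: 8 on Gen8, 12 on Gen11+ */
        unsigned int head;      /* last entry we consumed */
        unsigned int write;     /* shadow of the HW write pointer */
};

/* After reset the HW fills entry 0 first, so parking head (and the
 * faked write pointer) at size - 1 makes entry 0 the first examined. */
static void reset_csb_pointers(struct csb *c)
{
        c->head = c->size - 1;
        c->write = c->size - 1;
}

/* process_csb() walks the half-open range (head, write] modulo size;
 * both indices stay below size, so the sum never underflows. */
static unsigned int csb_pending(const struct csb *c)
{
        return (c->write + c->size - c->head) % c->size;
}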
- */ - intel_timeline_reset_seqno(engine->kernel_context->timeline); - - /* And scrub the dirty cachelines for the HWSP */ - clflush_cache_range(engine->status_page.addr, PAGE_SIZE); -} - -static void enable_error_interrupt(struct intel_engine_cs *engine) -{ - u32 status; - - engine->execlists.error_interrupt = 0; - ENGINE_WRITE(engine, RING_EMR, ~0u); - ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */ - - status = ENGINE_READ(engine, RING_ESR); - if (unlikely(status)) { - drm_err(&engine->i915->drm, - "engine '%s' resumed still in error: %08x\n", - engine->name, status); - __intel_gt_reset(engine->gt, engine->mask); - } - - /* - * On current gen8+, we have 2 signals to play with - * - * - I915_ERROR_INSTUCTION (bit 0) - * - * Generate an error if the command parser encounters an invalid - * instruction - * - * This is a fatal error. - * - * - CP_PRIV (bit 2) - * - * Generate an error on privilege violation (where the CP replaces - * the instruction with a no-op). This also fires for writes into - * read-only scratch pages. - * - * This is a non-fatal error, parsing continues. - * - * * there are a few others defined for odd HW that we do not use - * - * Since CP_PRIV fires for cases where we have chosen to ignore the - * error (as the HW is validating and suppressing the mistakes), we - * only unmask the instruction error bit. - */ - ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION); -} - -static void enable_execlists(struct intel_engine_cs *engine) -{ - u32 mode; - - assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); - - intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ - - if (INTEL_GEN(engine->i915) >= 11) - mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE); - else - mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE); - ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode); - - ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); - - ENGINE_WRITE_FW(engine, - RING_HWS_PGA, - i915_ggtt_offset(engine->status_page.vma)); - ENGINE_POSTING_READ(engine, RING_HWS_PGA); - - enable_error_interrupt(engine); - - engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0); -} - -static bool unexpected_starting_state(struct intel_engine_cs *engine) -{ - bool unexpected = false; - - if (ENGINE_READ_FW(engine, RING_MI_MODE) & STOP_RING) { - drm_dbg(&engine->i915->drm, - "STOP_RING still set in RING_MI_MODE\n"); - unexpected = true; - } - - return unexpected; -} - -static int execlists_resume(struct intel_engine_cs *engine) -{ - intel_mocs_init_engine(engine); - - intel_breadcrumbs_reset(engine->breadcrumbs); - - if (GEM_SHOW_DEBUG() && unexpected_starting_state(engine)) { - struct drm_printer p = drm_debug_printer(__func__); - - intel_engine_dump(engine, &p, NULL); - } - - enable_execlists(engine); - - return 0; -} - -static void execlists_reset_prepare(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - unsigned long flags; - - ENGINE_TRACE(engine, "depth<-%d\n", - atomic_read(&execlists->tasklet.count)); - - /* - * Prevent request submission to the hardware until we have - * completed the reset in i915_gem_reset_finish(). If a request - * is completed by one engine, it may then queue a request - * to a second via its execlists->tasklet *just* as we are - * calling engine->resume() and also writing the ELSP. - * Turning off the execlists->tasklet until the reset is over - * prevents the race. 
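enable_execlists() in the hunk above leans on masked registers: the top 16 bits of a write select which of the low 16 bits take effect, so no read-modify-write cycle is needed. A sketch of the idiom, mirroring _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() (STOP_RING's bit position taken from the driver):

#include <stdint.h>

/* Masked registers: the high 16 bits of a write select which of the
 * low 16 bits take effect, avoiding read-modify-write cycles. */
#define MASKED_BIT_ENABLE(b)    (((b) << 16) | (b))
#define MASKED_BIT_DISABLE(b)   ((b) << 16)

#define STOP_RING               (1u << 8)

/* enable_execlists() clears STOP_RING in RING_MI_MODE this way. */
static uint32_t mi_mode_resume_value(void)
{
        return MASKED_BIT_DISABLE(STOP_RING);
}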
- */ - __tasklet_disable_sync_once(&execlists->tasklet); - GEM_BUG_ON(!reset_in_progress(execlists)); - - /* And flush any current direct submission. */ - spin_lock_irqsave(&engine->active.lock, flags); - spin_unlock_irqrestore(&engine->active.lock, flags); - - /* - * We stop engines, otherwise we might get failed reset and a - * dead gpu (on elk). Also as modern gpu as kbl can suffer - * from system hang if batchbuffer is progressing when - * the reset is issued, regardless of READY_TO_RESET ack. - * Thus assume it is best to stop engines on all gens - * where we have a gpu reset. - * - * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) - * - * FIXME: Wa for more modern gens needs to be validated - */ - ring_set_paused(engine, 1); - intel_engine_stop_cs(engine); - - engine->execlists.reset_ccid = active_ccid(engine); -} - -static void __reset_stop_ring(u32 *regs, const struct intel_engine_cs *engine) -{ - int x; - - x = lrc_ring_mi_mode(engine); - if (x != -1) { - regs[x + 1] &= ~STOP_RING; - regs[x + 1] |= STOP_RING << 16; - } -} - -static void __execlists_reset_reg_state(const struct intel_context *ce, - const struct intel_engine_cs *engine) -{ - u32 *regs = ce->lrc_reg_state; - - __reset_stop_ring(regs, engine); -} - -static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - struct intel_context *ce; - struct i915_request *rq; - u32 head; - - mb(); /* paranoia: read the CSB pointers from after the reset */ - clflush(execlists->csb_write); - mb(); - - process_csb(engine); /* drain preemption events */ - - /* Following the reset, we need to reload the CSB read/write pointers */ - reset_csb_pointers(engine); - - /* - * Save the currently executing context, even if we completed - * its request, it was still running at the time of the - * reset and will have been clobbered. - */ - rq = active_context(engine, engine->execlists.reset_ccid); - if (!rq) - goto unwind; - - ce = rq->context; - GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); - - if (i915_request_completed(rq)) { - /* Idle context; tidy up the ring so we can restart afresh */ - head = intel_ring_wrap(ce->ring, rq->tail); - goto out_replay; - } - - /* We still have requests in-flight; the engine should be active */ - GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); - - /* Context has requests still in-flight; it should not be idle! */ - GEM_BUG_ON(i915_active_is_idle(&ce->active)); - - rq = active_request(ce->timeline, rq); - head = intel_ring_wrap(ce->ring, rq->head); - GEM_BUG_ON(head == ce->ring->tail); - - /* - * If this request hasn't started yet, e.g. it is waiting on a - * semaphore, we need to avoid skipping the request or else we - * break the signaling chain. However, if the context is corrupt - * the request will not restart and we will be stuck with a wedged - * device. It is quite often the case that if we issue a reset - * while the GPU is loading the context image, that the context - * image becomes corrupt. - * - * Otherwise, if we have not started yet, the request should replay - * perfectly and we do not need to flag the result as being erroneous. - */ - if (!i915_request_started(rq)) - goto out_replay; - - /* - * If the request was innocent, we leave the request in the ELSP - * and will try to replay it on restarting. The context image may - * have been corrupted by the reset, in which case we may have - * to service a new GPU hang, but more likely we can continue on - * without impact. 
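__execlists_reset() above picks its replay point with intel_ring_wrap(), which relies on the ring size being a power of two. A condensed model of that choice (replay_head is an invented helper, not driver API):

#include <stdint.h>

/* Ring offsets wrap into [0, size); size is a power of two. */
static uint32_t ring_wrap(uint32_t size, uint32_t offset)
{
        return offset & (size - 1);
}

/* A completed request restarts the ring cleanly at its tail; an
 * in-flight one is replayed from its head. */
static uint32_t replay_head(uint32_t size, int completed,
                            uint32_t rq_head, uint32_t rq_tail)
{
        return ring_wrap(size, completed ? rq_tail : rq_head);
}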
- * - * If the request was guilty, we presume the context is corrupt - * and have to at least restore the RING register in the context - * image back to the expected values to skip over the guilty request. - */ - __i915_request_reset(rq, stalled); - - /* - * We want a simple context + ring to execute the breadcrumb update. - * We cannot rely on the context being intact across the GPU hang, - * so clear it and rebuild just what we need for the breadcrumb. - * All pending requests for this context will be zapped, and any - * future request will be after userspace has had the opportunity - * to recreate its own state. - */ -out_replay: - ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n", - head, ce->ring->tail); - __execlists_reset_reg_state(ce, engine); - __execlists_update_reg_state(ce, engine, head); - ce->lrc.desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */ - -unwind: - /* Push back any incomplete requests for replay after the reset. */ - cancel_port_requests(execlists); - __unwind_incomplete_requests(engine); -} - -static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled) -{ - unsigned long flags; - - ENGINE_TRACE(engine, "\n"); - - spin_lock_irqsave(&engine->active.lock, flags); - - __execlists_reset(engine, stalled); - - spin_unlock_irqrestore(&engine->active.lock, flags); -} - -static void nop_submission_tasklet(unsigned long data) -{ - struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; - - /* The driver is wedged; don't process any more events. */ - WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN); -} - -static void execlists_reset_cancel(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - struct i915_request *rq, *rn; - struct rb_node *rb; - unsigned long flags; - - ENGINE_TRACE(engine, "\n"); - - /* - * Before we call engine->cancel_requests(), we should have exclusive - * access to the submission state. This is arranged for us by the - * caller disabling the interrupt generation, the tasklet and other - * threads that may then access the same state, giving us a free hand - * to reset state. However, we still need to let lockdep be aware that - * we know this state may be accessed in hardirq context, so we - * disable the irq around this manipulation and we want to keep - * the spinlock focused on its duties and not accidentally conflate - * coverage to the submission's irq state. (Similarly, although we - * shouldn't need to disable irq around the manipulation of the - * submission's irq state, we also wish to remind ourselves that - * it is irq state.) - */ - spin_lock_irqsave(&engine->active.lock, flags); - - __execlists_reset(engine, true); - - /* Mark all executing requests as skipped. */ - list_for_each_entry(rq, &engine->active.requests, sched.link) - mark_eio(rq); - intel_engine_signal_breadcrumbs(engine); - - /* Flush the queued requests to the timeline list (for retiring). 
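execlists_reset_cancel() in this hunk never drops requests silently: everything executing or queued is marked -EIO and pushed through submission so it retires and signals its waiters. A skeletal version over a plain linked list (struct rq and its fields are stand-ins for the driver's structures):

#include <errno.h>
#include <stddef.h>

struct rq {
        struct rq *next;
        int error;
        int submitted;
};

/* Mark every request -EIO and push it through submission so it retires
 * and signals its waiters instead of being dropped on the floor. */
static void reset_cancel(struct rq *queue)
{
        struct rq *r;

        for (r = queue; r; r = r->next) {
                r->error = -EIO;        /* mark_eio() analogue */
                r->submitted = 1;       /* __i915_request_submit() analogue */
        }
}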
*/ - while ((rb = rb_first_cached(&execlists->queue))) { - struct i915_priolist *p = to_priolist(rb); - int i; - - priolist_for_each_request_consume(rq, rn, p, i) { - mark_eio(rq); - __i915_request_submit(rq); - } - - rb_erase_cached(&p->node, &execlists->queue); - i915_priolist_free(p); - } - - /* On-hold requests will be flushed to timeline upon their release */ - list_for_each_entry(rq, &engine->active.hold, sched.link) - mark_eio(rq); - - /* Cancel all attached virtual engines */ - while ((rb = rb_first_cached(&execlists->virtual))) { - struct virtual_engine *ve = - rb_entry(rb, typeof(*ve), nodes[engine->id].rb); - - rb_erase_cached(rb, &execlists->virtual); - RB_CLEAR_NODE(rb); - - spin_lock(&ve->base.active.lock); - rq = fetch_and_zero(&ve->request); - if (rq) { - mark_eio(rq); - - rq->engine = engine; - __i915_request_submit(rq); - i915_request_put(rq); - - ve->base.execlists.queue_priority_hint = INT_MIN; - } - spin_unlock(&ve->base.active.lock); - } - - /* Remaining _unready_ requests will be nop'ed when submitted */ - - execlists->queue_priority_hint = INT_MIN; - execlists->queue = RB_ROOT_CACHED; - - GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet)); - execlists->tasklet.func = nop_submission_tasklet; - - spin_unlock_irqrestore(&engine->active.lock, flags); -} - -static void execlists_reset_finish(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - - /* - * After a GPU reset, we may have requests to replay. Do so now while - * we still have the forcewake to be sure that the GPU is not allowed - * to sleep before we restart and reload a context. - */ - GEM_BUG_ON(!reset_in_progress(execlists)); - if (!RB_EMPTY_ROOT(&execlists->queue.rb_root)) - execlists->tasklet.func(execlists->tasklet.data); - - if (__tasklet_enable(&execlists->tasklet)) - /* And kick in case we missed a new request submission. */ - tasklet_hi_schedule(&execlists->tasklet); - ENGINE_TRACE(engine, "depth->%d\n", - atomic_read(&execlists->tasklet.count)); -} - -static int gen8_emit_bb_start_noarb(struct i915_request *rq, - u64 offset, u32 len, - const unsigned int flags) -{ - u32 *cs; - - cs = intel_ring_begin(rq, 4); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - /* - * WaDisableCtxRestoreArbitration:bdw,chv - * - * We don't need to perform MI_ARB_ENABLE as often as we do (in - * particular all the gen that do not need the w/a at all!), if we - * took care to make sure that on every switch into this context - * (both ordinary and for preemption) that arbitrartion was enabled - * we would be fine. However, for gen8 there is another w/a that - * requires us to not preempt inside GPGPU execution, so we keep - * arbitration disabled for gen8 batches. Arbitration will be - * re-enabled before we close the request - * (engine->emit_fini_breadcrumb). - */ - *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; - - /* FIXME(BDW+): Address space and security selectors. */ - *cs++ = MI_BATCH_BUFFER_START_GEN8 | - (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)); - *cs++ = lower_32_bits(offset); - *cs++ = upper_32_bits(offset); - - intel_ring_advance(rq, cs); - - return 0; -} - -static int gen8_emit_bb_start(struct i915_request *rq, - u64 offset, u32 len, - const unsigned int flags) -{ - u32 *cs; - - cs = intel_ring_begin(rq, 6); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; - - *cs++ = MI_BATCH_BUFFER_START_GEN8 | - (flags & I915_DISPATCH_SECURE ? 
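gen8_emit_bb_start() brackets the batch with MI_ARB_ON_OFF so a preemption point exists just before the batch while arbitration stays off inside it (the fini breadcrumb re-enables it). A sketch of the bracketing, with placeholder encodings for the MI_* words:

#include <stdint.h>

/* Placeholder command encodings. */
#define MI_ARB_ON_OFF_CMD       0x04000000u
#define MI_ARB_ENABLE_BIT       1u
#define MI_ARB_DISABLE_BIT      0u
#define MI_BB_START_GEN8_CMD    0x18800001u
#define BB_NON_PRIVILEGED       (1u << 8)

/* Arbitration is enabled just before the batch (a preemption point)
 * and disabled across it; the fini breadcrumb re-enables it. */
static uint32_t *emit_bb_start(uint32_t *cs, uint64_t offset, int secure)
{
        *cs++ = MI_ARB_ON_OFF_CMD | MI_ARB_ENABLE_BIT;
        *cs++ = MI_BB_START_GEN8_CMD | (secure ? 0 : BB_NON_PRIVILEGED);
        *cs++ = (uint32_t)offset;
        *cs++ = (uint32_t)(offset >> 32);
        *cs++ = MI_ARB_ON_OFF_CMD | MI_ARB_DISABLE_BIT;
        *cs++ = 0;                      /* MI_NOOP pad */
        return cs;
}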
0 : BIT(8)); - *cs++ = lower_32_bits(offset); - *cs++ = upper_32_bits(offset); - - *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; - *cs++ = MI_NOOP; - - intel_ring_advance(rq, cs); - - return 0; -} - -static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) -{ - ENGINE_WRITE(engine, RING_IMR, - ~(engine->irq_enable_mask | engine->irq_keep_mask)); - ENGINE_POSTING_READ(engine, RING_IMR); -} - -static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) -{ - ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); -} - -static int gen8_emit_flush(struct i915_request *request, u32 mode) -{ - u32 cmd, *cs; - - cs = intel_ring_begin(request, 4); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - cmd = MI_FLUSH_DW + 1; - - /* We always require a command barrier so that subsequent - * commands, such as breadcrumb interrupts, are strictly ordered - * wrt the contents of the write cache being flushed to memory - * (and thus being coherent from the CPU). - */ - cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; - - if (mode & EMIT_INVALIDATE) { - cmd |= MI_INVALIDATE_TLB; - if (request->engine->class == VIDEO_DECODE_CLASS) - cmd |= MI_INVALIDATE_BSD; - } - - *cs++ = cmd; - *cs++ = LRC_PPHWSP_SCRATCH_ADDR; - *cs++ = 0; /* upper addr */ - *cs++ = 0; /* value */ - intel_ring_advance(request, cs); - - return 0; -} - -static int gen8_emit_flush_render(struct i915_request *request, - u32 mode) -{ - bool vf_flush_wa = false, dc_flush_wa = false; - u32 *cs, flags = 0; - int len; - - flags |= PIPE_CONTROL_CS_STALL; - - if (mode & EMIT_FLUSH) { - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; - flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; - flags |= PIPE_CONTROL_FLUSH_ENABLE; - } - - if (mode & EMIT_INVALIDATE) { - flags |= PIPE_CONTROL_TLB_INVALIDATE; - flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_STORE_DATA_INDEX; - - /* - * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL - * pipe control. 
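gen8_emit_flush_render() assembles its PIPE_CONTROL word from two disjoint sets of bits depending on the EMIT_FLUSH/EMIT_INVALIDATE mode, around a common CS stall. A reduced model with illustrative flag values (the real PIPE_CONTROL_* bits are in the driver's headers):

#include <stdint.h>

#define EMIT_FLUSH              (1u << 0)
#define EMIT_INVALIDATE         (1u << 1)

/* Illustrative PIPE_CONTROL bits. */
#define PC_CS_STALL             (1u << 20)
#define PC_RT_CACHE_FLUSH       (1u << 12)
#define PC_DEPTH_CACHE_FLUSH    (1u << 0)
#define PC_TLB_INVALIDATE       (1u << 18)
#define PC_TEX_INVALIDATE       (1u << 10)

/* Flush and invalidate select disjoint bit sets around a common
 * CS stall, as in gen8_emit_flush_render(). */
static uint32_t pipe_control_flags(uint32_t mode)
{
        uint32_t flags = PC_CS_STALL;

        if (mode & EMIT_FLUSH)
                flags |= PC_RT_CACHE_FLUSH | PC_DEPTH_CACHE_FLUSH;
        if (mode & EMIT_INVALIDATE)
                flags |= PC_TLB_INVALIDATE | PC_TEX_INVALIDATE;
        return flags;
}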
- */ - if (IS_GEN(request->engine->i915, 9)) - vf_flush_wa = true; - - /* WaForGAMHang:kbl */ - if (IS_KBL_GT_REVID(request->engine->i915, 0, KBL_REVID_B0)) - dc_flush_wa = true; - } - - len = 6; - - if (vf_flush_wa) - len += 6; - - if (dc_flush_wa) - len += 12; - - cs = intel_ring_begin(request, len); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - if (vf_flush_wa) - cs = gen8_emit_pipe_control(cs, 0, 0); - - if (dc_flush_wa) - cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE, - 0); - - cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); - - if (dc_flush_wa) - cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0); - - intel_ring_advance(request, cs); - - return 0; -} - -static int gen11_emit_flush_render(struct i915_request *request, - u32 mode) -{ - if (mode & EMIT_FLUSH) { - u32 *cs; - u32 flags = 0; - - flags |= PIPE_CONTROL_CS_STALL; - - flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; - flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; - flags |= PIPE_CONTROL_FLUSH_ENABLE; - flags |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_STORE_DATA_INDEX; - - cs = intel_ring_begin(request, 6); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); - intel_ring_advance(request, cs); - } - - if (mode & EMIT_INVALIDATE) { - u32 *cs; - u32 flags = 0; - - flags |= PIPE_CONTROL_CS_STALL; - - flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_TLB_INVALIDATE; - flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_STORE_DATA_INDEX; - - cs = intel_ring_begin(request, 6); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); - intel_ring_advance(request, cs); - } - - return 0; -} - -static u32 preparser_disable(bool state) -{ - return MI_ARB_CHECK | 1 << 8 | state; -} - -static i915_reg_t aux_inv_reg(const struct intel_engine_cs *engine) -{ - static const i915_reg_t vd[] = { - GEN12_VD0_AUX_NV, - GEN12_VD1_AUX_NV, - GEN12_VD2_AUX_NV, - GEN12_VD3_AUX_NV, - }; - - static const i915_reg_t ve[] = { - GEN12_VE0_AUX_NV, - GEN12_VE1_AUX_NV, - }; - - if (engine->class == VIDEO_DECODE_CLASS) - return vd[engine->instance]; - - if (engine->class == VIDEO_ENHANCEMENT_CLASS) - return ve[engine->instance]; - - GEM_BUG_ON("unknown aux_inv_reg\n"); - - return INVALID_MMIO_REG; -} - -static u32 * -gen12_emit_aux_table_inv(const i915_reg_t inv_reg, u32 *cs) -{ - *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = i915_mmio_reg_offset(inv_reg); - *cs++ = AUX_INV; - *cs++ = MI_NOOP; - - return cs; -} - -static int gen12_emit_flush_render(struct i915_request *request, - u32 mode) -{ - if (mode & EMIT_FLUSH) { - u32 flags = 0; - u32 *cs; - - flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; - flags |= PIPE_CONTROL_FLUSH_L3; - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; - /* Wa_1409600907:tgl */ - flags |= PIPE_CONTROL_DEPTH_STALL; - flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; - flags |= PIPE_CONTROL_FLUSH_ENABLE; - - flags |= PIPE_CONTROL_STORE_DATA_INDEX; - flags |= PIPE_CONTROL_QW_WRITE; - - flags |= PIPE_CONTROL_CS_STALL; - - cs = intel_ring_begin(request, 6); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - cs = 
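aux_inv_reg() above is a simple class/instance lookup into per-engine register tables. The same shape in a self-contained sketch (register offsets here are placeholders, not the GEN12_*_AUX_NV addresses):

#include <stdint.h>

enum engine_class { VIDEO_DECODE, VIDEO_ENHANCEMENT };

/* Placeholder offsets; the driver indexes GEN12_VD*_AUX_NV and
 * GEN12_VE*_AUX_NV. */
static const uint32_t vd_aux[] = { 0x4100, 0x4104, 0x4108, 0x410c };
static const uint32_t ve_aux[] = { 0x4200, 0x4204 };

static uint32_t aux_inv_reg(enum engine_class class, unsigned int instance)
{
        switch (class) {
        case VIDEO_DECODE:
                return vd_aux[instance];
        case VIDEO_ENHANCEMENT:
                return ve_aux[instance];
        }
        return 0;       /* the driver asserts and returns INVALID_MMIO_REG */
}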
gen12_emit_pipe_control(cs, - PIPE_CONTROL0_HDC_PIPELINE_FLUSH, - flags, LRC_PPHWSP_SCRATCH_ADDR); - intel_ring_advance(request, cs); - } - - if (mode & EMIT_INVALIDATE) { - u32 flags = 0; - u32 *cs; - - flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_TLB_INVALIDATE; - flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; - - flags |= PIPE_CONTROL_STORE_DATA_INDEX; - flags |= PIPE_CONTROL_QW_WRITE; - - flags |= PIPE_CONTROL_CS_STALL; - - cs = intel_ring_begin(request, 8 + 4); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - /* - * Prevent the pre-parser from skipping past the TLB - * invalidate and loading a stale page for the batch - * buffer / request payload. - */ - *cs++ = preparser_disable(true); - - cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); - - /* hsdes: 1809175790 */ - cs = gen12_emit_aux_table_inv(GEN12_GFX_CCS_AUX_NV, cs); - - *cs++ = preparser_disable(false); - intel_ring_advance(request, cs); - } - - return 0; -} - -static int gen12_emit_flush(struct i915_request *request, u32 mode) -{ - intel_engine_mask_t aux_inv = 0; - u32 cmd, *cs; - - cmd = 4; - if (mode & EMIT_INVALIDATE) - cmd += 2; - if (mode & EMIT_INVALIDATE) - aux_inv = request->engine->mask & ~BIT(BCS0); - if (aux_inv) - cmd += 2 * hweight8(aux_inv) + 2; - - cs = intel_ring_begin(request, cmd); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - if (mode & EMIT_INVALIDATE) - *cs++ = preparser_disable(true); - - cmd = MI_FLUSH_DW + 1; - - /* We always require a command barrier so that subsequent - * commands, such as breadcrumb interrupts, are strictly ordered - * wrt the contents of the write cache being flushed to memory - * (and thus being coherent from the CPU). - */ - cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW; - - if (mode & EMIT_INVALIDATE) { - cmd |= MI_INVALIDATE_TLB; - if (request->engine->class == VIDEO_DECODE_CLASS) - cmd |= MI_INVALIDATE_BSD; - } - - *cs++ = cmd; - *cs++ = LRC_PPHWSP_SCRATCH_ADDR; - *cs++ = 0; /* upper addr */ - *cs++ = 0; /* value */ - - if (aux_inv) { /* hsdes: 1809175790 */ - struct intel_engine_cs *engine; - unsigned int tmp; - - *cs++ = MI_LOAD_REGISTER_IMM(hweight8(aux_inv)); - for_each_engine_masked(engine, request->engine->gt, - aux_inv, tmp) { - *cs++ = i915_mmio_reg_offset(aux_inv_reg(engine)); - *cs++ = AUX_INV; - } - *cs++ = MI_NOOP; - } - - if (mode & EMIT_INVALIDATE) - *cs++ = preparser_disable(false); - - intel_ring_advance(request, cs); - - return 0; -} - -static void assert_request_valid(struct i915_request *rq) -{ - struct intel_ring *ring __maybe_unused = rq->ring; - - /* Can we unwind this request without appearing to go forwards? */ - GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0); -} - -/* - * Reserve space for 2 NOOPs at the end of each request to be - * used as a workaround for not being allowed to do lite - * restore with HEAD==TAIL (WaIdleLiteRestore). - */ -static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs) -{ - /* Ensure there's always at least one preemption point per-request. 
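The ring-space sizing in gen12_emit_flush() above is the subtle part: each engine whose AUX table is invalidated costs one (offset, value) LRI pair, plus two dwords for the LRI header and trailing NOOP. Reproducing the arithmetic standalone (gen12_flush_dwords is an invented name):

#include <stdint.h>

static unsigned int hweight8(uint8_t v)         /* population count */
{
        unsigned int n = 0;

        while (v) {
                n += v & 1;
                v >>= 1;
        }
        return n;
}

/* 4 dwords of MI_FLUSH_DW, +2 for the pre-parser toggles, and for AUX
 * invalidation an LRI (offset, value) pair per engine plus the LRI
 * header and trailing NOOP. */
static unsigned int gen12_flush_dwords(int invalidate, uint8_t aux_inv)
{
        unsigned int n = 4;

        if (invalidate)
                n += 2;
        if (invalidate && aux_inv)
                n += 2 * hweight8(aux_inv) + 2;
        return n;
}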
*/ - *cs++ = MI_ARB_CHECK; - *cs++ = MI_NOOP; - request->wa_tail = intel_ring_offset(request, cs); - - /* Check that entire request is less than half the ring */ - assert_request_valid(request); - - return cs; -} - -static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs) -{ - *cs++ = MI_SEMAPHORE_WAIT | - MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_POLL | - MI_SEMAPHORE_SAD_EQ_SDD; - *cs++ = 0; - *cs++ = intel_hws_preempt_address(request->engine); - *cs++ = 0; - - return cs; -} - -static __always_inline u32* -gen8_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs) -{ - *cs++ = MI_USER_INTERRUPT; - - *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; - if (intel_engine_has_semaphores(request->engine)) - cs = emit_preempt_busywait(request, cs); - - request->tail = intel_ring_offset(request, cs); - assert_ring_tail_valid(request->ring, request->tail); - - return gen8_emit_wa_tail(request, cs); -} - -static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs) -{ - return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0); -} - -static u32 *gen8_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs) -{ - return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs)); -} - -static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) -{ - cs = gen8_emit_pipe_control(cs, - PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | - PIPE_CONTROL_DEPTH_CACHE_FLUSH | - PIPE_CONTROL_DC_FLUSH_ENABLE, - 0); - - /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ - cs = gen8_emit_ggtt_write_rcs(cs, - request->fence.seqno, - hwsp_offset(request), - PIPE_CONTROL_FLUSH_ENABLE | - PIPE_CONTROL_CS_STALL); - - return gen8_emit_fini_breadcrumb_tail(request, cs); -} - -static u32 * -gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) -{ - cs = gen8_emit_ggtt_write_rcs(cs, - request->fence.seqno, - hwsp_offset(request), - PIPE_CONTROL_CS_STALL | - PIPE_CONTROL_TILE_CACHE_FLUSH | - PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | - PIPE_CONTROL_DEPTH_CACHE_FLUSH | - PIPE_CONTROL_DC_FLUSH_ENABLE | - PIPE_CONTROL_FLUSH_ENABLE); - - return gen8_emit_fini_breadcrumb_tail(request, cs); -} - -/* - * Note that the CS instruction pre-parser will not stall on the breadcrumb - * flush and will continue pre-fetching the instructions after it before the - * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at - * BB_START/END instructions, so, even though we might pre-fetch the pre-amble - * of the next request before the memory has been flushed, we're guaranteed that - * we won't access the batch itself too early. - * However, on gen12+ the parser can pre-fetch across the BB_START/END commands, - * so, if the current request is modifying an instruction in the next request on - * the same intel_context, we might pre-fetch and then execute the pre-update - * instruction. To avoid this, the users of self-modifying code should either - * disable the parser around the code emitting the memory writes, via a new flag - * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For - * the in-kernel use-cases we've opted to use a separate context, see - * reloc_gpu() as an example. - * All the above applies only to the instructions themselves. Non-inline data - * used by the instructions is not pre-fetched. 
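emit_preempt_busywait() above parks the ring on a polling semaphore against the per-engine preempt slot in the HWSP, giving the submission tasklet a window to rewrite ELSP. The emission pattern, with placeholder MI_SEMAPHORE_* encodings:

#include <stdint.h>

/* Placeholder MI_SEMAPHORE_WAIT encodings. */
#define SEM_WAIT_CMD    0x0e000002u
#define SEM_POLL        (1u << 15)
#define SEM_GGTT        (1u << 22)
#define SEM_SAD_EQ_SDD  (4u << 12)

/* Park the ring polling the per-engine preempt slot in the HWSP until
 * it reads zero, leaving the tasklet free to rewrite ELSP meanwhile. */
static uint32_t *emit_preempt_busywait(uint32_t *cs, uint32_t preempt_addr)
{
        *cs++ = SEM_WAIT_CMD | SEM_GGTT | SEM_POLL | SEM_SAD_EQ_SDD;
        *cs++ = 0;                      /* wait until *addr == 0 */
        *cs++ = preempt_addr;           /* GGTT address of preempt slot */
        *cs++ = 0;                      /* upper address bits */
        return cs;
}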
- */ - -static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs) -{ - *cs++ = MI_SEMAPHORE_WAIT_TOKEN | - MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_POLL | - MI_SEMAPHORE_SAD_EQ_SDD; - *cs++ = 0; - *cs++ = intel_hws_preempt_address(request->engine); - *cs++ = 0; - *cs++ = 0; - *cs++ = MI_NOOP; - - return cs; -} - -static __always_inline u32* -gen12_emit_fini_breadcrumb_tail(struct i915_request *request, u32 *cs) -{ - *cs++ = MI_USER_INTERRUPT; - - *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; - if (intel_engine_has_semaphores(request->engine)) - cs = gen12_emit_preempt_busywait(request, cs); - - request->tail = intel_ring_offset(request, cs); - assert_ring_tail_valid(request->ring, request->tail); - - return gen8_emit_wa_tail(request, cs); -} - -static u32 *gen12_emit_fini_breadcrumb(struct i915_request *rq, u32 *cs) -{ - /* XXX Stalling flush before seqno write; post-sync not */ - cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0)); - return gen12_emit_fini_breadcrumb_tail(rq, cs); -} - -static u32 * -gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) -{ - cs = gen12_emit_ggtt_write_rcs(cs, - request->fence.seqno, - hwsp_offset(request), - PIPE_CONTROL0_HDC_PIPELINE_FLUSH, - PIPE_CONTROL_CS_STALL | - PIPE_CONTROL_TILE_CACHE_FLUSH | - PIPE_CONTROL_FLUSH_L3 | - PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | - PIPE_CONTROL_DEPTH_CACHE_FLUSH | - /* Wa_1409600907:tgl */ - PIPE_CONTROL_DEPTH_STALL | - PIPE_CONTROL_DC_FLUSH_ENABLE | - PIPE_CONTROL_FLUSH_ENABLE); - - return gen12_emit_fini_breadcrumb_tail(request, cs); -} - -static void execlists_park(struct intel_engine_cs *engine) -{ - cancel_timer(&engine->execlists.timer); - cancel_timer(&engine->execlists.preempt); -} - -void intel_execlists_set_default_submission(struct intel_engine_cs *engine) -{ - engine->submit_request = execlists_submit_request; - engine->schedule = i915_schedule; - engine->execlists.tasklet.func = execlists_submission_tasklet; - - engine->reset.prepare = execlists_reset_prepare; - engine->reset.rewind = execlists_reset_rewind; - engine->reset.cancel = execlists_reset_cancel; - engine->reset.finish = execlists_reset_finish; - - engine->park = execlists_park; - engine->unpark = NULL; - - engine->flags |= I915_ENGINE_SUPPORTS_STATS; - if (!intel_vgpu_active(engine->i915)) { - engine->flags |= I915_ENGINE_HAS_SEMAPHORES; - if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) { - engine->flags |= I915_ENGINE_HAS_PREEMPTION; - if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) - engine->flags |= I915_ENGINE_HAS_TIMESLICES; - } - } - - if (INTEL_GEN(engine->i915) >= 12) - engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; - - if (intel_engine_has_preemption(engine)) - engine->emit_bb_start = gen8_emit_bb_start; - else - engine->emit_bb_start = gen8_emit_bb_start_noarb; -} - -static void execlists_shutdown(struct intel_engine_cs *engine) -{ - /* Synchronise with residual timers and any softirq they raise */ - del_timer_sync(&engine->execlists.timer); - del_timer_sync(&engine->execlists.preempt); - tasklet_kill(&engine->execlists.tasklet); -} - -static void execlists_release(struct intel_engine_cs *engine) -{ - engine->sanitize = NULL; /* no longer in control, nothing to sanitize */ - - execlists_shutdown(engine); - - intel_engine_cleanup_common(engine); - lrc_destroy_wa_ctx(engine); -} - -static void -logical_ring_default_vfuncs(struct intel_engine_cs *engine) -{ - /* Default vfuncs which can be overriden by each engine. 
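intel_execlists_set_default_submission() derives the engine capability flags as a strict chain: no semaphores under an active vGPU, no preemption without semaphores, no timeslicing without preemption. The dependency chain as a standalone function (flag values invented):

#include <stdint.h>

#define HAS_SEMAPHORES  (1u << 0)       /* invented flag values */
#define HAS_PREEMPTION  (1u << 1)
#define HAS_TIMESLICES  (1u << 2)

/* Timeslicing requires preemption, which requires semaphores, none of
 * which are exposed while running under a vGPU. */
static uint32_t engine_caps(int vgpu_active, int hw_preemption,
                            int timeslice_enabled)
{
        uint32_t flags = 0;

        if (!vgpu_active) {
                flags |= HAS_SEMAPHORES;
                if (hw_preemption) {
                        flags |= HAS_PREEMPTION;
                        if (timeslice_enabled)
                                flags |= HAS_TIMESLICES;
                }
        }
        return flags;
}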
*/ - - engine->resume = execlists_resume; - - engine->cops = &execlists_context_ops; - engine->request_alloc = execlists_request_alloc; - - engine->emit_flush = gen8_emit_flush; - engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; - engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb; - if (INTEL_GEN(engine->i915) >= 12) { - engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb; - engine->emit_flush = gen12_emit_flush; - } - engine->set_default_submission = intel_execlists_set_default_submission; - - if (INTEL_GEN(engine->i915) < 11) { - engine->irq_enable = gen8_logical_ring_enable_irq; - engine->irq_disable = gen8_logical_ring_disable_irq; - } else { - /* - * TODO: On Gen11 interrupt masks need to be clear - * to allow C6 entry. Keep interrupts enabled at - * and take the hit of generating extra interrupts - * until a more refined solution exists. - */ - } -} - -static inline void -logical_ring_default_irqs(struct intel_engine_cs *engine) -{ - unsigned int shift = 0; - - if (INTEL_GEN(engine->i915) < 11) { - const u8 irq_shifts[] = { - [RCS0] = GEN8_RCS_IRQ_SHIFT, - [BCS0] = GEN8_BCS_IRQ_SHIFT, - [VCS0] = GEN8_VCS0_IRQ_SHIFT, - [VCS1] = GEN8_VCS1_IRQ_SHIFT, - [VECS0] = GEN8_VECS_IRQ_SHIFT, - }; - - shift = irq_shifts[engine->id]; - } - - engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; - engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; - engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift; - engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift; -} - -static void rcs_submission_override(struct intel_engine_cs *engine) -{ - switch (INTEL_GEN(engine->i915)) { - case 12: - engine->emit_flush = gen12_emit_flush_render; - engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; - break; - case 11: - engine->emit_flush = gen11_emit_flush_render; - engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs; - break; - default: - engine->emit_flush = gen8_emit_flush_render; - engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; - break; - } -} - -int intel_execlists_submission_setup(struct intel_engine_cs *engine) -{ - struct intel_engine_execlists * const execlists = &engine->execlists; - struct drm_i915_private *i915 = engine->i915; - struct intel_uncore *uncore = engine->uncore; - u32 base = engine->mmio_base; - - tasklet_init(&engine->execlists.tasklet, - execlists_submission_tasklet, (unsigned long)engine); - timer_setup(&engine->execlists.timer, execlists_timeslice, 0); - timer_setup(&engine->execlists.preempt, execlists_preempt, 0); - - logical_ring_default_vfuncs(engine); - logical_ring_default_irqs(engine); - - if (engine->class == RENDER_CLASS) - rcs_submission_override(engine); - - if (intel_init_workaround_bb(engine)) - /* - * We continue even if we fail to initialize WA batch - * because we only expect rare glitches but nothing - * critical to prevent us from using GPU - */ - drm_err(&i915->drm, "WA batch buffer initialization failed\n"); - - if (HAS_LOGICAL_RING_ELSQ(i915)) { - execlists->submit_reg = uncore->regs + - i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base)); - execlists->ctrl_reg = uncore->regs + - i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base)); - } else { - execlists->submit_reg = uncore->regs + - i915_mmio_reg_offset(RING_ELSP(base)); - } - - execlists->csb_status = - (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; - - execlists->csb_write = - &engine->status_page.addr[intel_hws_csb_write_index(i915)]; - - if (INTEL_GEN(i915) < 11) - execlists->csb_size = GEN8_CSB_ENTRIES; - 
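Pre-Gen11 hardware packs every engine's interrupt bits into shared registers, so logical_ring_default_irqs() above shifts a common mask by a per-engine amount. A sketch of the table lookup (the shift values here are placeholders for the GEN8_*_IRQ_SHIFT constants):

#include <stdint.h>

enum engine_id { RCS0, BCS0, VCS0, VCS1, VECS0, NUM_ENGINES };

/* Placeholder shifts for the shared pre-Gen11 interrupt registers. */
static const uint8_t irq_shifts[NUM_ENGINES] = {
        [RCS0] = 0, [BCS0] = 16, [VCS0] = 0, [VCS1] = 16, [VECS0] = 0,
};

#define USER_INTERRUPT          (1u << 0)
#define CTX_SWITCH_INTERRUPT    (1u << 2)

static uint32_t engine_irq_enable_mask(enum engine_id id)
{
        return USER_INTERRUPT << irq_shifts[id];
}

static uint32_t engine_irq_keep_mask(enum engine_id id)
{
        return CTX_SWITCH_INTERRUPT << irq_shifts[id];
}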
else - execlists->csb_size = GEN11_CSB_ENTRIES; - - if (INTEL_GEN(engine->i915) >= 11) { - execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32); - execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32); - } - - /* Finally, take ownership and responsibility for cleanup! */ - engine->sanitize = execlists_sanitize; - engine->release = execlists_release; - - return 0; -} - -static void init_common_reg_state(u32 * const regs, - const struct intel_engine_cs *engine, - const struct intel_ring *ring, - bool inhibit) -{ - u32 ctl; - - ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); - ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); - if (inhibit) - ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT; - if (INTEL_GEN(engine->i915) < 11) - ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | - CTX_CTRL_RS_CTX_ENABLE); - regs[CTX_CONTEXT_CONTROL] = ctl; - - regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID; - regs[CTX_TIMESTAMP] = 0; -} - -static void init_wa_bb_reg_state(u32 * const regs, - const struct intel_engine_cs *engine) -{ - const struct i915_ctx_workarounds * const wa_ctx = &engine->wa_ctx; - - if (wa_ctx->per_ctx.size) { - const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); - - GEM_BUG_ON(lrc_ring_wa_bb_per_ctx(engine) == -1); - regs[lrc_ring_wa_bb_per_ctx(engine) + 1] = - (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; - } - - if (wa_ctx->indirect_ctx.size) { - lrc_ring_setup_indirect_ctx(regs, engine, - i915_ggtt_offset(wa_ctx->vma) + - wa_ctx->indirect_ctx.offset, - wa_ctx->indirect_ctx.size); - } -} - -static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt) -{ - if (i915_vm_is_4lvl(&ppgtt->vm)) { - /* 64b PPGTT (48bit canonical) - * PDP0_DESCRIPTOR contains the base address to PML4 and - * other PDP Descriptors are ignored. - */ - ASSIGN_CTX_PML4(ppgtt, regs); - } else { - ASSIGN_CTX_PDP(ppgtt, regs, 3); - ASSIGN_CTX_PDP(ppgtt, regs, 2); - ASSIGN_CTX_PDP(ppgtt, regs, 1); - ASSIGN_CTX_PDP(ppgtt, regs, 0); - } -} - -static struct i915_ppgtt *vm_alias(struct i915_address_space *vm) -{ - if (i915_is_ggtt(vm)) - return i915_vm_to_ggtt(vm)->alias; - else - return i915_vm_to_ppgtt(vm); -} - -static void execlists_init_reg_state(u32 *regs, - const struct intel_context *ce, - const struct intel_engine_cs *engine, - const struct intel_ring *ring, - bool inhibit) -{ - /* - * A context is actually a big batch buffer with several - * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The - * values we are setting here are only for the first context restore: - * on a subsequent save, the GPU will recreate this batchbuffer with new - * values (including all the missing MI_LOAD_REGISTER_IMM commands that - * we are not initializing here). - * - * Must keep consistent with virtual_update_register_offsets(). 
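init_ppgtt_reg_state() above distinguishes 4-level paging, where only PDP0 carries the PML4 base, from 3-level paging, where all four PDP descriptors are loaded. A simplified model over a flat array (the driver writes ASSIGN_CTX_PML4/ASSIGN_CTX_PDP into the LRC image instead):

#include <stdint.h>

/* With 4-level paging only PDP0 carries the PML4 base address; with
 * 3-level paging all four PDP descriptors are loaded. */
static void init_ppgtt_pdps(uint64_t pdp_slots[4],
                            const uint64_t pd_daddr[4], int is_4lvl)
{
        int i;

        if (is_4lvl) {
                pdp_slots[0] = pd_daddr[0];     /* PML4; rest ignored */
                return;
        }
        for (i = 3; i >= 0; i--)
                pdp_slots[i] = pd_daddr[i];
}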
- */ - set_offsets(regs, reg_offsets(engine), engine, inhibit); - - init_common_reg_state(regs, engine, ring, inhibit); - init_ppgtt_reg_state(regs, vm_alias(ce->vm)); - - init_wa_bb_reg_state(regs, engine); - - __reset_stop_ring(regs, engine); -} - -static int -populate_lr_context(struct intel_context *ce, - struct drm_i915_gem_object *ctx_obj, - struct intel_engine_cs *engine, - struct intel_ring *ring) -{ - bool inhibit = true; - void *vaddr; - - vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - drm_dbg(&engine->i915->drm, "Could not map object pages!\n"); - return PTR_ERR(vaddr); - } - - set_redzone(vaddr, engine); - - if (engine->default_state) { - shmem_read(engine->default_state, 0, - vaddr, engine->context_size); - __set_bit(CONTEXT_VALID_BIT, &ce->flags); - inhibit = false; - } - - /* Clear the ppHWSP (inc. per-context counters) */ - memset(vaddr, 0, PAGE_SIZE); - - /* - * The second page of the context object contains some registers which - * must be set up prior to the first execution. - */ - execlists_init_reg_state(vaddr + LRC_STATE_OFFSET, - ce, engine, ring, inhibit); - - __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size); - i915_gem_object_unpin_map(ctx_obj); - return 0; -} - -static struct intel_timeline *pinned_timeline(struct intel_context *ce) -{ - struct intel_timeline *tl = fetch_and_zero(&ce->timeline); - - return intel_timeline_create_from_engine(ce->engine, - page_unmask_bits(tl)); -} - -static int __execlists_context_alloc(struct intel_context *ce, - struct intel_engine_cs *engine) -{ - struct drm_i915_gem_object *ctx_obj; - struct intel_ring *ring; - struct i915_vma *vma; - u32 context_size; - int ret; - - GEM_BUG_ON(ce->state); - context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); - - if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) - context_size += I915_GTT_PAGE_SIZE; /* for redzone */ - - if (INTEL_GEN(engine->i915) == 12) { - ce->wa_bb_page = context_size / PAGE_SIZE; - context_size += PAGE_SIZE; - } - - ctx_obj = i915_gem_object_create_shmem(engine->i915, context_size); - if (IS_ERR(ctx_obj)) - return PTR_ERR(ctx_obj); - - vma = i915_vma_instance(ctx_obj, &engine->gt->ggtt->vm, NULL); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto error_deref_obj; - } - - if (!page_mask_bits(ce->timeline)) { - struct intel_timeline *tl; - - /* - * Use the static global HWSP for the kernel context, and - * a dynamically allocated cacheline for everyone else. 
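The size computation in __execlists_context_alloc() stacks three pieces: the rounded-up context image, an optional debug redzone page, and on Gen12 a trailing page whose index is remembered in wa_bb_page. The arithmetic standalone (lrc_object_size is an invented name):

#include <stdint.h>

#define PAGE_SZ 4096u

static uint32_t round_up_page(uint32_t n)
{
        return (n + PAGE_SZ - 1) & ~(PAGE_SZ - 1);
}

/* Context image, optional debug redzone page, and on Gen12 a trailing
 * page for the indirect-context workaround batch (index remembered in
 * *wa_bb_page). */
static uint32_t lrc_object_size(uint32_t context_size, int debug_gem,
                                int gen, uint32_t *wa_bb_page)
{
        uint32_t size = round_up_page(context_size);

        if (debug_gem)
                size += PAGE_SZ;                /* redzone */
        if (gen == 12) {
                *wa_bb_page = size / PAGE_SZ;
                size += PAGE_SZ;                /* wa batch page */
        }
        return size;
}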
- */ - if (unlikely(ce->timeline)) - tl = pinned_timeline(ce); - else - tl = intel_timeline_create(engine->gt); - if (IS_ERR(tl)) { - ret = PTR_ERR(tl); - goto error_deref_obj; - } - - ce->timeline = tl; - } - - ring = intel_engine_create_ring(engine, (unsigned long)ce->ring); - if (IS_ERR(ring)) { - ret = PTR_ERR(ring); - goto error_deref_obj; - } - - ret = populate_lr_context(ce, ctx_obj, engine, ring); - if (ret) { - drm_dbg(&engine->i915->drm, - "Failed to populate LRC: %d\n", ret); - goto error_ring_free; - } - - ce->ring = ring; - ce->state = vma; - - return 0; - -error_ring_free: - intel_ring_put(ring); -error_deref_obj: - i915_gem_object_put(ctx_obj); - return ret; -} - -static struct list_head *virtual_queue(struct virtual_engine *ve) -{ - return &ve->base.execlists.default_priolist.requests[0]; -} - -static void virtual_context_destroy(struct kref *kref) -{ - struct virtual_engine *ve = - container_of(kref, typeof(*ve), context.ref); - unsigned int n; - - GEM_BUG_ON(!list_empty(virtual_queue(ve))); - GEM_BUG_ON(ve->request); - GEM_BUG_ON(ve->context.inflight); - - for (n = 0; n < ve->num_siblings; n++) { - struct intel_engine_cs *sibling = ve->siblings[n]; - struct rb_node *node = &ve->nodes[sibling->id].rb; - unsigned long flags; - - if (RB_EMPTY_NODE(node)) - continue; - spin_lock_irqsave(&sibling->active.lock, flags); - - /* Detachment is lazily performed in the execlists tasklet */ - if (!RB_EMPTY_NODE(node)) - rb_erase_cached(node, &sibling->execlists.virtual); - - spin_unlock_irqrestore(&sibling->active.lock, flags); - } - GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet)); - - if (ve->context.state) - __execlists_context_fini(&ve->context); - intel_context_fini(&ve->context); - - intel_engine_free_request_pool(&ve->base); - - kfree(ve->bonds); - kfree(ve); + /* Verify that we can handle failure to setup the wa_ctx */ + if (err || i915_inject_probe_error(engine->i915, -ENODEV)) + lrc_fini_wa_ctx(engine); } -static void virtual_engine_initial_hint(struct virtual_engine *ve) -{ - int swp; - - /* - * Pick a random sibling on starting to help spread the load around. - * - * New contexts are typically created with exactly the same order - * of siblings, and often started in batches. Due to the way we iterate - * the array of sibling when submitting requests, sibling[0] is - * prioritised for dequeuing. If we make sure that sibling[0] is fairly - * randomised across the system, we also help spread the load by the - * first engine we inspect being different each time. - * - * NB This does not force us to execute on this engine, it will just - * typically be the first we inspect for submission. 
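virtual_engine_initial_hint(), whose body follows, randomises which sibling sits in slot 0 so freshly created virtual engines do not all probe the same physical engine first. A userspace approximation using rand() in place of prandom_u32_max() (and, unlike it, carrying modulo bias):

#include <stdlib.h>

/* Swap a random sibling into slot 0 so new virtual engines do not all
 * probe the same physical engine first. */
static void initial_hint(void *siblings[], unsigned int num_siblings)
{
        unsigned int swp = (unsigned int)rand() % num_siblings;

        if (swp) {
                void *tmp = siblings[0];

                siblings[0] = siblings[swp];
                siblings[swp] = tmp;
        }
}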
- */ - swp = prandom_u32_max(ve->num_siblings); - if (swp) - swap(ve->siblings[swp], ve->siblings[0]); -} - -static int virtual_context_alloc(struct intel_context *ce) -{ - struct virtual_engine *ve = container_of(ce, typeof(*ve), context); - - return __execlists_context_alloc(ce, ve->siblings[0]); -} - -static int virtual_context_pin(struct intel_context *ce, void *vaddr) -{ - struct virtual_engine *ve = container_of(ce, typeof(*ve), context); - - /* Note: we must use a real engine class for setting up reg state */ - return __execlists_context_pin(ce, ve->siblings[0], vaddr); -} - -static void virtual_context_enter(struct intel_context *ce) -{ - struct virtual_engine *ve = container_of(ce, typeof(*ve), context); - unsigned int n; - - for (n = 0; n < ve->num_siblings; n++) - intel_engine_pm_get(ve->siblings[n]); - - intel_timeline_enter(ce->timeline); -} - -static void virtual_context_exit(struct intel_context *ce) -{ - struct virtual_engine *ve = container_of(ce, typeof(*ve), context); - unsigned int n; - - intel_timeline_exit(ce->timeline); - - for (n = 0; n < ve->num_siblings; n++) - intel_engine_pm_put(ve->siblings[n]); -} - -static const struct intel_context_ops virtual_context_ops = { - .alloc = virtual_context_alloc, - - .pre_pin = execlists_context_pre_pin, - .pin = virtual_context_pin, - .unpin = execlists_context_unpin, - .post_unpin = execlists_context_post_unpin, - - .enter = virtual_context_enter, - .exit = virtual_context_exit, - - .destroy = virtual_context_destroy, -}; - -static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve) +static void st_update_runtime_underflow(struct intel_context *ce, s32 dt) { - struct i915_request *rq; - intel_engine_mask_t mask; - - rq = READ_ONCE(ve->request); - if (!rq) - return 0; - - /* The rq is ready for submission; rq->execution_mask is now stable. 
*/ - mask = rq->execution_mask; - if (unlikely(!mask)) { - /* Invalid selection, submit to a random engine in error */ - i915_request_set_error_once(rq, -ENODEV); - mask = ve->siblings[0]->mask; - } - - ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n", - rq->fence.context, rq->fence.seqno, - mask, ve->base.execlists.queue_priority_hint); - - return mask; +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) + ce->runtime.num_underflow++; + ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt); +#endif } -static void virtual_submission_tasklet(unsigned long data) +void lrc_update_runtime(struct intel_context *ce) { - struct virtual_engine * const ve = (struct virtual_engine *)data; - const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint); - intel_engine_mask_t mask; - unsigned int n; + u32 old; + s32 dt; - rcu_read_lock(); - mask = virtual_submission_mask(ve); - rcu_read_unlock(); - if (unlikely(!mask)) + if (intel_context_is_barrier(ce)) return; - local_irq_disable(); - for (n = 0; n < ve->num_siblings; n++) { - struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]); - struct ve_node * const node = &ve->nodes[sibling->id]; - struct rb_node **parent, *rb; - bool first; - - if (!READ_ONCE(ve->request)) - break; /* already handled by a sibling's tasklet */ - - if (unlikely(!(mask & sibling->mask))) { - if (!RB_EMPTY_NODE(&node->rb)) { - spin_lock(&sibling->active.lock); - rb_erase_cached(&node->rb, - &sibling->execlists.virtual); - RB_CLEAR_NODE(&node->rb); - spin_unlock(&sibling->active.lock); - } - continue; - } - - spin_lock(&sibling->active.lock); - - if (!RB_EMPTY_NODE(&node->rb)) { - /* - * Cheat and avoid rebalancing the tree if we can - * reuse this node in situ. - */ - first = rb_first_cached(&sibling->execlists.virtual) == - &node->rb; - if (prio == node->prio || (prio > node->prio && first)) - goto submit_engine; - - rb_erase_cached(&node->rb, &sibling->execlists.virtual); - } - - rb = NULL; - first = true; - parent = &sibling->execlists.virtual.rb_root.rb_node; - while (*parent) { - struct ve_node *other; - - rb = *parent; - other = rb_entry(rb, typeof(*other), rb); - if (prio > other->prio) { - parent = &rb->rb_left; - } else { - parent = &rb->rb_right; - first = false; - } - } - - rb_link_node(&node->rb, rb, parent); - rb_insert_color_cached(&node->rb, - &sibling->execlists.virtual, - first); - -submit_engine: - GEM_BUG_ON(RB_EMPTY_NODE(&node->rb)); - node->prio = prio; - if (first && prio > sibling->execlists.queue_priority_hint) - tasklet_hi_schedule(&sibling->execlists.tasklet); - - spin_unlock(&sibling->active.lock); - } - local_irq_enable(); -} - -static void virtual_submit_request(struct i915_request *rq) -{ - struct virtual_engine *ve = to_virtual_engine(rq->engine); - struct i915_request *old; - unsigned long flags; - - ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n", - rq->fence.context, - rq->fence.seqno); - - GEM_BUG_ON(ve->base.submit_request != virtual_submit_request); - - spin_lock_irqsave(&ve->base.active.lock, flags); - - old = ve->request; - if (old) { /* background completion event from preempt-to-busy */ - GEM_BUG_ON(!i915_request_completed(old)); - __i915_request_submit(old); - i915_request_put(old); - } - - if (i915_request_completed(rq)) { - __i915_request_submit(rq); - - ve->base.execlists.queue_priority_hint = INT_MIN; - ve->request = NULL; - } else { - ve->base.execlists.queue_priority_hint = rq_prio(rq); - ve->request = i915_request_get(rq); - - GEM_BUG_ON(!list_empty(virtual_queue(ve))); - list_move_tail(&rq->sched.link, 
virtual_queue(ve)); - - tasklet_hi_schedule(&ve->base.execlists.tasklet); - } - - spin_unlock_irqrestore(&ve->base.active.lock, flags); -} - -static struct ve_bond * -virtual_find_bond(struct virtual_engine *ve, - const struct intel_engine_cs *master) -{ - int i; - - for (i = 0; i < ve->num_bonds; i++) { - if (ve->bonds[i].master == master) - return &ve->bonds[i]; - } - - return NULL; -} - -static void -virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal) -{ - struct virtual_engine *ve = to_virtual_engine(rq->engine); - intel_engine_mask_t allowed, exec; - struct ve_bond *bond; - - allowed = ~to_request(signal)->engine->mask; - - bond = virtual_find_bond(ve, to_request(signal)->engine); - if (bond) - allowed &= bond->sibling_mask; - - /* Restrict the bonded request to run on only the available engines */ - exec = READ_ONCE(rq->execution_mask); - while (!try_cmpxchg(&rq->execution_mask, &exec, exec & allowed)) - ; - - /* Prevent the master from being re-run on the bonded engines */ - to_request(signal)->execution_mask &= ~allowed; -} - -struct intel_context * -intel_execlists_create_virtual(struct intel_engine_cs **siblings, - unsigned int count) -{ - struct virtual_engine *ve; - unsigned int n; - int err; - - if (count == 0) - return ERR_PTR(-EINVAL); - - if (count == 1) - return intel_context_create(siblings[0]); - - ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL); - if (!ve) - return ERR_PTR(-ENOMEM); - - ve->base.i915 = siblings[0]->i915; - ve->base.gt = siblings[0]->gt; - ve->base.uncore = siblings[0]->uncore; - ve->base.id = -1; - - ve->base.class = OTHER_CLASS; - ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; - ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; - ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; - - /* - * The decision on whether to submit a request using semaphores - * depends on the saturated state of the engine. We only compute - * this during HW submission of the request, and we need for this - * state to be globally applied to all requests being submitted - * to this engine. Virtual engines encompass more than one physical - * engine and so we cannot accurately tell in advance if one of those - * engines is already saturated and so cannot afford to use a semaphore - * and be pessimized in priority for doing so -- if we are the only - * context using semaphores after all other clients have stopped, we - * will be starved on the saturated system. Such a global switch for - * semaphores is less than ideal, but alas is the current compromise. 
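As an aside on the compromise described above, seeding ve->base.saturated with ALL_ENGINES effectively gates semaphore emission for the whole virtual engine; a hedged sketch of that gate (the helper is hypothetical, the real decision sits in the request-construction path):

	/* hypothetical: may 'to' spin on a semaphore while waiting for 'from'? */
	static bool can_use_semaphore(const struct intel_engine_cs *to,
				      const struct i915_request *from)
	{
		/*
		 * With saturated == ALL_ENGINES, every from-engine bit is
		 * already set, so the virtual engine always answers no and
		 * falls back to interrupt-driven waits.
		 */
		return !(READ_ONCE(to->saturated) & from->engine->mask);
	}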
- */ - ve->base.saturated = ALL_ENGINES; - - snprintf(ve->base.name, sizeof(ve->base.name), "virtual"); - - intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); - intel_engine_init_execlists(&ve->base); - - ve->base.cops = &virtual_context_ops; - ve->base.request_alloc = execlists_request_alloc; - - ve->base.schedule = i915_schedule; - ve->base.submit_request = virtual_submit_request; - ve->base.bond_execute = virtual_bond_execute; - - INIT_LIST_HEAD(virtual_queue(ve)); - ve->base.execlists.queue_priority_hint = INT_MIN; - tasklet_init(&ve->base.execlists.tasklet, - virtual_submission_tasklet, - (unsigned long)ve); - - intel_context_init(&ve->context, &ve->base); - - ve->base.breadcrumbs = intel_breadcrumbs_create(NULL); - if (!ve->base.breadcrumbs) { - err = -ENOMEM; - goto err_put; - } - - for (n = 0; n < count; n++) { - struct intel_engine_cs *sibling = siblings[n]; - - GEM_BUG_ON(!is_power_of_2(sibling->mask)); - if (sibling->mask & ve->base.mask) { - DRM_DEBUG("duplicate %s entry in load balancer\n", - sibling->name); - err = -EINVAL; - goto err_put; - } - - /* - * The virtual engine implementation is tightly coupled to - * the execlists backend -- we push out request directly - * into a tree inside each physical engine. We could support - * layering if we handle cloning of the requests and - * submitting a copy into each backend. - */ - if (sibling->execlists.tasklet.func != - execlists_submission_tasklet) { - err = -ENODEV; - goto err_put; - } - - GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb)); - RB_CLEAR_NODE(&ve->nodes[sibling->id].rb); - - ve->siblings[ve->num_siblings++] = sibling; - ve->base.mask |= sibling->mask; - - /* - * All physical engines must be compatible for their emission - * functions (as we build the instructions during request - * construction and do not alter them before submission - * on the physical engine). We use the engine class as a guide - * here, although that could be refined. 
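The sibling loop that follows enforces the rule just described; distilled into a standalone helper (hypothetical — the driver performs the equivalent check inline while accumulating the sibling mask):

	static bool sibling_classes_match(struct intel_engine_cs **siblings,
					  unsigned int count)
	{
		unsigned int n;

		/* one engine class implies compatible emit_* vfuncs */
		for (n = 1; n < count; n++)
			if (siblings[n]->class != siblings[0]->class)
				return false;

		return true;
	}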
- */ - if (ve->base.class != OTHER_CLASS) { - if (ve->base.class != sibling->class) { - DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n", - sibling->class, ve->base.class); - err = -EINVAL; - goto err_put; - } - continue; - } - - ve->base.class = sibling->class; - ve->base.uabi_class = sibling->uabi_class; - snprintf(ve->base.name, sizeof(ve->base.name), - "v%dx%d", ve->base.class, count); - ve->base.context_size = sibling->context_size; - - ve->base.emit_bb_start = sibling->emit_bb_start; - ve->base.emit_flush = sibling->emit_flush; - ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb; - ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb; - ve->base.emit_fini_breadcrumb_dw = - sibling->emit_fini_breadcrumb_dw; - - ve->base.flags = sibling->flags; - } - - ve->base.flags |= I915_ENGINE_IS_VIRTUAL; - - virtual_engine_initial_hint(ve); - return &ve->context; - -err_put: - intel_context_put(&ve->context); - return ERR_PTR(err); -} - -struct intel_context * -intel_execlists_clone_virtual(struct intel_engine_cs *src) -{ - struct virtual_engine *se = to_virtual_engine(src); - struct intel_context *dst; - - dst = intel_execlists_create_virtual(se->siblings, - se->num_siblings); - if (IS_ERR(dst)) - return dst; - - if (se->num_bonds) { - struct virtual_engine *de = to_virtual_engine(dst->engine); - - de->bonds = kmemdup(se->bonds, - sizeof(*se->bonds) * se->num_bonds, - GFP_KERNEL); - if (!de->bonds) { - intel_context_put(dst); - return ERR_PTR(-ENOMEM); - } - - de->num_bonds = se->num_bonds; - } - - return dst; -} - -int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, - const struct intel_engine_cs *master, - const struct intel_engine_cs *sibling) -{ - struct virtual_engine *ve = to_virtual_engine(engine); - struct ve_bond *bond; - int n; - - /* Sanity check the sibling is part of the virtual engine */ - for (n = 0; n < ve->num_siblings; n++) - if (sibling == ve->siblings[n]) - break; - if (n == ve->num_siblings) - return -EINVAL; - - bond = virtual_find_bond(ve, master); - if (bond) { - bond->sibling_mask |= sibling->mask; - return 0; - } - - bond = krealloc(ve->bonds, - sizeof(*bond) * (ve->num_bonds + 1), - GFP_KERNEL); - if (!bond) - return -ENOMEM; - - bond[ve->num_bonds].master = master; - bond[ve->num_bonds].sibling_mask = sibling->mask; - - ve->bonds = bond; - ve->num_bonds++; - - return 0; -} - -void intel_execlists_show_requests(struct intel_engine_cs *engine, - struct drm_printer *m, - void (*show_request)(struct drm_printer *m, - struct i915_request *rq, - const char *prefix), - unsigned int max) -{ - const struct intel_engine_execlists *execlists = &engine->execlists; - struct i915_request *rq, *last; - unsigned long flags; - unsigned int count; - struct rb_node *rb; - - spin_lock_irqsave(&engine->active.lock, flags); - - last = NULL; - count = 0; - list_for_each_entry(rq, &engine->active.requests, sched.link) { - if (count++ < max - 1) - show_request(m, rq, "\t\tE "); - else - last = rq; - } - if (last) { - if (count > max) { - drm_printf(m, - "\t\t...skipping %d executing requests...\n", - count - max); - } - show_request(m, last, "\t\tE "); - } - - if (execlists->switch_priority_hint != INT_MIN) - drm_printf(m, "\t\tSwitch priority hint: %d\n", - READ_ONCE(execlists->switch_priority_hint)); - if (execlists->queue_priority_hint != INT_MIN) - drm_printf(m, "\t\tQueue priority hint: %d\n", - READ_ONCE(execlists->queue_priority_hint)); - - last = NULL; - count = 0; - for (rb = rb_first_cached(&execlists->queue); rb; rb = 
rb_next(rb)) { - struct i915_priolist *p = rb_entry(rb, typeof(*p), node); - int i; - - priolist_for_each_request(rq, p, i) { - if (count++ < max - 1) - show_request(m, rq, "\t\tQ "); - else - last = rq; - } - } - if (last) { - if (count > max) { - drm_printf(m, - "\t\t...skipping %d queued requests...\n", - count - max); - } - show_request(m, last, "\t\tQ "); - } - - last = NULL; - count = 0; - for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) { - struct virtual_engine *ve = - rb_entry(rb, typeof(*ve), nodes[engine->id].rb); - struct i915_request *rq = READ_ONCE(ve->request); + old = ce->runtime.last; + ce->runtime.last = lrc_get_runtime(ce); + dt = ce->runtime.last - old; - if (rq) { - if (count++ < max - 1) - show_request(m, rq, "\t\tV "); - else - last = rq; - } - } - if (last) { - if (count > max) { - drm_printf(m, - "\t\t...skipping %d virtual requests...\n", - count - max); - } - show_request(m, last, "\t\tV "); + if (unlikely(dt < 0)) { + CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n", + old, ce->runtime.last, dt); + st_update_runtime_underflow(ce, dt); + return; } - spin_unlock_irqrestore(&engine->active.lock, flags); -} - -void intel_lr_context_reset(struct intel_engine_cs *engine, - struct intel_context *ce, - u32 head, - bool scrub) -{ - GEM_BUG_ON(!intel_context_is_pinned(ce)); - - /* - * We want a simple context + ring to execute the breadcrumb update. - * We cannot rely on the context being intact across the GPU hang, - * so clear it and rebuild just what we need for the breadcrumb. - * All pending requests for this context will be zapped, and any - * future request will be after userspace has had the opportunity - * to recreate its own state. - */ - if (scrub) - restore_default_state(ce, engine); - - /* Rerun the request; its payload has been neutered (if guilty). */ - __execlists_update_reg_state(ce, engine, head); -} - -bool -intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine) -{ - return engine->set_default_submission == - intel_execlists_set_default_submission; + ewma_runtime_add(&ce->runtime.avg, dt); + ce->runtime.total += dt; } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h index c2d287f25497..7f697845c4cf 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -1,90 +1,20 @@ +/* SPDX-License-Identifier: MIT */ /* * Copyright © 2014 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. */ -#ifndef _INTEL_LRC_H_ -#define _INTEL_LRC_H_ +#ifndef __INTEL_LRC_H__ +#define __INTEL_LRC_H__ #include <linux/types.h> -struct drm_printer; +#include "intel_context.h" +#include "intel_lrc_reg.h" -struct drm_i915_private; -struct i915_gem_context; -struct i915_request; -struct intel_context; +struct drm_i915_gem_object; struct intel_engine_cs; +struct intel_ring; -/* Execlists regs */ -#define RING_ELSP(base) _MMIO((base) + 0x230) -#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234) -#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4) -#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244) -#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH (1 << 3) -#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) -#define CTX_CTRL_RS_CTX_ENABLE (1 << 1) -#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2) -#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE (1 << 8) -#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0) -#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510) -#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550) - -#define EL_CTRL_LOAD (1 << 0) - -/* The docs specify that the write pointer wraps around after 5h, "After status - * is written out to the last available status QW at offset 5h, this pointer - * wraps to 0." - * - * Therefore, one must infer than even though there are 3 bits available, 6 and - * 7 appear to be * reserved. - */ -#define GEN8_CSB_ENTRIES 6 -#define GEN8_CSB_PTR_MASK 0x7 -#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8) -#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0) - -#define GEN11_CSB_ENTRIES 12 -#define GEN11_CSB_PTR_MASK 0xf -#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8) -#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0) - -#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ -#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */ -#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */ -/* in Gen12 ID 0x7FF is reserved to indicate idle */ -#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1) - -enum { - INTEL_CONTEXT_SCHEDULE_IN = 0, - INTEL_CONTEXT_SCHEDULE_OUT, - INTEL_CONTEXT_SCHEDULE_PREEMPTED, -}; - -/* Logical Rings */ -void intel_logical_ring_cleanup(struct intel_engine_cs *engine); - -int intel_execlists_submission_setup(struct intel_engine_cs *engine); - -/* Logical Ring Contexts */ /* At the start of the context image is its per-process HWS page */ #define LRC_PPHWSP_PN (0) #define LRC_PPHWSP_SZ (1) @@ -96,32 +26,57 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine); #define LRC_PPHWSP_SCRATCH 0x34 #define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32)) -void intel_execlists_set_default_submission(struct intel_engine_cs *engine); +void lrc_init_wa_ctx(struct intel_engine_cs *engine); +void lrc_fini_wa_ctx(struct intel_engine_cs *engine); + +int lrc_alloc(struct intel_context *ce, + struct intel_engine_cs *engine); +void lrc_reset(struct intel_context *ce); +void lrc_fini(struct intel_context *ce); +void lrc_destroy(struct kref *kref); -void intel_lr_context_reset(struct intel_engine_cs *engine, - struct intel_context *ce, - u32 head, - bool scrub); +int +lrc_pre_pin(struct intel_context *ce, + struct intel_engine_cs *engine, + struct i915_gem_ww_ctx *ww, + void **vaddr); +int 
+lrc_pin(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *vaddr);
+void lrc_unpin(struct intel_context *ce);
+void lrc_post_unpin(struct intel_context *ce);
-void intel_execlists_show_requests(struct intel_engine_cs *engine,
- struct drm_printer *m,
- void (*show_request)(struct drm_printer *m,
- struct i915_request *rq,
- const char *prefix),
- unsigned int max);
+void lrc_init_state(struct intel_context *ce,
+ struct intel_engine_cs *engine,
+ void *state);
-struct intel_context *
-intel_execlists_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count);
+void lrc_init_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ bool clear);
+void lrc_reset_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine);
-struct intel_context *
-intel_execlists_clone_virtual(struct intel_engine_cs *src);
+u32 lrc_update_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ u32 head);
+void lrc_update_offsets(struct intel_context *ce,
+ struct intel_engine_cs *engine);
-int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
- const struct intel_engine_cs *master,
- const struct intel_engine_cs *sibling);
+void lrc_check_regs(const struct intel_context *ce,
+ const struct intel_engine_cs *engine,
+ const char *when);
-bool
-intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine);
+void lrc_update_runtime(struct intel_context *ce);
+static inline u32 lrc_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16] which is recorded before the context
+ * switch (and so excludes the cost of context switches) or use the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
-#endif /* _INTEL_LRC_H_ */
+#endif /* __INTEL_LRC_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 1b51f7b9a5c3..65fe76738335 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -9,6 +9,8 @@
 #include <linux/types.h>
+#define CTX_DESC_FORCE_RESTORE BIT_ULL(2)
+
 /* GEN8 to GEN12 Reg State Context */
 #define CTX_CONTEXT_CONTROL (0x02 + 1)
 #define CTX_RING_HEAD (0x04 + 1)
@@ -52,4 +54,43 @@
 #define GEN8_EXECLISTS_STATUS_BUF 0x370
 #define GEN11_EXECLISTS_STATUS_BUF2 0x3c0
+/* Execlists regs */
+#define RING_ELSP(base) _MMIO((base) + 0x230)
+#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234)
+#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4)
+#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244)
+#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0)
+#define CTX_CTRL_RS_CTX_ENABLE REG_BIT(1)
+#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT REG_BIT(2)
+#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3)
+#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE REG_BIT(8)
+#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0)
+#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510)
+#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550)
+#define EL_CTRL_LOAD REG_BIT(0)
+
+/*
+ * The docs specify that the write pointer wraps around after 5h, "After status
+ * is written out to the last available status QW at offset 5h, this pointer
+ * wraps to 0."
+ *
+ * Therefore, one must infer that even though there are 3 bits available, 6 and
+ * 7 appear to be reserved.
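Given that wrap rule, draining the status buffer is a walk around a six-entry ring of quadwords until the cached head catches the hardware's write pointer. A simplified sketch using GEN8_CSB_ENTRIES (defined just below); consume() and the pointer plumbing are hypothetical stand-ins for the driver's process_csb():

	static void csb_drain(const u64 *csb, u8 *headp, u8 tail)
	{
		u8 head = *headp;

		while (head != tail) {
			if (++head == GEN8_CSB_ENTRIES)
				head = 0;	/* wraps after entry 5, per the note above */
			consume(csb[head]);	/* one status quadword per event */
		}

		*headp = head;
	}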
+ */ +#define GEN8_CSB_ENTRIES 6 +#define GEN8_CSB_PTR_MASK 0x7 +#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8) +#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0) + +#define GEN11_CSB_ENTRIES 12 +#define GEN11_CSB_PTR_MASK 0xf +#define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8) +#define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0) + +#define MAX_CONTEXT_HW_ID (1 << 21) /* exclusive */ +#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */ +#define GEN11_MAX_CONTEXT_HW_ID (1 << 11) /* exclusive */ +/* in Gen12 ID 0x7FF is reserved to indicate idle */ +#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1) + #endif /* _INTEL_LRC_REG_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index 254873e1646e..c4512ee4daf2 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -24,8 +24,8 @@ #include "intel_engine.h" #include "intel_gt.h" +#include "intel_lrc_reg.h" #include "intel_mocs.h" -#include "intel_lrc.h" #include "intel_ring.h" /* structures required */ @@ -59,8 +59,7 @@ struct drm_i915_mocs_table { #define _L3_CACHEABILITY(value) ((value) << 4) /* Helper defines */ -#define GEN9_NUM_MOCS_ENTRIES 62 /* 62 out of 64 - 63 & 64 are reserved. */ -#define GEN11_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */ +#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */ /* (e)LLC caching options */ /* @@ -131,7 +130,19 @@ static const struct drm_i915_mocs_entry skl_mocs_table[] = { GEN9_MOCS_ENTRIES, MOCS_ENTRY(I915_MOCS_CACHED, LE_3_WB | LE_TC_2_LLC_ELLC | LE_LRUM(3), - L3_3_WB) + L3_3_WB), + + /* + * mocs:63 + * - used by the L3 for all of its evictions. + * Thus it is expected to allow LLC cacheability to enable coherent + * flows to be maintained. + * - used to force L3 uncachable cycles. + * Thus it is expected to make the surface L3 uncacheable. 
+ */ + MOCS_ENTRY(63, + LE_3_WB | LE_TC_1_LLC | LE_LRUM(3), + L3_1_UC) }; /* NOTE: the LE_TGT_CACHE is not used on Broxton */ @@ -349,15 +360,15 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915, if (IS_DG1(i915)) { table->size = ARRAY_SIZE(dg1_mocs_table); table->table = dg1_mocs_table; - table->n_entries = GEN11_NUM_MOCS_ENTRIES; + table->n_entries = GEN9_NUM_MOCS_ENTRIES; } else if (INTEL_GEN(i915) >= 12) { table->size = ARRAY_SIZE(tgl_mocs_table); table->table = tgl_mocs_table; - table->n_entries = GEN11_NUM_MOCS_ENTRIES; + table->n_entries = GEN9_NUM_MOCS_ENTRIES; } else if (IS_GEN(i915, 11)) { table->size = ARRAY_SIZE(icl_mocs_table); table->table = icl_mocs_table; - table->n_entries = GEN11_NUM_MOCS_ENTRIES; + table->n_entries = GEN9_NUM_MOCS_ENTRIES; } else if (IS_GEN9_BC(i915) || IS_CANNONLAKE(i915)) { table->size = ARRAY_SIZE(skl_mocs_table); table->n_entries = GEN9_NUM_MOCS_ENTRIES; diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c index 40d8f1a95df6..83992312a34b 100644 --- a/drivers/gpu/drm/i915/intel_region_lmem.c +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c @@ -95,7 +95,7 @@ region_lmem_init(struct intel_memory_region *mem) return ret; } -const struct intel_memory_region_ops intel_region_lmem_ops = { +static const struct intel_memory_region_ops intel_region_lmem_ops = { .init = region_lmem_init, .release = region_lmem_release, .create_object = __i915_gem_lmem_object_create, diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/gt/intel_region_lmem.h index 213def7c7b8a..8ea43e538dab 100644 --- a/drivers/gpu/drm/i915/intel_region_lmem.h +++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.h @@ -8,8 +8,6 @@ struct drm_i915_private; -extern const struct intel_memory_region_ops intel_region_lmem_ops; - struct intel_memory_region * intel_setup_fake_lmem(struct drm_i915_private *i915); diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index ea2a77c7b469..ca816ba22197 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -27,7 +27,8 @@ #include "i915_drv.h" #include "intel_renderstate.h" -#include "gt/intel_context.h" +#include "intel_context.h" +#include "intel_gpu_commands.h" #include "intel_ring.h" static const struct intel_renderstate_rodata * diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 3654c955e6be..9d177297db79 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -40,20 +40,19 @@ static void rmw_clear_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clr) intel_uncore_rmw_fw(uncore, reg, clr, 0); } -static void engine_skip_context(struct i915_request *rq) +static void skip_context(struct i915_request *rq) { - struct intel_engine_cs *engine = rq->engine; struct intel_context *hung_ctx = rq->context; - if (!i915_request_is_active(rq)) - return; + list_for_each_entry_from_rcu(rq, &hung_ctx->timeline->requests, link) { + if (!i915_request_is_active(rq)) + return; - lockdep_assert_held(&engine->active.lock); - list_for_each_entry_continue(rq, &engine->active.requests, sched.link) if (rq->context == hung_ctx) { i915_request_set_error_once(rq, -EIO); __i915_request_skip(rq); } + } } static void client_mark_guilty(struct i915_gem_context *ctx, bool banned) @@ -160,7 +159,7 @@ void __i915_request_reset(struct i915_request *rq, bool guilty) i915_request_set_error_once(rq, -EIO); 
__i915_request_skip(rq);
 if (mark_guilty(rq))
- engine_skip_context(rq);
+ skip_context(rq);
 } else {
 i915_request_set_error_once(rq, -EAGAIN);
 mark_innocent(rq);
@@ -231,7 +230,7 @@ static int g4x_do_reset(struct intel_gt *gt,
 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
 ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
 if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
+ GT_TRACE(gt, "Wait for media reset failed\n");
 goto out;
 }
@@ -239,7 +238,7 @@
 GRDOM_RENDER | GRDOM_RESET_ENABLE);
 ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
 if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
+ GT_TRACE(gt, "Wait for render reset failed\n");
 goto out;
 }
@@ -265,7 +264,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
 5000, 0, NULL);
 if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
+ GT_TRACE(gt, "Wait for render reset failed\n");
 goto out;
 }
@@ -276,7 +275,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
 5000, 0, NULL);
 if (ret) {
- drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
+ GT_TRACE(gt, "Wait for media reset failed\n");
 goto out;
 }
@@ -305,9 +304,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
 500, 0, NULL);
 if (err)
- drm_dbg(&gt->i915->drm,
- "Wait for 0x%08x engines reset failed\n",
- hw_domain_mask);
+ GT_TRACE(gt,
+ "Wait for 0x%08x engines reset failed\n",
+ hw_domain_mask);
 return err;
 }
@@ -407,8 +406,7 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
 return 0;
 if (ret) {
- drm_dbg(&engine->i915->drm,
- "Wait for SFC forced lock ack failed\n");
+ ENGINE_TRACE(engine, "Wait for SFC forced lock ack failed\n");
 return ret;
 }
@@ -499,6 +497,9 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
 u32 request, mask, ack;
 int ret;
+ if (I915_SELFTEST_ONLY(should_fail(&engine->reset_timeout, 1)))
+ return -ETIMEDOUT;
+
 ack = intel_uncore_read_fw(uncore, reg);
 if (ack & RESET_CTL_CAT_ERROR) {
 /*
@@ -754,8 +755,10 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask)
 if (err)
 return err;
+ local_bh_disable();
 for_each_engine(engine, gt, id)
 __intel_engine_reset(engine, stalled_mask & engine->mask);
+ local_bh_enable();
 intel_ggtt_restore_fences(gt->ggtt);
@@ -833,9 +836,11 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
 set_bit(I915_WEDGED, &gt->reset.flags);
 /* Mark all executing requests as skipped */
+ local_bh_disable();
 for_each_engine(engine, gt, id)
 if (engine->reset.cancel)
 engine->reset.cancel(engine);
+ local_bh_enable();
 reset_finish(gt, awake);
@@ -1110,20 +1115,7 @@ static inline int intel_gt_reset_engine(struct intel_engine_cs *engine)
 return __intel_gt_reset(engine->gt, engine->mask);
 }
-/**
- * intel_engine_reset - reset GPU engine to recover from a hang
- * @engine: engine to reset
- * @msg: reason for GPU reset; or NULL for no drm_notice()
- *
- * Reset a specific GPU engine. Useful if a hang is detected.
- * Returns zero on successful reset or otherwise an error code.
- *
- * Procedure is:
- * - identifies the request that caused the hang and it is dropped
- * - reset engine (which will force the engine to idle)
- * - re-init/configure engine
- */
-int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+int __intel_engine_reset_bh(struct intel_engine_cs *engine, const char *msg)
 {
 struct intel_gt *gt = engine->gt;
 bool uses_guc = intel_engine_in_guc_submission_mode(engine);
@@ -1148,8 +1140,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
 ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
 if (ret) {
 /* If we fail here, we expect to fallback to a global reset */
- drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
- uses_guc ? "GuC " : "", engine->name, ret);
+ ENGINE_TRACE(engine, "Failed to reset, err: %d\n", ret);
 goto out;
 }
@@ -1174,6 +1165,30 @@ out:
 return ret;
 }
+/**
+ * intel_engine_reset - reset GPU engine to recover from a hang
+ * @engine: engine to reset
+ * @msg: reason for GPU reset; or NULL for no drm_notice()
+ *
+ * Reset a specific GPU engine. Useful if a hang is detected.
+ * Returns zero on successful reset or otherwise an error code.
+ *
+ * Procedure is:
+ * - identify the request that caused the hang and drop it
+ * - reset engine (which will force the engine to idle)
+ * - re-init/configure engine
+ */
+int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
+{
+ int err;
+
+ local_bh_disable();
+ err = __intel_engine_reset_bh(engine, msg);
+ local_bh_enable();
+
+ return err;
+}
+
 static void intel_gt_reset_global(struct intel_gt *gt,
 u32 engine_mask,
 const char *reason)
@@ -1186,7 +1201,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
- drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
+ GT_TRACE(gt, "resetting chip, engines=%x\n", engine_mask);
 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
 /* Use a watchdog to ensure that our reset completes */
@@ -1260,18 +1275,20 @@ void intel_gt_handle_error(struct intel_gt *gt,
 * single reset fails.
 */
 if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) {
+ local_bh_disable();
 for_each_engine_masked(engine, gt, engine_mask, tmp) {
 BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE);
 if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
 &gt->reset.flags))
 continue;
- if (intel_engine_reset(engine, msg) == 0)
+ if (__intel_engine_reset_bh(engine, msg) == 0)
 engine_mask &= ~engine->mask;
 clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id,
 &gt->reset.flags);
 }
+ local_bh_enable();
 }
 if (!engine_mask)
@@ -1380,6 +1397,17 @@ void intel_gt_init_reset(struct intel_gt *gt)
 mutex_init(&gt->reset.mutex);
 init_srcu_struct(&gt->reset.backoff_srcu);
+ /*
+ * While undesirable to wait inside the shrinker, complain anyway.
+ *
+ * If we have to wait during shrinking, we guarantee forward progress
+ * by forcing the reset. Therefore during the reset we must not
+ * re-enter the shrinker. By declaring that we take the reset mutex
+ * within the shrinker, we forbid ourselves from performing any
+ * fs-reclaim or taking related locks during reset.
+ */
+ i915_gem_shrinker_taints_mutex(gt->i915, &gt->reset.mutex);
+
 /* no GPU until we are ready! */
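The i915_gem_shrinker_taints_mutex() call above primes lockdep with the reclaim-vs-reset ordering once at init, so a violation is reported immediately rather than waiting for a rare deadlock in the field. Conceptually it records a dependency like the sequence below (a sketch of the idea, not the helper's exact body):

	/* declare: gt->reset.mutex may be taken while reclaiming memory ... */
	fs_reclaim_acquire(GFP_KERNEL);
	mutex_lock(&gt->reset.mutex);
	mutex_unlock(&gt->reset.mutex);
	fs_reclaim_release(GFP_KERNEL);
	/* ... so lockdep now warns if reset code ever enters fs-reclaim */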
 __set_bit(I915_WEDGED, &gt->reset.flags);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h
index a0eec7c11c0c..7dbf5cc8a333 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.h
+++ b/drivers/gpu/drm/i915/gt/intel_reset.h
@@ -34,6 +34,8 @@ void intel_gt_reset(struct intel_gt *gt, const char *reason);
 int intel_engine_reset(struct intel_engine_cs *engine, const char *reason);
+int __intel_engine_reset_bh(struct intel_engine_cs *engine,
+ const char *reason);
 void __i915_request_reset(struct i915_request *rq, bool guilty);
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 4034a4bac7f0..06385550450c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -5,9 +5,11 @@
 */
 #include "gem/i915_gem_object.h"
+
 #include "i915_drv.h"
 #include "i915_vma.h"
 #include "intel_engine.h"
+#include "intel_gpu_commands.h"
 #include "intel_ring.h"
 #include "intel_timeline.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index a41b43f445b8..20f42722be8b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -32,6 +32,7 @@
 #include "gen6_ppgtt.h"
 #include "gen7_renderclear.h"
 #include "i915_drv.h"
+#include "i915_mitigations.h"
 #include "intel_breadcrumbs.h"
 #include "intel_context.h"
 #include "intel_gt.h"
@@ -158,30 +159,7 @@ static void ring_setup_status_page(struct intel_engine_cs *engine)
 static bool stop_ring(struct intel_engine_cs *engine)
 {
- struct drm_i915_private *dev_priv = engine->i915;
-
- if (INTEL_GEN(dev_priv) > 2) {
- ENGINE_WRITE(engine,
- RING_MI_MODE, _MASKED_BIT_ENABLE(STOP_RING));
- if (intel_wait_for_register(engine->uncore,
- RING_MI_MODE(engine->mmio_base),
- MODE_IDLE,
- MODE_IDLE,
- 1000)) {
- drm_err(&dev_priv->drm,
- "%s : timed out trying to stop ring\n",
- engine->name);
-
- /*
- * Sometimes we observe that the idle flag is not
- * set even though the ring is empty. So double
- * check before giving up.
- */
- if (ENGINE_READ(engine, RING_HEAD) !=
- ENGINE_READ(engine, RING_TAIL))
- return false;
- }
- }
+ intel_engine_stop_cs(engine);
 ENGINE_WRITE(engine, RING_HEAD, ENGINE_READ(engine, RING_TAIL));
@@ -321,6 +299,39 @@ out:
 return ret;
 }
+static void sanitize_hwsp(struct intel_engine_cs *engine)
+{
+ struct intel_timeline *tl;
+
+ list_for_each_entry(tl, &engine->status_page.timelines, engine_link)
+ intel_timeline_reset_seqno(tl);
+}
+
+static void xcs_sanitize(struct intel_engine_cs *engine)
+{
+ /*
+ * Poison residual state on resume, in case the suspend didn't!
+ *
+ * We have to assume that across suspend/resume (or other loss
+ * of control) the contents of our pinned buffers have been
+ * lost, replaced by garbage. Since this doesn't always happen,
+ * let's poison such state so that we more quickly spot when
+ * we falsely assume it has been preserved.
+ */
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE);
+
+ /*
+ * The kernel_context HWSP is stored in the status_page. As above,
+ * that may be lost on resume/initialisation, and so we need to
+ * reset the value in the HWSP.
+ */ + sanitize_hwsp(engine); + + /* And scrub the dirty cachelines for the HWSP */ + clflush_cache_range(engine->status_page.addr, PAGE_SIZE); +} + static void reset_prepare(struct intel_engine_cs *engine) { struct intel_uncore *uncore = engine->uncore; @@ -440,10 +451,8 @@ static void reset_cancel(struct intel_engine_cs *engine) spin_lock_irqsave(&engine->active.lock, flags); /* Mark all submitted requests as skipped. */ - list_for_each_entry(request, &engine->active.requests, sched.link) { - i915_request_set_error_once(request, -EIO); - i915_request_mark_complete(request); - } + list_for_each_entry(request, &engine->active.requests, sched.link) + i915_request_mark_eio(request); intel_engine_signal_breadcrumbs(engine); /* Remaining _unready_ requests will be nop'ed when submitted */ @@ -602,6 +611,7 @@ static int ring_context_pin(struct intel_context *ce, void *unused) static void ring_context_reset(struct intel_context *ce) { intel_ring_reset(ce->ring, ce->ring->emit); + clear_bit(CONTEXT_VALID_BIT, &ce->flags); } static const struct intel_context_ops ring_context_ops = { @@ -886,7 +896,8 @@ static int switch_context(struct i915_request *rq) GEM_BUG_ON(HAS_EXECLISTS(engine->i915)); if (engine->wa_ctx.vma && ce != engine->kernel_context) { - if (engine->wa_ctx.vma->private != ce) { + if (engine->wa_ctx.vma->private != ce && + i915_mitigate_clear_residuals()) { ret = clear_residuals(rq); if (ret) return ret; @@ -1069,6 +1080,8 @@ static void setup_common(struct intel_engine_cs *engine) setup_irq(engine); engine->resume = xcs_resume; + engine->sanitize = xcs_sanitize; + engine->reset.prepare = reset_prepare; engine->reset.rewind = reset_rewind; engine->reset.cancel = reset_cancel; @@ -1290,7 +1303,7 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine) GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma); - if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) { + if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) { err = gen7_ctx_switch_bb_init(engine); if (err) goto err_ring_unpin; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 0d88f17799ff..69e1bd46cc46 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -400,7 +400,7 @@ static unsigned int gen5_invert_freq(struct intel_rps *rps, return val; } -static bool gen5_rps_set(struct intel_rps *rps, u8 val) +static int __gen5_rps_set(struct intel_rps *rps, u8 val) { struct intel_uncore *uncore = rps_to_uncore(rps); u16 rgvswctl; @@ -410,7 +410,7 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val) rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); if (rgvswctl & MEMCTL_CMD_STS) { DRM_DEBUG("gpu busy, RCS change rejected\n"); - return false; /* still busy with another command */ + return -EBUSY; /* still busy with another command */ } /* Invert the frequency bin into an ips delay */ @@ -426,7 +426,18 @@ static bool gen5_rps_set(struct intel_rps *rps, u8 val) rgvswctl |= MEMCTL_CMD_STS; intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); - return true; + return 0; +} + +static int gen5_rps_set(struct intel_rps *rps, u8 val) +{ + int err; + + spin_lock_irq(&mchdev_lock); + err = __gen5_rps_set(rps, val); + spin_unlock_irq(&mchdev_lock); + + return err; } static unsigned long intel_pxfreq(u32 vidfreq) @@ -557,7 +568,7 @@ static bool gen5_rps_enable(struct intel_rps *rps) "stuck trying to change perf mode\n"); mdelay(1); - gen5_rps_set(rps, rps->cur_freq); + __gen5_rps_set(rps, rps->cur_freq); rps->ips.last_count1 = 
intel_uncore_read(uncore, DMIEC); rps->ips.last_count1 += intel_uncore_read(uncore, DDREC); @@ -599,7 +610,7 @@ static void gen5_rps_disable(struct intel_rps *rps) intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); /* Go back to the starting frequency */ - gen5_rps_set(rps, rps->idle_freq); + __gen5_rps_set(rps, rps->idle_freq); mdelay(1); rgvswctl |= MEMCTL_CMD_STS; intel_uncore_write(uncore, MEMSWCTL, rgvswctl); @@ -797,20 +808,19 @@ static int rps_set(struct intel_rps *rps, u8 val, bool update) struct drm_i915_private *i915 = rps_to_i915(rps); int err; - if (INTEL_GEN(i915) < 6) - return 0; - if (val == rps->last_freq) return 0; if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) err = vlv_rps_set(rps, val); - else + else if (INTEL_GEN(i915) >= 6) err = gen6_rps_set(rps, val); + else + err = gen5_rps_set(rps, val); if (err) return err; - if (update) + if (update && INTEL_GEN(i915) >= 6) gen6_rps_set_thresholds(rps, val); rps->last_freq = val; @@ -852,6 +862,8 @@ void intel_rps_park(struct intel_rps *rps) { int adj; + GEM_BUG_ON(atomic_read(&rps->num_waiters)); + if (!intel_rps_clear_active(rps)) return; @@ -897,34 +909,37 @@ void intel_rps_park(struct intel_rps *rps) adj = -2; rps->last_adj = adj; rps->cur_freq = max_t(int, rps->cur_freq + adj, rps->min_freq); + if (rps->cur_freq < rps->efficient_freq) { + rps->cur_freq = rps->efficient_freq; + rps->last_adj = 0; + } GT_TRACE(rps_to_gt(rps), "park:%x\n", rps->cur_freq); } void intel_rps_boost(struct i915_request *rq) { - struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; - unsigned long flags; - - if (i915_request_signaled(rq) || !intel_rps_is_active(rps)) + if (i915_request_signaled(rq) || i915_request_has_waitboost(rq)) return; /* Serializes with i915_request_retire() */ - spin_lock_irqsave(&rq->lock, flags); - if (!i915_request_has_waitboost(rq) && - !dma_fence_is_signaled_locked(&rq->fence)) { - set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags); + if (!test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) { + struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps; + + if (atomic_fetch_inc(&rps->num_waiters)) + return; + + if (!intel_rps_is_active(rps)) + return; GT_TRACE(rps_to_gt(rps), "boost fence:%llx:%llx\n", rq->fence.context, rq->fence.seqno); - if (!atomic_fetch_inc(&rps->num_waiters) && - READ_ONCE(rps->cur_freq) < rps->boost_freq) + if (READ_ONCE(rps->cur_freq) < rps->boost_freq) schedule_work(&rps->work); - atomic_inc(&rps->boosts); + WRITE_ONCE(rps->boosts, rps->boosts + 1); /* debug only */ } - spin_unlock_irqrestore(&rq->lock, flags); } int intel_rps_set(struct intel_rps *rps, u8 val) @@ -1794,7 +1809,7 @@ void gen5_rps_irq_handler(struct intel_rps *rps) rps->min_freq_softlimit, rps->max_freq_softlimit); - if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq)) + if (new_freq != rps->cur_freq && !__gen5_rps_set(rps, new_freq)) rps->cur_freq = new_freq; spin_unlock(&mchdev_lock); @@ -2105,7 +2120,7 @@ bool i915_gpu_turbo_disable(void) spin_lock_irq(&mchdev_lock); rps->max_freq_softlimit = rps->min_freq; - ret = gen5_rps_set(&i915->gt.rps, rps->min_freq); + ret = !__gen5_rps_set(&i915->gt.rps, rps->min_freq); spin_unlock_irq(&mchdev_lock); drm_dev_put(&i915->drm); diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h index 38083f0402d9..029fe13cf303 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps_types.h +++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h @@ -93,7 +93,7 @@ struct intel_rps { } power; atomic_t num_waiters; - atomic_t boosts; + unsigned int boosts; 
/* manual wa residency calculations */
 struct intel_rps_ei ei;
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 7ea94d201fe6..7fe05918a76e 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu)
 struct intel_timeline_cacheline *cl =
 container_of(rcu, typeof(*cl), rcu);
+ /* Must wait until after all *rq->hwsp are complete before removing */
+ i915_gem_object_unpin_map(cl->hwsp->vma->obj);
+ __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
+
 i915_active_fini(&cl->active);
 kfree(cl);
 }
@@ -133,11 +137,6 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl)
 {
 GEM_BUG_ON(!i915_active_is_idle(&cl->active));
-
- i915_gem_object_unpin_map(cl->hwsp->vma->obj);
- i915_vma_put(cl->hwsp->vma);
- __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS));
-
 call_rcu(&cl->rcu, __rcu_cacheline_free);
 }
@@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
 return ERR_CAST(vaddr);
 }
- i915_vma_get(hwsp->vma);
 cl->hwsp = hwsp;
 cl->vaddr = page_pack_bits(vaddr, cacheline);
@@ -321,6 +319,25 @@ __intel_timeline_create(struct intel_gt *gt,
 return timeline;
 }
+struct intel_timeline *
+intel_timeline_create_from_engine(struct intel_engine_cs *engine,
+ unsigned int offset)
+{
+ struct i915_vma *hwsp = engine->status_page.vma;
+ struct intel_timeline *tl;
+
+ tl = __intel_timeline_create(engine->gt, hwsp, offset);
+ if (IS_ERR(tl))
+ return tl;
+
+ /* Borrow a nearby lock; we only create these timelines during init */
+ mutex_lock(&hwsp->vm->mutex);
+ list_add_tail(&tl->engine_link, &engine->status_page.timelines);
+ mutex_unlock(&hwsp->vm->mutex);
+
+ return tl;
+}
+
 void __intel_timeline_pin(struct intel_timeline *tl)
 {
 GEM_BUG_ON(!atomic_read(&tl->pin_count));
@@ -617,6 +634,86 @@ void intel_gt_fini_timelines(struct intel_gt *gt)
 GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
 }
+void intel_gt_show_timelines(struct intel_gt *gt,
+ struct drm_printer *m,
+ void (*show_request)(struct drm_printer *m,
+ const struct i915_request *rq,
+ const char *prefix,
+ int indent))
+{
+ struct intel_gt_timelines *timelines = &gt->timelines;
+ struct intel_timeline *tl, *tn;
+ LIST_HEAD(free);
+
+ spin_lock(&timelines->lock);
+ list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+ unsigned long count, ready, inflight;
+ struct i915_request *rq, *rn;
+ struct dma_fence *fence;
+
+ if (!mutex_trylock(&tl->mutex)) {
+ drm_printf(m, "Timeline %llx: busy; skipping\n",
+ tl->fence_context);
+ continue;
+ }
+
+ intel_timeline_get(tl);
+ GEM_BUG_ON(!atomic_read(&tl->active_count));
+ atomic_inc(&tl->active_count); /* pin the list element */
+ spin_unlock(&timelines->lock);
+
+ count = 0;
+ ready = 0;
+ inflight = 0;
+ list_for_each_entry_safe(rq, rn, &tl->requests, link) {
+ if (i915_request_completed(rq))
+ continue;
+
+ count++;
+ if (i915_request_is_ready(rq))
+ ready++;
+ if (i915_request_is_active(rq))
+ inflight++;
+ }
+
+ drm_printf(m, "Timeline %llx: { ", tl->fence_context);
+ drm_printf(m, "count: %lu, ready: %lu, inflight: %lu",
+ count, ready, inflight);
+ drm_printf(m, ", seqno: { current: %d, last: %d }",
+ *tl->hwsp_seqno, tl->seqno);
+ fence = i915_active_fence_get(&tl->last_request);
+ if (fence) {
+ drm_printf(m, ", engine: %s",
+ to_request(fence)->engine->name);
+
dma_fence_put(fence); + } + drm_printf(m, " }\n"); + + if (show_request) { + list_for_each_entry_safe(rq, rn, &tl->requests, link) + show_request(m, rq, "", 2); + } + + mutex_unlock(&tl->mutex); + spin_lock(&timelines->lock); + + /* Resume list iteration after reacquiring spinlock */ + list_safe_reset_next(tl, tn, link); + if (atomic_dec_and_test(&tl->active_count)) + list_del(&tl->link); + + /* Defer the final release to after the spinlock */ + if (refcount_dec_and_test(&tl->kref.refcount)) { + GEM_BUG_ON(atomic_read(&tl->active_count)); + list_add(&tl->link, &free); + } + } + spin_unlock(&timelines->lock); + + list_for_each_entry_safe(tl, tn, &free, link) + __intel_timeline_free(&tl->kref); +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "gt/selftests/mock_timeline.c" #include "gt/selftest_timeline.c" diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h index 9882cd911d8e..dcdee692a80e 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.h +++ b/drivers/gpu/drm/i915/gt/intel_timeline.h @@ -31,6 +31,8 @@ #include "i915_syncmap.h" #include "intel_timeline_types.h" +struct drm_printer; + struct intel_timeline * __intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp, @@ -42,14 +44,9 @@ intel_timeline_create(struct intel_gt *gt) return __intel_timeline_create(gt, NULL, 0); } -static inline struct intel_timeline * +struct intel_timeline * intel_timeline_create_from_engine(struct intel_engine_cs *engine, - unsigned int offset) -{ - return __intel_timeline_create(engine->gt, - engine->status_page.vma, - offset); -} + unsigned int offset); static inline struct intel_timeline * intel_timeline_get(struct intel_timeline *timeline) @@ -106,4 +103,18 @@ int intel_timeline_read_hwsp(struct i915_request *from, void intel_gt_init_timelines(struct intel_gt *gt); void intel_gt_fini_timelines(struct intel_gt *gt); +void intel_gt_show_timelines(struct intel_gt *gt, + struct drm_printer *m, + void (*show_request)(struct drm_printer *m, + const struct i915_request *rq, + const char *prefix, + int indent)); + +static inline bool +intel_timeline_is_last(const struct intel_timeline *tl, + const struct i915_request *rq) +{ + return list_is_last_rcu(&rq->link, &tl->requests); +} + #endif diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h index 4474f487f589..e360f50706bf 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h +++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h @@ -84,6 +84,8 @@ struct intel_timeline { struct list_head link; struct intel_gt *gt; + struct list_head engine_link; + struct kref kref; struct rcu_head rcu; }; diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index fed9503a7c4e..d99773e6776e 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -7,6 +7,7 @@ #include "i915_drv.h" #include "intel_context.h" #include "intel_engine_pm.h" +#include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_ring.h" #include "intel_workarounds.h" @@ -131,8 +132,10 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa) return; } - if (wal->list) + if (wal->list) { memcpy(list, wal->list, sizeof(*wa) * wal->count); + kfree(wal->list); + } wal->list = list; } @@ -192,7 +195,7 @@ static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, } static void -wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set) 
+wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set) { wa_add(wal, reg, clear, set, clear); } @@ -200,21 +203,32 @@ wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set) static void wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set) { - wa_write_masked_or(wal, reg, ~0, set); + wa_write_clr_set(wal, reg, ~0, set); } static void wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set) { - wa_write_masked_or(wal, reg, set, set); + wa_write_clr_set(wal, reg, set, set); } static void wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr) { - wa_write_masked_or(wal, reg, clr, 0); + wa_write_clr_set(wal, reg, clr, 0); } +/* + * WA operations on "masked register". A masked register has the upper 16 bits + * documented as "masked" in b-spec. Its purpose is to allow writing to just a + * portion of the register without a rmw: you simply write in the upper 16 bits + * the mask of bits you are going to modify. + * + * The wa_masked_* family of functions already does the necessary operations to + * calculate the mask based on the parameters passed, so user only has to + * provide the lower 16 bits of that register. + */ + static void wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val) { @@ -227,38 +241,36 @@ wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val) wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val); } -#define WA_SET_BIT_MASKED(addr, mask) \ - wa_masked_en(wal, (addr), (mask)) - -#define WA_CLR_BIT_MASKED(addr, mask) \ - wa_masked_dis(wal, (addr), (mask)) - -#define WA_SET_FIELD_MASKED(addr, mask, value) \ - wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value))) +static void +wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg, + u32 mask, u32 val) +{ + wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask); +} static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { - WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); + wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING); } static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { - WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); + wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING); } static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { - WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); + wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING); /* WaDisableAsyncFlipPerfMode:bdw,chv */ - WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE); + wa_masked_en(wal, MI_MODE, ASYNC_FLIP_PERF_DISABLE); /* WaDisablePartialInstShootdown:bdw,chv */ - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, - PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); + wa_masked_en(wal, GEN8_ROW_CHICKEN, + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); /* Use Force Non-Coherent whenever executing a 3D context. 
This is a * workaround for for a possible hang in the unlikely event a TLB @@ -266,9 +278,9 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine, */ /* WaForceEnableNonCoherent:bdw,chv */ /* WaHdcDisableFetchWhenMasked:bdw,chv */ - WA_SET_BIT_MASKED(HDC_CHICKEN0, - HDC_DONOT_FETCH_MEM_WHEN_MASKED | - HDC_FORCE_NON_COHERENT); + wa_masked_en(wal, HDC_CHICKEN0, + HDC_DONOT_FETCH_MEM_WHEN_MASKED | + HDC_FORCE_NON_COHERENT); /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0: * "The Hierarchical Z RAW Stall Optimization allows non-overlapping @@ -278,10 +290,10 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine, * * This optimization is off by default for BDW and CHV; turn it on. */ - WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE); + wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE); /* Wa4x4STCOptimizationDisable:bdw,chv */ - WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE); + wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE); /* * BSpec recommends 8x4 when MSAA is used, @@ -291,7 +303,7 @@ static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine, * disable bit, which we don't touch here, but it's good * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). */ - WA_SET_FIELD_MASKED(GEN7_GT_MODE, + wa_masked_field_set(wal, GEN7_GT_MODE, GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4); } @@ -304,24 +316,24 @@ static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine, gen8_ctx_workarounds_init(engine, wal); /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); + wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); /* WaDisableDopClockGating:bdw * * Also see the related UCGTCL1 write in bdw_init_clock_gating() * to disable EUTC clock gating. */ - WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, - DOP_CLOCK_GATING_DISABLE); + wa_masked_en(wal, GEN7_ROW_CHICKEN2, + DOP_CLOCK_GATING_DISABLE); - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, - GEN8_SAMPLER_POWER_BYPASS_DIS); + wa_masked_en(wal, HALF_SLICE_CHICKEN3, + GEN8_SAMPLER_POWER_BYPASS_DIS); - WA_SET_BIT_MASKED(HDC_CHICKEN0, - /* WaForceContextSaveRestoreNonCoherent:bdw */ - HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | - /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ - (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); + wa_masked_en(wal, HDC_CHICKEN0, + /* WaForceContextSaveRestoreNonCoherent:bdw */ + HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | + /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ + (IS_BDW_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); } static void chv_ctx_workarounds_init(struct intel_engine_cs *engine, @@ -330,10 +342,10 @@ static void chv_ctx_workarounds_init(struct intel_engine_cs *engine, gen8_ctx_workarounds_init(engine, wal); /* WaDisableThreadStallDopClockGating:chv */ - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); + wa_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); /* Improve HiZ throughput on CHV. */ - WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); + wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); } static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine, @@ -347,38 +359,38 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine, * Must match Display Engine. See * WaCompressedResourceDisplayNewHashMode. 
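These wa_masked_en()/wa_masked_dis() conversions all rely on the masked-register encoding described earlier: the written value carries its own write-enable mask in the upper 16 bits, so no read-modify-write is needed. The driver's _MASKED_* macros capture the arithmetic (bodies paraphrased here from i915_reg.h, minus the type-checking wrappers):

	#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
	#define _MASKED_BIT_ENABLE(a)		_MASKED_FIELD((a), (a))
	#define _MASKED_BIT_DISABLE(a)		_MASKED_FIELD((a), 0)

	/* setting bit 3 writes 0x00080008: the HW updates only bit 3 */
	u32 set = _MASKED_BIT_ENABLE(BIT(3));
	/* clearing bit 3 writes 0x00080000: mask bit set, value bit clear */
	u32 clr = _MASKED_BIT_DISABLE(BIT(3));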
*/ - WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, - GEN9_PBE_COMPRESSED_HASH_SELECTION); - WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, - GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR); + wa_masked_en(wal, COMMON_SLICE_CHICKEN2, + GEN9_PBE_COMPRESSED_HASH_SELECTION); + wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7, + GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR); } /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */ /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */ - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, - FLOW_CONTROL_ENABLE | - PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); + wa_masked_en(wal, GEN8_ROW_CHICKEN, + FLOW_CONTROL_ENABLE | + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */ /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */ - WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7, - GEN9_ENABLE_YV12_BUGFIX | - GEN9_ENABLE_GPGPU_PREEMPTION); + wa_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7, + GEN9_ENABLE_YV12_BUGFIX | + GEN9_ENABLE_GPGPU_PREEMPTION); /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */ /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */ - WA_SET_BIT_MASKED(CACHE_MODE_1, - GEN8_4x4_STC_OPTIMIZATION_DISABLE | - GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE); + wa_masked_en(wal, CACHE_MODE_1, + GEN8_4x4_STC_OPTIMIZATION_DISABLE | + GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE); /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */ - WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, - GEN9_CCS_TLB_PREFETCH_ENABLE); + wa_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5, + GEN9_CCS_TLB_PREFETCH_ENABLE); /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */ - WA_SET_BIT_MASKED(HDC_CHICKEN0, - HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | - HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE); + wa_masked_en(wal, HDC_CHICKEN0, + HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | + HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE); /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are * both tied to WaForceContextSaveRestoreNonCoherent @@ -394,19 +406,19 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine, */ /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */ - WA_SET_BIT_MASKED(HDC_CHICKEN0, - HDC_FORCE_NON_COHERENT); + wa_masked_en(wal, HDC_CHICKEN0, + HDC_FORCE_NON_COHERENT); /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) || IS_COFFEELAKE(i915) || IS_COMETLAKE(i915)) - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, - GEN8_SAMPLER_POWER_BYPASS_DIS); + wa_masked_en(wal, HALF_SLICE_CHICKEN3, + GEN8_SAMPLER_POWER_BYPASS_DIS); /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */ - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); + wa_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE); /* * Supporting preemption with fine-granularity requires changes in the @@ -420,16 +432,16 @@ static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine, */ /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */ - WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL); + wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL); /* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */ - WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + wa_masked_field_set(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); /* WaClearHIZ_WM_CHICKEN3:bxt,glk */ if (IS_GEN9_LP(i915)) - WA_SET_BIT_MASKED(GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ); + wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ); } static void skl_tune_iz_hashing(struct intel_engine_cs *engine, @@
-463,7 +475,7 @@ static void skl_tune_iz_hashing(struct intel_engine_cs *engine, return; /* Tune IZ hashing. See intel_device_info_runtime_init() */ - WA_SET_FIELD_MASKED(GEN7_GT_MODE, + wa_masked_field_set(wal, GEN7_GT_MODE, GEN9_IZ_HASHING_MASK(2) | GEN9_IZ_HASHING_MASK(1) | GEN9_IZ_HASHING_MASK(0), @@ -485,12 +497,12 @@ static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine, gen9_ctx_workarounds_init(engine, wal); /* WaDisableThreadStallDopClockGating:bxt */ - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, - STALL_DOP_GATING_DISABLE); + wa_masked_en(wal, GEN8_ROW_CHICKEN, + STALL_DOP_GATING_DISABLE); /* WaToEnableHwFixForPushConstHWBug:bxt */ - WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, - GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + wa_masked_en(wal, COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); } static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine, @@ -502,12 +514,12 @@ static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine, /* WaToEnableHwFixForPushConstHWBug:kbl */ if (IS_KBL_GT_REVID(i915, KBL_REVID_C0, REVID_FOREVER)) - WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, - GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + wa_masked_en(wal, COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); /* WaDisableSbeCacheDispatchPortSharing:kbl */ - WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1, - GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); + wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1, + GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); } static void glk_ctx_workarounds_init(struct intel_engine_cs *engine, @@ -516,8 +528,8 @@ static void glk_ctx_workarounds_init(struct intel_engine_cs *engine, gen9_ctx_workarounds_init(engine, wal); /* WaToEnableHwFixForPushConstHWBug:glk */ - WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, - GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + wa_masked_en(wal, COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); } static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine, @@ -526,41 +538,41 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine, gen9_ctx_workarounds_init(engine, wal); /* WaToEnableHwFixForPushConstHWBug:cfl */ - WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, - GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + wa_masked_en(wal, COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); /* WaDisableSbeCacheDispatchPortSharing:cfl */ - WA_SET_BIT_MASKED(GEN7_HALF_SLICE_CHICKEN1, - GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); + wa_masked_en(wal, GEN7_HALF_SLICE_CHICKEN1, + GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); } static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { /* WaForceContextSaveRestoreNonCoherent:cnl */ - WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0, - HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT); + wa_masked_en(wal, CNL_HDC_CHICKEN0, + HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT); /* WaDisableReplayBufferBankArbitrationOptimization:cnl */ - WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, - GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); + wa_masked_en(wal, COMMON_SLICE_CHICKEN2, + GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); /* WaPushConstantDereferenceHoldDisable:cnl */ - WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE); + wa_masked_en(wal, GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE); /* FtrEnableFastAnisoL1BankingFix:cnl */ - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX); + wa_masked_en(wal, HALF_SLICE_CHICKEN3, CNL_FAST_ANISO_L1_BANKING_FIX); /* WaDisable3DMidCmdPreemption:cnl 
*/ - WA_CLR_BIT_MASKED(GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL); + wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL); /* WaDisableGPGPUMidCmdPreemption:cnl */ - WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + wa_masked_field_set(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, GEN9_PREEMPT_GPGPU_COMMAND_LEVEL); /* WaDisableEarlyEOT:cnl */ - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT); + wa_masked_en(wal, GEN8_ROW_CHICKEN, DISABLE_EARLY_EOT); } static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, @@ -578,8 +590,8 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, * Formerly known as WaPushConstantDereferenceHoldDisable */ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0)) - WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, - PUSH_CONSTANT_DEREF_DISABLE); + wa_masked_en(wal, GEN7_ROW_CHICKEN2, + PUSH_CONSTANT_DEREF_DISABLE); /* WaForceEnableNonCoherent:icl * This is not the same workaround as in early Gen9 platforms, where @@ -588,40 +600,40 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, * (the register is whitelisted in hardware now, so UMDs can opt in * for coherency if they have a good reason). */ - WA_SET_BIT_MASKED(ICL_HDC_MODE, HDC_FORCE_NON_COHERENT); + wa_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT); /* Wa_2006611047:icl (pre-prod) * Formerly known as WaDisableImprovedTdlClkGating */ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) - WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, - GEN11_TDL_CLOCK_GATING_FIX_DISABLE); + wa_masked_en(wal, GEN7_ROW_CHICKEN2, + GEN11_TDL_CLOCK_GATING_FIX_DISABLE); /* Wa_2006665173:icl (pre-prod) */ if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_A0)) - WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, - GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC); + wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3, + GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC); /* WaEnableFloatBlendOptimization:icl */ - wa_write_masked_or(wal, - GEN10_CACHE_MODE_SS, - 0, /* write-only, so skip validation */ - _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE)); + wa_write_clr_set(wal, + GEN10_CACHE_MODE_SS, + 0, /* write-only, so skip validation */ + _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE)); /* WaDisableGPGPUMidThreadPreemption:icl */ - WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + wa_masked_field_set(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL); /* allow headerless messages for preemptible GPGPU context */ - WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE, - GEN11_SAMPLER_ENABLE_HEADLESS_MSG); + wa_masked_en(wal, GEN10_SAMPLER_MODE, + GEN11_SAMPLER_ENABLE_HEADLESS_MSG); /* Wa_1604278689:icl,ehl */ wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID); - wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER, - 0, /* write-only register; skip validation */ - 0xFFFFFFFF); + wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER, + 0, /* write-only register; skip validation */ + 0xFFFFFFFF); /* Wa_1406306137:icl,ehl */ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU); @@ -641,11 +653,11 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine, * Wa_14010443199:rkl * Wa_14010698770:rkl */ - WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, - GEN12_DISABLE_CPS_AWARE_COLOR_PIPE); + wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3, + GEN12_DISABLE_CPS_AWARE_COLOR_PIPE); /* WaDisableGPGPUMidThreadPreemption:gen12 */ - WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1, + wa_masked_field_set(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_GPGPU_LEVEL_MASK, GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL); } @@ -678,12 
+690,22 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine, gen12_ctx_workarounds_init(engine, wal); /* Wa_1409044764 */ - WA_CLR_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, - DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN); + wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3, + DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN); /* Wa_22010493298 */ - WA_SET_BIT_MASKED(HIZ_CHICKEN, - DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE); + wa_masked_en(wal, HIZ_CHICKEN, + DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE); + + /* + * Wa_16011163337 + * + * Like in tgl_ctx_workarounds_init(), read verification is ignored due + * to Wa_1608008084. + */ + wa_add(wal, + FF_MODE2, + FF_MODE2_GS_TIMER_MASK, FF_MODE2_GS_TIMER_224, 0); } static void @@ -802,57 +824,11 @@ ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) static void snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { - /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ - wa_masked_en(wal, - _3D_CHICKEN, - _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB); - - /* WaDisable_RenderCache_OperationalFlush:snb */ - wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE); - - /* - * BSpec recommends 8x4 when MSAA is used, - * however in practice 16x4 seems fastest. - * - * Note that PS/WM thread counts depend on the WIZ hashing - * disable bit, which we don't touch here, but it's good - * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). - */ - wa_add(wal, - GEN6_GT_MODE, 0, - _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4), - GEN6_WIZ_HASHING_16x4); - - wa_masked_dis(wal, CACHE_MODE_0, CM0_STC_EVICT_DISABLE_LRA_SNB); - - wa_masked_en(wal, - _3D_CHICKEN3, - /* WaStripsFansDisableFastClipPerformanceFix:snb */ - _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL | - /* - * Bspec says: - * "This bit must be set if 3DSTATE_CLIP clip mode is set - * to normal and 3DSTATE_SF number of SF output attributes - * is more than 16." - */ - _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH); } static void ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { - /* WaDisableEarlyCull:ivb */ - wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL); - - /* WaDisablePSDDualDispatchEnable:ivb */ - if (IS_IVB_GT1(i915)) - wa_masked_en(wal, - GEN7_HALF_SLICE_CHICKEN1, - GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); - - /* WaDisable_RenderCache_OperationalFlush:ivb */ - wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE); - /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */ wa_masked_dis(wal, GEN7_COMMON_SLICE_CHICKEN1, @@ -864,91 +840,15 @@ ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) /* WaForceL3Serialization:ivb */ wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE); - - /* - * WaVSThreadDispatchOverride:ivb,vlv - * - * This actually overrides the dispatch - * mode for all thread types. - */ - wa_write_masked_or(wal, GEN7_FF_THREAD_MODE, - GEN7_FF_SCHED_MASK, - GEN7_FF_TS_SCHED_HW | - GEN7_FF_VS_SCHED_HW | - GEN7_FF_DS_SCHED_HW); - - if (0) { /* causes HiZ corruption on ivb:gt1 */ - /* enable HiZ Raw Stall Optimization */ - wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE); - } - - /* WaDisable4x2SubspanOptimization:ivb */ - wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); - - /* - * BSpec recommends 8x4 when MSAA is used, - * however in practice 16x4 seems fastest. 
- * - * Note that PS/WM thread counts depend on the WIZ hashing - * disable bit, which we don't touch here, but it's good - * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). - */ - wa_add(wal, GEN7_GT_MODE, 0, - _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4), - GEN6_WIZ_HASHING_16x4); } static void vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { - /* WaDisableEarlyCull:vlv */ - wa_masked_en(wal, _3D_CHICKEN3, _3D_CHICKEN_SF_DISABLE_OBJEND_CULL); - - /* WaPsdDispatchEnable:vlv */ - /* WaDisablePSDDualDispatchEnable:vlv */ - wa_masked_en(wal, - GEN7_HALF_SLICE_CHICKEN1, - GEN7_MAX_PS_THREAD_DEP | - GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); - - /* WaDisable_RenderCache_OperationalFlush:vlv */ - wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE); - /* WaForceL3Serialization:vlv */ wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE); /* - * WaVSThreadDispatchOverride:ivb,vlv - * - * This actually overrides the dispatch - * mode for all thread types. - */ - wa_write_masked_or(wal, - GEN7_FF_THREAD_MODE, - GEN7_FF_SCHED_MASK, - GEN7_FF_TS_SCHED_HW | - GEN7_FF_VS_SCHED_HW | - GEN7_FF_DS_SCHED_HW); - - /* - * BSpec says this must be set, even though - * WaDisable4x2SubspanOptimization isn't listed for VLV. - */ - wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); - - /* - * BSpec recommends 8x4 when MSAA is used, - * however in practice 16x4 seems fastest. - * - * Note that PS/WM thread counts depend on the WIZ hashing - * disable bit, which we don't touch here, but it's good - * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). - */ - wa_add(wal, GEN7_GT_MODE, 0, - _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4), - GEN6_WIZ_HASHING_16x4); - - /* * WaIncreaseL3CreditsForVLVB0:vlv * This is the hardware default actually. */ @@ -968,31 +868,6 @@ hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) /* WaVSRefCountFullforceMissDisable:hsw */ wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME); - - wa_masked_dis(wal, - CACHE_MODE_0_GEN7, - /* WaDisable_RenderCache_OperationalFlush:hsw */ - RC_OP_FLUSH_ENABLE | - /* enable HiZ Raw Stall Optimization */ - HIZ_RAW_STALL_OPT_DISABLE); - - /* WaDisable4x2SubspanOptimization:hsw */ - wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); - - /* - * BSpec recommends 8x4 when MSAA is used, - * however in practice 16x4 seems fastest. - * - * Note that PS/WM thread counts depend on the WIZ hashing - * disable bit, which we don't touch here, but it's good - * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 
- */ - wa_add(wal, GEN7_GT_MODE, 0, - _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4), - GEN6_WIZ_HASHING_16x4); - - /* WaSampleCChickenBitEnable:hsw */ - wa_masked_en(wal, HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE); } static void @@ -1162,7 +1037,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) drm_dbg(&i915->drm, "MCR slice/subslice = %x\n", mcr); - wa_write_masked_or(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr); + wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr); } static void @@ -1187,10 +1062,10 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); /* WaModifyGamTlbPartitioning:icl */ - wa_write_masked_or(wal, - GEN11_GACB_PERF_CTRL, - GEN11_HASH_CTRL_MASK, - GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); + wa_write_clr_set(wal, + GEN11_GACB_PERF_CTRL, + GEN11_HASH_CTRL_MASK, + GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4); /* Wa_1405766107:icl * Formerly known as WaCL2SFHalfMaxAlloc @@ -1258,6 +1133,11 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) wa_write_or(wal, SLICE_UNIT_LEVEL_CLKGATE, L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); + + /* Wa_1408615072:tgl[a0] */ + if (IS_TGL_UY_GT_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) + wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, + VSUNIT_CLKGATE_DIS_TGL); } static void @@ -1356,9 +1236,9 @@ static bool wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from) { if ((cur ^ wa->set) & wa->read) { - DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x)\n", + DRM_ERROR("%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n", name, from, i915_mmio_reg_offset(wa->reg), - cur, cur & wa->read, wa->set); + cur, cur & wa->read, wa->set & wa->read); return false; } @@ -1423,6 +1303,7 @@ bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from) return wa_list_verify(gt->uncore, &gt->i915->gt_wa_list, from); } +__maybe_unused static inline bool is_nonpriv_flags_valid(u32 flags) { /* Check only valid flag bits are set */ @@ -1750,10 +1631,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) wa_write_or(wal, GEN7_SARCHKMD, GEN7_DISABLE_SAMPLER_PREFETCH); - - /* Wa_1408615072:tgl */ - wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, - VSUNIT_CLKGATE_DIS_TGL); } if (IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { @@ -1768,6 +1645,19 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) */ wa_write_or(wal, GEN7_FF_THREAD_MODE, GEN12_FF_TESSELATION_DOP_GATE_DISABLE); + + /* + * Wa_1606700617:tgl,dg1 + * Wa_22010271021:tgl,rkl,dg1 + */ + wa_masked_en(wal, + GEN9_CS_DEBUG_MODE1, + FF_DOP_CLOCK_GATE_DISABLE); + + /* Wa_1406941453:tgl,rkl,dg1 */ + wa_masked_en(wal, + GEN10_SAMPLER_MODE, + ENABLE_SMALLPL); } if (IS_DG1_REVID(i915, DG1_REVID_A0, DG1_REVID_A0) || @@ -1796,21 +1686,6 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN6_RC_SLEEP_PSMI_CONTROL, GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE | GEN8_RC_SEMA_IDLE_MSG_DISABLE); - - /* - * Wa_1606700617:tgl - * Wa_22010271021:tgl,rkl - */ - wa_masked_en(wal, - GEN9_CS_DEBUG_MODE1, - FF_DOP_CLOCK_GATE_DISABLE); - } - - if (IS_GEN(i915, 12)) { - /* Wa_1406941453:gen12 */ - wa_masked_en(wal, - GEN10_SAMPLER_MODE, - ENABLE_SMALLPL); } if (IS_GEN(i915, 11)) { @@ -1836,14 +1711,14 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) * Wa_1604223664:icl * Formerly known as
WaL3BankAddressHashing */ - wa_write_masked_or(wal, - GEN8_GARBCNTL, - GEN11_HASH_CTRL_EXCL_MASK, - GEN11_HASH_CTRL_EXCL_BIT0); - wa_write_masked_or(wal, - GEN11_GLBLINVL, - GEN11_BANK_HASH_ADDR_EXCL_MASK, - GEN11_BANK_HASH_ADDR_EXCL_BIT0); + wa_write_clr_set(wal, + GEN8_GARBCNTL, + GEN11_HASH_CTRL_EXCL_MASK, + GEN11_HASH_CTRL_EXCL_BIT0); + wa_write_clr_set(wal, + GEN11_GLBLINVL, + GEN11_BANK_HASH_ADDR_EXCL_MASK, + GEN11_BANK_HASH_ADDR_EXCL_BIT0); /* * Wa_1405733216:icl @@ -1872,10 +1747,10 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN7_DISABLE_SAMPLER_PREFETCH); /* Wa_1409178092:icl */ - wa_write_masked_or(wal, - GEN11_SCRATCH2, - GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE, - 0); + wa_write_clr_set(wal, + GEN11_SCRATCH2, + GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE, + 0); /* WaEnable32PlaneMode:icl */ wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS, @@ -1949,11 +1824,11 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */ if (IS_GEN9_LP(i915)) - wa_write_masked_or(wal, - GEN8_L3SQCREG1, - L3_PRIO_CREDITS_MASK, - L3_GENERAL_PRIO_CREDITS(62) | - L3_HIGH_PRIO_CREDITS(2)); + wa_write_clr_set(wal, + GEN8_L3SQCREG1, + L3_PRIO_CREDITS_MASK, + L3_GENERAL_PRIO_CREDITS(62) | + L3_HIGH_PRIO_CREDITS(2)); /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */ wa_write_or(wal, @@ -1961,12 +1836,112 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GEN8_LQSC_FLUSH_COHERENT_LINES); } - if (IS_GEN(i915, 7)) + if (IS_HASWELL(i915)) { + /* WaSampleCChickenBitEnable:hsw */ + wa_masked_en(wal, + HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE); + + wa_masked_dis(wal, + CACHE_MODE_0_GEN7, + /* enable HiZ Raw Stall Optimization */ + HIZ_RAW_STALL_OPT_DISABLE); + + /* WaDisable4x2SubspanOptimization:hsw */ + wa_masked_en(wal, CACHE_MODE_1, PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); + } + + if (IS_VALLEYVIEW(i915)) { + /* WaDisableEarlyCull:vlv */ + wa_masked_en(wal, + _3D_CHICKEN3, + _3D_CHICKEN_SF_DISABLE_OBJEND_CULL); + + /* + * WaVSThreadDispatchOverride:ivb,vlv + * + * This actually overrides the dispatch + * mode for all thread types. + */ + wa_write_clr_set(wal, + GEN7_FF_THREAD_MODE, + GEN7_FF_SCHED_MASK, + GEN7_FF_TS_SCHED_HW | + GEN7_FF_VS_SCHED_HW | + GEN7_FF_DS_SCHED_HW); + + /* WaPsdDispatchEnable:vlv */ + /* WaDisablePSDDualDispatchEnable:vlv */ + wa_masked_en(wal, + GEN7_HALF_SLICE_CHICKEN1, + GEN7_MAX_PS_THREAD_DEP | + GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); + } + + if (IS_IVYBRIDGE(i915)) { + /* WaDisableEarlyCull:ivb */ + wa_masked_en(wal, + _3D_CHICKEN3, + _3D_CHICKEN_SF_DISABLE_OBJEND_CULL); + + if (0) { /* causes HiZ corruption on ivb:gt1 */ + /* enable HiZ Raw Stall Optimization */ + wa_masked_dis(wal, + CACHE_MODE_0_GEN7, + HIZ_RAW_STALL_OPT_DISABLE); + } + + /* + * WaVSThreadDispatchOverride:ivb,vlv + * + * This actually overrides the dispatch + * mode for all thread types. 
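+ *
+ * As a sketch of how such a clr/set pair is applied when the list is
+ * replayed (conceptually a read-modify-write; the exact plumbing lives
+ * in wa_list_apply()):
+ *
+ *	u32 val = intel_uncore_read(uncore, GEN7_FF_THREAD_MODE);
+ *
+ *	val &= ~GEN7_FF_SCHED_MASK;
+ *	val |= GEN7_FF_TS_SCHED_HW | GEN7_FF_VS_SCHED_HW | GEN7_FF_DS_SCHED_HW;
+ *	intel_uncore_write(uncore, GEN7_FF_THREAD_MODE, val);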
+ */ + wa_write_clr_set(wal, + GEN7_FF_THREAD_MODE, + GEN7_FF_SCHED_MASK, + GEN7_FF_TS_SCHED_HW | + GEN7_FF_VS_SCHED_HW | + GEN7_FF_DS_SCHED_HW); + + /* WaDisablePSDDualDispatchEnable:ivb */ + if (IS_IVB_GT1(i915)) + wa_masked_en(wal, + GEN7_HALF_SLICE_CHICKEN1, + GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE); + } + + if (IS_GEN(i915, 7)) { /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ wa_masked_en(wal, GFX_MODE_GEN7, GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE); + /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */ + wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE); + + /* + * WaDisable4x2SubspanOptimization:ivb,hsw + * + * BSpec says this must be set even though the workaround + * isn't listed for VLV. + */ + wa_masked_en(wal, + CACHE_MODE_1, + PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); + + /* + * BSpec recommends 8x4 when MSAA is used, + * however in practice 16x4 seems fastest. + * + * Note that PS/WM thread counts depend on the WIZ hashing + * disable bit, which we don't touch here, but it's good + * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). + */ + wa_add(wal, GEN7_GT_MODE, 0, + _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, + GEN6_WIZ_HASHING_16x4), + GEN6_WIZ_HASHING_16x4); + } + if (IS_GEN_RANGE(i915, 6, 7)) /* * We need to disable the AsyncFlip performance optimisations in @@ -1989,6 +1964,39 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) GFX_MODE, GFX_TLB_INVALIDATE_EXPLICIT); + /* WaDisableHiZPlanesWhenMSAAEnabled:snb */ + wa_masked_en(wal, + _3D_CHICKEN, + _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB); + + wa_masked_en(wal, + _3D_CHICKEN3, + /* WaStripsFansDisableFastClipPerformanceFix:snb */ + _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL | + /* + * Bspec says: + * "This bit must be set if 3DSTATE_CLIP clip mode is set + * to normal and 3DSTATE_SF number of SF output attributes + * is more than 16." + */ + _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH); + + /* + * BSpec recommends 8x4 when MSAA is used, + * however in practice 16x4 seems fastest. + * + * Note that PS/WM thread counts depend on the WIZ hashing + * disable bit, which we don't touch here, but it's good + * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). + */ + wa_add(wal, + GEN6_GT_MODE, 0, + _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4), + GEN6_WIZ_HASHING_16x4); + + /* WaDisable_RenderCache_OperationalFlush:snb */ + wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE); + /* * From the Sandybridge PRM, volume 1 part 3, page 24: * "If this bit is set, STCunit will have LRA as replacement @@ -2065,39 +2073,6 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine) wa_list_apply(engine->uncore, &engine->wa_list); } -static struct i915_vma * -create_scratch(struct i915_address_space *vm, int count) -{ - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - unsigned int size; - int err; - - size = round_up(count * sizeof(u32), PAGE_SIZE); - obj = i915_gem_object_create_internal(vm->i915, size); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); - - vma = i915_vma_instance(obj, vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_obj; - } - - err = i915_vma_pin(vma, 0, 0, - i915_vma_is_ggtt(vma) ?
PIN_GLOBAL : PIN_USER); - if (err) - goto err_obj; - - return vma; - -err_obj: - i915_gem_object_put(obj); - return ERR_PTR(err); -} - struct mcr_range { u32 start; u32 end; @@ -2200,7 +2175,8 @@ static int engine_wa_list_verify(struct intel_context *ce, if (!wal->count) return 0; - vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count); + vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm, + wal->count * sizeof(u32)); if (IS_ERR(vma)) return PTR_ERR(vma); diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 2f830017c51d..4b4f03b70df7 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -245,17 +245,6 @@ static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled) GEM_BUG_ON(stalled); } -static void mark_eio(struct i915_request *rq) -{ - if (i915_request_completed(rq)) - return; - - GEM_BUG_ON(i915_request_signaled(rq)); - - i915_request_set_error_once(rq, -EIO); - i915_request_mark_complete(rq); -} - static void mock_reset_cancel(struct intel_engine_cs *engine) { struct mock_engine *mock = @@ -269,12 +258,12 @@ static void mock_reset_cancel(struct intel_engine_cs *engine) /* Mark all submitted requests as skipped. */ list_for_each_entry(rq, &engine->active.requests, sched.link) - mark_eio(rq); + i915_request_mark_eio(rq); intel_engine_signal_breadcrumbs(engine); /* Cancel and submit all pending requests. */ list_for_each_entry(rq, &mock->hw_queue, mock.link) { - mark_eio(rq); + i915_request_mark_eio(rq); __i915_request_submit(rq); } INIT_LIST_HEAD(&mock->hw_queue); diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c index 1f4020e906a8..db738d400168 100644 --- a/drivers/gpu/drm/i915/gt/selftest_context.c +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -25,7 +25,7 @@ static int request_sync(struct i915_request *rq) /* Opencode i915_request_add() so we can keep the timeline locked. */ __i915_request_commit(rq); rq->sched.attr.priority = I915_PRIORITY_BARRIER; - __i915_request_queue(rq, NULL); + __i915_request_queue_bh(rq); timeout = i915_request_wait(rq, 0, HZ / 10); if (timeout < 0) diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c index 729c3c7b11e2..439c8984f5fa 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_cs.c @@ -6,6 +6,7 @@ #include <linux/sort.h> +#include "intel_gpu_commands.h" #include "intel_gt_pm.h" #include "intel_rps.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c index b88aa35ad75b..223ab88f7e57 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -197,6 +197,7 @@ static int cmp_u32(const void *_a, const void *_b) static int __live_heartbeat_fast(struct intel_engine_cs *engine) { + const unsigned int error_threshold = max(20000u, jiffies_to_usecs(6)); struct intel_context *ce; struct i915_request *rq; ktime_t t0, t1; @@ -254,12 +255,18 @@ static int __live_heartbeat_fast(struct intel_engine_cs *engine) times[0], times[ARRAY_SIZE(times) - 1]); - /* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */ - if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) { + /* + * Ideally, the upper bound on min work delay would be something like + * 2 * 2 (worst), +1 for scheduling, +1 for slack. 
In practice, we + * are, even with system_wq_highpri, at the mercy of the CPU scheduler + * and may be stuck behind some slow work for many millisecond. Such + * as our very own display workers. + */ + if (times[ARRAY_SIZE(times) / 2] > error_threshold) { pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n", engine->name, times[ARRAY_SIZE(times) / 2], - jiffies_to_usecs(6)); + error_threshold); err = -EINVAL; } diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index b08fc5390e8a..c3d965279fc3 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -4,13 +4,215 @@ * Copyright © 2018 Intel Corporation */ +#include <linux/sort.h> + #include "i915_selftest.h" +#include "intel_gpu_commands.h" +#include "intel_gt_clock_utils.h" #include "selftest_engine.h" #include "selftest_engine_heartbeat.h" #include "selftests/igt_atomic.h" #include "selftests/igt_flush_test.h" #include "selftests/igt_spinner.h" +#define COUNT 5 + +static int cmp_u64(const void *A, const void *B) +{ + const u64 *a = A, *b = B; + + return *a - *b; +} + +static u64 trifilter(u64 *a) +{ + sort(a, COUNT, sizeof(*a), cmp_u64, NULL); + return (a[1] + 2 * a[2] + a[3]) >> 2; +} + +static u32 *emit_wait(u32 *cs, u32 offset, int op, u32 value) +{ + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + op; + *cs++ = value; + *cs++ = offset; + *cs++ = 0; + + return cs; +} + +static u32 *emit_store(u32 *cs, u32 offset, u32 value) +{ + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = offset; + *cs++ = 0; + *cs++ = value; + + return cs; +} + +static u32 *emit_srm(u32 *cs, i915_reg_t reg, u32 offset) +{ + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = i915_mmio_reg_offset(reg); + *cs++ = offset; + *cs++ = 0; + + return cs; +} + +static void write_semaphore(u32 *x, u32 value) +{ + WRITE_ONCE(*x, value); + wmb(); +} + +static int __measure_timestamps(struct intel_context *ce, + u64 *dt, u64 *d_ring, u64 *d_ctx) +{ + struct intel_engine_cs *engine = ce->engine; + u32 *sema = memset32(engine->status_page.addr + 1000, 0, 5); + u32 offset = i915_ggtt_offset(engine->status_page.vma); + struct i915_request *rq; + u32 *cs; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + cs = intel_ring_begin(rq, 28); + if (IS_ERR(cs)) { + i915_request_add(rq); + return PTR_ERR(cs); + } + + /* Signal & wait for start */ + cs = emit_store(cs, offset + 4008, 1); + cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_NEQ_SDD, 1); + + cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4000); + cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4004); + + /* Busy wait */ + cs = emit_wait(cs, offset + 4008, MI_SEMAPHORE_SAD_EQ_SDD, 1); + + cs = emit_srm(cs, RING_TIMESTAMP(engine->mmio_base), offset + 4016); + cs = emit_srm(cs, RING_CTX_TIMESTAMP(engine->mmio_base), offset + 4012); + + intel_ring_advance(rq, cs); + i915_request_get(rq); + i915_request_add(rq); + intel_engine_flush_submission(engine); + + /* Wait for the request to start executing, that then waits for us */ + while (READ_ONCE(sema[2]) == 0) + cpu_relax(); + + /* Run the request for a 100us, sampling timestamps before/after */ + preempt_disable(); + *dt = local_clock(); + write_semaphore(&sema[2], 0); + udelay(100); + *dt = local_clock() - *dt; + write_semaphore(&sema[2], 1); + preempt_enable(); + + if (i915_request_wait(rq, 0, HZ / 2) < 0) { + i915_request_put(rq); + return 
-ETIME; + } + i915_request_put(rq); + + pr_debug("%s CTX_TIMESTAMP: [%x, %x], RING_TIMESTAMP: [%x, %x]\n", + engine->name, sema[1], sema[3], sema[0], sema[4]); + + *d_ctx = sema[3] - sema[1]; + *d_ring = sema[4] - sema[0]; + return 0; +} + +static int __live_engine_timestamps(struct intel_engine_cs *engine) +{ + u64 s_ring[COUNT], s_ctx[COUNT], st[COUNT], d_ring, d_ctx, dt; + struct intel_context *ce; + int i, err = 0; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + for (i = 0; i < COUNT; i++) { + err = __measure_timestamps(ce, &st[i], &s_ring[i], &s_ctx[i]); + if (err) + break; + } + intel_context_put(ce); + if (err) + return err; + + dt = trifilter(st); + d_ring = trifilter(s_ring); + d_ctx = trifilter(s_ctx); + + pr_info("%s elapsed:%lldns, CTX_TIMESTAMP:%lldns, RING_TIMESTAMP:%lldns\n", + engine->name, dt, + intel_gt_clock_interval_to_ns(engine->gt, d_ctx), + intel_gt_clock_interval_to_ns(engine->gt, d_ring)); + + d_ring = intel_gt_clock_interval_to_ns(engine->gt, d_ring); + if (3 * dt > 4 * d_ring || 4 * dt < 3 * d_ring) { + pr_err("%s Mismatch between ring timestamp and walltime!\n", + engine->name); + return -EINVAL; + } + + d_ring = trifilter(s_ring); + d_ctx = trifilter(s_ctx); + + d_ctx *= engine->gt->clock_frequency; + if (IS_ICELAKE(engine->i915)) + d_ring *= 12500000; /* Fixed 80ns for icl ctx timestamp? */ + else + d_ring *= engine->gt->clock_frequency; + + if (3 * d_ctx > 4 * d_ring || 4 * d_ctx < 3 * d_ring) { + pr_err("%s Mismatch between ring and context timestamps!\n", + engine->name); + return -EINVAL; + } + + return 0; +} + +static int live_engine_timestamps(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* + * Check that CS_TIMESTAMP / CTX_TIMESTAMP are in sync, i.e. share + * the same CS clock. 
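+ *
+ * The mismatch tests in __live_engine_timestamps() boil down to
+ * requiring that two intervals agree to within 25%; as a standalone
+ * sketch of that predicate:
+ *
+ *	static bool intervals_agree(u64 a, u64 b)
+ *	{
+ *		/* true iff 3/4 <= a/b <= 4/3 */
+ *		return 3 * a <= 4 * b && 3 * b <= 4 * a;
+ *	}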
+ */ + + if (INTEL_GEN(gt->i915) < 8) + return 0; + + for_each_engine(engine, gt, id) { + int err; + + st_engine_heartbeat_disable(engine); + err = __live_engine_timestamps(engine); + st_engine_heartbeat_enable(engine); + if (err) + return err; + } + + return 0; +} + static int live_engine_busy_stats(void *arg) { struct intel_gt *gt = arg; @@ -177,6 +379,7 @@ static int live_engine_pm(void *arg) int live_engine_pm_selftests(struct intel_gt *gt) { static const struct i915_subtest tests[] = { + SUBTEST(live_engine_timestamps), SUBTEST(live_engine_busy_stats), SUBTEST(live_engine_pm), }; diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c new file mode 100644 index 000000000000..264b5ebdb021 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c @@ -0,0 +1,4741 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include <linux/prime_numbers.h> + +#include "gem/i915_gem_pm.h" +#include "gt/intel_engine_heartbeat.h" +#include "gt/intel_reset.h" +#include "gt/selftest_engine_heartbeat.h" + +#include "i915_selftest.h" +#include "selftests/i915_random.h" +#include "selftests/igt_flush_test.h" +#include "selftests/igt_live_test.h" +#include "selftests/igt_spinner.h" +#include "selftests/lib_sw_fence.h" + +#include "gem/selftests/igt_gem_utils.h" +#include "gem/selftests/mock_context.h" + +#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4) +#define NUM_GPR 16 +#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */ + +static bool is_active(struct i915_request *rq) +{ + if (i915_request_is_active(rq)) + return true; + + if (i915_request_on_hold(rq)) + return true; + + if (i915_request_has_initial_breadcrumb(rq) && i915_request_started(rq)) + return true; + + return false; +} + +static int wait_for_submit(struct intel_engine_cs *engine, + struct i915_request *rq, + unsigned long timeout) +{ + /* Ignore our own attempts to suppress excess tasklets */ + tasklet_hi_schedule(&engine->execlists.tasklet); + + timeout += jiffies; + do { + bool done = time_after(jiffies, timeout); + + if (i915_request_completed(rq)) /* that was quick! 
*/ + return 0; + + /* Wait until the HW has acknowledged the submission (or err) */ + intel_engine_flush_submission(engine); + if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq)) + return 0; + + if (done) + return -ETIME; + + cond_resched(); + } while (1); +} + +static int wait_for_reset(struct intel_engine_cs *engine, + struct i915_request *rq, + unsigned long timeout) +{ + timeout += jiffies; + + do { + cond_resched(); + intel_engine_flush_submission(engine); + + if (READ_ONCE(engine->execlists.pending[0])) + continue; + + if (i915_request_completed(rq)) + break; + + if (READ_ONCE(rq->fence.error)) + break; + } while (time_before(jiffies, timeout)); + + flush_scheduled_work(); + + if (rq->fence.error != -EIO) { + pr_err("%s: hanging request %llx:%lld not reset\n", + engine->name, + rq->fence.context, + rq->fence.seqno); + return -EINVAL; + } + + /* Give the request a jiffie to complete after flushing the worker */ + if (i915_request_wait(rq, 0, + max(0l, (long)(timeout - jiffies)) + 1) < 0) { + pr_err("%s: hanging request %llx:%lld did not complete\n", + engine->name, + rq->fence.context, + rq->fence.seqno); + return -ETIME; + } + + return 0; +} + +static int live_sanitycheck(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_spinner spin; + int err = 0; + + if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915)) + return 0; + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + for_each_engine(engine, gt, id) { + struct intel_context *ce; + struct i915_request *rq; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + break; + } + + rq = igt_spinner_create_request(&spin, ce, MI_NOOP); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_ctx; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin, rq)) { + GEM_TRACE("spinner failed to start\n"); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + err = -EIO; + goto out_ctx; + } + + igt_spinner_end(&spin); + if (igt_flush_test(gt->i915)) { + err = -EIO; + goto out_ctx; + } + +out_ctx: + intel_context_put(ce); + if (err) + break; + } + + igt_spinner_fini(&spin); + return err; +} + +static int live_unlite_restore(struct intel_gt *gt, int prio) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_spinner spin; + int err = -ENOMEM; + + /* + * Check that we can correctly context switch between 2 instances + * on the same engine from the same parent context. + */ + + if (igt_spinner_init(&spin, gt)) + return err; + + err = 0; + for_each_engine(engine, gt, id) { + struct intel_context *ce[2] = {}; + struct i915_request *rq[2]; + struct igt_live_test t; + int n; + + if (prio && !intel_engine_has_preemption(engine)) + continue; + + if (!intel_engine_can_store_dword(engine)) + continue; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + break; + } + st_engine_heartbeat_disable(engine); + + for (n = 0; n < ARRAY_SIZE(ce); n++) { + struct intel_context *tmp; + + tmp = intel_context_create(engine); + if (IS_ERR(tmp)) { + err = PTR_ERR(tmp); + goto err_ce; + } + + err = intel_context_pin(tmp); + if (err) { + intel_context_put(tmp); + goto err_ce; + } + + /* + * Setup the pair of contexts such that if we + * lite-restore using the RING_TAIL from ce[1] it + * will execute garbage from ce[0]->ring. + */ + memset(tmp->ring->vaddr, + POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!]
*/ + tmp->ring->vma->size); + + ce[n] = tmp; + } + GEM_BUG_ON(!ce[1]->ring->size); + intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2); + lrc_update_regs(ce[1], engine, ce[1]->ring->head); + + rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); + if (IS_ERR(rq[0])) { + err = PTR_ERR(rq[0]); + goto err_ce; + } + + i915_request_get(rq[0]); + i915_request_add(rq[0]); + GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit); + + if (!igt_wait_for_spinner(&spin, rq[0])) { + i915_request_put(rq[0]); + goto err_ce; + } + + rq[1] = i915_request_create(ce[1]); + if (IS_ERR(rq[1])) { + err = PTR_ERR(rq[1]); + i915_request_put(rq[0]); + goto err_ce; + } + + if (!prio) { + /* + * Ensure we do the switch to ce[1] on completion. + * + * rq[0] is already submitted, so this should reduce + * to a no-op (a wait on a request on the same engine + * uses the submit fence, not the completion fence), + * but it will install a dependency on rq[1] for rq[0] + * that will prevent the pair being reordered by + * timeslicing. + */ + i915_request_await_dma_fence(rq[1], &rq[0]->fence); + } + + i915_request_get(rq[1]); + i915_request_add(rq[1]); + GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix); + i915_request_put(rq[0]); + + if (prio) { + struct i915_sched_attr attr = { + .priority = prio, + }; + + /* Alternatively preempt the spinner with ce[1] */ + engine->schedule(rq[1], &attr); + } + + /* And switch back to ce[0] for good measure */ + rq[0] = i915_request_create(ce[0]); + if (IS_ERR(rq[0])) { + err = PTR_ERR(rq[0]); + i915_request_put(rq[1]); + goto err_ce; + } + + i915_request_await_dma_fence(rq[0], &rq[1]->fence); + i915_request_get(rq[0]); + i915_request_add(rq[0]); + GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix); + i915_request_put(rq[1]); + i915_request_put(rq[0]); + +err_ce: + intel_engine_flush_submission(engine); + igt_spinner_end(&spin); + for (n = 0; n < ARRAY_SIZE(ce); n++) { + if (IS_ERR_OR_NULL(ce[n])) + break; + + intel_context_unpin(ce[n]); + intel_context_put(ce[n]); + } + + st_engine_heartbeat_enable(engine); + if (igt_live_test_end(&t)) + err = -EIO; + if (err) + break; + } + + igt_spinner_fini(&spin); + return err; +} + +static int live_unlite_switch(void *arg) +{ + return live_unlite_restore(arg, 0); +} + +static int live_unlite_preempt(void *arg) +{ + return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX)); +} + +static int live_unlite_ring(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct igt_spinner spin; + enum intel_engine_id id; + int err = 0; + + /* + * Setup a preemption event that will cause almost the entire ring + * to be unwound, potentially fooling our intel_ring_direction() + * into emitting a forward lite-restore instead of the rollback. 
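+ *
+ * For reference, a simplified sketch of the direction test (assuming,
+ * as i915 does, a power-of-two ring size): the sign of the wrapped
+ * distance between two ring offsets picks forwards vs backwards:
+ *
+ *	static int ring_direction(u32 size, u32 prev, u32 next)
+ *	{
+ *		/* shift the wrap bit up into the sign bit */
+ *		return (s32)((next - prev) << (32 - ilog2(size)));
+ *	}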
+ */ + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + for_each_engine(engine, gt, id) { + struct intel_context *ce[2] = {}; + struct i915_request *rq; + struct igt_live_test t; + int n; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (!intel_engine_can_store_dword(engine)) + continue; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + break; + } + st_engine_heartbeat_disable(engine); + + for (n = 0; n < ARRAY_SIZE(ce); n++) { + struct intel_context *tmp; + + tmp = intel_context_create(engine); + if (IS_ERR(tmp)) { + err = PTR_ERR(tmp); + goto err_ce; + } + + err = intel_context_pin(tmp); + if (err) { + intel_context_put(tmp); + goto err_ce; + } + + memset32(tmp->ring->vaddr, + 0xdeadbeef, /* trigger a hang if executed */ + tmp->ring->vma->size / sizeof(u32)); + + ce[n] = tmp; + } + + /* Create max prio spinner, followed by N low prio nops */ + rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ce; + } + + i915_request_get(rq); + rq->sched.attr.priority = I915_PRIORITY_BARRIER; + i915_request_add(rq); + + if (!igt_wait_for_spinner(&spin, rq)) { + intel_gt_set_wedged(gt); + i915_request_put(rq); + err = -ETIME; + goto err_ce; + } + + /* Fill the ring until we cause a wrap */ + n = 0; + while (intel_ring_direction(ce[0]->ring, + rq->wa_tail, + ce[0]->ring->tail) <= 0) { + struct i915_request *tmp; + + tmp = intel_context_create_request(ce[0]); + if (IS_ERR(tmp)) { + err = PTR_ERR(tmp); + i915_request_put(rq); + goto err_ce; + } + + i915_request_add(tmp); + intel_engine_flush_submission(engine); + n++; + } + intel_engine_flush_submission(engine); + pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n", + engine->name, n, + ce[0]->ring->size, + ce[0]->ring->tail, + ce[0]->ring->emit, + rq->tail); + GEM_BUG_ON(intel_ring_direction(ce[0]->ring, + rq->tail, + ce[0]->ring->tail) <= 0); + i915_request_put(rq); + + /* Create a second ring to preempt the first ring after rq[0] */ + rq = intel_context_create_request(ce[1]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ce; + } + + rq->sched.attr.priority = I915_PRIORITY_BARRIER; + i915_request_get(rq); + i915_request_add(rq); + + err = wait_for_submit(engine, rq, HZ / 2); + i915_request_put(rq); + if (err) { + pr_err("%s: preemption request was not submitted\n", + engine->name); + err = -ETIME; + } + + pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n", + engine->name, + ce[0]->ring->tail, ce[0]->ring->emit, + ce[1]->ring->tail, ce[1]->ring->emit); + +err_ce: + intel_engine_flush_submission(engine); + igt_spinner_end(&spin); + for (n = 0; n < ARRAY_SIZE(ce); n++) { + if (IS_ERR_OR_NULL(ce[n])) + break; + + intel_context_unpin(ce[n]); + intel_context_put(ce[n]); + } + st_engine_heartbeat_enable(engine); + if (igt_live_test_end(&t)) + err = -EIO; + if (err) + break; + } + + igt_spinner_fini(&spin); + return err; +} + +static int live_pin_rewind(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* + * We have to be careful not to trust intel_ring too much, for example + * ring->head is updated upon retire which is out of sync with pinning + * the context. Thus we cannot use ring->head to set CTX_RING_HEAD, + * or else we risk writing an older, stale value. + * + * To simulate this, let's apply a bit of deliberate sabotage.
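+ *
+ * To make the hazard concrete (hypothetical numbers): with the ring
+ * poisoned and the next request emitted at size/2 while a stale
+ * ring->head of 0 is used to seed CTX_RING_HEAD, the CS would chew
+ * through the poison from 0 to size/2 before ever reaching the
+ * request; seeding from rq->head instead avoids that.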
+ */ + + for_each_engine(engine, gt, id) { + struct intel_context *ce; + struct i915_request *rq; + struct intel_ring *ring; + struct igt_live_test t; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + break; + } + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + break; + } + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + break; + } + + /* Keep the context awake while we play games */ + err = i915_active_acquire(&ce->active); + if (err) { + intel_context_unpin(ce); + intel_context_put(ce); + break; + } + ring = ce->ring; + + /* Poison the ring, and offset the next request from HEAD */ + memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32)); + ring->emit = ring->size / 2; + ring->tail = ring->emit; + GEM_BUG_ON(ring->head); + + intel_context_unpin(ce); + + /* Submit a simple nop request */ + GEM_BUG_ON(intel_context_is_pinned(ce)); + rq = intel_context_create_request(ce); + i915_active_release(&ce->active); /* e.g. async retire */ + intel_context_put(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } + GEM_BUG_ON(!rq->head); + i915_request_add(rq); + + /* Expect not to hang! */ + if (igt_live_test_end(&t)) { + err = -EIO; + break; + } + } + + return err; +} + +static int live_hold_reset(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_spinner spin; + int err = 0; + + /* + * In order to support offline error capture for fast preempt reset, + * we need to decouple the guilty request and ensure that it and its + * descendants are not executed while the capture is in progress. + */ + + if (!intel_has_reset_engine(gt)) + return 0; + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + for_each_engine(engine, gt, id) { + struct intel_context *ce; + struct i915_request *rq; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + break; + } + + st_engine_heartbeat_disable(engine); + + rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + i915_request_add(rq); + + if (!igt_wait_for_spinner(&spin, rq)) { + intel_gt_set_wedged(gt); + err = -ETIME; + goto out; + } + + /* We have our request executing, now remove it and reset */ + + local_bh_disable(); + if (test_and_set_bit(I915_RESET_ENGINE + id, + &gt->reset.flags)) { + local_bh_enable(); + intel_gt_set_wedged(gt); + err = -EBUSY; + goto out; + } + tasklet_disable(&engine->execlists.tasklet); + + engine->execlists.tasklet.func(engine->execlists.tasklet.data); + GEM_BUG_ON(execlists_active(&engine->execlists) != rq); + + i915_request_get(rq); + execlists_hold(engine, rq); + GEM_BUG_ON(!i915_request_on_hold(rq)); + + __intel_engine_reset_bh(engine, NULL); + GEM_BUG_ON(rq->fence.error != -EIO); + + tasklet_enable(&engine->execlists.tasklet); + clear_and_wake_up_bit(I915_RESET_ENGINE + id, + &gt->reset.flags); + local_bh_enable(); + + /* Check that we do not resubmit the held request */ + if (!i915_request_wait(rq, 0, HZ / 5)) { + pr_err("%s: on hold request completed!\n", + engine->name); + i915_request_put(rq); + err = -EIO; + goto out; + } + GEM_BUG_ON(!i915_request_on_hold(rq)); + + /* But is resubmitted on release */ + execlists_unhold(engine, rq); + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + pr_err("%s: held request did not complete!\n", + engine->name); + intel_gt_set_wedged(gt); + err = -ETIME; + } + i915_request_put(rq); + +out: + st_engine_heartbeat_enable(engine); +
intel_context_put(ce); + if (err) + break; + } + + igt_spinner_fini(&spin); + return err; +} + +static const char *error_repr(int err) +{ + return err ? "bad" : "good"; +} + +static int live_error_interrupt(void *arg) +{ + static const struct error_phase { + enum { GOOD = 0, BAD = -EIO } error[2]; + } phases[] = { + { { BAD, GOOD } }, + { { BAD, BAD } }, + { { BAD, GOOD } }, + { { GOOD, GOOD } }, /* sentinel */ + }; + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* + * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning + * of invalid commands in user batches that will cause a GPU hang. + * This is a faster mechanism than using hangcheck/heartbeats, but + * only detects problems the HW knows about -- it will not warn when + * we kill the HW! + * + * To verify our detection and reset, we throw some invalid commands + * at the HW and wait for the interrupt. + */ + + if (!intel_has_reset_engine(gt)) + return 0; + + for_each_engine(engine, gt, id) { + const struct error_phase *p; + int err = 0; + + st_engine_heartbeat_disable(engine); + + for (p = phases; p->error[0] != GOOD; p++) { + struct i915_request *client[ARRAY_SIZE(phases->error)]; + u32 *cs; + int i; + + memset(client, 0, sizeof(client)); /* zero the whole array, not just the first pointer */ + for (i = 0; i < ARRAY_SIZE(client); i++) { + struct intel_context *ce; + struct i915_request *rq; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out; + } + + rq = intel_context_create_request(ce); + intel_context_put(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + if (rq->engine->emit_init_breadcrumb) { + err = rq->engine->emit_init_breadcrumb(rq); + if (err) { + i915_request_add(rq); + goto out; + } + } + + cs = intel_ring_begin(rq, 2); + if (IS_ERR(cs)) { + i915_request_add(rq); + err = PTR_ERR(cs); + goto out; + } + + if (p->error[i]) { + *cs++ = 0xdeadbeef; + *cs++ = 0xdeadbeef; + } else { + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + } + + client[i] = i915_request_get(rq); + i915_request_add(rq); + } + + err = wait_for_submit(engine, client[0], HZ / 2); + if (err) { + pr_err("%s: first request did not start within time!\n", + engine->name); + err = -ETIME; + goto out; + } + + for (i = 0; i < ARRAY_SIZE(client); i++) { + if (i915_request_wait(client[i], 0, HZ / 5) < 0) + pr_debug("%s: %s request incomplete!\n", + engine->name, + error_repr(p->error[i])); + + if (!i915_request_started(client[i])) { + pr_err("%s: %s request not started!\n", + engine->name, + error_repr(p->error[i])); + err = -ETIME; + goto out; + } + + /* Kick the tasklet to process the error */ + intel_engine_flush_submission(engine); + if (client[i]->fence.error != p->error[i]) { + pr_err("%s: %s request (%s) with wrong error code: %d\n", + engine->name, + error_repr(p->error[i]), + i915_request_completed(client[i]) ?
"completed" : "running", + client[i]->fence.error); + err = -EINVAL; + goto out; + } + } + +out: + for (i = 0; i < ARRAY_SIZE(client); i++) + if (client[i]) + i915_request_put(client[i]); + if (err) { + pr_err("%s: failed at phase[%zd] { %d, %d }\n", + engine->name, p - phases, + p->error[0], p->error[1]); + break; + } + } + + st_engine_heartbeat_enable(engine); + if (err) { + intel_gt_set_wedged(gt); + return err; + } + } + + return 0; +} + +static int +emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) +{ + u32 *cs; + + cs = intel_ring_begin(rq, 10); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_NEQ_SDD; + *cs++ = 0; + *cs++ = i915_ggtt_offset(vma) + 4 * idx; + *cs++ = 0; + + if (idx > 0) { + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); + *cs++ = 0; + *cs++ = 1; + } else { + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + *cs++ = MI_NOOP; + } + + *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; + + intel_ring_advance(rq, cs); + return 0; +} + +static struct i915_request * +semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx) +{ + struct intel_context *ce; + struct i915_request *rq; + int err; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return ERR_CAST(ce); + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + goto out_ce; + + err = 0; + if (rq->engine->emit_init_breadcrumb) + err = rq->engine->emit_init_breadcrumb(rq); + if (err == 0) + err = emit_semaphore_chain(rq, vma, idx); + if (err == 0) + i915_request_get(rq); + i915_request_add(rq); + if (err) + rq = ERR_PTR(err); + +out_ce: + intel_context_put(ce); + return rq; +} + +static int +release_queue(struct intel_engine_cs *engine, + struct i915_vma *vma, + int idx, int prio) +{ + struct i915_sched_attr attr = { + .priority = prio, + }; + struct i915_request *rq; + u32 *cs; + + rq = intel_engine_create_kernel_request(engine); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + return PTR_ERR(cs); + } + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); + *cs++ = 0; + *cs++ = 1; + + intel_ring_advance(rq, cs); + + i915_request_get(rq); + i915_request_add(rq); + + local_bh_disable(); + engine->schedule(rq, &attr); + local_bh_enable(); /* kick tasklet */ + + i915_request_put(rq); + + return 0; +} + +static int +slice_semaphore_queue(struct intel_engine_cs *outer, + struct i915_vma *vma, + int count) +{ + struct intel_engine_cs *engine; + struct i915_request *head; + enum intel_engine_id id; + int err, i, n = 0; + + head = semaphore_queue(outer, vma, n++); + if (IS_ERR(head)) + return PTR_ERR(head); + + for_each_engine(engine, outer->gt, id) { + if (!intel_engine_has_preemption(engine)) + continue; + + for (i = 0; i < count; i++) { + struct i915_request *rq; + + rq = semaphore_queue(engine, vma, n++); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + i915_request_put(rq); + } + } + + err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER); + if (err) + goto out; + + if (i915_request_wait(head, 0, + 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) { + pr_err("%s: Failed to slice along semaphore chain of length (%d, %d)!\n", + outer->name, count, n); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(outer->gt); + err = -EIO; + } + +out: + 
i915_request_put(head); + return err; +} + +static int live_timeslice_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct drm_i915_gem_object *obj; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct i915_vma *vma; + void *vaddr; + int err = 0; + + /* + * If a request takes too long, we would like to give other users + * a fair go on the GPU. In particular, users may create batches + * that wait upon external input, where that input may even be + * supplied by another GPU job. To avoid blocking forever, we + * need to preempt the current task and replace it with another + * ready task. + */ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return 0; + + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) + goto err_map; + + err = i915_vma_sync(vma); + if (err) + goto err_pin; + + for_each_engine(engine, gt, id) { + if (!intel_engine_has_preemption(engine)) + continue; + + memset(vaddr, 0, PAGE_SIZE); + + st_engine_heartbeat_disable(engine); + err = slice_semaphore_queue(engine, vma, 5); + st_engine_heartbeat_enable(engine); + if (err) + goto err_pin; + + if (igt_flush_test(gt->i915)) { + err = -EIO; + goto err_pin; + } + } + +err_pin: + i915_vma_unpin(vma); +err_map: + i915_gem_object_unpin_map(obj); +err_obj: + i915_gem_object_put(obj); + return err; +} + +static struct i915_request * +create_rewinder(struct intel_context *ce, + struct i915_request *wait, + void *slot, int idx) +{ + const u32 offset = + i915_ggtt_offset(ce->engine->status_page.vma) + + offset_in_page(slot); + struct i915_request *rq; + u32 *cs; + int err; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return rq; + + if (wait) { + err = i915_request_await_dma_fence(rq, &wait->fence); + if (err) + goto err; + } + + cs = intel_ring_begin(rq, 14); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err; + } + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + *cs++ = MI_NOOP; + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_GTE_SDD; + *cs++ = idx; + *cs++ = offset; + *cs++ = 0; + + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base)); + *cs++ = offset + idx * sizeof(u32); + *cs++ = 0; + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = offset; + *cs++ = 0; + *cs++ = idx + 1; + + intel_ring_advance(rq, cs); + + rq->sched.attr.priority = I915_PRIORITY_MASK; + err = 0; +err: + i915_request_get(rq); + i915_request_add(rq); + if (err) { + i915_request_put(rq); + return ERR_PTR(err); + } + + return rq; +} + +static int live_timeslice_rewind(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* + * The usual presumption on timeslice expiration is that we replace + * the active context with another. However, given a chain of + * dependencies we may end up with replacing the context with itself, + * but only a few of those requests, forcing us to rewind the + * RING_TAIL of the original request. 
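+	 * That is, the context may be resubmitted with an earlier RING_TAIL
+	 * than the one previously written, effectively unsubmitting the tail
+	 * requests until their dependencies have run.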
+ */ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return 0; + + for_each_engine(engine, gt, id) { + enum { A1, A2, B1 }; + enum { X = 1, Z, Y }; + struct i915_request *rq[3] = {}; + struct intel_context *ce; + unsigned long timeslice; + int i, err = 0; + u32 *slot; + + if (!intel_engine_has_timeslices(engine)) + continue; + + /* + * A:rq1 -- semaphore wait, timestamp X + * A:rq2 -- write timestamp Y + * + * B:rq1 [await A:rq1] -- write timestamp Z + * + * Force timeslice, release semaphore. + * + * Expect execution/evaluation order XZY + */ + + st_engine_heartbeat_disable(engine); + timeslice = xchg(&engine->props.timeslice_duration_ms, 1); + + slot = memset32(engine->status_page.addr + 1000, 0, 4); + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto err; + } + + rq[A1] = create_rewinder(ce, NULL, slot, X); + if (IS_ERR(rq[A1])) { + err = PTR_ERR(rq[A1]); + rq[A1] = NULL; + intel_context_put(ce); + goto err; + } + + rq[A2] = create_rewinder(ce, NULL, slot, Y); + intel_context_put(ce); + if (IS_ERR(rq[A2])) { + err = PTR_ERR(rq[A2]); + rq[A2] = NULL; + goto err; + } + + err = wait_for_submit(engine, rq[A2], HZ / 2); + if (err) { + pr_err("%s: failed to submit first context\n", + engine->name); + goto err; + } + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto err; + } + + rq[B1] = create_rewinder(ce, rq[A1], slot, Z); + intel_context_put(ce); + if (IS_ERR(rq[B1])) { + err = PTR_ERR(rq[B1]); + rq[B1] = NULL; + goto err; + } + + err = wait_for_submit(engine, rq[B1], HZ / 2); + if (err) { + pr_err("%s: failed to submit second context\n", + engine->name); + goto err; + } + + /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */ + ENGINE_TRACE(engine, "forcing tasklet for rewind\n"); + while (i915_request_is_active(rq[A2])) { /* semaphore yield! */ + /* Wait for the timeslice to kick in */ + del_timer(&engine->execlists.timer); + tasklet_hi_schedule(&engine->execlists.tasklet); + intel_engine_flush_submission(engine); + } + /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */ + GEM_BUG_ON(!i915_request_is_active(rq[A1])); + GEM_BUG_ON(!i915_request_is_active(rq[B1])); + GEM_BUG_ON(i915_request_is_active(rq[A2])); + + /* Release the hounds! 
*/ + slot[0] = 1; + wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */ + + for (i = 1; i <= 3; i++) { + unsigned long timeout = jiffies + HZ / 2; + + while (!READ_ONCE(slot[i]) && + time_before(jiffies, timeout)) + ; + + if (!time_before(jiffies, timeout)) { + pr_err("%s: rq[%d] timed out\n", + engine->name, i - 1); + err = -ETIME; + goto err; + } + + pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]); + } + + /* XZY: XZ < XY */ + if (slot[Z] - slot[X] >= slot[Y] - slot[X]) { + pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n", + engine->name, + slot[Z] - slot[X], + slot[Y] - slot[X]); + err = -EINVAL; + } + +err: + memset32(&slot[0], -1, 4); + wmb(); + + engine->props.timeslice_duration_ms = timeslice; + st_engine_heartbeat_enable(engine); + for (i = 0; i < 3; i++) + i915_request_put(rq[i]); + if (igt_flush_test(gt->i915)) + err = -EIO; + if (err) + return err; + } + + return 0; +} + +static struct i915_request *nop_request(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = intel_engine_create_kernel_request(engine); + if (IS_ERR(rq)) + return rq; + + i915_request_get(rq); + i915_request_add(rq); + + return rq; +} + +static long slice_timeout(struct intel_engine_cs *engine) +{ + long timeout; + + /* Enough time for a timeslice to kick in, and kick out */ + timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine)); + + /* Enough time for the nop request to complete */ + timeout += HZ / 5; + + return timeout + 1; +} + +static int live_timeslice_queue(void *arg) +{ + struct intel_gt *gt = arg; + struct drm_i915_gem_object *obj; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct i915_vma *vma; + void *vaddr; + int err = 0; + + /* + * Make sure that even if ELSP[0] and ELSP[1] are filled with + * timeslicing between them disabled, we *do* enable timeslicing + * if the queue demands it. (Normally, we do not submit if + * ELSP[1] is already occupied, so must rely on timeslicing to + * eject ELSP[0] in favour of the queue.) 
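+	 *
+	 * Below, ELSP[0] is a semaphore busywait and ELSP[1] a nop; the
+	 * queued release is submitted at the same effective priority, so
+	 * only the timeslice timer can make room for it.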
+ */ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return 0; + + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) + goto err_map; + + err = i915_vma_sync(vma); + if (err) + goto err_pin; + + for_each_engine(engine, gt, id) { + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), + }; + struct i915_request *rq, *nop; + + if (!intel_engine_has_preemption(engine)) + continue; + + st_engine_heartbeat_disable(engine); + memset(vaddr, 0, PAGE_SIZE); + + /* ELSP[0]: semaphore wait */ + rq = semaphore_queue(engine, vma, 0); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_heartbeat; + } + engine->schedule(rq, &attr); + err = wait_for_submit(engine, rq, HZ / 2); + if (err) { + pr_err("%s: Timed out trying to submit semaphores\n", + engine->name); + goto err_rq; + } + + /* ELSP[1]: nop request */ + nop = nop_request(engine); + if (IS_ERR(nop)) { + err = PTR_ERR(nop); + goto err_rq; + } + err = wait_for_submit(engine, nop, HZ / 2); + i915_request_put(nop); + if (err) { + pr_err("%s: Timed out trying to submit nop\n", + engine->name); + goto err_rq; + } + + GEM_BUG_ON(i915_request_completed(rq)); + GEM_BUG_ON(execlists_active(&engine->execlists) != rq); + + /* Queue: semaphore signal, matching priority as semaphore */ + err = release_queue(engine, vma, 1, effective_prio(rq)); + if (err) + goto err_rq; + + /* Wait until we ack the release_queue and start timeslicing */ + do { + cond_resched(); + intel_engine_flush_submission(engine); + } while (READ_ONCE(engine->execlists.pending[0])); + + /* Timeslice every jiffy, so within 2 we should signal */ + if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) { + struct drm_printer p = + drm_info_printer(gt->i915->drm.dev); + + pr_err("%s: Failed to timeslice into queue\n", + engine->name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + memset(vaddr, 0xff, PAGE_SIZE); + err = -EIO; + } +err_rq: + i915_request_put(rq); +err_heartbeat: + st_engine_heartbeat_enable(engine); + if (err) + break; + } + +err_pin: + i915_vma_unpin(vma); +err_map: + i915_gem_object_unpin_map(obj); +err_obj: + i915_gem_object_put(obj); + return err; +} + +static int live_timeslice_nopreempt(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct igt_spinner spin; + int err = 0; + + /* + * We should not timeslice into a request that is marked with + * I915_REQUEST_NOPREEMPT. 
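+	 *
+	 * The spinner below is flagged I915_FENCE_FLAG_NOPREEMPT once it is
+	 * running, so even a maximum priority (I915_PRIORITY_BARRIER) request
+	 * must not be timesliced in ahead of it.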
+ */ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return 0; + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + for_each_engine(engine, gt, id) { + struct intel_context *ce; + struct i915_request *rq; + unsigned long timeslice; + + if (!intel_engine_has_preemption(engine)) + continue; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + break; + } + + st_engine_heartbeat_disable(engine); + timeslice = xchg(&engine->props.timeslice_duration_ms, 1); + + /* Create an unpreemptible spinner */ + + rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); + intel_context_put(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_heartbeat; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (!igt_wait_for_spinner(&spin, rq)) { + i915_request_put(rq); + err = -ETIME; + goto out_spin; + } + + set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags); + i915_request_put(rq); + + /* Followed by a maximum priority barrier (heartbeat) */ + + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + err = PTR_ERR(ce); + goto out_spin; + } + + rq = intel_context_create_request(ce); + intel_context_put(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_spin; + } + + rq->sched.attr.priority = I915_PRIORITY_BARRIER; + i915_request_get(rq); + i915_request_add(rq); + + /* + * Wait until the barrier is in ELSP, and we know timeslicing + * will have been activated. + */ + if (wait_for_submit(engine, rq, HZ / 2)) { + i915_request_put(rq); + err = -ETIME; + goto out_spin; + } + + /* + * Since the ELSP[0] request is unpreemptible, it should not + * allow the maximum priority barrier through. Wait long + * enough to see if it is timesliced in by mistake. + */ + if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) { + pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n", + engine->name); + err = -EINVAL; + } + i915_request_put(rq); + +out_spin: + igt_spinner_end(&spin); +out_heartbeat: + xchg(&engine->props.timeslice_duration_ms, timeslice); + st_engine_heartbeat_enable(engine); + if (err) + break; + + if (igt_flush_test(gt->i915)) { + err = -EIO; + break; + } + } + + igt_spinner_fini(&spin); + return err; +} + +static int live_busywait_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct intel_engine_cs *engine; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + enum intel_engine_id id; + int err = -ENOMEM; + u32 *map; + + /* + * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can + * preempt the busywaits used to synchronise between rings. 
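+	 *
+	 * The busywait is a MI_SEMAPHORE_WAIT polling a dword in the GGTT;
+	 * the preempting request simply rewrites that dword, so completion
+	 * of the low priority request proves the preemption took place.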
+ */ + + ctx_hi = kernel_context(gt->i915); + if (!ctx_hi) + return -ENOMEM; + ctx_hi->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); + + ctx_lo = kernel_context(gt->i915); + if (!ctx_lo) + goto err_ctx_hi; + ctx_lo->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); + + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_ctx_lo; + } + + map = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto err_obj; + } + + vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_map; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) + goto err_map; + + err = i915_vma_sync(vma); + if (err) + goto err_vma; + + for_each_engine(engine, gt, id) { + struct i915_request *lo, *hi; + struct igt_live_test t; + u32 *cs; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (!intel_engine_can_store_dword(engine)) + continue; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + goto err_vma; + } + + /* + * We create two requests. The low priority request + * busywaits on a semaphore (inside the ringbuffer where + * it should be preemptible) and the high priority request + * uses a MI_STORE_DWORD_IMM to update the semaphore value + * allowing the first request to complete. If preemption + * fails, we hang instead. + */ + + lo = igt_request_alloc(ctx_lo, engine); + if (IS_ERR(lo)) { + err = PTR_ERR(lo); + goto err_vma; + } + + cs = intel_ring_begin(lo, 8); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + i915_request_add(lo); + goto err_vma; + } + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(vma); + *cs++ = 0; + *cs++ = 1; + + /* XXX Do we need a flush + invalidate here? 
*/ + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = i915_ggtt_offset(vma); + *cs++ = 0; + + intel_ring_advance(lo, cs); + + i915_request_get(lo); + i915_request_add(lo); + + if (wait_for(READ_ONCE(*map), 10)) { + i915_request_put(lo); + err = -ETIMEDOUT; + goto err_vma; + } + + /* Low priority request should be busywaiting now */ + if (i915_request_wait(lo, 0, 1) != -ETIME) { + i915_request_put(lo); + pr_err("%s: Busywaiting request did not!\n", + engine->name); + err = -EIO; + goto err_vma; + } + + hi = igt_request_alloc(ctx_hi, engine); + if (IS_ERR(hi)) { + err = PTR_ERR(hi); + i915_request_put(lo); + goto err_vma; + } + + cs = intel_ring_begin(hi, 4); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + i915_request_add(hi); + i915_request_put(lo); + goto err_vma; + } + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(vma); + *cs++ = 0; + *cs++ = 0; + + intel_ring_advance(hi, cs); + i915_request_add(hi); + + if (i915_request_wait(lo, 0, HZ / 5) < 0) { + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); + + pr_err("%s: Failed to preempt semaphore busywait!\n", + engine->name); + + intel_engine_dump(engine, &p, "%s\n", engine->name); + GEM_TRACE_DUMP(); + + i915_request_put(lo); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_vma; + } + GEM_BUG_ON(READ_ONCE(*map)); + i915_request_put(lo); + + if (igt_live_test_end(&t)) { + err = -EIO; + goto err_vma; + } + } + + err = 0; +err_vma: + i915_vma_unpin(vma); +err_map: + i915_gem_object_unpin_map(obj); +err_obj: + i915_gem_object_put(obj); +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); + return err; +} + +static struct i915_request * +spinner_create_request(struct igt_spinner *spin, + struct i915_gem_context *ctx, + struct intel_engine_cs *engine, + u32 arb) +{ + struct intel_context *ce; + struct i915_request *rq; + + ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); + if (IS_ERR(ce)) + return ERR_CAST(ce); + + rq = igt_spinner_create_request(spin, ce, arb); + intel_context_put(ce); + return rq; +} + +static int live_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct igt_spinner spin_hi, spin_lo; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENOMEM; + + if (igt_spinner_init(&spin_hi, gt)) + return -ENOMEM; + + if (igt_spinner_init(&spin_lo, gt)) + goto err_spin_hi; + + ctx_hi = kernel_context(gt->i915); + if (!ctx_hi) + goto err_spin_lo; + ctx_hi->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); + + ctx_lo = kernel_context(gt->i915); + if (!ctx_lo) + goto err_ctx_hi; + ctx_lo->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); + + for_each_engine(engine, gt, id) { + struct igt_live_test t; + struct i915_request *rq; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + goto err_ctx_lo; + } + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin_lo, rq)) { + GEM_TRACE("lo spinner failed to start\n"); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; + } + + rq = spinner_create_request(&spin_hi, ctx_hi, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) { + igt_spinner_end(&spin_lo); + err = PTR_ERR(rq); + goto 
err_ctx_lo; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin_hi, rq)) { + GEM_TRACE("hi spinner failed to start\n"); + GEM_TRACE_DUMP(); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; + } + + igt_spinner_end(&spin_hi); + igt_spinner_end(&spin_lo); + + if (igt_live_test_end(&t)) { + err = -EIO; + goto err_ctx_lo; + } + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + igt_spinner_fini(&spin_lo); +err_spin_hi: + igt_spinner_fini(&spin_hi); + return err; +} + +static int live_late_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct igt_spinner spin_hi, spin_lo; + struct intel_engine_cs *engine; + struct i915_sched_attr attr = {}; + enum intel_engine_id id; + int err = -ENOMEM; + + if (igt_spinner_init(&spin_hi, gt)) + return -ENOMEM; + + if (igt_spinner_init(&spin_lo, gt)) + goto err_spin_hi; + + ctx_hi = kernel_context(gt->i915); + if (!ctx_hi) + goto err_spin_lo; + + ctx_lo = kernel_context(gt->i915); + if (!ctx_lo) + goto err_ctx_hi; + + /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */ + ctx_lo->sched.priority = I915_USER_PRIORITY(1); + + for_each_engine(engine, gt, id) { + struct igt_live_test t; + struct i915_request *rq; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + goto err_ctx_lo; + } + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin_lo, rq)) { + pr_err("First context failed to start\n"); + goto err_wedged; + } + + rq = spinner_create_request(&spin_hi, ctx_hi, engine, + MI_NOOP); + if (IS_ERR(rq)) { + igt_spinner_end(&spin_lo); + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (igt_wait_for_spinner(&spin_hi, rq)) { + pr_err("Second context overtook first?\n"); + goto err_wedged; + } + + attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); + engine->schedule(rq, &attr); + + if (!igt_wait_for_spinner(&spin_hi, rq)) { + pr_err("High priority context failed to preempt the low priority context\n"); + GEM_TRACE_DUMP(); + goto err_wedged; + } + + igt_spinner_end(&spin_hi); + igt_spinner_end(&spin_lo); + + if (igt_live_test_end(&t)) { + err = -EIO; + goto err_ctx_lo; + } + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + igt_spinner_fini(&spin_lo); +err_spin_hi: + igt_spinner_fini(&spin_hi); + return err; + +err_wedged: + igt_spinner_end(&spin_hi); + igt_spinner_end(&spin_lo); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; +} + +struct preempt_client { + struct igt_spinner spin; + struct i915_gem_context *ctx; +}; + +static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c) +{ + c->ctx = kernel_context(gt->i915); + if (!c->ctx) + return -ENOMEM; + + if (igt_spinner_init(&c->spin, gt)) + goto err_ctx; + + return 0; + +err_ctx: + kernel_context_close(c->ctx); + return -ENOMEM; +} + +static void preempt_client_fini(struct preempt_client *c) +{ + igt_spinner_fini(&c->spin); + kernel_context_close(c->ctx); +} + +static int live_nopreempt(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct preempt_client a, b; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * Verify that we can disable preemption for an individual 
request + * that may be being observed and not want to be interrupted. + */ + + if (preempt_client_init(gt, &a)) + return -ENOMEM; + if (preempt_client_init(gt, &b)) + goto err_client_a; + b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); + + for_each_engine(engine, gt, id) { + struct i915_request *rq_a, *rq_b; + + if (!intel_engine_has_preemption(engine)) + continue; + + engine->execlists.preempt_hang.count = 0; + + rq_a = spinner_create_request(&a.spin, + a.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq_a)) { + err = PTR_ERR(rq_a); + goto err_client_b; + } + + /* Low priority client, but unpreemptable! */ + __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags); + + i915_request_add(rq_a); + if (!igt_wait_for_spinner(&a.spin, rq_a)) { + pr_err("First client failed to start\n"); + goto err_wedged; + } + + rq_b = spinner_create_request(&b.spin, + b.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq_b)) { + err = PTR_ERR(rq_b); + goto err_client_b; + } + + i915_request_add(rq_b); + + /* B is much more important than A! (But A is unpreemptable.) */ + GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a)); + + /* Wait long enough for preemption and timeslicing */ + if (igt_wait_for_spinner(&b.spin, rq_b)) { + pr_err("Second client started too early!\n"); + goto err_wedged; + } + + igt_spinner_end(&a.spin); + + if (!igt_wait_for_spinner(&b.spin, rq_b)) { + pr_err("Second client failed to start\n"); + goto err_wedged; + } + + igt_spinner_end(&b.spin); + + if (engine->execlists.preempt_hang.count) { + pr_err("Preemption recorded x%d; should have been suppressed!\n", + engine->execlists.preempt_hang.count); + err = -EINVAL; + goto err_wedged; + } + + if (igt_flush_test(gt->i915)) + goto err_wedged; + } + + err = 0; +err_client_b: + preempt_client_fini(&b); +err_client_a: + preempt_client_fini(&a); + return err; + +err_wedged: + igt_spinner_end(&b.spin); + igt_spinner_end(&a.spin); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_client_b; +} + +struct live_preempt_cancel { + struct intel_engine_cs *engine; + struct preempt_client a, b; +}; + +static int __cancel_active0(struct live_preempt_cancel *arg) +{ + struct i915_request *rq; + struct igt_live_test t; + int err; + + /* Preempt cancel of ELSP0 */ + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + if (igt_live_test_begin(&t, arg->engine->i915, + __func__, arg->engine->name)) + return -EIO; + + rq = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + clear_bit(CONTEXT_BANNED, &rq->context->flags); + i915_request_get(rq); + i915_request_add(rq); + if (!igt_wait_for_spinner(&arg->a.spin, rq)) { + err = -EIO; + goto out; + } + + intel_context_set_banned(rq->context); + err = intel_engine_pulse(arg->engine); + if (err) + goto out; + + err = wait_for_reset(arg->engine, rq, HZ / 2); + if (err) { + pr_err("Cancelled inflight0 request did not reset\n"); + goto out; + } + +out: + i915_request_put(rq); + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} + +static int __cancel_active1(struct live_preempt_cancel *arg) +{ + struct i915_request *rq[2] = {}; + struct igt_live_test t; + int err; + + /* Preempt cancel of ELSP1 */ + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + if (igt_live_test_begin(&t, arg->engine->i915, + __func__, arg->engine->name)) + return -EIO; + + rq[0] = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_NOOP); /* no preemption */ + if (IS_ERR(rq[0])) + return PTR_ERR(rq[0]); + + clear_bit(CONTEXT_BANNED, 
&rq[0]->context->flags); + i915_request_get(rq[0]); + i915_request_add(rq[0]); + if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { + err = -EIO; + goto out; + } + + rq[1] = spinner_create_request(&arg->b.spin, + arg->b.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq[1])) { + err = PTR_ERR(rq[1]); + goto out; + } + + clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); + i915_request_get(rq[1]); + err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); + i915_request_add(rq[1]); + if (err) + goto out; + + intel_context_set_banned(rq[1]->context); + err = intel_engine_pulse(arg->engine); + if (err) + goto out; + + igt_spinner_end(&arg->a.spin); + err = wait_for_reset(arg->engine, rq[1], HZ / 2); + if (err) + goto out; + + if (rq[0]->fence.error != 0) { + pr_err("Normal inflight0 request did not complete\n"); + err = -EINVAL; + goto out; + } + + if (rq[1]->fence.error != -EIO) { + pr_err("Cancelled inflight1 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq[1]); + i915_request_put(rq[0]); + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} + +static int __cancel_queued(struct live_preempt_cancel *arg) +{ + struct i915_request *rq[3] = {}; + struct igt_live_test t; + int err; + + /* Full ELSP and one in the wings */ + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + if (igt_live_test_begin(&t, arg->engine->i915, + __func__, arg->engine->name)) + return -EIO; + + rq[0] = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq[0])) + return PTR_ERR(rq[0]); + + clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); + i915_request_get(rq[0]); + i915_request_add(rq[0]); + if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { + err = -EIO; + goto out; + } + + rq[1] = igt_request_alloc(arg->b.ctx, arg->engine); + if (IS_ERR(rq[1])) { + err = PTR_ERR(rq[1]); + goto out; + } + + clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); + i915_request_get(rq[1]); + err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); + i915_request_add(rq[1]); + if (err) + goto out; + + rq[2] = spinner_create_request(&arg->b.spin, + arg->a.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq[2])) { + err = PTR_ERR(rq[2]); + goto out; + } + + i915_request_get(rq[2]); + err = i915_request_await_dma_fence(rq[2], &rq[1]->fence); + i915_request_add(rq[2]); + if (err) + goto out; + + intel_context_set_banned(rq[2]->context); + err = intel_engine_pulse(arg->engine); + if (err) + goto out; + + err = wait_for_reset(arg->engine, rq[2], HZ / 2); + if (err) + goto out; + + if (rq[0]->fence.error != -EIO) { + pr_err("Cancelled inflight0 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + + if (rq[1]->fence.error != 0) { + pr_err("Normal inflight1 request did not complete\n"); + err = -EINVAL; + goto out; + } + + if (rq[2]->fence.error != -EIO) { + pr_err("Cancelled queued request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq[2]); + i915_request_put(rq[1]); + i915_request_put(rq[0]); + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} + +static int __cancel_hostile(struct live_preempt_cancel *arg) +{ + struct i915_request *rq; + int err; + + /* Preempt cancel non-preemptible spinner in ELSP0 */ + if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + return 0; + + if (!intel_has_reset_engine(arg->engine->gt)) + return 0; + + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + rq = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_NOOP); /* preemption disabled */ 
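+	/* No arbitration point in the spinner; eviction requires the preempt-timeout reset */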
+ if (IS_ERR(rq)) + return PTR_ERR(rq); + + clear_bit(CONTEXT_BANNED, &rq->context->flags); + i915_request_get(rq); + i915_request_add(rq); + if (!igt_wait_for_spinner(&arg->a.spin, rq)) { + err = -EIO; + goto out; + } + + intel_context_set_banned(rq->context); + err = intel_engine_pulse(arg->engine); /* force reset */ + if (err) + goto out; + + err = wait_for_reset(arg->engine, rq, HZ / 2); + if (err) { + pr_err("Cancelled inflight0 request did not reset\n"); + goto out; + } + +out: + i915_request_put(rq); + if (igt_flush_test(arg->engine->i915)) + err = -EIO; + return err; +} + +static void force_reset_timeout(struct intel_engine_cs *engine) +{ + engine->reset_timeout.probability = 999; + atomic_set(&engine->reset_timeout.times, -1); +} + +static void cancel_reset_timeout(struct intel_engine_cs *engine) +{ + memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout)); +} + +static int __cancel_fail(struct live_preempt_cancel *arg) +{ + struct intel_engine_cs *engine = arg->engine; + struct i915_request *rq; + int err; + + if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + return 0; + + if (!intel_has_reset_engine(engine->gt)) + return 0; + + GEM_TRACE("%s(%s)\n", __func__, engine->name); + rq = spinner_create_request(&arg->a.spin, + arg->a.ctx, engine, + MI_NOOP); /* preemption disabled */ + if (IS_ERR(rq)) + return PTR_ERR(rq); + + clear_bit(CONTEXT_BANNED, &rq->context->flags); + i915_request_get(rq); + i915_request_add(rq); + if (!igt_wait_for_spinner(&arg->a.spin, rq)) { + err = -EIO; + goto out; + } + + intel_context_set_banned(rq->context); + + err = intel_engine_pulse(engine); + if (err) + goto out; + + force_reset_timeout(engine); + + /* force preempt reset [failure] */ + while (!engine->execlists.pending[0]) + intel_engine_flush_submission(engine); + del_timer_sync(&engine->execlists.preempt); + intel_engine_flush_submission(engine); + + cancel_reset_timeout(engine); + + /* after failure, require heartbeats to reset device */ + intel_engine_set_heartbeat(engine, 1); + err = wait_for_reset(engine, rq, HZ / 2); + intel_engine_set_heartbeat(engine, + engine->defaults.heartbeat_interval_ms); + if (err) { + pr_err("Cancelled inflight0 request did not reset\n"); + goto out; + } + +out: + i915_request_put(rq); + if (igt_flush_test(engine->i915)) + err = -EIO; + return err; +} + +static int live_preempt_cancel(void *arg) +{ + struct intel_gt *gt = arg; + struct live_preempt_cancel data; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * To cancel an inflight context, we need to first remove it from the + * GPU. That sounds like preemption! Plus a little bit of bookkeeping. 
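+	 *
+	 * The phases below cancel a banned context in each position: ELSP[0],
+	 * ELSP[1], deep in the queue, a non-preemptible spinner that needs the
+	 * preempt-timeout reset, and a forced timeout failure that falls back
+	 * to the heartbeat reset.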
+ */ + + if (preempt_client_init(gt, &data.a)) + return -ENOMEM; + if (preempt_client_init(gt, &data.b)) + goto err_client_a; + + for_each_engine(data.engine, gt, id) { + if (!intel_engine_has_preemption(data.engine)) + continue; + + err = __cancel_active0(&data); + if (err) + goto err_wedged; + + err = __cancel_active1(&data); + if (err) + goto err_wedged; + + err = __cancel_queued(&data); + if (err) + goto err_wedged; + + err = __cancel_hostile(&data); + if (err) + goto err_wedged; + + err = __cancel_fail(&data); + if (err) + goto err_wedged; + } + + err = 0; +err_client_b: + preempt_client_fini(&data.b); +err_client_a: + preempt_client_fini(&data.a); + return err; + +err_wedged: + GEM_TRACE_DUMP(); + igt_spinner_end(&data.b.spin); + igt_spinner_end(&data.a.spin); + intel_gt_set_wedged(gt); + goto err_client_b; +} + +static int live_suppress_self_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX) + }; + struct preempt_client a, b; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * Verify that if a preemption request does not cause a change in + * the current execution order, the preempt-to-idle injection is + * skipped and that we do not accidentally apply it after the CS + * completion event. + */ + + if (intel_uc_uses_guc_submission(&gt->uc)) + return 0; /* presume black box */ + + if (intel_vgpu_active(gt->i915)) + return 0; /* GVT forces single port & request submission */ + + if (preempt_client_init(gt, &a)) + return -ENOMEM; + if (preempt_client_init(gt, &b)) + goto err_client_a; + + for_each_engine(engine, gt, id) { + struct i915_request *rq_a, *rq_b; + int depth; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (igt_flush_test(gt->i915)) + goto err_wedged; + + st_engine_heartbeat_disable(engine); + engine->execlists.preempt_hang.count = 0; + + rq_a = spinner_create_request(&a.spin, + a.ctx, engine, + MI_NOOP); + if (IS_ERR(rq_a)) { + err = PTR_ERR(rq_a); + st_engine_heartbeat_enable(engine); + goto err_client_b; + } + + i915_request_add(rq_a); + if (!igt_wait_for_spinner(&a.spin, rq_a)) { + pr_err("First client failed to start\n"); + st_engine_heartbeat_enable(engine); + goto err_wedged; + } + + /* Keep postponing the timer to avoid premature slicing */ + mod_timer(&engine->execlists.timer, jiffies + HZ); + for (depth = 0; depth < 8; depth++) { + rq_b = spinner_create_request(&b.spin, + b.ctx, engine, + MI_NOOP); + if (IS_ERR(rq_b)) { + err = PTR_ERR(rq_b); + st_engine_heartbeat_enable(engine); + goto err_client_b; + } + i915_request_add(rq_b); + + GEM_BUG_ON(i915_request_completed(rq_a)); + engine->schedule(rq_a, &attr); + igt_spinner_end(&a.spin); + + if (!igt_wait_for_spinner(&b.spin, rq_b)) { + pr_err("Second client failed to start\n"); + st_engine_heartbeat_enable(engine); + goto err_wedged; + } + + swap(a, b); + rq_a = rq_b; + } + igt_spinner_end(&a.spin); + + if (engine->execlists.preempt_hang.count) { + pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n", + engine->name, + engine->execlists.preempt_hang.count, + depth); + st_engine_heartbeat_enable(engine); + err = -EINVAL; + goto err_client_b; + } + + st_engine_heartbeat_enable(engine); + if (igt_flush_test(gt->i915)) + goto err_wedged; + } + + err = 0; +err_client_b: + preempt_client_fini(&b); +err_client_a: + preempt_client_fini(&a); + return err; + +err_wedged: + igt_spinner_end(&b.spin); + igt_spinner_end(&a.spin); + intel_gt_set_wedged(gt); + err 
= -EIO; + goto err_client_b; +} + +static int live_chain_preempt(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct preempt_client hi, lo; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * Build a chain AB...BA between two contexts (A, B) and request + * preemption of the last request. It should then complete before + * the previously submitted spinner in B. + */ + + if (preempt_client_init(gt, &hi)) + return -ENOMEM; + + if (preempt_client_init(gt, &lo)) + goto err_client_hi; + + for_each_engine(engine, gt, id) { + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), + }; + struct igt_live_test t; + struct i915_request *rq; + int ring_size, count, i; + + if (!intel_engine_has_preemption(engine)) + continue; + + rq = spinner_create_request(&lo.spin, + lo.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + goto err_wedged; + + i915_request_get(rq); + i915_request_add(rq); + + ring_size = rq->wa_tail - rq->head; + if (ring_size < 0) + ring_size += rq->ring->size; + ring_size = rq->ring->size / ring_size; + pr_debug("%s(%s): Using maximum of %d requests\n", + __func__, engine->name, ring_size); + + igt_spinner_end(&lo.spin); + if (i915_request_wait(rq, 0, HZ / 2) < 0) { + pr_err("Timed out waiting to flush %s\n", engine->name); + i915_request_put(rq); + goto err_wedged; + } + i915_request_put(rq); + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + goto err_wedged; + } + + for_each_prime_number_from(count, 1, ring_size) { + rq = spinner_create_request(&hi.spin, + hi.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + if (!igt_wait_for_spinner(&hi.spin, rq)) + goto err_wedged; + + rq = spinner_create_request(&lo.spin, + lo.ctx, engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + + for (i = 0; i < count; i++) { + rq = igt_request_alloc(lo.ctx, engine); + if (IS_ERR(rq)) + goto err_wedged; + i915_request_add(rq); + } + + rq = igt_request_alloc(hi.ctx, engine); + if (IS_ERR(rq)) + goto err_wedged; + + i915_request_get(rq); + i915_request_add(rq); + engine->schedule(rq, &attr); + + igt_spinner_end(&hi.spin); + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(gt->i915->drm.dev); + + pr_err("Failed to preempt over chain of %d\n", + count); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + i915_request_put(rq); + goto err_wedged; + } + igt_spinner_end(&lo.spin); + i915_request_put(rq); + + rq = igt_request_alloc(lo.ctx, engine); + if (IS_ERR(rq)) + goto err_wedged; + + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(gt->i915->drm.dev); + + pr_err("Failed to flush low priority chain of %d requests\n", + count); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + i915_request_put(rq); + goto err_wedged; + } + i915_request_put(rq); + } + + if (igt_live_test_end(&t)) { + err = -EIO; + goto err_wedged; + } + } + + err = 0; +err_client_lo: + preempt_client_fini(&lo); +err_client_hi: + preempt_client_fini(&hi); + return err; + +err_wedged: + igt_spinner_end(&hi.spin); + igt_spinner_end(&lo.spin); + intel_gt_set_wedged(gt); + err = -EIO; + goto err_client_lo; +} + +static int create_gang(struct intel_engine_cs *engine, + struct i915_request **prev) +{ + struct drm_i915_gem_object *obj; + struct intel_context *ce; + struct i915_request *rq; + struct i915_vma *vma; + u32 *cs; + int 
err; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + obj = i915_gem_object_create_internal(engine->i915, 4096); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_ce; + } + + vma = i915_vma_instance(obj, ce->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto err_obj; + + cs = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_obj; + } + + /* Semaphore target: spin until zero */ + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = lower_32_bits(vma->node.start); + *cs++ = upper_32_bits(vma->node.start); + + if (*prev) { + u64 offset = (*prev)->batch->node.start; + + /* Terminate the spinner in the next lower priority batch. */ + *cs++ = MI_STORE_DWORD_IMM_GEN4; + *cs++ = lower_32_bits(offset); + *cs++ = upper_32_bits(offset); + *cs++ = 0; + } + + *cs++ = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_obj; + } + + rq->batch = i915_vma_get(vma); + i915_request_get(rq); + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, false); + if (!err) + err = i915_vma_move_to_active(vma, rq, 0); + if (!err) + err = rq->engine->emit_bb_start(rq, + vma->node.start, + PAGE_SIZE, 0); + i915_vma_unlock(vma); + i915_request_add(rq); + if (err) + goto err_rq; + + i915_gem_object_put(obj); + intel_context_put(ce); + + rq->mock.link.next = &(*prev)->mock.link; + *prev = rq; + return 0; + +err_rq: + i915_vma_put(rq->batch); + i915_request_put(rq); +err_obj: + i915_gem_object_put(obj); +err_ce: + intel_context_put(ce); + return err; +} + +static int __live_preempt_ring(struct intel_engine_cs *engine, + struct igt_spinner *spin, + int queue_sz, int ring_sz) +{ + struct intel_context *ce[2] = {}; + struct i915_request *rq; + struct igt_live_test t; + int err = 0; + int n; + + if (igt_live_test_begin(&t, engine->i915, __func__, engine->name)) + return -EIO; + + for (n = 0; n < ARRAY_SIZE(ce); n++) { + struct intel_context *tmp; + + tmp = intel_context_create(engine); + if (IS_ERR(tmp)) { + err = PTR_ERR(tmp); + goto err_ce; + } + + tmp->ring = __intel_context_ring_size(ring_sz); + + err = intel_context_pin(tmp); + if (err) { + intel_context_put(tmp); + goto err_ce; + } + + memset32(tmp->ring->vaddr, + 0xdeadbeef, /* trigger a hang if executed */ + tmp->ring->vma->size / sizeof(u32)); + + ce[n] = tmp; + } + + rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ce; + } + + i915_request_get(rq); + rq->sched.attr.priority = I915_PRIORITY_BARRIER; + i915_request_add(rq); + + if (!igt_wait_for_spinner(spin, rq)) { + intel_gt_set_wedged(engine->gt); + i915_request_put(rq); + err = -ETIME; + goto err_ce; + } + + /* Fill the ring, until we will cause a wrap */ + n = 0; + while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) { + struct i915_request *tmp; + + tmp = intel_context_create_request(ce[0]); + if (IS_ERR(tmp)) { + err = PTR_ERR(tmp); + i915_request_put(rq); + goto err_ce; + } + + i915_request_add(tmp); + intel_engine_flush_submission(engine); + n++; + } + intel_engine_flush_submission(engine); + pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n", + engine->name, queue_sz, n, + ce[0]->ring->size, + ce[0]->ring->tail, + 
ce[0]->ring->emit, + rq->tail); + i915_request_put(rq); + + /* Create a second request to preempt the first ring */ + rq = intel_context_create_request(ce[1]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ce; + } + + rq->sched.attr.priority = I915_PRIORITY_BARRIER; + i915_request_get(rq); + i915_request_add(rq); + + err = wait_for_submit(engine, rq, HZ / 2); + i915_request_put(rq); + if (err) { + pr_err("%s: preemption request was not submitted\n", + engine->name); + err = -ETIME; + } + + pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n", + engine->name, + ce[0]->ring->tail, ce[0]->ring->emit, + ce[1]->ring->tail, ce[1]->ring->emit); + +err_ce: + intel_engine_flush_submission(engine); + igt_spinner_end(spin); + for (n = 0; n < ARRAY_SIZE(ce); n++) { + if (IS_ERR_OR_NULL(ce[n])) + break; + + intel_context_unpin(ce[n]); + intel_context_put(ce[n]); + } + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} + +static int live_preempt_ring(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct igt_spinner spin; + enum intel_engine_id id; + int err = 0; + + /* + * Check that we rollback large chunks of a ring in order to do a + * preemption event. Similar to live_unlite_ring, but looking at + * ring size rather than the impact of intel_ring_direction(). + */ + + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; + + for_each_engine(engine, gt, id) { + int n; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (!intel_engine_can_store_dword(engine)) + continue; + + st_engine_heartbeat_disable(engine); + + for (n = 0; n <= 3; n++) { + err = __live_preempt_ring(engine, &spin, + n * SZ_4K / 4, SZ_4K); + if (err) + break; + } + + st_engine_heartbeat_enable(engine); + if (err) + break; + } + + igt_spinner_fini(&spin); + return err; +} + +static int live_preempt_gang(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* + * Build as long a chain of preempters as we can, with each + * request higher priority than the last. Once we are ready, we release + * the last batch which then percolates down the chain, each releasing + * the next oldest in turn. The intent is to simply push as hard as we + * can with the number of preemptions, trying to exceed narrow HW + * limits. At a minimum, we insist that we can sort all the user + * high priority levels into execution order. + */ + + for_each_engine(engine, gt, id) { + struct i915_request *rq = NULL; + struct igt_live_test t; + IGT_TIMEOUT(end_time); + int prio = 0; + int err = 0; + u32 *cs; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) + return -EIO; + + do { + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(prio++), + }; + + err = create_gang(engine, &rq); + if (err) + break; + + /* Submit each spinner at increasing priority */ + engine->schedule(rq, &attr); + } while (prio <= I915_PRIORITY_MAX && + !__igt_timeout(end_time, NULL)); + pr_debug("%s: Preempt chain of %d requests\n", + engine->name, prio); + + /* + * Such that the last spinner is the highest priority and + * should execute first. When that spinner completes, + * it will terminate the next lowest spinner until there + * are no more spinners and the gang is complete. 
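+	 *
+	 * Writing zero into the last spinner's semaphore (*cs = 0 below) is
+	 * what releases the cascade.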
+ */ + cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC); + if (!IS_ERR(cs)) { + *cs = 0; + i915_gem_object_unpin_map(rq->batch->obj); + } else { + err = PTR_ERR(cs); + intel_gt_set_wedged(gt); + } + + while (rq) { /* wait for each rq from highest to lowest prio */ + struct i915_request *n = list_next_entry(rq, mock.link); + + if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) { + struct drm_printer p = + drm_info_printer(engine->i915->drm.dev); + + pr_err("Failed to flush chain of %d requests, at %d\n", + prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + err = -ETIME; + } + + i915_vma_put(rq->batch); + i915_request_put(rq); + rq = n; + } + + if (igt_live_test_end(&t)) + err = -EIO; + if (err) + return err; + } + + return 0; +} + +static struct i915_vma * +create_gpr_user(struct intel_engine_cs *engine, + struct i915_vma *result, + unsigned int offset) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + u32 *cs; + int err; + int i; + + obj = i915_gem_object_create_internal(engine->i915, 4096); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, result->vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) { + i915_vma_put(vma); + return ERR_PTR(err); + } + + cs = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(cs)) { + i915_vma_put(vma); + return ERR_CAST(cs); + } + + /* All GPR are clear for new contexts. We use GPR(0) as a constant */ + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = CS_GPR(engine, 0); + *cs++ = 1; + + for (i = 1; i < NUM_GPR; i++) { + u64 addr; + + /* + * Perform: GPR[i]++ + * + * As we read and write into the context saved GPR[i], if + * we restart this batch buffer from an earlier point, we + * will repeat the increment and store a value > 1. 
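+	 *
+	 * Each pass below computes GPR[i] := GPR[i] + GPR[0] (GPR[0] is
+	 * preloaded with 1), spills the sum to memory with an SRM, then
+	 * waits on a semaphore so that there is an arbitration point
+	 * between increments.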
+ */ + *cs++ = MI_MATH(4); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i)); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0)); + *cs++ = MI_MATH_ADD; + *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU); + + addr = result->node.start + offset + i * sizeof(*cs); + *cs++ = MI_STORE_REGISTER_MEM_GEN8; + *cs++ = CS_GPR(engine, 2 * i); + *cs++ = lower_32_bits(addr); + *cs++ = upper_32_bits(addr); + + *cs++ = MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_GTE_SDD; + *cs++ = i; + *cs++ = lower_32_bits(result->node.start); + *cs++ = upper_32_bits(result->node.start); + } + + *cs++ = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + + return vma; +} + +static struct i915_vma *create_global(struct intel_gt *gt, size_t sz) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_internal(gt->i915, sz); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; + } + + err = i915_ggtt_pin(vma, NULL, 0, 0); + if (err) { + i915_vma_put(vma); + return ERR_PTR(err); + } + + return vma; +} + +static struct i915_request * +create_gpr_client(struct intel_engine_cs *engine, + struct i915_vma *global, + unsigned int offset) +{ + struct i915_vma *batch, *vma; + struct intel_context *ce; + struct i915_request *rq; + int err; + + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return ERR_CAST(ce); + + vma = i915_vma_instance(global->obj, ce->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_ce; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto out_ce; + + batch = create_gpr_user(engine, vma, offset); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto out_vma; + } + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_batch; + } + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, false); + if (!err) + err = i915_vma_move_to_active(vma, rq, 0); + i915_vma_unlock(vma); + + i915_vma_lock(batch); + if (!err) + err = i915_request_await_object(rq, batch->obj, false); + if (!err) + err = i915_vma_move_to_active(batch, rq, 0); + if (!err) + err = rq->engine->emit_bb_start(rq, + batch->node.start, + PAGE_SIZE, 0); + i915_vma_unlock(batch); + i915_vma_unpin(batch); + + if (!err) + i915_request_get(rq); + i915_request_add(rq); + +out_batch: + i915_vma_put(batch); +out_vma: + i915_vma_unpin(vma); +out_ce: + intel_context_put(ce); + return err ? 
ERR_PTR(err) : rq; +} + +static int preempt_user(struct intel_engine_cs *engine, + struct i915_vma *global, + int id) +{ + struct i915_sched_attr attr = { + .priority = I915_PRIORITY_MAX + }; + struct i915_request *rq; + int err = 0; + u32 *cs; + + rq = intel_engine_create_kernel_request(engine); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + i915_request_add(rq); + return PTR_ERR(cs); + } + + *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; + *cs++ = i915_ggtt_offset(global); + *cs++ = 0; + *cs++ = id; + + intel_ring_advance(rq, cs); + + i915_request_get(rq); + i915_request_add(rq); + + engine->schedule(rq, &attr); + + if (i915_request_wait(rq, 0, HZ / 2) < 0) + err = -ETIME; + i915_request_put(rq); + + return err; +} + +static int live_preempt_user(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_vma *global; + enum intel_engine_id id; + u32 *result; + int err = 0; + + /* + * In our other tests, we look at preemption in carefully + * controlled conditions in the ringbuffer. Since most of the + * time is spent in user batches, most of our preemptions naturally + * occur there. We want to verify that when we preempt inside a batch + * we continue on from the current instruction and do not roll back + * to the start, or another earlier arbitration point. + * + * To verify this, we create a batch which is a mixture of + * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with + * a few preempting contexts thrown into the mix, we look for any + * repeated instructions (which show up as incorrect values). + */ + + global = create_global(gt, 4096); + if (IS_ERR(global)) + return PTR_ERR(global); + + result = i915_gem_object_pin_map(global->obj, I915_MAP_WC); + if (IS_ERR(result)) { + i915_vma_unpin_and_release(&global, 0); + return PTR_ERR(result); + } + + for_each_engine(engine, gt, id) { + struct i915_request *client[3] = {}; + struct igt_live_test t; + int i; + + if (!intel_engine_has_preemption(engine)) + continue; + + if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS) + continue; /* we need per-context GPR */ + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + break; + } + + memset(result, 0, 4096); + + for (i = 0; i < ARRAY_SIZE(client); i++) { + struct i915_request *rq; + + rq = create_gpr_client(engine, global, + NUM_GPR * i * sizeof(u32)); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto end_test; + } + + client[i] = rq; + } + + /* Continuously preempt the set of 3 running contexts */ + for (i = 1; i <= NUM_GPR; i++) { + err = preempt_user(engine, global, i); + if (err) + goto end_test; + } + + if (READ_ONCE(result[0]) != NUM_GPR) { + pr_err("%s: Failed to release semaphore\n", + engine->name); + err = -EIO; + goto end_test; + } + + for (i = 0; i < ARRAY_SIZE(client); i++) { + int gpr; + + if (i915_request_wait(client[i], 0, HZ / 2) < 0) { + err = -ETIME; + goto end_test; + } + + for (gpr = 1; gpr < NUM_GPR; gpr++) { + if (result[NUM_GPR * i + gpr] != 1) { + pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n", + engine->name, + i, gpr, result[NUM_GPR * i + gpr]); + err = -EINVAL; + goto end_test; + } + } + } + +end_test: + for (i = 0; i < ARRAY_SIZE(client); i++) { + if (!client[i]) + break; + + i915_request_put(client[i]); + } + + /* Flush the semaphores on error */ + smp_store_mb(result[0], -1); + if (igt_live_test_end(&t)) + err = -EIO; + if (err) + break; + } + + i915_vma_unpin_and_release(&global, I915_VMA_RELEASE_MAP); + return err; +} + 
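As a reading aid for the check above: the result page written by the three GPR clients uses slot 0 as the shared release semaphore, while client i owns slots NUM_GPR * i + 1 through NUM_GPR * i + (NUM_GPR - 1), each of which must read exactly 1 if no MI_MATH increment was replayed. The host-side sketch below restates that invariant; it is editorial, not part of the patch, and it assumes NUM_GPR == 16 as defined elsewhere in this selftest file.

#include <stdint.h>
#include <stdio.h>

#define NUM_GPR 16	/* assumption: matches the selftest's definition */
#define NUM_CLIENTS 3	/* ARRAY_SIZE(client) in live_preempt_user */

static int check_gpr_results(const uint32_t *result)
{
	int i, gpr;

	/* Slot 0 is the shared semaphore; preempt_user() counts it up to NUM_GPR. */
	if (result[0] != NUM_GPR)
		return -1;

	for (i = 0; i < NUM_CLIENTS; i++) {
		for (gpr = 1; gpr < NUM_GPR; gpr++) {
			/* A replayed GPR[i]++ would leave a value > 1 here. */
			if (result[NUM_GPR * i + gpr] != 1) {
				fprintf(stderr, "client %d, gpr %d: %u\n",
					i, gpr, result[NUM_GPR * i + gpr]);
				return -1;
			}
		}
	}

	return 0;
}

int main(void)
{
	/* Model a clean run: semaphore fully counted, every increment seen once. */
	uint32_t result[NUM_GPR * NUM_CLIENTS] = { [0] = NUM_GPR };
	int i, gpr;

	for (i = 0; i < NUM_CLIENTS; i++)
		for (gpr = 1; gpr < NUM_GPR; gpr++)
			result[NUM_GPR * i + gpr] = 1;

	return check_gpr_results(result) ? 1 : 0;
}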
+static int live_preempt_timeout(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct igt_spinner spin_lo; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * Check that we force preemption to occur by cancelling the previous + * context if it refuses to yield the GPU. + */ + if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + return 0; + + if (!intel_has_reset_engine(gt)) + return 0; + + if (igt_spinner_init(&spin_lo, gt)) + return -ENOMEM; + + ctx_hi = kernel_context(gt->i915); + if (!ctx_hi) + goto err_spin_lo; + ctx_hi->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); + + ctx_lo = kernel_context(gt->i915); + if (!ctx_lo) + goto err_ctx_hi; + ctx_lo->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); + + for_each_engine(engine, gt, id) { + unsigned long saved_timeout; + struct i915_request *rq; + + if (!intel_engine_has_preemption(engine)) + continue; + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_NOOP); /* preemption disabled */ + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin_lo, rq)) { + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; + } + + rq = igt_request_alloc(ctx_hi, engine); + if (IS_ERR(rq)) { + igt_spinner_end(&spin_lo); + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + /* Flush the previous CS ack before changing timeouts */ + while (READ_ONCE(engine->execlists.pending[0])) + cpu_relax(); + + saved_timeout = engine->props.preempt_timeout_ms; + engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */ + + i915_request_get(rq); + i915_request_add(rq); + + intel_engine_flush_submission(engine); + engine->props.preempt_timeout_ms = saved_timeout; + + if (i915_request_wait(rq, 0, HZ / 10) < 0) { + intel_gt_set_wedged(gt); + i915_request_put(rq); + err = -ETIME; + goto err_ctx_lo; + } + + igt_spinner_end(&spin_lo); + i915_request_put(rq); + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + igt_spinner_fini(&spin_lo); + return err; +} + +static int random_range(struct rnd_state *rnd, int min, int max) +{ + return i915_prandom_u32_max_state(max - min, rnd) + min; +} + +static int random_priority(struct rnd_state *rnd) +{ + return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX); +} + +struct preempt_smoke { + struct intel_gt *gt; + struct i915_gem_context **contexts; + struct intel_engine_cs *engine; + struct drm_i915_gem_object *batch; + unsigned int ncontext; + struct rnd_state prng; + unsigned long count; +}; + +static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke) +{ + return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext, + &smoke->prng)]; +} + +static int smoke_submit(struct preempt_smoke *smoke, + struct i915_gem_context *ctx, int prio, + struct drm_i915_gem_object *batch) +{ + struct i915_request *rq; + struct i915_vma *vma = NULL; + int err = 0; + + if (batch) { + struct i915_address_space *vm; + + vm = i915_gem_context_get_vm_rcu(ctx); + vma = i915_vma_instance(batch, vm, NULL); + i915_vm_put(vm); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + return err; + } + + ctx->sched.priority = prio; + + rq = igt_request_alloc(ctx, smoke->engine); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto unpin; + } + + if (vma) { + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, false); 
+		if (!err)
+			err = i915_vma_move_to_active(vma, rq, 0);
+		if (!err)
+			err = rq->engine->emit_bb_start(rq,
+							vma->node.start,
+							PAGE_SIZE, 0);
+		i915_vma_unlock(vma);
+	}
+
+	i915_request_add(rq);
+
+unpin:
+	if (vma)
+		i915_vma_unpin(vma);
+
+	return err;
+}
+
+static int smoke_crescendo_thread(void *arg)
+{
+	struct preempt_smoke *smoke = arg;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_gem_context *ctx = smoke_context(smoke);
+		int err;
+
+		err = smoke_submit(smoke,
+				   ctx, count % I915_PRIORITY_MAX,
+				   smoke->batch);
+		if (err)
+			return err;
+
+		count++;
+	} while (count < smoke->ncontext && !__igt_timeout(end_time, NULL));
+
+	smoke->count = count;
+	return 0;
+}
+
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
+{
+	struct task_struct *tsk[I915_NUM_ENGINES] = {};
+	struct preempt_smoke arg[I915_NUM_ENGINES];
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned long count;
+	int err = 0;
+
+	for_each_engine(engine, smoke->gt, id) {
+		arg[id] = *smoke;
+		arg[id].engine = engine;
+		if (!(flags & BATCH))
+			arg[id].batch = NULL;
+		arg[id].count = 0;
+
+		tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id],
+				      "igt/smoke:%d", id);
+		if (IS_ERR(tsk[id])) {
+			err = PTR_ERR(tsk[id]);
+			break;
+		}
+		get_task_struct(tsk[id]);
+	}
+
+	yield(); /* start all threads before we kthread_stop() */
+
+	count = 0;
+	for_each_engine(engine, smoke->gt, id) {
+		int status;
+
+		if (IS_ERR_OR_NULL(tsk[id]))
+			continue;
+
+		status = kthread_stop(tsk[id]);
+		if (status && !err)
+			err = status;
+
+		count += arg[id].count;
+
+		put_task_struct(tsk[id]);
+	}
+
+	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+		count, flags, smoke->gt->info.num_engines, smoke->ncontext);
+	return err;
+}
+
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
+{
+	enum intel_engine_id id;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		for_each_engine(smoke->engine, smoke->gt, id) {
+			struct i915_gem_context *ctx = smoke_context(smoke);
+			int err;
+
+			err = smoke_submit(smoke,
+					   ctx, random_priority(&smoke->prng),
+					   flags & BATCH ?
smoke->batch : NULL); + if (err) + return err; + + count++; + } + } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL)); + + pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", + count, flags, smoke->gt->info.num_engines, smoke->ncontext); + return 0; +} + +static int live_preempt_smoke(void *arg) +{ + struct preempt_smoke smoke = { + .gt = arg, + .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed), + .ncontext = 256, + }; + const unsigned int phase[] = { 0, BATCH }; + struct igt_live_test t; + int err = -ENOMEM; + u32 *cs; + int n; + + smoke.contexts = kmalloc_array(smoke.ncontext, + sizeof(*smoke.contexts), + GFP_KERNEL); + if (!smoke.contexts) + return -ENOMEM; + + smoke.batch = + i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE); + if (IS_ERR(smoke.batch)) { + err = PTR_ERR(smoke.batch); + goto err_free; + } + + cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_batch; + } + for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) + cs[n] = MI_ARB_CHECK; + cs[n] = MI_BATCH_BUFFER_END; + i915_gem_object_flush_map(smoke.batch); + i915_gem_object_unpin_map(smoke.batch); + + if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) { + err = -EIO; + goto err_batch; + } + + for (n = 0; n < smoke.ncontext; n++) { + smoke.contexts[n] = kernel_context(smoke.gt->i915); + if (!smoke.contexts[n]) + goto err_ctx; + } + + for (n = 0; n < ARRAY_SIZE(phase); n++) { + err = smoke_crescendo(&smoke, phase[n]); + if (err) + goto err_ctx; + + err = smoke_random(&smoke, phase[n]); + if (err) + goto err_ctx; + } + +err_ctx: + if (igt_live_test_end(&t)) + err = -EIO; + + for (n = 0; n < smoke.ncontext; n++) { + if (!smoke.contexts[n]) + break; + kernel_context_close(smoke.contexts[n]); + } + +err_batch: + i915_gem_object_put(smoke.batch); +err_free: + kfree(smoke.contexts); + + return err; +} + +static int nop_virtual_engine(struct intel_gt *gt, + struct intel_engine_cs **siblings, + unsigned int nsibling, + unsigned int nctx, + unsigned int flags) +#define CHAIN BIT(0) +{ + IGT_TIMEOUT(end_time); + struct i915_request *request[16] = {}; + struct intel_context *ve[16]; + unsigned long n, prime, nc; + struct igt_live_test t; + ktime_t times[2] = {}; + int err; + + GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve)); + + for (n = 0; n < nctx; n++) { + ve[n] = intel_execlists_create_virtual(siblings, nsibling); + if (IS_ERR(ve[n])) { + err = PTR_ERR(ve[n]); + nctx = n; + goto out; + } + + err = intel_context_pin(ve[n]); + if (err) { + intel_context_put(ve[n]); + nctx = n; + goto out; + } + } + + err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name); + if (err) + goto out; + + for_each_prime_number_from(prime, 1, 8192) { + times[1] = ktime_get_raw(); + + if (flags & CHAIN) { + for (nc = 0; nc < nctx; nc++) { + for (n = 0; n < prime; n++) { + struct i915_request *rq; + + rq = i915_request_create(ve[nc]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + if (request[nc]) + i915_request_put(request[nc]); + request[nc] = i915_request_get(rq); + i915_request_add(rq); + } + } + } else { + for (n = 0; n < prime; n++) { + for (nc = 0; nc < nctx; nc++) { + struct i915_request *rq; + + rq = i915_request_create(ve[nc]); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + if (request[nc]) + i915_request_put(request[nc]); + request[nc] = i915_request_get(rq); + i915_request_add(rq); + } + } + } + + for (nc = 0; nc < nctx; nc++) { + if (i915_request_wait(request[nc], 0, HZ / 10) < 0) { 
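+				/* Dump the GEM trace before wedging so the hang can be diagnosed */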
+				pr_err("%s(%s): wait for %llx:%lld timed out\n",
+				       __func__, ve[0]->engine->name,
+				       request[nc]->fence.context,
+				       request[nc]->fence.seqno);
+
+				GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+					  __func__, ve[0]->engine->name,
+					  request[nc]->fence.context,
+					  request[nc]->fence.seqno);
+				GEM_TRACE_DUMP();
+				intel_gt_set_wedged(gt);
+				break;
+			}
+		}
+
+		times[1] = ktime_sub(ktime_get_raw(), times[1]);
+		if (prime == 1)
+			times[0] = times[1];
+
+		for (nc = 0; nc < nctx; nc++) {
+			i915_request_put(request[nc]);
+			request[nc] = NULL;
+		}
+
+		if (__igt_timeout(end_time, NULL))
+			break;
+	}
+
+	err = igt_live_test_end(&t);
+	if (err)
+		goto out;
+
+	pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n",
+		nctx, ve[0]->engine->name, ktime_to_ns(times[0]),
+		prime, div64_u64(ktime_to_ns(times[1]), prime));
+
+out:
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+
+	for (nc = 0; nc < nctx; nc++) {
+		i915_request_put(request[nc]);
+		intel_context_unpin(ve[nc]);
+		intel_context_put(ve[nc]);
+	}
+	return err;
+}
+
+static unsigned int
+__select_siblings(struct intel_gt *gt,
+		  unsigned int class,
+		  struct intel_engine_cs **siblings,
+		  bool (*filter)(const struct intel_engine_cs *))
+{
+	unsigned int n = 0;
+	unsigned int inst;
+
+	for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+		if (!gt->engine_class[class][inst])
+			continue;
+
+		if (filter && !filter(gt->engine_class[class][inst]))
+			continue;
+
+		siblings[n++] = gt->engine_class[class][inst];
+	}
+
+	return n;
+}
+
+static unsigned int
+select_siblings(struct intel_gt *gt,
+		unsigned int class,
+		struct intel_engine_cs **siblings)
+{
+	return __select_siblings(gt, class, siblings, NULL);
+}
+
+static int live_virtual_engine(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	unsigned int class;
+	int err;
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	for_each_engine(engine, gt, id) {
+		err = nop_virtual_engine(gt, &engine, 1, 1, 0);
+		if (err) {
+			pr_err("Failed to wrap engine %s: err=%d\n",
+			       engine->name, err);
+			return err;
+		}
+	}
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		int nsibling, n;
+
+		nsibling = select_siblings(gt, class, siblings);
+		if (nsibling < 2)
+			continue;
+
+		for (n = 1; n <= nsibling + 1; n++) {
+			err = nop_virtual_engine(gt, siblings, nsibling,
+						 n, 0);
+			if (err)
+				return err;
+		}
+
+		err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int mask_virtual_engine(struct intel_gt *gt,
+			       struct intel_engine_cs **siblings,
+			       unsigned int nsibling)
+{
+	struct i915_request *request[MAX_ENGINE_INSTANCE + 1];
+	struct intel_context *ve;
+	struct igt_live_test t;
+	unsigned int n;
+	int err;
+
+	/*
+	 * Check that by setting the execution mask on a request, we can
+	 * restrict it to our desired engine within the virtual engine.
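+	 * The mask is applied per-request, so each request below must
+	 * execute on the one sibling it names and no other.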
+	 */
+
+	ve = intel_execlists_create_virtual(siblings, nsibling);
+	if (IS_ERR(ve)) {
+		err = PTR_ERR(ve);
+		goto out_close;
+	}
+
+	err = intel_context_pin(ve);
+	if (err)
+		goto out_put;
+
+	err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+	if (err)
+		goto out_unpin;
+
+	for (n = 0; n < nsibling; n++) {
+		request[n] = i915_request_create(ve);
+		if (IS_ERR(request[n])) {
+			err = PTR_ERR(request[n]);
+			nsibling = n;
+			goto out;
+		}
+
+		/* Reverse order as it's more likely to be unnatural */
+		request[n]->execution_mask = siblings[nsibling - n - 1]->mask;
+
+		i915_request_get(request[n]);
+		i915_request_add(request[n]);
+	}
+
+	for (n = 0; n < nsibling; n++) {
+		if (i915_request_wait(request[n], 0, HZ / 10) < 0) {
+			pr_err("%s(%s): wait for %llx:%lld timed out\n",
+			       __func__, ve->engine->name,
+			       request[n]->fence.context,
+			       request[n]->fence.seqno);
+
+			GEM_TRACE("%s(%s) failed at request %llx:%lld\n",
+				  __func__, ve->engine->name,
+				  request[n]->fence.context,
+				  request[n]->fence.seqno);
+			GEM_TRACE_DUMP();
+			intel_gt_set_wedged(gt);
+			err = -EIO;
+			goto out;
+		}
+
+		if (request[n]->engine != siblings[nsibling - n - 1]) {
+			pr_err("Executed on wrong sibling '%s', expected '%s'\n",
+			       request[n]->engine->name,
+			       siblings[nsibling - n - 1]->name);
+			err = -EINVAL;
+			goto out;
+		}
+	}
+
+	err = igt_live_test_end(&t);
+out:
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+
+	for (n = 0; n < nsibling; n++)
+		i915_request_put(request[n]);
+
+out_unpin:
+	intel_context_unpin(ve);
+out_put:
+	intel_context_put(ve);
+out_close:
+	return err;
+}
+
+static int live_virtual_mask(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	unsigned int class;
+	int err;
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		unsigned int nsibling;
+
+		nsibling = select_siblings(gt, class, siblings);
+		if (nsibling < 2)
+			continue;
+
+		err = mask_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int slicein_virtual_engine(struct intel_gt *gt,
+				  struct intel_engine_cs **siblings,
+				  unsigned int nsibling)
+{
+	const long timeout = slice_timeout(siblings[0]);
+	struct intel_context *ce;
+	struct i915_request *rq;
+	struct igt_spinner spin;
+	unsigned int n;
+	int err = 0;
+
+	/*
+	 * Virtual requests must take part in timeslicing on the target engines.
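+	 * Each sibling is first occupied by a spinner, so a following
+	 * virtual request can only complete by being timesliced onto a
+	 * busy engine.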
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	for (n = 0; n < nsibling; n++) {
+		ce = intel_context_create(siblings[n]);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+		intel_context_put(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+
+		i915_request_add(rq);
+	}
+
+	ce = intel_execlists_create_virtual(siblings, nsibling);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		goto out;
+	}
+
+	rq = intel_context_create_request(ce);
+	intel_context_put(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out;
+	}
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (i915_request_wait(rq, 0, timeout) < 0) {
+		GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
+			      __func__, rq->engine->name);
+		GEM_TRACE_DUMP();
+		intel_gt_set_wedged(gt);
+		err = -EIO;
+	}
+	i915_request_put(rq);
+
+out:
+	igt_spinner_end(&spin);
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int sliceout_virtual_engine(struct intel_gt *gt,
+				   struct intel_engine_cs **siblings,
+				   unsigned int nsibling)
+{
+	const long timeout = slice_timeout(siblings[0]);
+	struct intel_context *ce;
+	struct i915_request *rq;
+	struct igt_spinner spin;
+	unsigned int n;
+	int err = 0;
+
+	/*
+	 * Virtual requests must allow others a fair timeslice.
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	/* XXX We do not handle oversubscription and fairness with normal rq */
+	for (n = 0; n < nsibling; n++) {
+		ce = intel_execlists_create_virtual(siblings, nsibling);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+		intel_context_put(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+
+		i915_request_add(rq);
+	}
+
+	for (n = 0; !err && n < nsibling; n++) {
+		ce = intel_context_create(siblings[n]);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		rq = intel_context_create_request(ce);
+		intel_context_put(ce);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+
+		i915_request_get(rq);
+		i915_request_add(rq);
+		if (i915_request_wait(rq, 0, timeout) < 0) {
+			GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
+				      __func__, siblings[n]->name);
+			GEM_TRACE_DUMP();
+			intel_gt_set_wedged(gt);
+			err = -EIO;
+		}
+		i915_request_put(rq);
+	}
+
+out:
+	igt_spinner_end(&spin);
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_virtual_slice(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	unsigned int class;
+	int err;
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		unsigned int nsibling;
+
+		nsibling = __select_siblings(gt, class, siblings,
+					     intel_engine_has_timeslices);
+		if (nsibling < 2)
+			continue;
+
+		err = slicein_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+
+		err = sliceout_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int preserved_virtual_engine(struct intel_gt *gt,
+				    struct intel_engine_cs **siblings,
+				    unsigned int nsibling)
+{
+	struct i915_request *last = NULL;
+	struct intel_context *ve;
+	struct i915_vma *scratch;
+	struct igt_live_test t;
+	unsigned int n;
+	int err = 0;
+	u32 *cs;
+
+	scratch = __vm_create_scratch_for_read(&siblings[0]->gt->ggtt->vm,
+					       PAGE_SIZE);
+	if (IS_ERR(scratch))
+		return PTR_ERR(scratch);
+
+	err = i915_vma_sync(scratch);
+	if (err)
+		goto out_scratch;
+
+	ve = intel_execlists_create_virtual(siblings, nsibling);
+	if (IS_ERR(ve)) {
+		err = PTR_ERR(ve);
+		goto out_scratch;
+	}
+
+	err = intel_context_pin(ve);
+	if (err)
+		goto out_put;
+
+	err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name);
+	if (err)
+		goto out_unpin;
+
+	for (n = 0; n < NUM_GPR_DW; n++) {
+		struct intel_engine_cs *engine = siblings[n % nsibling];
+		struct i915_request *rq;
+
+		rq = i915_request_create(ve);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out_end;
+		}
+
+		i915_request_put(last);
+		last = i915_request_get(rq);
+
+		cs = intel_ring_begin(rq, 8);
+		if (IS_ERR(cs)) {
+			i915_request_add(rq);
+			err = PTR_ERR(cs);
+			goto out_end;
+		}
+
+		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+		*cs++ = CS_GPR(engine, n);
+		*cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+		*cs++ = 0;
+
+		*cs++ = MI_LOAD_REGISTER_IMM(1);
+		*cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW);
+		*cs++ = n + 1;
+
+		*cs++ = MI_NOOP;
+		intel_ring_advance(rq, cs);
+
+		/* Restrict this request to run on a particular engine */
+		rq->execution_mask = engine->mask;
+		i915_request_add(rq);
+	}
+
+	if (i915_request_wait(last, 0, HZ / 5) < 0) {
+		err = -ETIME;
+		goto out_end;
+	}
+
+	cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto out_end;
+	}
+
+	for (n = 0; n < NUM_GPR_DW; n++) {
+		if (cs[n] != n) {
+			pr_err("Incorrect value[%d] found for GPR[%d]\n",
+			       cs[n], n);
+			err = -EINVAL;
+			break;
+		}
+	}
+
+	i915_gem_object_unpin_map(scratch->obj);
+
+out_end:
+	if (igt_live_test_end(&t))
+		err = -EIO;
+	i915_request_put(last);
+out_unpin:
+	intel_context_unpin(ve);
+out_put:
+	intel_context_put(ve);
+out_scratch:
+	i915_vma_unpin_and_release(&scratch, 0);
+	return err;
+}
+
+static int live_virtual_preserved(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	unsigned int class;
+
+	/*
+	 * Check that the context image retains non-privileged (user) registers
+	 * from one engine to the next. For this we check that the CS_GPR
+	 * are preserved.
+	 */
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	/* As we use CS_GPR we cannot run before they existed on all engines. */
+	if (INTEL_GEN(gt->i915) < 9)
+		return 0;
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		int nsibling, err;
+
+		nsibling = select_siblings(gt, class, siblings);
+		if (nsibling < 2)
+			continue;
+
+		err = preserved_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int bond_virtual_engine(struct intel_gt *gt,
+			       unsigned int class,
+			       struct intel_engine_cs **siblings,
+			       unsigned int nsibling,
+			       unsigned int flags)
+#define BOND_SCHEDULE BIT(0)
+{
+	struct intel_engine_cs *master;
+	struct i915_request *rq[16];
+	enum intel_engine_id id;
+	struct igt_spinner spin;
+	unsigned long n;
+	int err;
+
+	/*
+	 * A set of bonded requests is intended to be run concurrently
+	 * across a number of engines. We use one request per-engine
+	 * and a magic fence to schedule each of the bonded requests
+	 * at the same time. A consequence of our current scheduler is that
+	 * we only move requests to the HW ready queue when the request
+	 * becomes ready, that is when all of its prerequisite fences have
+	 * been signaled. As one of those fences is the master submit fence,
+	 * there is a delay on all secondary fences as the HW may be
+	 * currently busy. Equally, as all the requests are independent,
+	 * they may have other fences that delay individual request
+	 * submission to HW. Ergo, we do not guarantee that all requests are
+	 * immediately submitted to HW at the same time, just that if the
+	 * rules are abided by, they are ready at the same time as the
+	 * first is submitted. Userspace can embed semaphores in its batch
+	 * to ensure parallel execution of its phases as it requires.
+	 * Though naturally it gets requested that perhaps the scheduler should
+	 * take care of parallel execution, even across preemption events on
+	 * different HW. (The proper answer is of course "lalalala".)
+	 *
+	 * With the submit-fence, we have identified three possible phases
+	 * of synchronisation depending on the master fence: queued (not
+	 * ready), executing, and signaled. The first two are quite simple
+	 * and checked below. However, the signaled master fence handling is
+	 * contentious. Currently we do not distinguish between a signaled
+	 * fence and an expired fence, as once signaled it does not convey
+	 * any information about the previous execution. It may even be freed
+	 * and hence checking later it may not exist at all. Ergo we currently
+	 * do not apply the bonding constraint for an already signaled fence,
+	 * as our expectation is that it should not constrain the secondaries
+	 * and is outside of the scope of the bonded request API (i.e. all
+	 * userspace requests are meant to be running in parallel). As
+	 * it imposes no constraint, and is effectively a no-op, we do not
+	 * check below as normal execution flows are checked extensively above.
+	 *
+	 * XXX Is the degenerate handling of signaled submit fences the
+	 * expected behaviour for userspace?
+	 */
+
+	GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1);
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	err = 0;
+	rq[0] = ERR_PTR(-ENOMEM);
+	for_each_engine(master, gt, id) {
+		struct i915_sw_fence fence = {};
+		struct intel_context *ce;
+
+		if (master->class == class)
+			continue;
+
+		ce = intel_context_create(master);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
+
+		rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
+		intel_context_put(ce);
+		if (IS_ERR(rq[0])) {
+			err = PTR_ERR(rq[0]);
+			goto out;
+		}
+		i915_request_get(rq[0]);
+
+		if (flags & BOND_SCHEDULE) {
+			onstack_fence_init(&fence);
+			err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit,
+							       &fence,
+							       GFP_KERNEL);
+		}
+
+		i915_request_add(rq[0]);
+		if (err < 0)
+			goto out;
+
+		if (!(flags & BOND_SCHEDULE) &&
+		    !igt_wait_for_spinner(&spin, rq[0])) {
+			err = -EIO;
+			goto out;
+		}
+
+		for (n = 0; n < nsibling; n++) {
+			struct intel_context *ve;
+
+			ve = intel_execlists_create_virtual(siblings, nsibling);
+			if (IS_ERR(ve)) {
+				err = PTR_ERR(ve);
+				onstack_fence_fini(&fence);
+				goto out;
+			}
+
+			err = intel_virtual_engine_attach_bond(ve->engine,
+							       master,
+							       siblings[n]);
+			if (err) {
+				intel_context_put(ve);
+				onstack_fence_fini(&fence);
+				goto out;
+			}
+
+			err = intel_context_pin(ve);
+			intel_context_put(ve);
+			if (err) {
+				onstack_fence_fini(&fence);
+				goto out;
+			}
+
+			rq[n + 1] = i915_request_create(ve);
+			intel_context_unpin(ve);
+			if (IS_ERR(rq[n + 1])) {
+				err = PTR_ERR(rq[n + 1]);
+				onstack_fence_fini(&fence);
+				goto out;
+			}
+			i915_request_get(rq[n + 1]);
+
+			err = i915_request_await_execution(rq[n + 1],
+							   &rq[0]->fence,
+							   ve->engine->bond_execute);
+			i915_request_add(rq[n + 1]);
+			if (err < 0) {
+				onstack_fence_fini(&fence);
+				goto out;
+			}
+		}
+		onstack_fence_fini(&fence);
+		intel_engine_flush_submission(master);
+		igt_spinner_end(&spin);
+
+		if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
+			pr_err("Master request did not execute (on %s)!\n",
+			       rq[0]->engine->name);
+			err = -EIO;
+			goto out;
+		}
+
+		for (n = 0; n < nsibling; n++) {
+			if (i915_request_wait(rq[n + 1], 0,
+					      MAX_SCHEDULE_TIMEOUT) < 0) {
+				err = -EIO;
+				goto out;
+			}
+
+			if (rq[n + 1]->engine != siblings[n]) {
+				pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n",
+				       siblings[n]->name,
+				       rq[n + 1]->engine->name,
+				       rq[0]->engine->name);
+				err = -EINVAL;
+				goto out;
+			}
+		}
+
+		for (n = 0; !IS_ERR(rq[n]); n++)
+			i915_request_put(rq[n]);
+		rq[0] = ERR_PTR(-ENOMEM);
+	}
+
+out:
+	for (n = 0; !IS_ERR(rq[n]); n++)
+		i915_request_put(rq[n]);
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_virtual_bond(void *arg)
+{
+	static const struct phase {
+		const char *name;
+		unsigned int flags;
+	} phases[] = {
+		{ "", 0 },
+		{ "schedule", BOND_SCHEDULE },
+		{ },
+	};
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	unsigned int class;
+	int err;
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		const struct phase *p;
+		int nsibling;
+
+		nsibling = select_siblings(gt, class, siblings);
+		if (nsibling < 2)
+			continue;
+
+		for (p = phases; p->name; p++) {
+			err = bond_virtual_engine(gt,
+						  class, siblings, nsibling,
+						  p->flags);
+			if (err) {
+				pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n",
+				       __func__, p->name, class, nsibling, err);
+				return err;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int reset_virtual_engine(struct intel_gt *gt,
+				struct intel_engine_cs **siblings,
+				unsigned int nsibling)
+{
+	struct intel_engine_cs *engine;
+	struct intel_context *ve;
+	struct igt_spinner spin;
+	struct i915_request *rq;
+	unsigned int n;
+	int err = 0;
+
+	/*
+	 * In order to support offline error capture for fast preempt reset,
+	 * we need to decouple the guilty request and ensure that it and its
+	 * descendents are not executed while the capture is in progress.
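+	 * Here we check that same hold/unhold flow when the guilty
+	 * request was submitted via a virtual engine.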
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	ve = intel_execlists_create_virtual(siblings, nsibling);
+	if (IS_ERR(ve)) {
+		err = PTR_ERR(ve);
+		goto out_spin;
+	}
+
+	for (n = 0; n < nsibling; n++)
+		st_engine_heartbeat_disable(siblings[n]);
+
+	rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_heartbeat;
+	}
+	i915_request_add(rq);
+
+	if (!igt_wait_for_spinner(&spin, rq)) {
+		intel_gt_set_wedged(gt);
+		err = -ETIME;
+		goto out_heartbeat;
+	}
+
+	engine = rq->engine;
+	GEM_BUG_ON(engine == ve->engine);
+
+	/* Take ownership of the reset and tasklet */
+	local_bh_disable();
+	if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+			     &gt->reset.flags)) {
+		local_bh_enable();
+		intel_gt_set_wedged(gt);
+		err = -EBUSY;
+		goto out_heartbeat;
+	}
+	tasklet_disable(&engine->execlists.tasklet);
+
+	engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+	GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+	/* Fake a preemption event; failed of course */
+	spin_lock_irq(&engine->active.lock);
+	__unwind_incomplete_requests(engine);
+	spin_unlock_irq(&engine->active.lock);
+	GEM_BUG_ON(rq->engine != engine);
+
+	/* Reset the engine while keeping our active request on hold */
+	execlists_hold(engine, rq);
+	GEM_BUG_ON(!i915_request_on_hold(rq));
+
+	__intel_engine_reset_bh(engine, NULL);
+	GEM_BUG_ON(rq->fence.error != -EIO);
+
+	/* Release our grasp on the engine, letting CS flow again */
+	tasklet_enable(&engine->execlists.tasklet);
+	clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
+	local_bh_enable();
+
+	/* Check that we do not resubmit the held request */
+	i915_request_get(rq);
+	if (!i915_request_wait(rq, 0, HZ / 5)) {
+		pr_err("%s: on hold request completed!\n",
+		       engine->name);
+		intel_gt_set_wedged(gt);
+		err = -EIO;
+		goto out_rq;
+	}
+	GEM_BUG_ON(!i915_request_on_hold(rq));
+
+	/* But is resubmitted on release */
+	execlists_unhold(engine, rq);
+	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+		pr_err("%s: held request did not complete!\n",
+		       engine->name);
+		intel_gt_set_wedged(gt);
+		err = -ETIME;
+	}
+
+out_rq:
+	i915_request_put(rq);
+out_heartbeat:
+	for (n = 0; n < nsibling; n++)
+		st_engine_heartbeat_enable(siblings[n]);
+
+	intel_context_put(ve);
+out_spin:
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_virtual_reset(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	unsigned int class;
+
+	/*
+	 * Check that we handle a reset event within a virtual engine.
+	 * Only the physical engine is reset, but we have to check the flow
+	 * of the virtual requests around the reset, and make sure it is not
+	 * forgotten.
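+	 * (This drives the execlists backend directly, so it is skipped
+	 * under GuC submission.)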
+	 */
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	if (!intel_has_reset_engine(gt))
+		return 0;
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		int nsibling, err;
+
+		nsibling = select_siblings(gt, class, siblings);
+		if (nsibling < 2)
+			continue;
+
+		err = reset_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+int intel_execlists_live_selftests(struct drm_i915_private *i915)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(live_sanitycheck),
+		SUBTEST(live_unlite_switch),
+		SUBTEST(live_unlite_preempt),
+		SUBTEST(live_unlite_ring),
+		SUBTEST(live_pin_rewind),
+		SUBTEST(live_hold_reset),
+		SUBTEST(live_error_interrupt),
+		SUBTEST(live_timeslice_preempt),
+		SUBTEST(live_timeslice_rewind),
+		SUBTEST(live_timeslice_queue),
+		SUBTEST(live_timeslice_nopreempt),
+		SUBTEST(live_busywait_preempt),
+		SUBTEST(live_preempt),
+		SUBTEST(live_late_preempt),
+		SUBTEST(live_nopreempt),
+		SUBTEST(live_preempt_cancel),
+		SUBTEST(live_suppress_self_preempt),
+		SUBTEST(live_chain_preempt),
+		SUBTEST(live_preempt_ring),
+		SUBTEST(live_preempt_gang),
+		SUBTEST(live_preempt_timeout),
+		SUBTEST(live_preempt_user),
+		SUBTEST(live_preempt_smoke),
+		SUBTEST(live_virtual_engine),
+		SUBTEST(live_virtual_mask),
+		SUBTEST(live_virtual_preserved),
+		SUBTEST(live_virtual_slice),
+		SUBTEST(live_virtual_bond),
+		SUBTEST(live_virtual_reset),
+	};
+
+	if (!HAS_EXECLISTS(i915))
+		return 0;
+
+	if (intel_gt_is_wedged(&i915->gt))
+		return 0;
+
+	return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
index 6180a47c1b51..5d911f724ebe 100644
--- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c
@@ -71,7 +71,7 @@ static int live_gt_clocks(void *arg)
 	enum intel_engine_id id;
 	int err = 0;
 
-	if (!RUNTIME_INFO(gt->i915)->cs_timestamp_frequency_hz) { /* unknown */
+	if (!gt->clock_frequency) { /* unknown */
 		pr_info("CS_TIMESTAMP frequency unknown\n");
 		return 0;
 	}
@@ -112,12 +112,12 @@ static int live_gt_clocks(void *arg)
 
 		measure_clocks(engine, &cycles, &dt);
 
-		time = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles);
-		expected = i915_cs_timestamp_ns_to_ticks(engine->i915, dt);
+		time = intel_gt_clock_interval_to_ns(engine->gt, cycles);
+		expected = intel_gt_ns_to_clock_interval(engine->gt, dt);
 
 		pr_info("%s: TIMESTAMP %d cycles [%lldns] in %lldns [%d cycles], using CS clock frequency of %uKHz\n",
 			engine->name, cycles, time, dt, expected,
-			RUNTIME_INFO(engine->i915)->cs_timestamp_frequency_hz / 1000);
+			engine->gt->clock_frequency / 1000);
 
 		if (9 * time < 8 * dt || 8 * time > 9 * dt) {
 			pr_err("%s: CS ticks did not match walltime!\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index fb5ebf930ab2..460c3e9542f4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -506,7 +506,8 @@ static int igt_reset_nop_engine(void *arg)
 			}
 			err = intel_engine_reset(engine, NULL);
 			if (err) {
-				pr_err("i915_reset_engine failed\n");
+				pr_err("intel_engine_reset(%s) failed, err:%d\n",
+				       engine->name, err);
 				break;
 			}
 
@@ -539,6 +540,149 @@ static int igt_reset_nop_engine(void *arg)
 	return 0;
 }
 
+static void force_reset_timeout(struct intel_engine_cs *engine)
+{
+	engine->reset_timeout.probability = 999;
+	atomic_set(&engine->reset_timeout.times, -1);
+}
+
+static void cancel_reset_timeout(struct intel_engine_cs *engine)
+{
+	memset(&engine->reset_timeout, 0, sizeof(engine->reset_timeout));
+}
+
+static int igt_reset_fail_engine(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	/* Check that we can recover from engine-reset failues */
+
+	if (!intel_has_reset_engine(gt))
+		return 0;
+
+	for_each_engine(engine, gt, id) {
+		unsigned int count;
+		struct intel_context *ce;
+		IGT_TIMEOUT(end_time);
+		int err;
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce))
+			return PTR_ERR(ce);
+
+		st_engine_heartbeat_disable(engine);
+		set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+
+		force_reset_timeout(engine);
+		err = intel_engine_reset(engine, NULL);
+		cancel_reset_timeout(engine);
+		if (err == 0) /* timeouts only generated on gen8+ */
+			goto skip;
+
+		count = 0;
+		do {
+			struct i915_request *last = NULL;
+			int i;
+
+			if (!wait_for_idle(engine)) {
+				pr_err("%s failed to idle before reset\n",
+				       engine->name);
+				err = -EIO;
+				break;
+			}
+
+			for (i = 0; i < count % 15; i++) {
+				struct i915_request *rq;
+
+				rq = intel_context_create_request(ce);
+				if (IS_ERR(rq)) {
+					struct drm_printer p =
+						drm_info_printer(gt->i915->drm.dev);
+					intel_engine_dump(engine, &p,
+							  "%s(%s): failed to submit request\n",
+							  __func__,
+							  engine->name);
+
+					GEM_TRACE("%s(%s): failed to submit request\n",
+						  __func__,
+						  engine->name);
+					GEM_TRACE_DUMP();
+
+					intel_gt_set_wedged(gt);
+					if (last)
+						i915_request_put(last);
+
+					err = PTR_ERR(rq);
+					goto out;
+				}
+
+				if (last)
+					i915_request_put(last);
+				last = i915_request_get(rq);
+				i915_request_add(rq);
+			}
+
+			if (count & 1) {
+				err = intel_engine_reset(engine, NULL);
+				if (err) {
+					GEM_TRACE_ERR("intel_engine_reset(%s) failed, err:%d\n",
+						      engine->name, err);
+					GEM_TRACE_DUMP();
+					i915_request_put(last);
+					break;
+				}
+			} else {
+				force_reset_timeout(engine);
+				err = intel_engine_reset(engine, NULL);
+				cancel_reset_timeout(engine);
+				if (err != -ETIMEDOUT) {
+					pr_err("intel_engine_reset(%s) did not fail, err:%d\n",
+					       engine->name, err);
+					i915_request_put(last);
+					break;
+				}
+			}
+
+			err = 0;
+			if (last) {
+				if (i915_request_wait(last, 0, HZ / 2) < 0) {
+					struct drm_printer p =
+						drm_info_printer(gt->i915->drm.dev);
+
+					intel_engine_dump(engine, &p,
+							  "%s(%s): failed to complete request\n",
+							  __func__,
+							  engine->name);
+
+					GEM_TRACE("%s(%s): failed to complete request\n",
+						  __func__,
+						  engine->name);
+					GEM_TRACE_DUMP();
+
+					err = -EIO;
+				}
+				i915_request_put(last);
+			}
+			count++;
+		} while (err == 0 && time_before(jiffies, end_time));
+out:
+		pr_info("%s(%s): %d resets\n", __func__, engine->name, count);
+skip:
+		clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
+		st_engine_heartbeat_enable(engine);
+		intel_context_put(ce);
+
+		if (igt_flush_test(gt->i915))
+			err = -EIO;
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int __igt_reset_engine(struct intel_gt *gt, bool active)
 {
 	struct i915_gpu_error *global = &gt->i915->gpu_error;
@@ -608,7 +752,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
 
 		err = intel_engine_reset(engine, NULL);
 		if (err) {
-			pr_err("i915_reset_engine failed\n");
+			pr_err("intel_engine_reset(%s) failed, err:%d\n",
+			       engine->name, err);
 			break;
 		}
 
@@ -1576,12 +1721,17 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
 		  engine->name, mode, p->name);
 
 	tasklet_disable(t);
+	if (strcmp(p->name, "softirq"))
+		local_bh_disable();
 	p->critical_section_begin();
 
-	err = intel_engine_reset(engine, NULL);
+	err = __intel_engine_reset_bh(engine, NULL);
 
 	p->critical_section_end();
+	if (strcmp(p->name, "softirq"))
+		local_bh_enable();
 	tasklet_enable(t);
+	tasklet_hi_schedule(t);
 
 	if (err)
 		pr_err("i915_reset_engine(%s:%s) failed under %s\n",
@@ -1687,6 +1837,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(igt_reset_nop_engine),
 		SUBTEST(igt_reset_idle_engine),
 		SUBTEST(igt_reset_active_engine),
+		SUBTEST(igt_reset_fail_engine),
 		SUBTEST(igt_reset_engines),
 		SUBTEST(igt_reset_engines_atomic),
 		SUBTEST(igt_reset_queue),
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 95d41c01d0e0..920979a89413 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -1,22 +1,22 @@
+// SPDX-License-Identifier: MIT
 /*
- * SPDX-License-Identifier: MIT
- *
  * Copyright © 2018 Intel Corporation
 */
 
 #include <linux/prime_numbers.h>
 
-#include "gem/i915_gem_pm.h"
-#include "gt/intel_engine_heartbeat.h"
-#include "gt/intel_reset.h"
-#include "gt/selftest_engine_heartbeat.h"
-
 #include "i915_selftest.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_reset.h"
+#include "intel_ring.h"
+#include "selftest_engine_heartbeat.h"
 #include "selftests/i915_random.h"
 #include "selftests/igt_flush_test.h"
 #include "selftests/igt_live_test.h"
 #include "selftests/igt_spinner.h"
 #include "selftests/lib_sw_fence.h"
+#include "shmem_utils.h"
 
 #include "gem/selftests/igt_gem_utils.h"
 #include "gem/selftests/mock_context.h"
@@ -27,29 +27,7 @@
 
 static struct i915_vma *create_scratch(struct intel_gt *gt)
 {
-	struct drm_i915_gem_object *obj;
-	struct i915_vma *vma;
-	int err;
-
-	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return ERR_CAST(obj);
-
-	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
-
-	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
-	if (IS_ERR(vma)) {
-		i915_gem_object_put(obj);
-		return vma;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
-	if (err) {
-		i915_gem_object_put(obj);
-		return ERR_PTR(err);
-	}
-
-	return vma;
+	return __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
 }
 
 static bool is_active(struct i915_request *rq)
@@ -70,6 +48,9 @@ static int wait_for_submit(struct intel_engine_cs *engine,
 			   struct i915_request *rq,
 			   unsigned long timeout)
 {
+	/* Ignore our own attempts to suppress excess tasklets */
+	tasklet_hi_schedule(&engine->execlists.tasklet);
+
 	timeout += jiffies;
 	do {
 		bool done = time_after(jiffies, timeout);
@@ -89,4623 +70,6 @@ static int wait_for_submit(struct intel_engine_cs *engine,
 	} while (1);
 }
 
-static int wait_for_reset(struct intel_engine_cs *engine,
-			  struct i915_request *rq,
-			  unsigned long timeout)
-{
-	timeout += jiffies;
-
-	do {
-		cond_resched();
-		intel_engine_flush_submission(engine);
-
-		if (READ_ONCE(engine->execlists.pending[0]))
-			continue;
-
-		if (i915_request_completed(rq))
-			break;
-
-		if (READ_ONCE(rq->fence.error))
-			break;
-	} while (time_before(jiffies, timeout));
-
-	flush_scheduled_work();
-
-	if (rq->fence.error != -EIO) {
-		pr_err("%s: hanging request %llx:%lld not reset\n",
-		       engine->name,
-		       rq->fence.context,
-		       rq->fence.seqno);
-		return -EINVAL;
-	}
-
-	/* Give the request a jiffie to complete after flushing the worker */
-	if (i915_request_wait(rq, 0,
-			      max(0l, (long)(timeout - jiffies)) + 1) < 0) {
-		pr_err("%s: hanging request %llx:%lld did not complete\n",
-		       engine->name,
-		       rq->fence.context,
-		       rq->fence.seqno);
-		return -ETIME;
-	}
-
-	return 0;
-}
-
-static int live_sanitycheck(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct 
intel_engine_cs *engine; - enum intel_engine_id id; - struct igt_spinner spin; - int err = 0; - - if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915)) - return 0; - - if (igt_spinner_init(&spin, gt)) - return -ENOMEM; - - for_each_engine(engine, gt, id) { - struct intel_context *ce; - struct i915_request *rq; - - ce = intel_context_create(engine); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - break; - } - - rq = igt_spinner_create_request(&spin, ce, MI_NOOP); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_ctx; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin, rq)) { - GEM_TRACE("spinner failed to start\n"); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(gt); - err = -EIO; - goto out_ctx; - } - - igt_spinner_end(&spin); - if (igt_flush_test(gt->i915)) { - err = -EIO; - goto out_ctx; - } - -out_ctx: - intel_context_put(ce); - if (err) - break; - } - - igt_spinner_fini(&spin); - return err; -} - -static int live_unlite_restore(struct intel_gt *gt, int prio) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - struct igt_spinner spin; - int err = -ENOMEM; - - /* - * Check that we can correctly context switch between 2 instances - * on the same engine from the same parent context. - */ - - if (igt_spinner_init(&spin, gt)) - return err; - - err = 0; - for_each_engine(engine, gt, id) { - struct intel_context *ce[2] = {}; - struct i915_request *rq[2]; - struct igt_live_test t; - int n; - - if (prio && !intel_engine_has_preemption(engine)) - continue; - - if (!intel_engine_can_store_dword(engine)) - continue; - - if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { - err = -EIO; - break; - } - st_engine_heartbeat_disable(engine); - - for (n = 0; n < ARRAY_SIZE(ce); n++) { - struct intel_context *tmp; - - tmp = intel_context_create(engine); - if (IS_ERR(tmp)) { - err = PTR_ERR(tmp); - goto err_ce; - } - - err = intel_context_pin(tmp); - if (err) { - intel_context_put(tmp); - goto err_ce; - } - - /* - * Setup the pair of contexts such that if we - * lite-restore using the RING_TAIL from ce[1] it - * will execute garbage from ce[0]->ring. - */ - memset(tmp->ring->vaddr, - POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */ - tmp->ring->vma->size); - - ce[n] = tmp; - } - GEM_BUG_ON(!ce[1]->ring->size); - intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2); - __execlists_update_reg_state(ce[1], engine, ce[1]->ring->head); - - rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); - if (IS_ERR(rq[0])) { - err = PTR_ERR(rq[0]); - goto err_ce; - } - - i915_request_get(rq[0]); - i915_request_add(rq[0]); - GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit); - - if (!igt_wait_for_spinner(&spin, rq[0])) { - i915_request_put(rq[0]); - goto err_ce; - } - - rq[1] = i915_request_create(ce[1]); - if (IS_ERR(rq[1])) { - err = PTR_ERR(rq[1]); - i915_request_put(rq[0]); - goto err_ce; - } - - if (!prio) { - /* - * Ensure we do the switch to ce[1] on completion. - * - * rq[0] is already submitted, so this should reduce - * to a no-op (a wait on a request on the same engine - * uses the submit fence, not the completion fence), - * but it will install a dependency on rq[1] for rq[0] - * that will prevent the pair being reordered by - * timeslicing. 
- */ - i915_request_await_dma_fence(rq[1], &rq[0]->fence); - } - - i915_request_get(rq[1]); - i915_request_add(rq[1]); - GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix); - i915_request_put(rq[0]); - - if (prio) { - struct i915_sched_attr attr = { - .priority = prio, - }; - - /* Alternatively preempt the spinner with ce[1] */ - engine->schedule(rq[1], &attr); - } - - /* And switch back to ce[0] for good measure */ - rq[0] = i915_request_create(ce[0]); - if (IS_ERR(rq[0])) { - err = PTR_ERR(rq[0]); - i915_request_put(rq[1]); - goto err_ce; - } - - i915_request_await_dma_fence(rq[0], &rq[1]->fence); - i915_request_get(rq[0]); - i915_request_add(rq[0]); - GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix); - i915_request_put(rq[1]); - i915_request_put(rq[0]); - -err_ce: - intel_engine_flush_submission(engine); - igt_spinner_end(&spin); - for (n = 0; n < ARRAY_SIZE(ce); n++) { - if (IS_ERR_OR_NULL(ce[n])) - break; - - intel_context_unpin(ce[n]); - intel_context_put(ce[n]); - } - - st_engine_heartbeat_enable(engine); - if (igt_live_test_end(&t)) - err = -EIO; - if (err) - break; - } - - igt_spinner_fini(&spin); - return err; -} - -static int live_unlite_switch(void *arg) -{ - return live_unlite_restore(arg, 0); -} - -static int live_unlite_preempt(void *arg) -{ - return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX)); -} - -static int live_unlite_ring(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *engine; - struct igt_spinner spin; - enum intel_engine_id id; - int err = 0; - - /* - * Setup a preemption event that will cause almost the entire ring - * to be unwound, potentially fooling our intel_ring_direction() - * into emitting a forward lite-restore instead of the rollback. - */ - - if (igt_spinner_init(&spin, gt)) - return -ENOMEM; - - for_each_engine(engine, gt, id) { - struct intel_context *ce[2] = {}; - struct i915_request *rq; - struct igt_live_test t; - int n; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (!intel_engine_can_store_dword(engine)) - continue; - - if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { - err = -EIO; - break; - } - st_engine_heartbeat_disable(engine); - - for (n = 0; n < ARRAY_SIZE(ce); n++) { - struct intel_context *tmp; - - tmp = intel_context_create(engine); - if (IS_ERR(tmp)) { - err = PTR_ERR(tmp); - goto err_ce; - } - - err = intel_context_pin(tmp); - if (err) { - intel_context_put(tmp); - goto err_ce; - } - - memset32(tmp->ring->vaddr, - 0xdeadbeef, /* trigger a hang if executed */ - tmp->ring->vma->size / sizeof(u32)); - - ce[n] = tmp; - } - - /* Create max prio spinner, followed by N low prio nops */ - rq = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ce; - } - - i915_request_get(rq); - rq->sched.attr.priority = I915_PRIORITY_BARRIER; - i915_request_add(rq); - - if (!igt_wait_for_spinner(&spin, rq)) { - intel_gt_set_wedged(gt); - i915_request_put(rq); - err = -ETIME; - goto err_ce; - } - - /* Fill the ring, until we will cause a wrap */ - n = 0; - while (intel_ring_direction(ce[0]->ring, - rq->wa_tail, - ce[0]->ring->tail) <= 0) { - struct i915_request *tmp; - - tmp = intel_context_create_request(ce[0]); - if (IS_ERR(tmp)) { - err = PTR_ERR(tmp); - i915_request_put(rq); - goto err_ce; - } - - i915_request_add(tmp); - intel_engine_flush_submission(engine); - n++; - } - intel_engine_flush_submission(engine); - pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n", - engine->name, n, - ce[0]->ring->size, 
- ce[0]->ring->tail, - ce[0]->ring->emit, - rq->tail); - GEM_BUG_ON(intel_ring_direction(ce[0]->ring, - rq->tail, - ce[0]->ring->tail) <= 0); - i915_request_put(rq); - - /* Create a second ring to preempt the first ring after rq[0] */ - rq = intel_context_create_request(ce[1]); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ce; - } - - rq->sched.attr.priority = I915_PRIORITY_BARRIER; - i915_request_get(rq); - i915_request_add(rq); - - err = wait_for_submit(engine, rq, HZ / 2); - i915_request_put(rq); - if (err) { - pr_err("%s: preemption request was not submitted\n", - engine->name); - err = -ETIME; - } - - pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n", - engine->name, - ce[0]->ring->tail, ce[0]->ring->emit, - ce[1]->ring->tail, ce[1]->ring->emit); - -err_ce: - intel_engine_flush_submission(engine); - igt_spinner_end(&spin); - for (n = 0; n < ARRAY_SIZE(ce); n++) { - if (IS_ERR_OR_NULL(ce[n])) - break; - - intel_context_unpin(ce[n]); - intel_context_put(ce[n]); - } - st_engine_heartbeat_enable(engine); - if (igt_live_test_end(&t)) - err = -EIO; - if (err) - break; - } - - igt_spinner_fini(&spin); - return err; -} - -static int live_pin_rewind(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err = 0; - - /* - * We have to be careful not to trust intel_ring too much, for example - * ring->head is updated upon retire which is out of sync with pinning - * the context. Thus we cannot use ring->head to set CTX_RING_HEAD, - * or else we risk writing an older, stale value. - * - * To simulate this, let's apply a bit of deliberate sabotague. - */ - - for_each_engine(engine, gt, id) { - struct intel_context *ce; - struct i915_request *rq; - struct intel_ring *ring; - struct igt_live_test t; - - if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { - err = -EIO; - break; - } - - ce = intel_context_create(engine); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - break; - } - - err = intel_context_pin(ce); - if (err) { - intel_context_put(ce); - break; - } - - /* Keep the context awake while we play games */ - err = i915_active_acquire(&ce->active); - if (err) { - intel_context_unpin(ce); - intel_context_put(ce); - break; - } - ring = ce->ring; - - /* Poison the ring, and offset the next request from HEAD */ - memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32)); - ring->emit = ring->size / 2; - ring->tail = ring->emit; - GEM_BUG_ON(ring->head); - - intel_context_unpin(ce); - - /* Submit a simple nop request */ - GEM_BUG_ON(intel_context_is_pinned(ce)); - rq = intel_context_create_request(ce); - i915_active_release(&ce->active); /* e.g. async retire */ - intel_context_put(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - break; - } - GEM_BUG_ON(!rq->head); - i915_request_add(rq); - - /* Expect not to hang! */ - if (igt_live_test_end(&t)) { - err = -EIO; - break; - } - } - - return err; -} - -static int live_hold_reset(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *engine; - enum intel_engine_id id; - struct igt_spinner spin; - int err = 0; - - /* - * In order to support offline error capture for fast preempt reset, - * we need to decouple the guilty request and ensure that it and its - * descendents are not executed while the capture is in progress. 
-	 */
-
-	if (!intel_has_reset_engine(gt))
-		return 0;
-
-	if (igt_spinner_init(&spin, gt))
-		return -ENOMEM;
-
-	for_each_engine(engine, gt, id) {
-		struct intel_context *ce;
-		struct i915_request *rq;
-
-		ce = intel_context_create(engine);
-		if (IS_ERR(ce)) {
-			err = PTR_ERR(ce);
-			break;
-		}
-
-		st_engine_heartbeat_disable(engine);
-
-		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
-		if (IS_ERR(rq)) {
-			err = PTR_ERR(rq);
-			goto out;
-		}
-		i915_request_add(rq);
-
-		if (!igt_wait_for_spinner(&spin, rq)) {
-			intel_gt_set_wedged(gt);
-			err = -ETIME;
-			goto out;
-		}
-
-		/* We have our request executing, now remove it and reset */
-
-		if (test_and_set_bit(I915_RESET_ENGINE + id,
-				     &gt->reset.flags)) {
-			intel_gt_set_wedged(gt);
-			err = -EBUSY;
-			goto out;
-		}
-		tasklet_disable(&engine->execlists.tasklet);
-
-		engine->execlists.tasklet.func(engine->execlists.tasklet.data);
-		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
-
-		i915_request_get(rq);
-		execlists_hold(engine, rq);
-		GEM_BUG_ON(!i915_request_on_hold(rq));
-
-		intel_engine_reset(engine, NULL);
-		GEM_BUG_ON(rq->fence.error != -EIO);
-
-		tasklet_enable(&engine->execlists.tasklet);
-		clear_and_wake_up_bit(I915_RESET_ENGINE + id,
-				      &gt->reset.flags);
-
-		/* Check that we do not resubmit the held request */
-		if (!i915_request_wait(rq, 0, HZ / 5)) {
-			pr_err("%s: on hold request completed!\n",
-			       engine->name);
-			i915_request_put(rq);
-			err = -EIO;
-			goto out;
-		}
-		GEM_BUG_ON(!i915_request_on_hold(rq));
-
-		/* But is resubmitted on release */
-		execlists_unhold(engine, rq);
-		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
-			pr_err("%s: held request did not complete!\n",
-			       engine->name);
-			intel_gt_set_wedged(gt);
-			err = -ETIME;
-		}
-		i915_request_put(rq);
-
-out:
-		st_engine_heartbeat_enable(engine);
-		intel_context_put(ce);
-		if (err)
-			break;
-	}
-
-	igt_spinner_fini(&spin);
-	return err;
-}
-
-static const char *error_repr(int err)
-{
-	return err ? "bad" : "good";
-}
-
-static int live_error_interrupt(void *arg)
-{
-	static const struct error_phase {
-		enum { GOOD = 0, BAD = -EIO } error[2];
-	} phases[] = {
-		{ { BAD, GOOD } },
-		{ { BAD, BAD } },
-		{ { BAD, GOOD } },
-		{ { GOOD, GOOD } }, /* sentinel */
-	};
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	/*
-	 * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
-	 * of invalid commands in user batches that will cause a GPU hang.
-	 * This is a faster mechanism than using hangcheck/heartbeats, but
-	 * only detects problems the HW knows about -- it will not warn when
-	 * we kill the HW!
-	 *
-	 * To verify our detection and reset, we throw some invalid commands
-	 * at the HW and wait for the interrupt.
- */ - - if (!intel_has_reset_engine(gt)) - return 0; - - for_each_engine(engine, gt, id) { - const struct error_phase *p; - int err = 0; - - st_engine_heartbeat_disable(engine); - - for (p = phases; p->error[0] != GOOD; p++) { - struct i915_request *client[ARRAY_SIZE(phases->error)]; - u32 *cs; - int i; - - memset(client, 0, sizeof(*client)); - for (i = 0; i < ARRAY_SIZE(client); i++) { - struct intel_context *ce; - struct i915_request *rq; - - ce = intel_context_create(engine); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto out; - } - - rq = intel_context_create_request(ce); - intel_context_put(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out; - } - - if (rq->engine->emit_init_breadcrumb) { - err = rq->engine->emit_init_breadcrumb(rq); - if (err) { - i915_request_add(rq); - goto out; - } - } - - cs = intel_ring_begin(rq, 2); - if (IS_ERR(cs)) { - i915_request_add(rq); - err = PTR_ERR(cs); - goto out; - } - - if (p->error[i]) { - *cs++ = 0xdeadbeef; - *cs++ = 0xdeadbeef; - } else { - *cs++ = MI_NOOP; - *cs++ = MI_NOOP; - } - - client[i] = i915_request_get(rq); - i915_request_add(rq); - } - - err = wait_for_submit(engine, client[0], HZ / 2); - if (err) { - pr_err("%s: first request did not start within time!\n", - engine->name); - err = -ETIME; - goto out; - } - - for (i = 0; i < ARRAY_SIZE(client); i++) { - if (i915_request_wait(client[i], 0, HZ / 5) < 0) - pr_debug("%s: %s request incomplete!\n", - engine->name, - error_repr(p->error[i])); - - if (!i915_request_started(client[i])) { - pr_err("%s: %s request not started!\n", - engine->name, - error_repr(p->error[i])); - err = -ETIME; - goto out; - } - - /* Kick the tasklet to process the error */ - intel_engine_flush_submission(engine); - if (client[i]->fence.error != p->error[i]) { - pr_err("%s: %s request (%s) with wrong error code: %d\n", - engine->name, - error_repr(p->error[i]), - i915_request_completed(client[i]) ? 
"completed" : "running", - client[i]->fence.error); - err = -EINVAL; - goto out; - } - } - -out: - for (i = 0; i < ARRAY_SIZE(client); i++) - if (client[i]) - i915_request_put(client[i]); - if (err) { - pr_err("%s: failed at phase[%zd] { %d, %d }\n", - engine->name, p - phases, - p->error[0], p->error[1]); - break; - } - } - - st_engine_heartbeat_enable(engine); - if (err) { - intel_gt_set_wedged(gt); - return err; - } - } - - return 0; -} - -static int -emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) -{ - u32 *cs; - - cs = intel_ring_begin(rq, 10); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; - - *cs++ = MI_SEMAPHORE_WAIT | - MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_POLL | - MI_SEMAPHORE_SAD_NEQ_SDD; - *cs++ = 0; - *cs++ = i915_ggtt_offset(vma) + 4 * idx; - *cs++ = 0; - - if (idx > 0) { - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); - *cs++ = 0; - *cs++ = 1; - } else { - *cs++ = MI_NOOP; - *cs++ = MI_NOOP; - *cs++ = MI_NOOP; - *cs++ = MI_NOOP; - } - - *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; - - intel_ring_advance(rq, cs); - return 0; -} - -static struct i915_request * -semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx) -{ - struct intel_context *ce; - struct i915_request *rq; - int err; - - ce = intel_context_create(engine); - if (IS_ERR(ce)) - return ERR_CAST(ce); - - rq = intel_context_create_request(ce); - if (IS_ERR(rq)) - goto out_ce; - - err = 0; - if (rq->engine->emit_init_breadcrumb) - err = rq->engine->emit_init_breadcrumb(rq); - if (err == 0) - err = emit_semaphore_chain(rq, vma, idx); - if (err == 0) - i915_request_get(rq); - i915_request_add(rq); - if (err) - rq = ERR_PTR(err); - -out_ce: - intel_context_put(ce); - return rq; -} - -static int -release_queue(struct intel_engine_cs *engine, - struct i915_vma *vma, - int idx, int prio) -{ - struct i915_sched_attr attr = { - .priority = prio, - }; - struct i915_request *rq; - u32 *cs; - - rq = intel_engine_create_kernel_request(engine); - if (IS_ERR(rq)) - return PTR_ERR(rq); - - cs = intel_ring_begin(rq, 4); - if (IS_ERR(cs)) { - i915_request_add(rq); - return PTR_ERR(cs); - } - - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = i915_ggtt_offset(vma) + 4 * (idx - 1); - *cs++ = 0; - *cs++ = 1; - - intel_ring_advance(rq, cs); - - i915_request_get(rq); - i915_request_add(rq); - - local_bh_disable(); - engine->schedule(rq, &attr); - local_bh_enable(); /* kick tasklet */ - - i915_request_put(rq); - - return 0; -} - -static int -slice_semaphore_queue(struct intel_engine_cs *outer, - struct i915_vma *vma, - int count) -{ - struct intel_engine_cs *engine; - struct i915_request *head; - enum intel_engine_id id; - int err, i, n = 0; - - head = semaphore_queue(outer, vma, n++); - if (IS_ERR(head)) - return PTR_ERR(head); - - for_each_engine(engine, outer->gt, id) { - for (i = 0; i < count; i++) { - struct i915_request *rq; - - rq = semaphore_queue(engine, vma, n++); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out; - } - - i915_request_put(rq); - } - } - - err = release_queue(outer, vma, n, I915_PRIORITY_BARRIER); - if (err) - goto out; - - if (i915_request_wait(head, 0, - 2 * outer->gt->info.num_engines * (count + 2) * (count + 3)) < 0) { - pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n", - count, n); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(outer->gt); - err = -EIO; - } - -out: - i915_request_put(head); - return err; -} - -static int live_timeslice_preempt(void 
*arg)
-{
-	struct intel_gt *gt = arg;
-	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	struct i915_vma *vma;
-	void *vaddr;
-	int err = 0;
-
-	/*
-	 * If a request takes too long, we would like to give other users
-	 * a fair go on the GPU. In particular, users may create batches
-	 * that wait upon external input, where that input may even be
-	 * supplied by another GPU job. To avoid blocking forever, we
-	 * need to preempt the current task and replace it with another
-	 * ready task.
-	 */
-	if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
-		return 0;
-
-	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
-
-	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
-	if (IS_ERR(vma)) {
-		err = PTR_ERR(vma);
-		goto err_obj;
-	}
-
-	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
-	if (IS_ERR(vaddr)) {
-		err = PTR_ERR(vaddr);
-		goto err_obj;
-	}
-
-	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
-	if (err)
-		goto err_map;
-
-	err = i915_vma_sync(vma);
-	if (err)
-		goto err_pin;
-
-	for_each_engine(engine, gt, id) {
-		if (!intel_engine_has_preemption(engine))
-			continue;
-
-		memset(vaddr, 0, PAGE_SIZE);
-
-		st_engine_heartbeat_disable(engine);
-		err = slice_semaphore_queue(engine, vma, 5);
-		st_engine_heartbeat_enable(engine);
-		if (err)
-			goto err_pin;
-
-		if (igt_flush_test(gt->i915)) {
-			err = -EIO;
-			goto err_pin;
-		}
-	}
-
-err_pin:
-	i915_vma_unpin(vma);
-err_map:
-	i915_gem_object_unpin_map(obj);
-err_obj:
-	i915_gem_object_put(obj);
-	return err;
-}
-
-static struct i915_request *
-create_rewinder(struct intel_context *ce,
-		struct i915_request *wait,
-		void *slot, int idx)
-{
-	const u32 offset =
-		i915_ggtt_offset(ce->engine->status_page.vma) +
-		offset_in_page(slot);
-	struct i915_request *rq;
-	u32 *cs;
-	int err;
-
-	rq = intel_context_create_request(ce);
-	if (IS_ERR(rq))
-		return rq;
-
-	if (wait) {
-		err = i915_request_await_dma_fence(rq, &wait->fence);
-		if (err)
-			goto err;
-	}
-
-	cs = intel_ring_begin(rq, 14);
-	if (IS_ERR(cs)) {
-		err = PTR_ERR(cs);
-		goto err;
-	}
-
-	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
-	*cs++ = MI_NOOP;
-
-	*cs++ = MI_SEMAPHORE_WAIT |
-		MI_SEMAPHORE_GLOBAL_GTT |
-		MI_SEMAPHORE_POLL |
-		MI_SEMAPHORE_SAD_GTE_SDD;
-	*cs++ = idx;
-	*cs++ = offset;
-	*cs++ = 0;
-
-	*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
-	*cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
-	*cs++ = offset + idx * sizeof(u32);
-	*cs++ = 0;
-
-	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
-	*cs++ = offset;
-	*cs++ = 0;
-	*cs++ = idx + 1;
-
-	intel_ring_advance(rq, cs);
-
-	rq->sched.attr.priority = I915_PRIORITY_MASK;
-	err = 0;
-err:
-	i915_request_get(rq);
-	i915_request_add(rq);
-	if (err) {
-		i915_request_put(rq);
-		return ERR_PTR(err);
-	}
-
-	return rq;
-}
-
-static int live_timeslice_rewind(void *arg)
-{
-	struct intel_gt *gt = arg;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	/*
-	 * The usual presumption on timeslice expiration is that we replace
-	 * the active context with another. However, given a chain of
-	 * dependencies we may end up with replacing the context with itself,
-	 * but only a few of those requests, forcing us to rewind the
-	 * RING_TAIL of the original request.
- */ - if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) - return 0; - - for_each_engine(engine, gt, id) { - enum { A1, A2, B1 }; - enum { X = 1, Z, Y }; - struct i915_request *rq[3] = {}; - struct intel_context *ce; - unsigned long timeslice; - int i, err = 0; - u32 *slot; - - if (!intel_engine_has_timeslices(engine)) - continue; - - /* - * A:rq1 -- semaphore wait, timestamp X - * A:rq2 -- write timestamp Y - * - * B:rq1 [await A:rq1] -- write timestamp Z - * - * Force timeslice, release semaphore. - * - * Expect execution/evaluation order XZY - */ - - st_engine_heartbeat_disable(engine); - timeslice = xchg(&engine->props.timeslice_duration_ms, 1); - - slot = memset32(engine->status_page.addr + 1000, 0, 4); - - ce = intel_context_create(engine); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto err; - } - - rq[A1] = create_rewinder(ce, NULL, slot, X); - if (IS_ERR(rq[A1])) { - intel_context_put(ce); - goto err; - } - - rq[A2] = create_rewinder(ce, NULL, slot, Y); - intel_context_put(ce); - if (IS_ERR(rq[A2])) - goto err; - - err = wait_for_submit(engine, rq[A2], HZ / 2); - if (err) { - pr_err("%s: failed to submit first context\n", - engine->name); - goto err; - } - - ce = intel_context_create(engine); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto err; - } - - rq[B1] = create_rewinder(ce, rq[A1], slot, Z); - intel_context_put(ce); - if (IS_ERR(rq[2])) - goto err; - - err = wait_for_submit(engine, rq[B1], HZ / 2); - if (err) { - pr_err("%s: failed to submit second context\n", - engine->name); - goto err; - } - - /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */ - ENGINE_TRACE(engine, "forcing tasklet for rewind\n"); - if (i915_request_is_active(rq[A2])) { /* semaphore yielded! */ - /* Wait for the timeslice to kick in */ - del_timer(&engine->execlists.timer); - tasklet_hi_schedule(&engine->execlists.tasklet); - intel_engine_flush_submission(engine); - } - /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */ - GEM_BUG_ON(!i915_request_is_active(rq[A1])); - GEM_BUG_ON(!i915_request_is_active(rq[B1])); - GEM_BUG_ON(i915_request_is_active(rq[A2])); - - /* Release the hounds! 
*/ - slot[0] = 1; - wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */ - - for (i = 1; i <= 3; i++) { - unsigned long timeout = jiffies + HZ / 2; - - while (!READ_ONCE(slot[i]) && - time_before(jiffies, timeout)) - ; - - if (!time_before(jiffies, timeout)) { - pr_err("%s: rq[%d] timed out\n", - engine->name, i - 1); - err = -ETIME; - goto err; - } - - pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]); - } - - /* XZY: XZ < XY */ - if (slot[Z] - slot[X] >= slot[Y] - slot[X]) { - pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n", - engine->name, - slot[Z] - slot[X], - slot[Y] - slot[X]); - err = -EINVAL; - } - -err: - memset32(&slot[0], -1, 4); - wmb(); - - engine->props.timeslice_duration_ms = timeslice; - st_engine_heartbeat_enable(engine); - for (i = 0; i < 3; i++) - i915_request_put(rq[i]); - if (igt_flush_test(gt->i915)) - err = -EIO; - if (err) - return err; - } - - return 0; -} - -static struct i915_request *nop_request(struct intel_engine_cs *engine) -{ - struct i915_request *rq; - - rq = intel_engine_create_kernel_request(engine); - if (IS_ERR(rq)) - return rq; - - i915_request_get(rq); - i915_request_add(rq); - - return rq; -} - -static long slice_timeout(struct intel_engine_cs *engine) -{ - long timeout; - - /* Enough time for a timeslice to kick in, and kick out */ - timeout = 2 * msecs_to_jiffies_timeout(timeslice(engine)); - - /* Enough time for the nop request to complete */ - timeout += HZ / 5; - - return timeout + 1; -} - -static int live_timeslice_queue(void *arg) -{ - struct intel_gt *gt = arg; - struct drm_i915_gem_object *obj; - struct intel_engine_cs *engine; - enum intel_engine_id id; - struct i915_vma *vma; - void *vaddr; - int err = 0; - - /* - * Make sure that even if ELSP[0] and ELSP[1] are filled with - * timeslicing between them disabled, we *do* enable timeslicing - * if the queue demands it. (Normally, we do not submit if - * ELSP[1] is already occupied, so must rely on timeslicing to - * eject ELSP[0] in favour of the queue.) 
- */ - if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) - return 0; - - obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); - if (IS_ERR(obj)) - return PTR_ERR(obj); - - vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_obj; - } - - vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); - if (IS_ERR(vaddr)) { - err = PTR_ERR(vaddr); - goto err_obj; - } - - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); - if (err) - goto err_map; - - err = i915_vma_sync(vma); - if (err) - goto err_pin; - - for_each_engine(engine, gt, id) { - struct i915_sched_attr attr = { - .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), - }; - struct i915_request *rq, *nop; - - if (!intel_engine_has_preemption(engine)) - continue; - - st_engine_heartbeat_disable(engine); - memset(vaddr, 0, PAGE_SIZE); - - /* ELSP[0]: semaphore wait */ - rq = semaphore_queue(engine, vma, 0); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_heartbeat; - } - engine->schedule(rq, &attr); - err = wait_for_submit(engine, rq, HZ / 2); - if (err) { - pr_err("%s: Timed out trying to submit semaphores\n", - engine->name); - goto err_rq; - } - - /* ELSP[1]: nop request */ - nop = nop_request(engine); - if (IS_ERR(nop)) { - err = PTR_ERR(nop); - goto err_rq; - } - err = wait_for_submit(engine, nop, HZ / 2); - i915_request_put(nop); - if (err) { - pr_err("%s: Timed out trying to submit nop\n", - engine->name); - goto err_rq; - } - - GEM_BUG_ON(i915_request_completed(rq)); - GEM_BUG_ON(execlists_active(&engine->execlists) != rq); - - /* Queue: semaphore signal, matching priority as semaphore */ - err = release_queue(engine, vma, 1, effective_prio(rq)); - if (err) - goto err_rq; - - /* Wait until we ack the release_queue and start timeslicing */ - do { - cond_resched(); - intel_engine_flush_submission(engine); - } while (READ_ONCE(engine->execlists.pending[0])); - - /* Timeslice every jiffy, so within 2 we should signal */ - if (i915_request_wait(rq, 0, slice_timeout(engine)) < 0) { - struct drm_printer p = - drm_info_printer(gt->i915->drm.dev); - - pr_err("%s: Failed to timeslice into queue\n", - engine->name); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - memset(vaddr, 0xff, PAGE_SIZE); - err = -EIO; - } -err_rq: - i915_request_put(rq); -err_heartbeat: - st_engine_heartbeat_enable(engine); - if (err) - break; - } - -err_pin: - i915_vma_unpin(vma); -err_map: - i915_gem_object_unpin_map(obj); -err_obj: - i915_gem_object_put(obj); - return err; -} - -static int live_timeslice_nopreempt(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *engine; - enum intel_engine_id id; - struct igt_spinner spin; - int err = 0; - - /* - * We should not timeslice into a request that is marked with - * I915_REQUEST_NOPREEMPT.
- */ - if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) - return 0; - - if (igt_spinner_init(&spin, gt)) - return -ENOMEM; - - for_each_engine(engine, gt, id) { - struct intel_context *ce; - struct i915_request *rq; - unsigned long timeslice; - - if (!intel_engine_has_preemption(engine)) - continue; - - ce = intel_context_create(engine); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - break; - } - - st_engine_heartbeat_disable(engine); - timeslice = xchg(&engine->props.timeslice_duration_ms, 1); - - /* Create an unpreemptible spinner */ - - rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); - intel_context_put(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_heartbeat; - } - - i915_request_get(rq); - i915_request_add(rq); - - if (!igt_wait_for_spinner(&spin, rq)) { - i915_request_put(rq); - err = -ETIME; - goto out_spin; - } - - set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags); - i915_request_put(rq); - - /* Followed by a maximum priority barrier (heartbeat) */ - - ce = intel_context_create(engine); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto out_spin; - } - - rq = intel_context_create_request(ce); - intel_context_put(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_spin; - } - - rq->sched.attr.priority = I915_PRIORITY_BARRIER; - i915_request_get(rq); - i915_request_add(rq); - - /* - * Wait until the barrier is in ELSP, and we know timeslicing - * will have been activated. - */ - if (wait_for_submit(engine, rq, HZ / 2)) { - i915_request_put(rq); - err = -ETIME; - goto out_spin; - } - - /* - * Since the ELSP[0] request is unpreemptible, it should not - * allow the maximum priority barrier through. Wait long - * enough to see if it is timesliced in by mistake. - */ - if (i915_request_wait(rq, 0, slice_timeout(engine)) >= 0) { - pr_err("%s: I915_PRIORITY_BARRIER request completed, bypassing no-preempt request\n", - engine->name); - err = -EINVAL; - } - i915_request_put(rq); - -out_spin: - igt_spinner_end(&spin); -out_heartbeat: - xchg(&engine->props.timeslice_duration_ms, timeslice); - st_engine_heartbeat_enable(engine); - if (err) - break; - - if (igt_flush_test(gt->i915)) { - err = -EIO; - break; - } - } - - igt_spinner_fini(&spin); - return err; -} - -static int live_busywait_preempt(void *arg) -{ - struct intel_gt *gt = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct intel_engine_cs *engine; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - enum intel_engine_id id; - int err = -ENOMEM; - u32 *map; - - /* - * Verify that even without HAS_LOGICAL_RING_PREEMPTION, we can - * preempt the busywaits used to synchronise between rings. 
- */ - - ctx_hi = kernel_context(gt->i915); - if (!ctx_hi) - return -ENOMEM; - ctx_hi->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); - - ctx_lo = kernel_context(gt->i915); - if (!ctx_lo) - goto err_ctx_hi; - ctx_lo->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); - - obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto err_ctx_lo; - } - - map = i915_gem_object_pin_map(obj, I915_MAP_WC); - if (IS_ERR(map)) { - err = PTR_ERR(map); - goto err_obj; - } - - vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_map; - } - - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); - if (err) - goto err_map; - - err = i915_vma_sync(vma); - if (err) - goto err_vma; - - for_each_engine(engine, gt, id) { - struct i915_request *lo, *hi; - struct igt_live_test t; - u32 *cs; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (!intel_engine_can_store_dword(engine)) - continue; - - if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { - err = -EIO; - goto err_vma; - } - - /* - * We create two requests. The low priority request - * busywaits on a semaphore (inside the ringbuffer where - * it should be preemptible) and the high priority request - * uses a MI_STORE_DWORD_IMM to update the semaphore value - * allowing the first request to complete. If preemption - * fails, we hang instead. - */ - - lo = igt_request_alloc(ctx_lo, engine); - if (IS_ERR(lo)) { - err = PTR_ERR(lo); - goto err_vma; - } - - cs = intel_ring_begin(lo, 8); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - i915_request_add(lo); - goto err_vma; - } - - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = i915_ggtt_offset(vma); - *cs++ = 0; - *cs++ = 1; - - /* XXX Do we need a flush + invalidate here?
*/ - - *cs++ = MI_SEMAPHORE_WAIT | - MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_POLL | - MI_SEMAPHORE_SAD_EQ_SDD; - *cs++ = 0; - *cs++ = i915_ggtt_offset(vma); - *cs++ = 0; - - intel_ring_advance(lo, cs); - - i915_request_get(lo); - i915_request_add(lo); - - if (wait_for(READ_ONCE(*map), 10)) { - i915_request_put(lo); - err = -ETIMEDOUT; - goto err_vma; - } - - /* Low priority request should be busywaiting now */ - if (i915_request_wait(lo, 0, 1) != -ETIME) { - i915_request_put(lo); - pr_err("%s: Busywaiting request did not!\n", - engine->name); - err = -EIO; - goto err_vma; - } - - hi = igt_request_alloc(ctx_hi, engine); - if (IS_ERR(hi)) { - err = PTR_ERR(hi); - i915_request_put(lo); - goto err_vma; - } - - cs = intel_ring_begin(hi, 4); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - i915_request_add(hi); - i915_request_put(lo); - goto err_vma; - } - - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = i915_ggtt_offset(vma); - *cs++ = 0; - *cs++ = 0; - - intel_ring_advance(hi, cs); - i915_request_add(hi); - - if (i915_request_wait(lo, 0, HZ / 5) < 0) { - struct drm_printer p = drm_info_printer(gt->i915->drm.dev); - - pr_err("%s: Failed to preempt semaphore busywait!\n", - engine->name); - - intel_engine_dump(engine, &p, "%s\n", engine->name); - GEM_TRACE_DUMP(); - - i915_request_put(lo); - intel_gt_set_wedged(gt); - err = -EIO; - goto err_vma; - } - GEM_BUG_ON(READ_ONCE(*map)); - i915_request_put(lo); - - if (igt_live_test_end(&t)) { - err = -EIO; - goto err_vma; - } - } - - err = 0; -err_vma: - i915_vma_unpin(vma); -err_map: - i915_gem_object_unpin_map(obj); -err_obj: - i915_gem_object_put(obj); -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); - return err; -} - -static struct i915_request * -spinner_create_request(struct igt_spinner *spin, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - u32 arb) -{ - struct intel_context *ce; - struct i915_request *rq; - - ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); - if (IS_ERR(ce)) - return ERR_CAST(ce); - - rq = igt_spinner_create_request(spin, ce, arb); - intel_context_put(ce); - return rq; -} - -static int live_preempt(void *arg) -{ - struct intel_gt *gt = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct igt_spinner spin_hi, spin_lo; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err = -ENOMEM; - - if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) - return 0; - - if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) - pr_err("Logical preemption supported, but not exposed\n"); - - if (igt_spinner_init(&spin_hi, gt)) - return -ENOMEM; - - if (igt_spinner_init(&spin_lo, gt)) - goto err_spin_hi; - - ctx_hi = kernel_context(gt->i915); - if (!ctx_hi) - goto err_spin_lo; - ctx_hi->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); - - ctx_lo = kernel_context(gt->i915); - if (!ctx_lo) - goto err_ctx_hi; - ctx_lo->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); - - for_each_engine(engine, gt, id) { - struct igt_live_test t; - struct i915_request *rq; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { - err = -EIO; - goto err_ctx_lo; - } - - rq = spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_lo, rq)) { - GEM_TRACE("lo spinner failed to start\n"); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(gt); 
- err = -EIO; - goto err_ctx_lo; - } - - rq = spinner_create_request(&spin_hi, ctx_hi, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - igt_spinner_end(&spin_lo); - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_hi, rq)) { - GEM_TRACE("hi spinner failed to start\n"); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(gt); - err = -EIO; - goto err_ctx_lo; - } - - igt_spinner_end(&spin_hi); - igt_spinner_end(&spin_lo); - - if (igt_live_test_end(&t)) { - err = -EIO; - goto err_ctx_lo; - } - } - - err = 0; -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); -err_spin_lo: - igt_spinner_fini(&spin_lo); -err_spin_hi: - igt_spinner_fini(&spin_hi); - return err; -} - -static int live_late_preempt(void *arg) -{ - struct intel_gt *gt = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct igt_spinner spin_hi, spin_lo; - struct intel_engine_cs *engine; - struct i915_sched_attr attr = {}; - enum intel_engine_id id; - int err = -ENOMEM; - - if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) - return 0; - - if (igt_spinner_init(&spin_hi, gt)) - return -ENOMEM; - - if (igt_spinner_init(&spin_lo, gt)) - goto err_spin_hi; - - ctx_hi = kernel_context(gt->i915); - if (!ctx_hi) - goto err_spin_lo; - - ctx_lo = kernel_context(gt->i915); - if (!ctx_lo) - goto err_ctx_hi; - - /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. */ - ctx_lo->sched.priority = I915_USER_PRIORITY(1); - - for_each_engine(engine, gt, id) { - struct igt_live_test t; - struct i915_request *rq; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { - err = -EIO; - goto err_ctx_lo; - } - - rq = spinner_create_request(&spin_lo, ctx_lo, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_lo, rq)) { - pr_err("First context failed to start\n"); - goto err_wedged; - } - - rq = spinner_create_request(&spin_hi, ctx_hi, engine, - MI_NOOP); - if (IS_ERR(rq)) { - igt_spinner_end(&spin_lo); - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (igt_wait_for_spinner(&spin_hi, rq)) { - pr_err("Second context overtook first?\n"); - goto err_wedged; - } - - attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); - engine->schedule(rq, &attr); - - if (!igt_wait_for_spinner(&spin_hi, rq)) { - pr_err("High priority context failed to preempt the low priority context\n"); - GEM_TRACE_DUMP(); - goto err_wedged; - } - - igt_spinner_end(&spin_hi); - igt_spinner_end(&spin_lo); - - if (igt_live_test_end(&t)) { - err = -EIO; - goto err_ctx_lo; - } - } - - err = 0; -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); -err_spin_lo: - igt_spinner_fini(&spin_lo); -err_spin_hi: - igt_spinner_fini(&spin_hi); - return err; - -err_wedged: - igt_spinner_end(&spin_hi); - igt_spinner_end(&spin_lo); - intel_gt_set_wedged(gt); - err = -EIO; - goto err_ctx_lo; -} - -struct preempt_client { - struct igt_spinner spin; - struct i915_gem_context *ctx; -}; - -static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c) -{ - c->ctx = kernel_context(gt->i915); - if (!c->ctx) - return -ENOMEM; - - if (igt_spinner_init(&c->spin, gt)) - goto err_ctx; - - return 0; - -err_ctx: - kernel_context_close(c->ctx); - return -ENOMEM; -} - -static void preempt_client_fini(struct preempt_client *c) -{ - igt_spinner_fini(&c->spin); - kernel_context_close(c->ctx); -} - 
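The preempt_client pair above bundles a kernel context with an igt_spinner and is the scaffolding reused by the preemption tests that follow: one client keeps the engine busy with a spinner while the test observes how a second, differently prioritised client is scheduled around it. A minimal sketch of the intended usage, assuming only the helpers defined in this file (the function name example_preempt_clients is hypothetical, and the error unwinding is abbreviated compared to the real tests):

static int example_preempt_clients(struct intel_gt *gt,
				   struct intel_engine_cs *engine)
{
	struct preempt_client lo, hi;
	struct i915_request *rq;
	int err = -EIO;

	if (preempt_client_init(gt, &lo))
		return -ENOMEM;
	if (preempt_client_init(gt, &hi)) {
		preempt_client_fini(&lo);
		return -ENOMEM;
	}

	/* Raise hi above lo before either client submits anything */
	hi.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);

	/* Occupy the engine with the low priority spinner... */
	rq = spinner_create_request(&lo.spin, lo.ctx, engine, MI_ARB_CHECK);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_fini;
	}
	i915_request_add(rq);
	if (!igt_wait_for_spinner(&lo.spin, rq))
		goto out_lo;

	/* ...then check the high priority spinner displaces it */
	rq = spinner_create_request(&hi.spin, hi.ctx, engine, MI_ARB_CHECK);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto out_lo;
	}
	i915_request_add(rq);
	if (igt_wait_for_spinner(&hi.spin, rq))
		err = 0; /* hi started while lo was still spinning */
	igt_spinner_end(&hi.spin);
out_lo:
	igt_spinner_end(&lo.spin);
out_fini:
	preempt_client_fini(&hi);
	preempt_client_fini(&lo);
	return err;
}

The real tests that follow (live_nopreempt, live_preempt_cancel and friends) build on this same pattern, adding igt_live_test bookkeeping, NOPREEMPT flags and wedging of the GT on failure.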
-static int live_nopreempt(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *engine; - struct preempt_client a, b; - enum intel_engine_id id; - int err = -ENOMEM; - - /* - * Verify that we can disable preemption for an individual request - * that may be being observed and not want to be interrupted. - */ - - if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) - return 0; - - if (preempt_client_init(gt, &a)) - return -ENOMEM; - if (preempt_client_init(gt, &b)) - goto err_client_a; - b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); - - for_each_engine(engine, gt, id) { - struct i915_request *rq_a, *rq_b; - - if (!intel_engine_has_preemption(engine)) - continue; - - engine->execlists.preempt_hang.count = 0; - - rq_a = spinner_create_request(&a.spin, - a.ctx, engine, - MI_ARB_CHECK); - if (IS_ERR(rq_a)) { - err = PTR_ERR(rq_a); - goto err_client_b; - } - - /* Low priority client, but unpreemptable! */ - __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq_a->fence.flags); - - i915_request_add(rq_a); - if (!igt_wait_for_spinner(&a.spin, rq_a)) { - pr_err("First client failed to start\n"); - goto err_wedged; - } - - rq_b = spinner_create_request(&b.spin, - b.ctx, engine, - MI_ARB_CHECK); - if (IS_ERR(rq_b)) { - err = PTR_ERR(rq_b); - goto err_client_b; - } - - i915_request_add(rq_b); - - /* B is much more important than A! (But A is unpreemptable.) */ - GEM_BUG_ON(rq_prio(rq_b) <= rq_prio(rq_a)); - - /* Wait long enough for preemption and timeslicing */ - if (igt_wait_for_spinner(&b.spin, rq_b)) { - pr_err("Second client started too early!\n"); - goto err_wedged; - } - - igt_spinner_end(&a.spin); - - if (!igt_wait_for_spinner(&b.spin, rq_b)) { - pr_err("Second client failed to start\n"); - goto err_wedged; - } - - igt_spinner_end(&b.spin); - - if (engine->execlists.preempt_hang.count) { - pr_err("Preemption recorded x%d; should have been suppressed!\n", - engine->execlists.preempt_hang.count); - err = -EINVAL; - goto err_wedged; - } - - if (igt_flush_test(gt->i915)) - goto err_wedged; - } - - err = 0; -err_client_b: - preempt_client_fini(&b); -err_client_a: - preempt_client_fini(&a); - return err; - -err_wedged: - igt_spinner_end(&b.spin); - igt_spinner_end(&a.spin); - intel_gt_set_wedged(gt); - err = -EIO; - goto err_client_b; -} - -struct live_preempt_cancel { - struct intel_engine_cs *engine; - struct preempt_client a, b; -}; - -static int __cancel_active0(struct live_preempt_cancel *arg) -{ - struct i915_request *rq; - struct igt_live_test t; - int err; - - /* Preempt cancel of ELSP0 */ - GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); - if (igt_live_test_begin(&t, arg->engine->i915, - __func__, arg->engine->name)) - return -EIO; - - rq = spinner_create_request(&arg->a.spin, - arg->a.ctx, arg->engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) - return PTR_ERR(rq); - - clear_bit(CONTEXT_BANNED, &rq->context->flags); - i915_request_get(rq); - i915_request_add(rq); - if (!igt_wait_for_spinner(&arg->a.spin, rq)) { - err = -EIO; - goto out; - } - - intel_context_set_banned(rq->context); - err = intel_engine_pulse(arg->engine); - if (err) - goto out; - - err = wait_for_reset(arg->engine, rq, HZ / 2); - if (err) { - pr_err("Cancelled inflight0 request did not reset\n"); - goto out; - } - -out: - i915_request_put(rq); - if (igt_live_test_end(&t)) - err = -EIO; - return err; -} - -static int __cancel_active1(struct live_preempt_cancel *arg) -{ - struct i915_request *rq[2] = {}; - struct igt_live_test t; - int err; - - /* Preempt cancel of ELSP1 */ - GEM_TRACE("%s(%s)\n", __func__, 
arg->engine->name); - if (igt_live_test_begin(&t, arg->engine->i915, - __func__, arg->engine->name)) - return -EIO; - - rq[0] = spinner_create_request(&arg->a.spin, - arg->a.ctx, arg->engine, - MI_NOOP); /* no preemption */ - if (IS_ERR(rq[0])) - return PTR_ERR(rq[0]); - - clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); - i915_request_get(rq[0]); - i915_request_add(rq[0]); - if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { - err = -EIO; - goto out; - } - - rq[1] = spinner_create_request(&arg->b.spin, - arg->b.ctx, arg->engine, - MI_ARB_CHECK); - if (IS_ERR(rq[1])) { - err = PTR_ERR(rq[1]); - goto out; - } - - clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); - i915_request_get(rq[1]); - err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); - i915_request_add(rq[1]); - if (err) - goto out; - - intel_context_set_banned(rq[1]->context); - err = intel_engine_pulse(arg->engine); - if (err) - goto out; - - igt_spinner_end(&arg->a.spin); - err = wait_for_reset(arg->engine, rq[1], HZ / 2); - if (err) - goto out; - - if (rq[0]->fence.error != 0) { - pr_err("Normal inflight0 request did not complete\n"); - err = -EINVAL; - goto out; - } - - if (rq[1]->fence.error != -EIO) { - pr_err("Cancelled inflight1 request did not report -EIO\n"); - err = -EINVAL; - goto out; - } - -out: - i915_request_put(rq[1]); - i915_request_put(rq[0]); - if (igt_live_test_end(&t)) - err = -EIO; - return err; -} - -static int __cancel_queued(struct live_preempt_cancel *arg) -{ - struct i915_request *rq[3] = {}; - struct igt_live_test t; - int err; - - /* Full ELSP and one in the wings */ - GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); - if (igt_live_test_begin(&t, arg->engine->i915, - __func__, arg->engine->name)) - return -EIO; - - rq[0] = spinner_create_request(&arg->a.spin, - arg->a.ctx, arg->engine, - MI_ARB_CHECK); - if (IS_ERR(rq[0])) - return PTR_ERR(rq[0]); - - clear_bit(CONTEXT_BANNED, &rq[0]->context->flags); - i915_request_get(rq[0]); - i915_request_add(rq[0]); - if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { - err = -EIO; - goto out; - } - - rq[1] = igt_request_alloc(arg->b.ctx, arg->engine); - if (IS_ERR(rq[1])) { - err = PTR_ERR(rq[1]); - goto out; - } - - clear_bit(CONTEXT_BANNED, &rq[1]->context->flags); - i915_request_get(rq[1]); - err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); - i915_request_add(rq[1]); - if (err) - goto out; - - rq[2] = spinner_create_request(&arg->b.spin, - arg->a.ctx, arg->engine, - MI_ARB_CHECK); - if (IS_ERR(rq[2])) { - err = PTR_ERR(rq[2]); - goto out; - } - - i915_request_get(rq[2]); - err = i915_request_await_dma_fence(rq[2], &rq[1]->fence); - i915_request_add(rq[2]); - if (err) - goto out; - - intel_context_set_banned(rq[2]->context); - err = intel_engine_pulse(arg->engine); - if (err) - goto out; - - err = wait_for_reset(arg->engine, rq[2], HZ / 2); - if (err) - goto out; - - if (rq[0]->fence.error != -EIO) { - pr_err("Cancelled inflight0 request did not report -EIO\n"); - err = -EINVAL; - goto out; - } - - if (rq[1]->fence.error != 0) { - pr_err("Normal inflight1 request did not complete\n"); - err = -EINVAL; - goto out; - } - - if (rq[2]->fence.error != -EIO) { - pr_err("Cancelled queued request did not report -EIO\n"); - err = -EINVAL; - goto out; - } - -out: - i915_request_put(rq[2]); - i915_request_put(rq[1]); - i915_request_put(rq[0]); - if (igt_live_test_end(&t)) - err = -EIO; - return err; -} - -static int __cancel_hostile(struct live_preempt_cancel *arg) -{ - struct i915_request *rq; - int err; - - /* Preempt cancel non-preemptible spinner in 
ELSP0 */ - if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) - return 0; - - if (!intel_has_reset_engine(arg->engine->gt)) - return 0; - - GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); - rq = spinner_create_request(&arg->a.spin, - arg->a.ctx, arg->engine, - MI_NOOP); /* preemption disabled */ - if (IS_ERR(rq)) - return PTR_ERR(rq); - - clear_bit(CONTEXT_BANNED, &rq->context->flags); - i915_request_get(rq); - i915_request_add(rq); - if (!igt_wait_for_spinner(&arg->a.spin, rq)) { - err = -EIO; - goto out; - } - - intel_context_set_banned(rq->context); - err = intel_engine_pulse(arg->engine); /* force reset */ - if (err) - goto out; - - err = wait_for_reset(arg->engine, rq, HZ / 2); - if (err) { - pr_err("Cancelled inflight0 request did not reset\n"); - goto out; - } - -out: - i915_request_put(rq); - if (igt_flush_test(arg->engine->i915)) - err = -EIO; - return err; -} - -static int live_preempt_cancel(void *arg) -{ - struct intel_gt *gt = arg; - struct live_preempt_cancel data; - enum intel_engine_id id; - int err = -ENOMEM; - - /* - * To cancel an inflight context, we need to first remove it from the - * GPU. That sounds like preemption! Plus a little bit of bookkeeping. - */ - - if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) - return 0; - - if (preempt_client_init(gt, &data.a)) - return -ENOMEM; - if (preempt_client_init(gt, &data.b)) - goto err_client_a; - - for_each_engine(data.engine, gt, id) { - if (!intel_engine_has_preemption(data.engine)) - continue; - - err = __cancel_active0(&data); - if (err) - goto err_wedged; - - err = __cancel_active1(&data); - if (err) - goto err_wedged; - - err = __cancel_queued(&data); - if (err) - goto err_wedged; - - err = __cancel_hostile(&data); - if (err) - goto err_wedged; - } - - err = 0; -err_client_b: - preempt_client_fini(&data.b); -err_client_a: - preempt_client_fini(&data.a); - return err; - -err_wedged: - GEM_TRACE_DUMP(); - igt_spinner_end(&data.b.spin); - igt_spinner_end(&data.a.spin); - intel_gt_set_wedged(gt); - goto err_client_b; -} - -static int live_suppress_self_preempt(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *engine; - struct i915_sched_attr attr = { - .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX) - }; - struct preempt_client a, b; - enum intel_engine_id id; - int err = -ENOMEM; - - /* - * Verify that if a preemption request does not cause a change in - * the current execution order, the preempt-to-idle injection is - * skipped and that we do not accidentally apply it after the CS - * completion event. 
- */ - - if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) - return 0; - - if (intel_uc_uses_guc_submission(&gt->uc)) - return 0; /* presume black box */ - - if (intel_vgpu_active(gt->i915)) - return 0; /* GVT forces single port & request submission */ - - if (preempt_client_init(gt, &a)) - return -ENOMEM; - if (preempt_client_init(gt, &b)) - goto err_client_a; - - for_each_engine(engine, gt, id) { - struct i915_request *rq_a, *rq_b; - int depth; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (igt_flush_test(gt->i915)) - goto err_wedged; - - st_engine_heartbeat_disable(engine); - engine->execlists.preempt_hang.count = 0; - - rq_a = spinner_create_request(&a.spin, - a.ctx, engine, - MI_NOOP); - if (IS_ERR(rq_a)) { - err = PTR_ERR(rq_a); - st_engine_heartbeat_enable(engine); - goto err_client_b; - } - - i915_request_add(rq_a); - if (!igt_wait_for_spinner(&a.spin, rq_a)) { - pr_err("First client failed to start\n"); - st_engine_heartbeat_enable(engine); - goto err_wedged; - } - - /* Keep postponing the timer to avoid premature slicing */ - mod_timer(&engine->execlists.timer, jiffies + HZ); - for (depth = 0; depth < 8; depth++) { - rq_b = spinner_create_request(&b.spin, - b.ctx, engine, - MI_NOOP); - if (IS_ERR(rq_b)) { - err = PTR_ERR(rq_b); - st_engine_heartbeat_enable(engine); - goto err_client_b; - } - i915_request_add(rq_b); - - GEM_BUG_ON(i915_request_completed(rq_a)); - engine->schedule(rq_a, &attr); - igt_spinner_end(&a.spin); - - if (!igt_wait_for_spinner(&b.spin, rq_b)) { - pr_err("Second client failed to start\n"); - st_engine_heartbeat_enable(engine); - goto err_wedged; - } - - swap(a, b); - rq_a = rq_b; - } - igt_spinner_end(&a.spin); - - if (engine->execlists.preempt_hang.count) { - pr_err("Preemption on %s recorded x%d, depth %d; should have been suppressed!\n", - engine->name, - engine->execlists.preempt_hang.count, - depth); - st_engine_heartbeat_enable(engine); - err = -EINVAL; - goto err_client_b; - } - - st_engine_heartbeat_enable(engine); - if (igt_flush_test(gt->i915)) - goto err_wedged; - } - - err = 0; -err_client_b: - preempt_client_fini(&b); -err_client_a: - preempt_client_fini(&a); - return err; - -err_wedged: - igt_spinner_end(&b.spin); - igt_spinner_end(&a.spin); - intel_gt_set_wedged(gt); - err = -EIO; - goto err_client_b; -} - -static int live_chain_preempt(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *engine; - struct preempt_client hi, lo; - enum intel_engine_id id; - int err = -ENOMEM; - - /* - * Build a chain AB...BA between two contexts (A, B) and request - * preemption of the last request. It should then complete before - * the previously submitted spinner in B.
- */ - - if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) - return 0; - - if (preempt_client_init(gt, &hi)) - return -ENOMEM; - - if (preempt_client_init(gt, &lo)) - goto err_client_hi; - - for_each_engine(engine, gt, id) { - struct i915_sched_attr attr = { - .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), - }; - struct igt_live_test t; - struct i915_request *rq; - int ring_size, count, i; - - if (!intel_engine_has_preemption(engine)) - continue; - - rq = spinner_create_request(&lo.spin, - lo.ctx, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) - goto err_wedged; - - i915_request_get(rq); - i915_request_add(rq); - - ring_size = rq->wa_tail - rq->head; - if (ring_size < 0) - ring_size += rq->ring->size; - ring_size = rq->ring->size / ring_size; - pr_debug("%s(%s): Using maximum of %d requests\n", - __func__, engine->name, ring_size); - - igt_spinner_end(&lo.spin); - if (i915_request_wait(rq, 0, HZ / 2) < 0) { - pr_err("Timed out waiting to flush %s\n", engine->name); - i915_request_put(rq); - goto err_wedged; - } - i915_request_put(rq); - - if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { - err = -EIO; - goto err_wedged; - } - - for_each_prime_number_from(count, 1, ring_size) { - rq = spinner_create_request(&hi.spin, - hi.ctx, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - if (!igt_wait_for_spinner(&hi.spin, rq)) - goto err_wedged; - - rq = spinner_create_request(&lo.spin, - lo.ctx, engine, - MI_ARB_CHECK); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - - for (i = 0; i < count; i++) { - rq = igt_request_alloc(lo.ctx, engine); - if (IS_ERR(rq)) - goto err_wedged; - i915_request_add(rq); - } - - rq = igt_request_alloc(hi.ctx, engine); - if (IS_ERR(rq)) - goto err_wedged; - - i915_request_get(rq); - i915_request_add(rq); - engine->schedule(rq, &attr); - - igt_spinner_end(&hi.spin); - if (i915_request_wait(rq, 0, HZ / 5) < 0) { - struct drm_printer p = - drm_info_printer(gt->i915->drm.dev); - - pr_err("Failed to preempt over chain of %d\n", - count); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - i915_request_put(rq); - goto err_wedged; - } - igt_spinner_end(&lo.spin); - i915_request_put(rq); - - rq = igt_request_alloc(lo.ctx, engine); - if (IS_ERR(rq)) - goto err_wedged; - - i915_request_get(rq); - i915_request_add(rq); - - if (i915_request_wait(rq, 0, HZ / 5) < 0) { - struct drm_printer p = - drm_info_printer(gt->i915->drm.dev); - - pr_err("Failed to flush low priority chain of %d requests\n", - count); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - i915_request_put(rq); - goto err_wedged; - } - i915_request_put(rq); - } - - if (igt_live_test_end(&t)) { - err = -EIO; - goto err_wedged; - } - } - - err = 0; -err_client_lo: - preempt_client_fini(&lo); -err_client_hi: - preempt_client_fini(&hi); - return err; - -err_wedged: - igt_spinner_end(&hi.spin); - igt_spinner_end(&lo.spin); - intel_gt_set_wedged(gt); - err = -EIO; - goto err_client_lo; -} - -static int create_gang(struct intel_engine_cs *engine, - struct i915_request **prev) -{ - struct drm_i915_gem_object *obj; - struct intel_context *ce; - struct i915_request *rq; - struct i915_vma *vma; - u32 *cs; - int err; - - ce = intel_context_create(engine); - if (IS_ERR(ce)) - return PTR_ERR(ce); - - obj = i915_gem_object_create_internal(engine->i915, 4096); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto err_ce; - } - - vma = i915_vma_instance(obj, ce->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto err_obj; - } - - err = i915_vma_pin(vma, 
0, 0, PIN_USER); - if (err) - goto err_obj; - - cs = i915_gem_object_pin_map(obj, I915_MAP_WC); - if (IS_ERR(cs)) - goto err_obj; - - /* Semaphore target: spin until zero */ - *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; - - *cs++ = MI_SEMAPHORE_WAIT | - MI_SEMAPHORE_POLL | - MI_SEMAPHORE_SAD_EQ_SDD; - *cs++ = 0; - *cs++ = lower_32_bits(vma->node.start); - *cs++ = upper_32_bits(vma->node.start); - - if (*prev) { - u64 offset = (*prev)->batch->node.start; - - /* Terminate the spinner in the next lower priority batch. */ - *cs++ = MI_STORE_DWORD_IMM_GEN4; - *cs++ = lower_32_bits(offset); - *cs++ = upper_32_bits(offset); - *cs++ = 0; - } - - *cs++ = MI_BATCH_BUFFER_END; - i915_gem_object_flush_map(obj); - i915_gem_object_unpin_map(obj); - - rq = intel_context_create_request(ce); - if (IS_ERR(rq)) - goto err_obj; - - rq->batch = i915_vma_get(vma); - i915_request_get(rq); - - i915_vma_lock(vma); - err = i915_request_await_object(rq, vma->obj, false); - if (!err) - err = i915_vma_move_to_active(vma, rq, 0); - if (!err) - err = rq->engine->emit_bb_start(rq, - vma->node.start, - PAGE_SIZE, 0); - i915_vma_unlock(vma); - i915_request_add(rq); - if (err) - goto err_rq; - - i915_gem_object_put(obj); - intel_context_put(ce); - - rq->mock.link.next = &(*prev)->mock.link; - *prev = rq; - return 0; - -err_rq: - i915_vma_put(rq->batch); - i915_request_put(rq); -err_obj: - i915_gem_object_put(obj); -err_ce: - intel_context_put(ce); - return err; -} - -static int __live_preempt_ring(struct intel_engine_cs *engine, - struct igt_spinner *spin, - int queue_sz, int ring_sz) -{ - struct intel_context *ce[2] = {}; - struct i915_request *rq; - struct igt_live_test t; - int err = 0; - int n; - - if (igt_live_test_begin(&t, engine->i915, __func__, engine->name)) - return -EIO; - - for (n = 0; n < ARRAY_SIZE(ce); n++) { - struct intel_context *tmp; - - tmp = intel_context_create(engine); - if (IS_ERR(tmp)) { - err = PTR_ERR(tmp); - goto err_ce; - } - - tmp->ring = __intel_context_ring_size(ring_sz); - - err = intel_context_pin(tmp); - if (err) { - intel_context_put(tmp); - goto err_ce; - } - - memset32(tmp->ring->vaddr, - 0xdeadbeef, /* trigger a hang if executed */ - tmp->ring->vma->size / sizeof(u32)); - - ce[n] = tmp; - } - - rq = igt_spinner_create_request(spin, ce[0], MI_ARB_CHECK); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ce; - } - - i915_request_get(rq); - rq->sched.attr.priority = I915_PRIORITY_BARRIER; - i915_request_add(rq); - - if (!igt_wait_for_spinner(spin, rq)) { - intel_gt_set_wedged(engine->gt); - i915_request_put(rq); - err = -ETIME; - goto err_ce; - } - - /* Fill the ring, until we will cause a wrap */ - n = 0; - while (ce[0]->ring->tail - rq->wa_tail <= queue_sz) { - struct i915_request *tmp; - - tmp = intel_context_create_request(ce[0]); - if (IS_ERR(tmp)) { - err = PTR_ERR(tmp); - i915_request_put(rq); - goto err_ce; - } - - i915_request_add(tmp); - intel_engine_flush_submission(engine); - n++; - } - intel_engine_flush_submission(engine); - pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n", - engine->name, queue_sz, n, - ce[0]->ring->size, - ce[0]->ring->tail, - ce[0]->ring->emit, - rq->tail); - i915_request_put(rq); - - /* Create a second request to preempt the first ring */ - rq = intel_context_create_request(ce[1]); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ce; - } - - rq->sched.attr.priority = I915_PRIORITY_BARRIER; - i915_request_get(rq); - i915_request_add(rq); - - err = wait_for_submit(engine, rq, HZ / 2); - i915_request_put(rq); - if (err) 
{ - pr_err("%s: preemption request was not submited\n", - engine->name); - err = -ETIME; - } - - pr_debug("%s: ring[0]:{ tail:%x, emit:%x }, ring[1]:{ tail:%x, emit:%x }\n", - engine->name, - ce[0]->ring->tail, ce[0]->ring->emit, - ce[1]->ring->tail, ce[1]->ring->emit); - -err_ce: - intel_engine_flush_submission(engine); - igt_spinner_end(spin); - for (n = 0; n < ARRAY_SIZE(ce); n++) { - if (IS_ERR_OR_NULL(ce[n])) - break; - - intel_context_unpin(ce[n]); - intel_context_put(ce[n]); - } - if (igt_live_test_end(&t)) - err = -EIO; - return err; -} - -static int live_preempt_ring(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *engine; - struct igt_spinner spin; - enum intel_engine_id id; - int err = 0; - - /* - * Check that we rollback large chunks of a ring in order to do a - * preemption event. Similar to live_unlite_ring, but looking at - * ring size rather than the impact of intel_ring_direction(). - */ - - if (igt_spinner_init(&spin, gt)) - return -ENOMEM; - - for_each_engine(engine, gt, id) { - int n; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (!intel_engine_can_store_dword(engine)) - continue; - - st_engine_heartbeat_disable(engine); - - for (n = 0; n <= 3; n++) { - err = __live_preempt_ring(engine, &spin, - n * SZ_4K / 4, SZ_4K); - if (err) - break; - } - - st_engine_heartbeat_enable(engine); - if (err) - break; - } - - igt_spinner_fini(&spin); - return err; -} - -static int live_preempt_gang(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *engine; - enum intel_engine_id id; - - if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) - return 0; - - /* - * Build as long a chain of preempters as we can, with each - * request higher priority than the last. Once we are ready, we release - * the last batch which then precolates down the chain, each releasing - * the next oldest in turn. The intent is to simply push as hard as we - * can with the number of preemptions, trying to exceed narrow HW - * limits. At a minimum, we insist that we can sort all the user - * high priority levels into execution order. - */ - - for_each_engine(engine, gt, id) { - struct i915_request *rq = NULL; - struct igt_live_test t; - IGT_TIMEOUT(end_time); - int prio = 0; - int err = 0; - u32 *cs; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) - return -EIO; - - do { - struct i915_sched_attr attr = { - .priority = I915_USER_PRIORITY(prio++), - }; - - err = create_gang(engine, &rq); - if (err) - break; - - /* Submit each spinner at increasing priority */ - engine->schedule(rq, &attr); - } while (prio <= I915_PRIORITY_MAX && - !__igt_timeout(end_time, NULL)); - pr_debug("%s: Preempt chain of %d requests\n", - engine->name, prio); - - /* - * Such that the last spinner is the highest priority and - * should execute first. When that spinner completes, - * it will terminate the next lowest spinner until there - * are no more spinners and the gang is complete. 
- */ - cs = i915_gem_object_pin_map(rq->batch->obj, I915_MAP_WC); - if (!IS_ERR(cs)) { - *cs = 0; - i915_gem_object_unpin_map(rq->batch->obj); - } else { - err = PTR_ERR(cs); - intel_gt_set_wedged(gt); - } - - while (rq) { /* wait for each rq from highest to lowest prio */ - struct i915_request *n = list_next_entry(rq, mock.link); - - if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0) { - struct drm_printer p = - drm_info_printer(engine->i915->drm.dev); - - pr_err("Failed to flush chain of %d requests, at %d\n", - prio, rq_prio(rq) >> I915_USER_PRIORITY_SHIFT); - intel_engine_dump(engine, &p, - "%s\n", engine->name); - - err = -ETIME; - } - - i915_vma_put(rq->batch); - i915_request_put(rq); - rq = n; - } - - if (igt_live_test_end(&t)) - err = -EIO; - if (err) - return err; - } - - return 0; -} - -static struct i915_vma * -create_gpr_user(struct intel_engine_cs *engine, - struct i915_vma *result, - unsigned int offset) -{ - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - u32 *cs; - int err; - int i; - - obj = i915_gem_object_create_internal(engine->i915, 4096); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - vma = i915_vma_instance(obj, result->vm, NULL); - if (IS_ERR(vma)) { - i915_gem_object_put(obj); - return vma; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) { - i915_vma_put(vma); - return ERR_PTR(err); - } - - cs = i915_gem_object_pin_map(obj, I915_MAP_WC); - if (IS_ERR(cs)) { - i915_vma_put(vma); - return ERR_CAST(cs); - } - - /* All GPR are clear for new contexts. We use GPR(0) as a constant */ - *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = CS_GPR(engine, 0); - *cs++ = 1; - - for (i = 1; i < NUM_GPR; i++) { - u64 addr; - - /* - * Perform: GPR[i]++ - * - * As we read and write into the context saved GPR[i], if - * we restart this batch buffer from an earlier point, we - * will repeat the increment and store a value > 1. 
- */ - *cs++ = MI_MATH(4); - *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(i)); - *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(0)); - *cs++ = MI_MATH_ADD; - *cs++ = MI_MATH_STORE(MI_MATH_REG(i), MI_MATH_REG_ACCU); - - addr = result->node.start + offset + i * sizeof(*cs); - *cs++ = MI_STORE_REGISTER_MEM_GEN8; - *cs++ = CS_GPR(engine, 2 * i); - *cs++ = lower_32_bits(addr); - *cs++ = upper_32_bits(addr); - - *cs++ = MI_SEMAPHORE_WAIT | - MI_SEMAPHORE_POLL | - MI_SEMAPHORE_SAD_GTE_SDD; - *cs++ = i; - *cs++ = lower_32_bits(result->node.start); - *cs++ = upper_32_bits(result->node.start); - } - - *cs++ = MI_BATCH_BUFFER_END; - i915_gem_object_flush_map(obj); - i915_gem_object_unpin_map(obj); - - return vma; -} - -static struct i915_vma *create_global(struct intel_gt *gt, size_t sz) -{ - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - int err; - - obj = i915_gem_object_create_internal(gt->i915, sz); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); - if (IS_ERR(vma)) { - i915_gem_object_put(obj); - return vma; - } - - err = i915_ggtt_pin(vma, NULL, 0, 0); - if (err) { - i915_vma_put(vma); - return ERR_PTR(err); - } - - return vma; -} - -static struct i915_request * -create_gpr_client(struct intel_engine_cs *engine, - struct i915_vma *global, - unsigned int offset) -{ - struct i915_vma *batch, *vma; - struct intel_context *ce; - struct i915_request *rq; - int err; - - ce = intel_context_create(engine); - if (IS_ERR(ce)) - return ERR_CAST(ce); - - vma = i915_vma_instance(global->obj, ce->vm, NULL); - if (IS_ERR(vma)) { - err = PTR_ERR(vma); - goto out_ce; - } - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - goto out_ce; - - batch = create_gpr_user(engine, vma, offset); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto out_vma; - } - - rq = intel_context_create_request(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_batch; - } - - i915_vma_lock(vma); - err = i915_request_await_object(rq, vma->obj, false); - if (!err) - err = i915_vma_move_to_active(vma, rq, 0); - i915_vma_unlock(vma); - - i915_vma_lock(batch); - if (!err) - err = i915_request_await_object(rq, batch->obj, false); - if (!err) - err = i915_vma_move_to_active(batch, rq, 0); - if (!err) - err = rq->engine->emit_bb_start(rq, - batch->node.start, - PAGE_SIZE, 0); - i915_vma_unlock(batch); - i915_vma_unpin(batch); - - if (!err) - i915_request_get(rq); - i915_request_add(rq); - -out_batch: - i915_vma_put(batch); -out_vma: - i915_vma_unpin(vma); -out_ce: - intel_context_put(ce); - return err ?
ERR_PTR(err) : rq; -} - -static int preempt_user(struct intel_engine_cs *engine, - struct i915_vma *global, - int id) -{ - struct i915_sched_attr attr = { - .priority = I915_PRIORITY_MAX - }; - struct i915_request *rq; - int err = 0; - u32 *cs; - - rq = intel_engine_create_kernel_request(engine); - if (IS_ERR(rq)) - return PTR_ERR(rq); - - cs = intel_ring_begin(rq, 4); - if (IS_ERR(cs)) { - i915_request_add(rq); - return PTR_ERR(cs); - } - - *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = i915_ggtt_offset(global); - *cs++ = 0; - *cs++ = id; - - intel_ring_advance(rq, cs); - - i915_request_get(rq); - i915_request_add(rq); - - engine->schedule(rq, &attr); - - if (i915_request_wait(rq, 0, HZ / 2) < 0) - err = -ETIME; - i915_request_put(rq); - - return err; -} - -static int live_preempt_user(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *engine; - struct i915_vma *global; - enum intel_engine_id id; - u32 *result; - int err = 0; - - if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) - return 0; - - /* - * In our other tests, we look at preemption in carefully - * controlled conditions in the ringbuffer. Since most of the - * time is spent in user batches, most of our preemptions naturally - * occur there. We want to verify that when we preempt inside a batch - * we continue on from the current instruction and do not roll back - * to the start, or another earlier arbitration point. - * - * To verify this, we create a batch which is a mixture of - * MI_MATH (gpr++) MI_SRM (gpr) and preemption points. Then with - * a few preempting contexts thrown into the mix, we look for any - * repeated instructions (which show up as incorrect values). - */ - - global = create_global(gt, 4096); - if (IS_ERR(global)) - return PTR_ERR(global); - - result = i915_gem_object_pin_map(global->obj, I915_MAP_WC); - if (IS_ERR(result)) { - i915_vma_unpin_and_release(&global, 0); - return PTR_ERR(result); - } - - for_each_engine(engine, gt, id) { - struct i915_request *client[3] = {}; - struct igt_live_test t; - int i; - - if (!intel_engine_has_preemption(engine)) - continue; - - if (IS_GEN(gt->i915, 8) && engine->class != RENDER_CLASS) - continue; /* we need per-context GPR */ - - if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { - err = -EIO; - break; - } - - memset(result, 0, 4096); - - for (i = 0; i < ARRAY_SIZE(client); i++) { - struct i915_request *rq; - - rq = create_gpr_client(engine, global, - NUM_GPR * i * sizeof(u32)); - if (IS_ERR(rq)) - goto end_test; - - client[i] = rq; - } - - /* Continuously preempt the set of 3 running contexts */ - for (i = 1; i <= NUM_GPR; i++) { - err = preempt_user(engine, global, i); - if (err) - goto end_test; - } - - if (READ_ONCE(result[0]) != NUM_GPR) { - pr_err("%s: Failed to release semaphore\n", - engine->name); - err = -EIO; - goto end_test; - } - - for (i = 0; i < ARRAY_SIZE(client); i++) { - int gpr; - - if (i915_request_wait(client[i], 0, HZ / 2) < 0) { - err = -ETIME; - goto end_test; - } - - for (gpr = 1; gpr < NUM_GPR; gpr++) { - if (result[NUM_GPR * i + gpr] != 1) { - pr_err("%s: Invalid result, client %d, gpr %d, result: %d\n", - engine->name, - i, gpr, result[NUM_GPR * i + gpr]); - err = -EINVAL; - goto end_test; - } - } - } - -end_test: - for (i = 0; i < ARRAY_SIZE(client); i++) { - if (!client[i]) - break; - - i915_request_put(client[i]); - } - - /* Flush the semaphores on error */ - smp_store_mb(result[0], -1); - if (igt_live_test_end(&t)) - err = -EIO; - if (err) - break; - } - - i915_vma_unpin_and_release(&global, 
I915_VMA_RELEASE_MAP); - return err; -} - -static int live_preempt_timeout(void *arg) -{ - struct intel_gt *gt = arg; - struct i915_gem_context *ctx_hi, *ctx_lo; - struct igt_spinner spin_lo; - struct intel_engine_cs *engine; - enum intel_engine_id id; - int err = -ENOMEM; - - /* - * Check that we force preemption to occur by cancelling the previous - * context if it refuses to yield the GPU. - */ - if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) - return 0; - - if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) - return 0; - - if (!intel_has_reset_engine(gt)) - return 0; - - if (igt_spinner_init(&spin_lo, gt)) - return -ENOMEM; - - ctx_hi = kernel_context(gt->i915); - if (!ctx_hi) - goto err_spin_lo; - ctx_hi->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); - - ctx_lo = kernel_context(gt->i915); - if (!ctx_lo) - goto err_ctx_hi; - ctx_lo->sched.priority = - I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); - - for_each_engine(engine, gt, id) { - unsigned long saved_timeout; - struct i915_request *rq; - - if (!intel_engine_has_preemption(engine)) - continue; - - rq = spinner_create_request(&spin_lo, ctx_lo, engine, - MI_NOOP); /* preemption disabled */ - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - i915_request_add(rq); - if (!igt_wait_for_spinner(&spin_lo, rq)) { - intel_gt_set_wedged(gt); - err = -EIO; - goto err_ctx_lo; - } - - rq = igt_request_alloc(ctx_hi, engine); - if (IS_ERR(rq)) { - igt_spinner_end(&spin_lo); - err = PTR_ERR(rq); - goto err_ctx_lo; - } - - /* Flush the previous CS ack before changing timeouts */ - while (READ_ONCE(engine->execlists.pending[0])) - cpu_relax(); - - saved_timeout = engine->props.preempt_timeout_ms; - engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */ - - i915_request_get(rq); - i915_request_add(rq); - - intel_engine_flush_submission(engine); - engine->props.preempt_timeout_ms = saved_timeout; - - if (i915_request_wait(rq, 0, HZ / 10) < 0) { - intel_gt_set_wedged(gt); - i915_request_put(rq); - err = -ETIME; - goto err_ctx_lo; - } - - igt_spinner_end(&spin_lo); - i915_request_put(rq); - } - - err = 0; -err_ctx_lo: - kernel_context_close(ctx_lo); -err_ctx_hi: - kernel_context_close(ctx_hi); -err_spin_lo: - igt_spinner_fini(&spin_lo); - return err; -} - -static int random_range(struct rnd_state *rnd, int min, int max) -{ - return i915_prandom_u32_max_state(max - min, rnd) + min; -} - -static int random_priority(struct rnd_state *rnd) -{ - return random_range(rnd, I915_PRIORITY_MIN, I915_PRIORITY_MAX); -} - -struct preempt_smoke { - struct intel_gt *gt; - struct i915_gem_context **contexts; - struct intel_engine_cs *engine; - struct drm_i915_gem_object *batch; - unsigned int ncontext; - struct rnd_state prng; - unsigned long count; -}; - -static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke) -{ - return smoke->contexts[i915_prandom_u32_max_state(smoke->ncontext, - &smoke->prng)]; -} - -static int smoke_submit(struct preempt_smoke *smoke, - struct i915_gem_context *ctx, int prio, - struct drm_i915_gem_object *batch) -{ - struct i915_request *rq; - struct i915_vma *vma = NULL; - int err = 0; - - if (batch) { - struct i915_address_space *vm; - - vm = i915_gem_context_get_vm_rcu(ctx); - vma = i915_vma_instance(batch, vm, NULL); - i915_vm_put(vm); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return err; - } - - ctx->sched.priority = prio; - - rq = igt_request_alloc(ctx, smoke->engine); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto 
unpin; - } - - if (vma) { - i915_vma_lock(vma); - err = i915_request_await_object(rq, vma->obj, false); - if (!err) - err = i915_vma_move_to_active(vma, rq, 0); - if (!err) - err = rq->engine->emit_bb_start(rq, - vma->node.start, - PAGE_SIZE, 0); - i915_vma_unlock(vma); - } - - i915_request_add(rq); - -unpin: - if (vma) - i915_vma_unpin(vma); - - return err; -} - -static int smoke_crescendo_thread(void *arg) -{ - struct preempt_smoke *smoke = arg; - IGT_TIMEOUT(end_time); - unsigned long count; - - count = 0; - do { - struct i915_gem_context *ctx = smoke_context(smoke); - int err; - - err = smoke_submit(smoke, - ctx, count % I915_PRIORITY_MAX, - smoke->batch); - if (err) - return err; - - count++; - } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL)); - - smoke->count = count; - return 0; -} - -static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) -#define BATCH BIT(0) -{ - struct task_struct *tsk[I915_NUM_ENGINES] = {}; - struct preempt_smoke arg[I915_NUM_ENGINES]; - struct intel_engine_cs *engine; - enum intel_engine_id id; - unsigned long count; - int err = 0; - - for_each_engine(engine, smoke->gt, id) { - arg[id] = *smoke; - arg[id].engine = engine; - if (!(flags & BATCH)) - arg[id].batch = NULL; - arg[id].count = 0; - - tsk[id] = kthread_run(smoke_crescendo_thread, &arg[id], - "igt/smoke:%d", id); - if (IS_ERR(tsk[id])) { - err = PTR_ERR(tsk[id]); - break; - } - get_task_struct(tsk[id]); - } - - yield(); /* start all threads before we kthread_stop() */ - - count = 0; - for_each_engine(engine, smoke->gt, id) { - int status; - - if (IS_ERR_OR_NULL(tsk[id])) - continue; - - status = kthread_stop(tsk[id]); - if (status && !err) - err = status; - - count += arg[id].count; - - put_task_struct(tsk[id]); - } - - pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", - count, flags, smoke->gt->info.num_engines, smoke->ncontext); - return err; -} - -static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) -{ - enum intel_engine_id id; - IGT_TIMEOUT(end_time); - unsigned long count; - - count = 0; - do { - for_each_engine(smoke->engine, smoke->gt, id) { - struct i915_gem_context *ctx = smoke_context(smoke); - int err; - - err = smoke_submit(smoke, - ctx, random_priority(&smoke->prng), - flags & BATCH ?
smoke->batch : NULL); - if (err) - return err; - - count++; - } - } while (count < smoke->ncontext && !__igt_timeout(end_time, NULL)); - - pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", - count, flags, smoke->gt->info.num_engines, smoke->ncontext); - return 0; -} - -static int live_preempt_smoke(void *arg) -{ - struct preempt_smoke smoke = { - .gt = arg, - .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed), - .ncontext = 256, - }; - const unsigned int phase[] = { 0, BATCH }; - struct igt_live_test t; - int err = -ENOMEM; - u32 *cs; - int n; - - if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915)) - return 0; - - smoke.contexts = kmalloc_array(smoke.ncontext, - sizeof(*smoke.contexts), - GFP_KERNEL); - if (!smoke.contexts) - return -ENOMEM; - - smoke.batch = - i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE); - if (IS_ERR(smoke.batch)) { - err = PTR_ERR(smoke.batch); - goto err_free; - } - - cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto err_batch; - } - for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++) - cs[n] = MI_ARB_CHECK; - cs[n] = MI_BATCH_BUFFER_END; - i915_gem_object_flush_map(smoke.batch); - i915_gem_object_unpin_map(smoke.batch); - - if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) { - err = -EIO; - goto err_batch; - } - - for (n = 0; n < smoke.ncontext; n++) { - smoke.contexts[n] = kernel_context(smoke.gt->i915); - if (!smoke.contexts[n]) - goto err_ctx; - } - - for (n = 0; n < ARRAY_SIZE(phase); n++) { - err = smoke_crescendo(&smoke, phase[n]); - if (err) - goto err_ctx; - - err = smoke_random(&smoke, phase[n]); - if (err) - goto err_ctx; - } - -err_ctx: - if (igt_live_test_end(&t)) - err = -EIO; - - for (n = 0; n < smoke.ncontext; n++) { - if (!smoke.contexts[n]) - break; - kernel_context_close(smoke.contexts[n]); - } - -err_batch: - i915_gem_object_put(smoke.batch); -err_free: - kfree(smoke.contexts); - - return err; -} - -static int nop_virtual_engine(struct intel_gt *gt, - struct intel_engine_cs **siblings, - unsigned int nsibling, - unsigned int nctx, - unsigned int flags) -#define CHAIN BIT(0) -{ - IGT_TIMEOUT(end_time); - struct i915_request *request[16] = {}; - struct intel_context *ve[16]; - unsigned long n, prime, nc; - struct igt_live_test t; - ktime_t times[2] = {}; - int err; - - GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve)); - - for (n = 0; n < nctx; n++) { - ve[n] = intel_execlists_create_virtual(siblings, nsibling); - if (IS_ERR(ve[n])) { - err = PTR_ERR(ve[n]); - nctx = n; - goto out; - } - - err = intel_context_pin(ve[n]); - if (err) { - intel_context_put(ve[n]); - nctx = n; - goto out; - } - } - - err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name); - if (err) - goto out; - - for_each_prime_number_from(prime, 1, 8192) { - times[1] = ktime_get_raw(); - - if (flags & CHAIN) { - for (nc = 0; nc < nctx; nc++) { - for (n = 0; n < prime; n++) { - struct i915_request *rq; - - rq = i915_request_create(ve[nc]); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out; - } - - if (request[nc]) - i915_request_put(request[nc]); - request[nc] = i915_request_get(rq); - i915_request_add(rq); - } - } - } else { - for (n = 0; n < prime; n++) { - for (nc = 0; nc < nctx; nc++) { - struct i915_request *rq; - - rq = i915_request_create(ve[nc]); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out; - } - - if (request[nc]) - i915_request_put(request[nc]); - request[nc] = i915_request_get(rq); - i915_request_add(rq); - } - } - } - - for (nc = 0; nc < 
nctx; nc++) { - if (i915_request_wait(request[nc], 0, HZ / 10) < 0) { - pr_err("%s(%s): wait for %llx:%lld timed out\n", - __func__, ve[0]->engine->name, - request[nc]->fence.context, - request[nc]->fence.seqno); - - GEM_TRACE("%s(%s) failed at request %llx:%lld\n", - __func__, ve[0]->engine->name, - request[nc]->fence.context, - request[nc]->fence.seqno); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(gt); - break; - } - } - - times[1] = ktime_sub(ktime_get_raw(), times[1]); - if (prime == 1) - times[0] = times[1]; - - for (nc = 0; nc < nctx; nc++) { - i915_request_put(request[nc]); - request[nc] = NULL; - } - - if (__igt_timeout(end_time, NULL)) - break; - } - - err = igt_live_test_end(&t); - if (err) - goto out; - - pr_info("Requestx%d latencies on %s: 1 = %lluns, %lu = %lluns\n", - nctx, ve[0]->engine->name, ktime_to_ns(times[0]), - prime, div64_u64(ktime_to_ns(times[1]), prime)); - -out: - if (igt_flush_test(gt->i915)) - err = -EIO; - - for (nc = 0; nc < nctx; nc++) { - i915_request_put(request[nc]); - intel_context_unpin(ve[nc]); - intel_context_put(ve[nc]); - } - return err; -} - -static unsigned int -__select_siblings(struct intel_gt *gt, - unsigned int class, - struct intel_engine_cs **siblings, - bool (*filter)(const struct intel_engine_cs *)) -{ - unsigned int n = 0; - unsigned int inst; - - for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) { - if (!gt->engine_class[class][inst]) - continue; - - if (filter && !filter(gt->engine_class[class][inst])) - continue; - - siblings[n++] = gt->engine_class[class][inst]; - } - - return n; -} - -static unsigned int -select_siblings(struct intel_gt *gt, - unsigned int class, - struct intel_engine_cs **siblings) -{ - return __select_siblings(gt, class, siblings, NULL); -} - -static int live_virtual_engine(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; - struct intel_engine_cs *engine; - enum intel_engine_id id; - unsigned int class; - int err; - - if (intel_uc_uses_guc_submission(&gt->uc)) - return 0; - - for_each_engine(engine, gt, id) { - err = nop_virtual_engine(gt, &engine, 1, 1, 0); - if (err) { - pr_err("Failed to wrap engine %s: err=%d\n", - engine->name, err); - return err; - } - } - - for (class = 0; class <= MAX_ENGINE_CLASS; class++) { - int nsibling, n; - - nsibling = select_siblings(gt, class, siblings); - if (nsibling < 2) - continue; - - for (n = 1; n <= nsibling + 1; n++) { - err = nop_virtual_engine(gt, siblings, nsibling, - n, 0); - if (err) - return err; - } - - err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN); - if (err) - return err; - } - - return 0; -} - -static int mask_virtual_engine(struct intel_gt *gt, - struct intel_engine_cs **siblings, - unsigned int nsibling) -{ - struct i915_request *request[MAX_ENGINE_INSTANCE + 1]; - struct intel_context *ve; - struct igt_live_test t; - unsigned int n; - int err; - - /* - * Check that by setting the execution mask on a request, we can - * restrict it to our desired engine within the virtual engine.
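- * As an illustration of the flow exercised below (a sketch, not part - * of the original comment): each request is created on the virtual - * engine, rq->execution_mask is set to a single sibling's mask before - * submission, and on completion rq->engine is expected to match that - * sibling.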
- */ - - ve = intel_execlists_create_virtual(siblings, nsibling); - if (IS_ERR(ve)) { - err = PTR_ERR(ve); - goto out_close; - } - - err = intel_context_pin(ve); - if (err) - goto out_put; - - err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name); - if (err) - goto out_unpin; - - for (n = 0; n < nsibling; n++) { - request[n] = i915_request_create(ve); - if (IS_ERR(request[n])) { - err = PTR_ERR(request[n]); - nsibling = n; - goto out; - } - - /* Reverse order as it's more likely to be unnatural */ - request[n]->execution_mask = siblings[nsibling - n - 1]->mask; - - i915_request_get(request[n]); - i915_request_add(request[n]); - } - - for (n = 0; n < nsibling; n++) { - if (i915_request_wait(request[n], 0, HZ / 10) < 0) { - pr_err("%s(%s): wait for %llx:%lld timed out\n", - __func__, ve->engine->name, - request[n]->fence.context, - request[n]->fence.seqno); - - GEM_TRACE("%s(%s) failed at request %llx:%lld\n", - __func__, ve->engine->name, - request[n]->fence.context, - request[n]->fence.seqno); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(gt); - err = -EIO; - goto out; - } - - if (request[n]->engine != siblings[nsibling - n - 1]) { - pr_err("Executed on wrong sibling '%s', expected '%s'\n", - request[n]->engine->name, - siblings[nsibling - n - 1]->name); - err = -EINVAL; - goto out; - } - } - - err = igt_live_test_end(&t); -out: - if (igt_flush_test(gt->i915)) - err = -EIO; - - for (n = 0; n < nsibling; n++) - i915_request_put(request[n]); - -out_unpin: - intel_context_unpin(ve); -out_put: - intel_context_put(ve); -out_close: - return err; -} - -static int live_virtual_mask(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; - unsigned int class; - int err; - - if (intel_uc_uses_guc_submission(&gt->uc)) - return 0; - - for (class = 0; class <= MAX_ENGINE_CLASS; class++) { - unsigned int nsibling; - - nsibling = select_siblings(gt, class, siblings); - if (nsibling < 2) - continue; - - err = mask_virtual_engine(gt, siblings, nsibling); - if (err) - return err; - } - - return 0; -} - -static int slicein_virtual_engine(struct intel_gt *gt, - struct intel_engine_cs **siblings, - unsigned int nsibling) -{ - const long timeout = slice_timeout(siblings[0]); - struct intel_context *ce; - struct i915_request *rq; - struct igt_spinner spin; - unsigned int n; - int err = 0; - - /* - * Virtual requests must take part in timeslicing on the target engines.
- */ - - if (igt_spinner_init(&spin, gt)) - return -ENOMEM; - - for (n = 0; n < nsibling; n++) { - ce = intel_context_create(siblings[n]); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto out; - } - - rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); - intel_context_put(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out; - } - - i915_request_add(rq); - } - - ce = intel_execlists_create_virtual(siblings, nsibling); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto out; - } - - rq = intel_context_create_request(ce); - intel_context_put(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out; - } - - i915_request_get(rq); - i915_request_add(rq); - if (i915_request_wait(rq, 0, timeout) < 0) { - GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n", - __func__, rq->engine->name); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(gt); - err = -EIO; - } - i915_request_put(rq); - -out: - igt_spinner_end(&spin); - if (igt_flush_test(gt->i915)) - err = -EIO; - igt_spinner_fini(&spin); - return err; -} - -static int sliceout_virtual_engine(struct intel_gt *gt, - struct intel_engine_cs **siblings, - unsigned int nsibling) -{ - const long timeout = slice_timeout(siblings[0]); - struct intel_context *ce; - struct i915_request *rq; - struct igt_spinner spin; - unsigned int n; - int err = 0; - - /* - * Virtual requests must allow others a fair timeslice. - */ - - if (igt_spinner_init(&spin, gt)) - return -ENOMEM; - - /* XXX We do not handle oversubscription and fairness with normal rq */ - for (n = 0; n < nsibling; n++) { - ce = intel_execlists_create_virtual(siblings, nsibling); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto out; - } - - rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK); - intel_context_put(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out; - } - - i915_request_add(rq); - } - - for (n = 0; !err && n < nsibling; n++) { - ce = intel_context_create(siblings[n]); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto out; - } - - rq = intel_context_create_request(ce); - intel_context_put(ce); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out; - } - - i915_request_get(rq); - i915_request_add(rq); - if (i915_request_wait(rq, 0, timeout) < 0) { - GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n", - __func__, siblings[n]->name); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(gt); - err = -EIO; - } - i915_request_put(rq); - } - -out: - igt_spinner_end(&spin); - if (igt_flush_test(gt->i915)) - err = -EIO; - igt_spinner_fini(&spin); - return err; -} - -static int live_virtual_slice(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; - unsigned int class; - int err; - - if (intel_uc_uses_guc_submission(&gt->uc)) - return 0; - - for (class = 0; class <= MAX_ENGINE_CLASS; class++) { - unsigned int nsibling; - - nsibling = __select_siblings(gt, class, siblings, - intel_engine_has_timeslices); - if (nsibling < 2) - continue; - - err = slicein_virtual_engine(gt, siblings, nsibling); - if (err) - return err; - - err = sliceout_virtual_engine(gt, siblings, nsibling); - if (err) - return err; - } - - return 0; -} - -static int preserved_virtual_engine(struct intel_gt *gt, - struct intel_engine_cs **siblings, - unsigned int nsibling) -{ - struct i915_request *last = NULL; - struct intel_context *ve; - struct i915_vma *scratch; - struct igt_live_test t; - unsigned int n; - int err = 0; - u32 *cs; - - scratch = create_scratch(siblings[0]->gt); - if (IS_ERR(scratch)) - return PTR_ERR(scratch); - - err = i915_vma_sync(scratch); - if
(err) - goto out_scratch; - - ve = intel_execlists_create_virtual(siblings, nsibling); - if (IS_ERR(ve)) { - err = PTR_ERR(ve); - goto out_scratch; - } - - err = intel_context_pin(ve); - if (err) - goto out_put; - - err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name); - if (err) - goto out_unpin; - - for (n = 0; n < NUM_GPR_DW; n++) { - struct intel_engine_cs *engine = siblings[n % nsibling]; - struct i915_request *rq; - - rq = i915_request_create(ve); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_end; - } - - i915_request_put(last); - last = i915_request_get(rq); - - cs = intel_ring_begin(rq, 8); - if (IS_ERR(cs)) { - i915_request_add(rq); - err = PTR_ERR(cs); - goto out_end; - } - - *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; - *cs++ = CS_GPR(engine, n); - *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32); - *cs++ = 0; - - *cs++ = MI_LOAD_REGISTER_IMM(1); - *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW); - *cs++ = n + 1; - - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - - /* Restrict this request to run on a particular engine */ - rq->execution_mask = engine->mask; - i915_request_add(rq); - } - - if (i915_request_wait(last, 0, HZ / 5) < 0) { - err = -ETIME; - goto out_end; - } - - cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); - if (IS_ERR(cs)) { - err = PTR_ERR(cs); - goto out_end; - } - - for (n = 0; n < NUM_GPR_DW; n++) { - if (cs[n] != n) { - pr_err("Incorrect value[%d] found for GPR[%d]\n", - cs[n], n); - err = -EINVAL; - break; - } - } - - i915_gem_object_unpin_map(scratch->obj); - -out_end: - if (igt_live_test_end(&t)) - err = -EIO; - i915_request_put(last); -out_unpin: - intel_context_unpin(ve); -out_put: - intel_context_put(ve); -out_scratch: - i915_vma_unpin_and_release(&scratch, 0); - return err; -} - -static int live_virtual_preserved(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; - unsigned int class; - - /* - * Check that the context image retains non-privileged (user) registers - * from one engine to the next. For this we check that the CS_GPR - * are preserved. - */ - - if (intel_uc_uses_guc_submission(&gt->uc)) - return 0; - - /* As we use CS_GPR we cannot run before they existed on all engines. */ - if (INTEL_GEN(gt->i915) < 9) - return 0; - - for (class = 0; class <= MAX_ENGINE_CLASS; class++) { - int nsibling, err; - - nsibling = select_siblings(gt, class, siblings); - if (nsibling < 2) - continue; - - err = preserved_virtual_engine(gt, siblings, nsibling); - if (err) - return err; - } - - return 0; -} - -static int bond_virtual_engine(struct intel_gt *gt, - unsigned int class, - struct intel_engine_cs **siblings, - unsigned int nsibling, - unsigned int flags) -#define BOND_SCHEDULE BIT(0) -{ - struct intel_engine_cs *master; - struct i915_request *rq[16]; - enum intel_engine_id id; - struct igt_spinner spin; - unsigned long n; - int err; - - /* - * A set of bonded requests is intended to be run concurrently - * across a number of engines. We use one request per-engine - * and a magic fence to schedule each of the bonded requests - * at the same time. A consequence of our current scheduler is that - * we only move requests to the HW ready queue when the request - * becomes ready, that is when all of its prerequisite fences have - * been signaled. As one of those fences is the master submit fence, - * there is a delay on all secondary fences as the HW may be - * currently busy.
Equally, as all the requests are independent, - * they may have other fences that delay individual request - * submission to HW. Ergo, we do not guarantee that all requests are - * immediately submitted to HW at the same time, just that if the - * rules are abided by, they are ready at the same time as the - * first is submitted. Userspace can embed semaphores in its batch - * to ensure parallel execution of its phases as it requires. - * Though naturally it gets requested that perhaps the scheduler should - * take care of parallel execution, even across preemption events on - * different HW. (The proper answer is of course "lalalala".) - * - * With the submit-fence, we have identified three possible phases - * of synchronisation depending on the master fence: queued (not - * ready), executing, and signaled. The first two are quite simple - * and checked below. However, the signaled master fence handling is - * contentious. Currently we do not distinguish between a signaled - * fence and an expired fence, as once signaled it does not convey - * any information about the previous execution. It may even be freed - * and hence checking later it may not exist at all. Ergo we currently - * do not apply the bonding constraint for an already signaled fence, - * as our expectation is that it should not constrain the secondaries - * and is outside of the scope of the bonded request API (i.e. all - * userspace requests are meant to be running in parallel). As - * it imposes no constraint, and is effectively a no-op, we do not - * check below as normal execution flows are checked extensively above. - * - * XXX Is the degenerate handling of signaled submit fences the - * expected behaviour for userpace? - */ - - GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1); - - if (igt_spinner_init(&spin, gt)) - return -ENOMEM; - - err = 0; - rq[0] = ERR_PTR(-ENOMEM); - for_each_engine(master, gt, id) { - struct i915_sw_fence fence = {}; - struct intel_context *ce; - - if (master->class == class) - continue; - - ce = intel_context_create(master); - if (IS_ERR(ce)) { - err = PTR_ERR(ce); - goto out; - } - - memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq)); - - rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP); - intel_context_put(ce); - if (IS_ERR(rq[0])) { - err = PTR_ERR(rq[0]); - goto out; - } - i915_request_get(rq[0]); - - if (flags & BOND_SCHEDULE) { - onstack_fence_init(&fence); - err = i915_sw_fence_await_sw_fence_gfp(&rq[0]->submit, - &fence, - GFP_KERNEL); - } - - i915_request_add(rq[0]); - if (err < 0) - goto out; - - if (!(flags & BOND_SCHEDULE) && - !igt_wait_for_spinner(&spin, rq[0])) { - err = -EIO; - goto out; - } - - for (n = 0; n < nsibling; n++) { - struct intel_context *ve; - - ve = intel_execlists_create_virtual(siblings, nsibling); - if (IS_ERR(ve)) { - err = PTR_ERR(ve); - onstack_fence_fini(&fence); - goto out; - } - - err = intel_virtual_engine_attach_bond(ve->engine, - master, - siblings[n]); - if (err) { - intel_context_put(ve); - onstack_fence_fini(&fence); - goto out; - } - - err = intel_context_pin(ve); - intel_context_put(ve); - if (err) { - onstack_fence_fini(&fence); - goto out; - } - - rq[n + 1] = i915_request_create(ve); - intel_context_unpin(ve); - if (IS_ERR(rq[n + 1])) { - err = PTR_ERR(rq[n + 1]); - onstack_fence_fini(&fence); - goto out; - } - i915_request_get(rq[n + 1]); - - err = i915_request_await_execution(rq[n + 1], - &rq[0]->fence, - ve->engine->bond_execute); - i915_request_add(rq[n + 1]); - if (err < 0) { - onstack_fence_fini(&fence); - goto out; - } - } - 
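/* - * Sketch of the state at this point (an added note, not part of the - * original patch): rq[0] is spinning, or held back by the on-stack - * fence, and each bonded rq[n + 1] is queued behind rq[0]'s submit - * fence on its own sibling, so releasing the master below must - * release all of the secondaries together. - */ -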
onstack_fence_fini(&fence); - intel_engine_flush_submission(master); - igt_spinner_end(&spin); - - if (i915_request_wait(rq[0], 0, HZ / 10) < 0) { - pr_err("Master request did not execute (on %s)!\n", - rq[0]->engine->name); - err = -EIO; - goto out; - } - - for (n = 0; n < nsibling; n++) { - if (i915_request_wait(rq[n + 1], 0, - MAX_SCHEDULE_TIMEOUT) < 0) { - err = -EIO; - goto out; - } - - if (rq[n + 1]->engine != siblings[n]) { - pr_err("Bonded request did not execute on target engine: expected %s, used %s; master was %s\n", - siblings[n]->name, - rq[n + 1]->engine->name, - rq[0]->engine->name); - err = -EINVAL; - goto out; - } - } - - for (n = 0; !IS_ERR(rq[n]); n++) - i915_request_put(rq[n]); - rq[0] = ERR_PTR(-ENOMEM); - } - -out: - for (n = 0; !IS_ERR(rq[n]); n++) - i915_request_put(rq[n]); - if (igt_flush_test(gt->i915)) - err = -EIO; - - igt_spinner_fini(&spin); - return err; -} - -static int live_virtual_bond(void *arg) -{ - static const struct phase { - const char *name; - unsigned int flags; - } phases[] = { - { "", 0 }, - { "schedule", BOND_SCHEDULE }, - { }, - }; - struct intel_gt *gt = arg; - struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; - unsigned int class; - int err; - - if (intel_uc_uses_guc_submission(&gt->uc)) - return 0; - - for (class = 0; class <= MAX_ENGINE_CLASS; class++) { - const struct phase *p; - int nsibling; - - nsibling = select_siblings(gt, class, siblings); - if (nsibling < 2) - continue; - - for (p = phases; p->name; p++) { - err = bond_virtual_engine(gt, - class, siblings, nsibling, - p->flags); - if (err) { - pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n", - __func__, p->name, class, nsibling, err); - return err; - } - } - } - - return 0; -} - -static int reset_virtual_engine(struct intel_gt *gt, - struct intel_engine_cs **siblings, - unsigned int nsibling) -{ - struct intel_engine_cs *engine; - struct intel_context *ve; - struct igt_spinner spin; - struct i915_request *rq; - unsigned int n; - int err = 0; - - /* - * In order to support offline error capture for fast preempt reset, - * we need to decouple the guilty request and ensure that it and its - * descendents are not executed while the capture is in progress.
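- * In outline (an added sketch of the sequence exercised below): the - * tasklet is disabled, execlists_hold() parks the guilty request and - * its waiters, the engine is reset (the window in which an error - * capture would run), and execlists_unhold() then lets the request be - * resubmitted.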
- */ - - if (igt_spinner_init(&spin, gt)) - return -ENOMEM; - - ve = intel_execlists_create_virtual(siblings, nsibling); - if (IS_ERR(ve)) { - err = PTR_ERR(ve); - goto out_spin; - } - - for (n = 0; n < nsibling; n++) - st_engine_heartbeat_disable(siblings[n]); - - rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK); - if (IS_ERR(rq)) { - err = PTR_ERR(rq); - goto out_heartbeat; - } - i915_request_add(rq); - - if (!igt_wait_for_spinner(&spin, rq)) { - intel_gt_set_wedged(gt); - err = -ETIME; - goto out_heartbeat; - } - - engine = rq->engine; - GEM_BUG_ON(engine == ve->engine); - - /* Take ownership of the reset and tasklet */ - if (test_and_set_bit(I915_RESET_ENGINE + engine->id, - &gt->reset.flags)) { - intel_gt_set_wedged(gt); - err = -EBUSY; - goto out_heartbeat; - } - tasklet_disable(&engine->execlists.tasklet); - - engine->execlists.tasklet.func(engine->execlists.tasklet.data); - GEM_BUG_ON(execlists_active(&engine->execlists) != rq); - - /* Fake a preemption event; failed of course */ - spin_lock_irq(&engine->active.lock); - __unwind_incomplete_requests(engine); - spin_unlock_irq(&engine->active.lock); - GEM_BUG_ON(rq->engine != ve->engine); - - /* Reset the engine while keeping our active request on hold */ - execlists_hold(engine, rq); - GEM_BUG_ON(!i915_request_on_hold(rq)); - - intel_engine_reset(engine, NULL); - GEM_BUG_ON(rq->fence.error != -EIO); - - /* Release our grasp on the engine, letting CS flow again */ - tasklet_enable(&engine->execlists.tasklet); - clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags); - - /* Check that we do not resubmit the held request */ - i915_request_get(rq); - if (!i915_request_wait(rq, 0, HZ / 5)) { - pr_err("%s: on hold request completed!\n", - engine->name); - intel_gt_set_wedged(gt); - err = -EIO; - goto out_rq; - } - GEM_BUG_ON(!i915_request_on_hold(rq)); - - /* But is resubmitted on release */ - execlists_unhold(engine, rq); - if (i915_request_wait(rq, 0, HZ / 5) < 0) { - pr_err("%s: held request did not complete!\n", - engine->name); - intel_gt_set_wedged(gt); - err = -ETIME; - } - -out_rq: - i915_request_put(rq); -out_heartbeat: - for (n = 0; n < nsibling; n++) - st_engine_heartbeat_enable(siblings[n]); - - intel_context_put(ve); -out_spin: - igt_spinner_fini(&spin); - return err; -} - -static int live_virtual_reset(void *arg) -{ - struct intel_gt *gt = arg; - struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; - unsigned int class; - - /* - * Check that we handle a reset event within a virtual engine. - * Only the physical engine is reset, but we have to check the flow - * of the virtual requests around the reset, and make sure it is not - * forgotten.
- */ - - if (intel_uc_uses_guc_submission(&gt->uc)) - return 0; - - if (!intel_has_reset_engine(gt)) - return 0; - - for (class = 0; class <= MAX_ENGINE_CLASS; class++) { - int nsibling, err; - - nsibling = select_siblings(gt, class, siblings); - if (nsibling < 2) - continue; - - err = reset_virtual_engine(gt, siblings, nsibling); - if (err) - return err; - } - - return 0; -} - -int intel_execlists_live_selftests(struct drm_i915_private *i915) -{ - static const struct i915_subtest tests[] = { - SUBTEST(live_sanitycheck), - SUBTEST(live_unlite_switch), - SUBTEST(live_unlite_preempt), - SUBTEST(live_unlite_ring), - SUBTEST(live_pin_rewind), - SUBTEST(live_hold_reset), - SUBTEST(live_error_interrupt), - SUBTEST(live_timeslice_preempt), - SUBTEST(live_timeslice_rewind), - SUBTEST(live_timeslice_queue), - SUBTEST(live_timeslice_nopreempt), - SUBTEST(live_busywait_preempt), - SUBTEST(live_preempt), - SUBTEST(live_late_preempt), - SUBTEST(live_nopreempt), - SUBTEST(live_preempt_cancel), - SUBTEST(live_suppress_self_preempt), - SUBTEST(live_chain_preempt), - SUBTEST(live_preempt_ring), - SUBTEST(live_preempt_gang), - SUBTEST(live_preempt_timeout), - SUBTEST(live_preempt_user), - SUBTEST(live_preempt_smoke), - SUBTEST(live_virtual_engine), - SUBTEST(live_virtual_mask), - SUBTEST(live_virtual_preserved), - SUBTEST(live_virtual_slice), - SUBTEST(live_virtual_bond), - SUBTEST(live_virtual_reset), - }; - - if (!HAS_EXECLISTS(i915)) - return 0; - - if (intel_gt_is_wedged(&i915->gt)) - return 0; - - return intel_gt_live_subtests(tests, &i915->gt); -} - static int emit_semaphore_signal(struct intel_context *ce, void *slot) { const u32 offset = @@ -4775,9 +139,10 @@ static int live_lrc_layout(void *arg) * match the layout saved by HW. */ - lrc = kmalloc(PAGE_SIZE, GFP_KERNEL); + lrc = (u32 *)__get_free_page(GFP_KERNEL); /* requires page alignment */ if (!lrc) return -ENOMEM; + GEM_BUG_ON(offset_in_page(lrc)); err = 0; for_each_engine(engine, gt, id) { @@ -4794,15 +159,12 @@ static int live_lrc_layout(void *arg) } hw += LRC_STATE_OFFSET / sizeof(*hw); - execlists_init_reg_state(memset(lrc, POISON_INUSE, PAGE_SIZE), - engine->kernel_context, - engine, - engine->kernel_context->ring, - true); + __lrc_init_regs(memset(lrc, POISON_INUSE, PAGE_SIZE), - engine->kernel_context, engine, true); dw = 0; do { - u32 lri = hw[dw]; + u32 lri = READ_ONCE(hw[dw]); if (lri == 0) { dw++; @@ -4835,9 +197,11 @@ static int live_lrc_layout(void *arg) dw++; while (lri) { - if (hw[dw] != lrc[dw]) { + u32 offset = READ_ONCE(hw[dw]); + + if (offset != lrc[dw]) { pr_err("%s: Different registers found at dword %d, expected %x, found %x\n", - engine->name, dw, hw[dw], lrc[dw]); + engine->name, dw, offset, lrc[dw]); err = -EINVAL; break; } @@ -4849,7 +213,7 @@ static int live_lrc_layout(void *arg) dw += 2; lri -= 2; } - } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END); + } while (!err && (lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END); if (err) { pr_info("%s: HW register image:\n", engine->name); @@ -4864,7 +228,7 @@ static int live_lrc_layout(void *arg) break; } - kfree(lrc); + free_page((unsigned long)lrc); return err; } @@ -5249,6 +613,10 @@ static int __live_lrc_gpr(struct intel_engine_cs *engine, err = emit_semaphore_signal(engine->kernel_context, slot); if (err) goto err_rq; + + err = wait_for_submit(engine, rq, HZ / 2); + if (err) + goto err_rq; } else { slot[0] = 1; wmb(); @@ -6242,16 +1610,17 @@ static void garbage_reset(struct intel_engine_cs *engine, const unsigned int bit = I915_RESET_ENGINE + engine->id; unsigned long *lock =
&engine->gt->reset.flags; - if (test_and_set_bit(bit, lock)) - return; - - tasklet_disable(&engine->execlists.tasklet); + local_bh_disable(); + if (!test_and_set_bit(bit, lock)) { + tasklet_disable(&engine->execlists.tasklet); - if (!rq->fence.error) - intel_engine_reset(engine, NULL); + if (!rq->fence.error) + __intel_engine_reset_bh(engine, NULL); - tasklet_enable(&engine->execlists.tasklet); - clear_and_wake_up_bit(bit, lock); + tasklet_enable(&engine->execlists.tasklet); + clear_and_wake_up_bit(bit, lock); + } + local_bh_enable(); } static struct i915_request *garbage(struct intel_context *ce, diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c index b25eba50c88e..cf373c72359e 100644 --- a/drivers/gpu/drm/i915/gt/selftest_mocs.c +++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c @@ -5,6 +5,7 @@ */ #include "gt/intel_engine_pm.h" +#include "gt/intel_gpu_commands.h" #include "i915_selftest.h" #include "gem/selftests/mock_context.h" @@ -56,33 +57,6 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin) return err; } -static struct i915_vma *create_scratch(struct intel_gt *gt) -{ - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - int err; - - obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED); - - vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL); - if (IS_ERR(vma)) { - i915_gem_object_put(obj); - return vma; - } - - err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); - if (err) { - i915_gem_object_put(obj); - return ERR_PTR(err); - } - - return vma; -} - static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt) { struct drm_i915_mocs_table table; @@ -101,7 +75,7 @@ static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt) if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS)) arg->mocs = table; - arg->scratch = create_scratch(gt); + arg->scratch = __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE); if (IS_ERR(arg->scratch)) return PTR_ERR(arg->scratch); @@ -125,7 +99,7 @@ static void live_mocs_fini(struct live_mocs *arg) static int read_regs(struct i915_request *rq, u32 addr, unsigned int count, - uint32_t *offset) + u32 *offset) { unsigned int i; u32 *cs; @@ -153,7 +127,7 @@ static int read_regs(struct i915_request *rq, static int read_mocs_table(struct i915_request *rq, const struct drm_i915_mocs_table *table, - uint32_t *offset) + u32 *offset) { u32 addr; @@ -167,7 +141,7 @@ static int read_mocs_table(struct i915_request *rq, static int read_l3cc_table(struct i915_request *rq, const struct drm_i915_mocs_table *table, - uint32_t *offset) + u32 *offset) { u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0)); @@ -176,7 +150,7 @@ static int read_l3cc_table(struct i915_request *rq, static int check_mocs_table(struct intel_engine_cs *engine, const struct drm_i915_mocs_table *table, - uint32_t **vaddr) + u32 **vaddr) { unsigned int i; u32 expect; @@ -205,7 +179,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset) static int check_l3cc_table(struct intel_engine_cs *engine, const struct drm_i915_mocs_table *table, - uint32_t **vaddr) + u32 **vaddr) { /* Can we read the MCR range 0xb00 directly? See intel_workarounds!
*/ u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0)); @@ -361,29 +335,34 @@ static int active_engine_reset(struct intel_context *ce, static int __live_mocs_reset(struct live_mocs *mocs, struct intel_context *ce) { + struct intel_gt *gt = ce->engine->gt; int err; - err = intel_engine_reset(ce->engine, "mocs"); - if (err) - return err; + if (intel_has_reset_engine(gt)) { + err = intel_engine_reset(ce->engine, "mocs"); + if (err) + return err; - err = check_mocs_engine(mocs, ce); - if (err) - return err; + err = check_mocs_engine(mocs, ce); + if (err) + return err; - err = active_engine_reset(ce, "mocs"); - if (err) - return err; + err = active_engine_reset(ce, "mocs"); + if (err) + return err; - err = check_mocs_engine(mocs, ce); - if (err) - return err; + err = check_mocs_engine(mocs, ce); + if (err) + return err; + } - intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs"); + if (intel_has_gpu_reset(gt)) { + intel_gt_reset(gt, ce->engine->mask, "mocs"); - err = check_mocs_engine(mocs, ce); - if (err) - return err; + err = check_mocs_engine(mocs, ce); + if (err) + return err; + } return 0; } @@ -398,9 +377,6 @@ static int live_mocs_reset(void *arg) /* Check the mocs setup is retained over per-engine and global resets */ - if (!intel_has_reset_engine(gt)) - return 0; - err = live_mocs_init(&mocs, gt); if (err) return err; diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c index 64ef5ee5decf..61abc0556601 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rc6.c +++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c @@ -6,6 +6,7 @@ #include "intel_context.h" #include "intel_engine_pm.h" +#include "intel_gpu_commands.h" #include "intel_gt_requests.h" #include "intel_ring.h" #include "selftest_rc6.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index ef5aeebbeeb0..b7befcfbdcde 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -9,6 +9,7 @@ #include "i915_memcpy.h" #include "i915_selftest.h" +#include "intel_gpu_commands.h" #include "selftests/igt_reset.h" #include "selftests/igt_atomic.h" #include "selftests/igt_spinner.h" @@ -95,10 +96,10 @@ __igt_reset_stolen(struct intel_gt *gt, if (!__drm_mm_interval_first(&gt->i915->mm.stolen, page << PAGE_SHIFT, ((page + 1) << PAGE_SHIFT) - 1)) - memset32(s, STACK_MAGIC, PAGE_SIZE / sizeof(u32)); + memset_io(s, STACK_MAGIC, PAGE_SIZE); - in = s; - if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE)) + in = (void __force *)s; + if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE)) in = tmp; crc[page] = crc32_le(0, in, PAGE_SIZE); @@ -133,8 +134,8 @@ __igt_reset_stolen(struct intel_gt *gt, ggtt->error_capture.start, PAGE_SIZE); - in = s; - if (i915_memcpy_from_wc(tmp, s, PAGE_SIZE)) + in = (void __force *)s; + if (i915_memcpy_from_wc(tmp, in, PAGE_SIZE)) in = tmp; x = crc32_le(0, in, PAGE_SIZE); @@ -326,11 +327,16 @@ static int igt_atomic_engine_reset(void *arg) for (p = igt_atomic_phases; p->name; p++) { GEM_TRACE("intel_engine_reset(%s) under %s\n", engine->name, p->name); + if (strcmp(p->name, "softirq")) + local_bh_disable(); p->critical_section_begin(); - err = intel_engine_reset(engine, NULL); + err = __intel_engine_reset_bh(engine, NULL); p->critical_section_end(); + if (strcmp(p->name, "softirq")) + local_bh_enable(); + if (err) { pr_err("intel_engine_reset(%s) failed under %s\n", engine->name, p->name); @@ -340,6 +346,7 @@ static int igt_atomic_engine_reset(void *arg) intel_engine_pm_put(engine);
tasklet_enable(&engine->execlists.tasklet); + tasklet_hi_schedule(&engine->execlists.tasklet); if (err) break; } diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index aa5675ecb5cc..967641fee42a 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -185,7 +185,10 @@ static u8 rps_set_check(struct intel_rps *rps, u8 freq) { mutex_lock(&rps->lock); GEM_BUG_ON(!intel_rps_is_active(rps)); - intel_rps_set(rps, freq); + if (wait_for(!intel_rps_set(rps, freq), 50)) { + mutex_unlock(&rps->lock); + return 0; + } GEM_BUG_ON(rps->last_freq != freq); mutex_unlock(&rps->lock); diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index 2edf2b15885f..6f3a3687ef0f 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -9,6 +9,7 @@ #include "intel_context.h" #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" +#include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_requests.h" #include "intel_ring.h" @@ -1090,12 +1091,6 @@ static int live_hwsp_read(void *arg) } count++; - if (8 * watcher[1].rq->ring->emit > - 3 * watcher[1].rq->ring->size) { - i915_request_put(rq); - break; - } - /* Flush the timeline before manually wrapping again */ if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, @@ -1104,9 +1099,14 @@ static int live_hwsp_read(void *arg) i915_request_put(rq); goto out; } - retire_requests(tl); i915_request_put(rq); + + /* Single requests are limited to half a ring at most */ + if (8 * watcher[1].rq->ring->emit > + 3 * watcher[1].rq->ring->size) + break; + } while (!__igt_timeout(end_time, NULL)); WRITE_ONCE(*(u32 *)tl->hwsp_seqno, 0xdeadbeef); diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 61a0532d0f3d..2070b91cb607 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -95,8 +95,9 @@ reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists) } static struct drm_i915_gem_object * -read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) +read_nonprivs(struct intel_context *ce) { + struct intel_engine_cs *engine = ce->engine; const u32 base = engine->mmio_base; struct drm_i915_gem_object *result; struct i915_request *rq; @@ -130,7 +131,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) if (err) goto err_obj; - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_pin; @@ -145,7 +146,7 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine) goto err_req; srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; - if (INTEL_GEN(ctx->i915) >= 8) + if (INTEL_GEN(engine->i915) >= 8) srm++; cs = intel_ring_begin(rq, 4 * RING_MAX_NONPRIV_SLOTS); @@ -200,16 +201,16 @@ print_results(const struct intel_engine_cs *engine, const u32 *results) } } -static int check_whitelist(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +static int check_whitelist(struct intel_context *ce) { + struct intel_engine_cs *engine = ce->engine; struct drm_i915_gem_object *results; struct intel_wedge_me wedge; u32 *vaddr; int err; int i; - results = read_nonprivs(ctx, engine); + results = read_nonprivs(ce); if (IS_ERR(results)) return PTR_ERR(results); @@ -293,8 +294,7 @@ static int check_whitelist_across_reset(struct 
intel_engine_cs *engine, int (*reset)(struct intel_engine_cs *), const char *name) { - struct drm_i915_private *i915 = engine->i915; - struct i915_gem_context *ctx, *tmp; + struct intel_context *ce, *tmp; struct igt_spinner spin; intel_wakeref_t wakeref; int err; @@ -302,15 +302,15 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, pr_info("Checking %d whitelisted registers on %s (RING_NONPRIV) [%s]\n", engine->whitelist.count, engine->name, name); - ctx = kernel_context(i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); err = igt_spinner_init(&spin, engine->gt); if (err) goto out_ctx; - err = check_whitelist(ctx, engine); + err = check_whitelist(ce); if (err) { pr_err("Invalid whitelist *before* %s reset!\n", name); goto out_spin; @@ -330,22 +330,22 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, goto out_spin; } - err = check_whitelist(ctx, engine); + err = check_whitelist(ce); if (err) { pr_err("Whitelist not preserved in context across %s reset!\n", name); goto out_spin; } - tmp = kernel_context(i915); + tmp = intel_context_create(engine); if (IS_ERR(tmp)) { err = PTR_ERR(tmp); goto out_spin; } - kernel_context_close(ctx); - ctx = tmp; + intel_context_put(ce); + ce = tmp; - err = check_whitelist(ctx, engine); + err = check_whitelist(ce); if (err) { pr_err("Invalid whitelist *after* %s reset in fresh context!\n", name); @@ -355,7 +355,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, out_spin: igt_spinner_fini(&spin); out_ctx: - kernel_context_close(ctx); + intel_context_put(ce); return err; } @@ -486,10 +486,11 @@ static int check_dirty_whitelist(struct intel_context *ce) struct intel_engine_cs *engine = ce->engine; struct i915_vma *scratch; struct i915_vma *batch; - int err = 0, i, v; + int err = 0, i, v, sz; u32 *cs, *results; - scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1); + sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32); + scratch = __vm_create_scratch_for_read(ce->vm, sz); if (IS_ERR(scratch)) return PTR_ERR(scratch); @@ -786,15 +787,15 @@ out: return err; } -static int read_whitelisted_registers(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, +static int read_whitelisted_registers(struct intel_context *ce, struct i915_vma *results) { + struct intel_engine_cs *engine = ce->engine; struct i915_request *rq; int i, err = 0; u32 srm, *cs; - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) return PTR_ERR(rq); @@ -807,7 +808,7 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx, goto err_req; srm = MI_STORE_REGISTER_MEM; - if (INTEL_GEN(ctx->i915) >= 8) + if (INTEL_GEN(engine->i915) >= 8) srm++; cs = intel_ring_begin(rq, 4 * engine->whitelist.count); @@ -834,18 +835,15 @@ err_req: return request_add_sync(rq, err); } -static int scrub_whitelisted_registers(struct i915_gem_context *ctx, - struct intel_engine_cs *engine) +static int scrub_whitelisted_registers(struct intel_context *ce) { - struct i915_address_space *vm; + struct intel_engine_cs *engine = ce->engine; struct i915_request *rq; struct i915_vma *batch; int i, err = 0; u32 *cs; - vm = i915_gem_context_get_vm_rcu(ctx); - batch = create_batch(vm); - i915_vm_put(vm); + batch = create_batch(ce->vm); if (IS_ERR(batch)) return PTR_ERR(batch); @@ -873,7 +871,7 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx, i915_gem_object_flush_map(batch->obj); 
intel_gt_chipset_flush(engine->gt); - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_unpin; @@ -1016,7 +1014,6 @@ static int live_isolated_whitelist(void *arg) { struct intel_gt *gt = arg; struct { - struct i915_gem_context *ctx; struct i915_vma *scratch[2]; } client[2] = {}; struct intel_engine_cs *engine; @@ -1032,61 +1029,57 @@ static int live_isolated_whitelist(void *arg) return 0; for (i = 0; i < ARRAY_SIZE(client); i++) { - struct i915_address_space *vm; - struct i915_gem_context *c; - - c = kernel_context(gt->i915); - if (IS_ERR(c)) { - err = PTR_ERR(c); - goto err; - } - - vm = i915_gem_context_get_vm_rcu(c); - - client[i].scratch[0] = create_scratch(vm, 1024); + client[i].scratch[0] = + __vm_create_scratch_for_read(gt->vm, 4096); if (IS_ERR(client[i].scratch[0])) { err = PTR_ERR(client[i].scratch[0]); - i915_vm_put(vm); - kernel_context_close(c); goto err; } - client[i].scratch[1] = create_scratch(vm, 1024); + client[i].scratch[1] = + __vm_create_scratch_for_read(gt->vm, 4096); if (IS_ERR(client[i].scratch[1])) { err = PTR_ERR(client[i].scratch[1]); i915_vma_unpin_and_release(&client[i].scratch[0], 0); - i915_vm_put(vm); - kernel_context_close(c); goto err; } - - client[i].ctx = c; - i915_vm_put(vm); } for_each_engine(engine, gt, id) { + struct intel_context *ce[2]; + if (!engine->kernel_context->vm) continue; if (!whitelist_writable_count(engine)) continue; + ce[0] = intel_context_create(engine); + if (IS_ERR(ce[0])) { + err = PTR_ERR(ce[0]); + break; + } + ce[1] = intel_context_create(engine); + if (IS_ERR(ce[1])) { + err = PTR_ERR(ce[1]); + intel_context_put(ce[0]); + break; + } + /* Read default values */ - err = read_whitelisted_registers(client[0].ctx, engine, - client[0].scratch[0]); + err = read_whitelisted_registers(ce[0], client[0].scratch[0]); if (err) - goto err; + goto err_ce; /* Try to overwrite registers (should only affect ctx0) */ - err = scrub_whitelisted_registers(client[0].ctx, engine); + err = scrub_whitelisted_registers(ce[0]); if (err) - goto err; + goto err_ce; /* Read values from ctx1, we expect these to be defaults */ - err = read_whitelisted_registers(client[1].ctx, engine, - client[1].scratch[0]); + err = read_whitelisted_registers(ce[1], client[1].scratch[0]); if (err) - goto err; + goto err_ce; /* Verify that both reads return the same default values */ err = check_whitelisted_registers(engine, @@ -1094,31 +1087,29 @@ static int live_isolated_whitelist(void *arg) client[1].scratch[0], result_eq); if (err) - goto err; + goto err_ce; /* Read back the updated values in ctx0 */ - err = read_whitelisted_registers(client[0].ctx, engine, - client[0].scratch[1]); + err = read_whitelisted_registers(ce[0], client[0].scratch[1]); if (err) - goto err; + goto err_ce; /* User should be granted privilege to overwhite regs */ err = check_whitelisted_registers(engine, client[0].scratch[0], client[0].scratch[1], result_neq); +err_ce: + intel_context_put(ce[1]); + intel_context_put(ce[0]); if (err) - goto err; + break; } err: for (i = 0; i < ARRAY_SIZE(client); i++) { - if (!client[i].ctx) - break; - i915_vma_unpin_and_release(&client[i].scratch[1], 0); i915_vma_unpin_and_release(&client[i].scratch[0], 0); - kernel_context_close(client[i].ctx); } if (igt_flush_test(gt->i915)) @@ -1128,18 +1119,21 @@ err: } static bool -verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists, +verify_wa_lists(struct intel_gt *gt, struct wa_lists *lists, const char *str) { - struct drm_i915_private 
*i915 = ctx->i915; - struct i915_gem_engines_iter it; - struct intel_context *ce; + struct intel_engine_cs *engine; + enum intel_engine_id id; bool ok = true; - ok &= wa_list_verify(&i915->uncore, &lists->gt_wa_list, str); + ok &= wa_list_verify(gt->uncore, &lists->gt_wa_list, str); + + for_each_engine(engine, gt, id) { + struct intel_context *ce; - for_each_gem_engine(ce, i915_gem_context_engines(ctx), it) { - enum intel_engine_id id = ce->engine->id; + ce = intel_context_create(engine); + if (IS_ERR(ce)) + return false; ok &= engine_wa_list_verify(ce, &lists->engine[id].wa_list, @@ -1148,6 +1142,8 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists, ok &= engine_wa_list_verify(ce, &lists->engine[id].ctx_wa_list, str) == 0; + + intel_context_put(ce); } return ok; @@ -1157,7 +1153,6 @@ static int live_gpu_reset_workarounds(void *arg) { struct intel_gt *gt = arg; - struct i915_gem_context *ctx; intel_wakeref_t wakeref; struct wa_lists lists; bool ok; @@ -1165,12 +1160,6 @@ live_gpu_reset_workarounds(void *arg) if (!intel_has_gpu_reset(gt)) return 0; - ctx = kernel_context(gt->i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - - i915_gem_context_lock_engines(ctx); - pr_info("Verifying after GPU reset...\n"); igt_global_reset_lock(gt); @@ -1178,17 +1167,15 @@ live_gpu_reset_workarounds(void *arg) reference_lists_init(gt, &lists); - ok = verify_wa_lists(ctx, &lists, "before reset"); + ok = verify_wa_lists(gt, &lists, "before reset"); if (!ok) goto out; intel_gt_reset(gt, ALL_ENGINES, "live_workarounds"); - ok = verify_wa_lists(ctx, &lists, "after reset"); + ok = verify_wa_lists(gt, &lists, "after reset"); out: - i915_gem_context_unlock_engines(ctx); - kernel_context_close(ctx); reference_lists_fini(gt, &lists); intel_runtime_pm_put(gt->uncore->rpm, wakeref); igt_global_reset_unlock(gt); @@ -1200,8 +1187,8 @@ static int live_engine_reset_workarounds(void *arg) { struct intel_gt *gt = arg; - struct i915_gem_engines_iter it; - struct i915_gem_context *ctx; + struct intel_engine_cs *engine; + enum intel_engine_id id; struct intel_context *ce; struct igt_spinner spin; struct i915_request *rq; @@ -1212,30 +1199,30 @@ live_engine_reset_workarounds(void *arg) if (!intel_has_reset_engine(gt)) return 0; - ctx = kernel_context(gt->i915); - if (IS_ERR(ctx)) - return PTR_ERR(ctx); - igt_global_reset_lock(gt); wakeref = intel_runtime_pm_get(gt->uncore->rpm); reference_lists_init(gt, &lists); - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { - struct intel_engine_cs *engine = ce->engine; + for_each_engine(engine, gt, id) { bool ok; pr_info("Verifying after %s reset...\n", engine->name); + ce = intel_context_create(engine); + if (IS_ERR(ce)) { + ret = PTR_ERR(ce); + break; + } - ok = verify_wa_lists(ctx, &lists, "before reset"); + ok = verify_wa_lists(gt, &lists, "before reset"); if (!ok) { ret = -ESRCH; goto err; } - intel_engine_reset(engine, "live_workarounds"); + intel_engine_reset(engine, "live_workarounds:idle"); - ok = verify_wa_lists(ctx, &lists, "after idle reset"); + ok = verify_wa_lists(gt, &lists, "after idle reset"); if (!ok) { ret = -ESRCH; goto err; @@ -1259,23 +1246,26 @@ live_engine_reset_workarounds(void *arg) goto err; } - intel_engine_reset(engine, "live_workarounds"); + intel_engine_reset(engine, "live_workarounds:active"); igt_spinner_end(&spin); igt_spinner_fini(&spin); - ok = verify_wa_lists(ctx, &lists, "after busy reset"); + ok = verify_wa_lists(gt, &lists, "after busy reset"); if (!ok) { ret = -ESRCH; goto err; } - } + err: - 
i915_gem_context_unlock_engines(ctx); + intel_context_put(ce); + if (ret) + break; + } + reference_lists_fini(gt, &lists); intel_runtime_pm_put(gt->uncore->rpm, wakeref); igt_global_reset_unlock(gt); - kernel_context_close(ctx); igt_flush_test(gt->i915); diff --git a/drivers/gpu/drm/i915/gt/shmem_utils.c b/drivers/gpu/drm/i915/gt/shmem_utils.c index f011ea42487e..5982b62f913d 100644 --- a/drivers/gpu/drm/i915/gt/shmem_utils.c +++ b/drivers/gpu/drm/i915/gt/shmem_utils.c @@ -73,7 +73,7 @@ void *shmem_pin_map(struct file *file) mapping_set_unevictable(file->f_mapping); return vaddr; err_page: - while (--i >= 0) + while (i--) put_page(pages[i]); kvfree(pages); return NULL; @@ -103,10 +103,13 @@ static int __shmem_rw(struct file *file, loff_t off, return PTR_ERR(page); vaddr = kmap(page); - if (write) + if (write) { memcpy(vaddr + offset_in_page(off), ptr, this); - else + set_page_dirty(page); + } else { memcpy(ptr, vaddr + offset_in_page(off), this); + } + mark_page_accessed(page); kunmap(page); put_page(page); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 2a343a977987..4545e90e3bf1 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -579,20 +579,8 @@ int intel_guc_reset_engine(struct intel_guc *guc, */ int intel_guc_resume(struct intel_guc *guc) { - u32 action[] = { - INTEL_GUC_ACTION_EXIT_S_STATE, - GUC_POWER_D0, - }; - - /* - * If GuC communication is enabled but submission is not supported, - * we do not need to resume the GuC but we do need to enable the - * GuC communication on resume (above). - */ - if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc)) - return 0; - - return intel_guc_send(guc, action, ARRAY_SIZE(action)); + /* XXX: to be implemented with submission interface rework */ + return 0; } /** diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index e84ab67b317d..bc2ba7d0626c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -47,13 +47,6 @@ struct intel_guc { struct i915_vma *stage_desc_pool; void *stage_desc_pool_vaddr; - struct i915_vma *workqueue; - void *workqueue_vaddr; - spinlock_t wq_lock; - - struct i915_vma *proc_desc; - void *proc_desc_vaddr; - /* Control params for fw initialization */ u32 params[GUC_CTL_MAX_DWORDS]; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 5212ff844292..17526717368c 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -4,6 +4,7 @@ */ #include "gt/intel_gt.h" +#include "gt/intel_lrc.h" #include "intel_guc_ads.h" #include "intel_uc.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c index f9d0907ea1a5..2270d6b3b272 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c @@ -76,7 +76,6 @@ static inline bool guc_ready(struct intel_uncore *uncore, u32 *status) static int guc_wait_ucode(struct intel_uncore *uncore) { - struct drm_device *drm = &uncore->i915->drm; u32 status; int ret; @@ -89,11 +88,11 @@ static int guc_wait_ucode(struct intel_uncore *uncore) * attempt the ucode load again if this happens.) 
*/ ret = wait_for(guc_ready(uncore, &status), 100); - DRM_DEBUG_DRIVER("GuC status %#x\n", status); - if (ret) { - drm_err(drm, "GuC load failed: status = 0x%08X\n", status); - drm_err(drm, "GuC load failed: status: Reset = %d, " + struct drm_device *drm = &uncore->i915->drm; + + drm_dbg(drm, "GuC load failed: status = 0x%08X\n", status); + drm_dbg(drm, "GuC load failed: status: Reset = %d, " "BootROM = 0x%02X, UKernel = 0x%02X, " "MIA = 0x%02X, Auth = 0x%02X\n", REG_FIELD_GET(GS_MIA_IN_RESET, status), @@ -103,12 +102,12 @@ static int guc_wait_ucode(struct intel_uncore *uncore) REG_FIELD_GET(GS_AUTH_STATUS_MASK, status)); if ((status & GS_BOOTROM_MASK) == GS_BOOTROM_RSA_FAILED) { - drm_err(drm, "GuC firmware signature verification failed\n"); + drm_dbg(drm, "GuC firmware signature verification failed\n"); ret = -ENOEXEC; } if ((status & GS_UKERNEL_MASK) == GS_UKERNEL_EXCEPTION) { - drm_err(drm, "GuC firmware exception. EIP: %#x\n", + drm_dbg(drm, "GuC firmware exception. EIP: %#x\n", intel_uncore_read(uncore, SOFT_SCRATCH(13))); ret = -ENXIO; } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index 9bbe8a795cb8..c92f2c056db4 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -134,7 +134,7 @@ static int remove_buf_file_callback(struct dentry *dentry) } /* relay channel callbacks */ -static struct rchan_callbacks relay_callbacks = { +static const struct rchan_callbacks relay_callbacks = { .subbuf_start = subbuf_start_callback, .create_buf_file = create_buf_file_callback, .remove_buf_file = remove_buf_file_callback, diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index fdfeb4b9b0f5..23dc0aeaa0ab 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -6,11 +6,14 @@ #include <linux/circ_buf.h> #include "gem/i915_gem_context.h" +#include "gt/gen8_engine_cs.h" +#include "gt/intel_breadcrumbs.h" #include "gt/intel_context.h" #include "gt/intel_engine_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" -#include "gt/intel_lrc_reg.h" +#include "gt/intel_lrc.h" +#include "gt/intel_mocs.h" #include "gt/intel_ring.h" #include "intel_guc_submission.h" @@ -54,6 +57,8 @@ * */ +#define GUC_REQUEST_SIZE 64 /* bytes */ + static inline struct i915_priolist *to_priolist(struct rb_node *rb) { return rb_entry(rb, struct i915_priolist, node); @@ -66,58 +71,6 @@ static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id) return &base[id]; } -static int guc_workqueue_create(struct intel_guc *guc) -{ - return intel_guc_allocate_and_map_vma(guc, GUC_WQ_SIZE, &guc->workqueue, - &guc->workqueue_vaddr); -} - -static void guc_workqueue_destroy(struct intel_guc *guc) -{ - i915_vma_unpin_and_release(&guc->workqueue, I915_VMA_RELEASE_MAP); -} - -/* - * Initialise the process descriptor shared with the GuC firmware. 
- */ -static int guc_proc_desc_create(struct intel_guc *guc) -{ - const u32 size = PAGE_ALIGN(sizeof(struct guc_process_desc)); - - return intel_guc_allocate_and_map_vma(guc, size, &guc->proc_desc, - &guc->proc_desc_vaddr); -} - -static void guc_proc_desc_destroy(struct intel_guc *guc) -{ - i915_vma_unpin_and_release(&guc->proc_desc, I915_VMA_RELEASE_MAP); -} - -static void guc_proc_desc_init(struct intel_guc *guc) -{ - struct guc_process_desc *desc; - - desc = memset(guc->proc_desc_vaddr, 0, sizeof(*desc)); - - /* - * XXX: pDoorbell and WQVBaseAddress are pointers in process address - * space for ring3 clients (set them as in mmap_ioctl) or kernel - * space for kernel clients (map on demand instead? May make debug - * easier to have it mapped). - */ - desc->wq_base_addr = 0; - desc->db_base_addr = 0; - - desc->wq_size_bytes = GUC_WQ_SIZE; - desc->wq_status = WQ_STATUS_ACTIVE; - desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL; -} - -static void guc_proc_desc_fini(struct intel_guc *guc) -{ - memset(guc->proc_desc_vaddr, 0, sizeof(struct guc_process_desc)); -} - static int guc_stage_desc_pool_create(struct intel_guc *guc) { u32 size = PAGE_ALIGN(sizeof(struct guc_stage_desc) * @@ -153,8 +106,6 @@ static void guc_stage_desc_init(struct intel_guc *guc) desc->stage_id = 0; desc->priority = GUC_CLIENT_PRIORITY_KMD_NORMAL; - desc->process_desc = intel_guc_ggtt_offset(guc, guc->proc_desc); - desc->wq_addr = intel_guc_ggtt_offset(guc, guc->workqueue); desc->wq_size = GUC_WQ_SIZE; } @@ -166,62 +117,9 @@ static void guc_stage_desc_fini(struct intel_guc *guc) memset(desc, 0, sizeof(*desc)); } -/* Construct a Work Item and append it to the GuC's Work Queue */ -static void guc_wq_item_append(struct intel_guc *guc, - u32 target_engine, u32 context_desc, - u32 ring_tail, u32 fence_id) -{ - /* wqi_len is in DWords, and does not include the one-word header */ - const size_t wqi_size = sizeof(struct guc_wq_item); - const u32 wqi_len = wqi_size / sizeof(u32) - 1; - struct guc_process_desc *desc = guc->proc_desc_vaddr; - struct guc_wq_item *wqi; - u32 wq_off; - - lockdep_assert_held(&guc->wq_lock); - - /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we - * should not have the case where structure wqi is across page, neither - * wrapped to the beginning. This simplifies the implementation below. - * - * XXX: if not the case, we need save data to a temp wqi and copy it to - * workqueue buffer dw by dw. - */ - BUILD_BUG_ON(wqi_size != 16); - - /* We expect the WQ to be active if we're appending items to it */ - GEM_BUG_ON(desc->wq_status != WQ_STATUS_ACTIVE); - - /* Free space is guaranteed. 
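To illustrate with the numbers above (an added note): each item is wqi_size == 16 bytes, the buffer is two whole pages and tail stays 16-byte aligned, so the CIRC_SPACE() assertion below implies room for one full item, which can neither wrap the buffer nor straddle a page.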
*/ - wq_off = READ_ONCE(desc->tail); - GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head), - GUC_WQ_SIZE) < wqi_size); - GEM_BUG_ON(wq_off & (wqi_size - 1)); - - wqi = guc->workqueue_vaddr + wq_off; - - /* Now fill in the 4-word work queue item */ - wqi->header = WQ_TYPE_INORDER | - (wqi_len << WQ_LEN_SHIFT) | - (target_engine << WQ_TARGET_SHIFT) | - WQ_NO_WCFLUSH_WAIT; - wqi->context_desc = context_desc; - wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT; - GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX); - wqi->fence_id = fence_id; - - /* Make the update visible to GuC */ - WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1)); -} - static void guc_add_request(struct intel_guc *guc, struct i915_request *rq) { - struct intel_engine_cs *engine = rq->engine; - u32 ctx_desc = rq->context->lrc.ccid; - u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64); - - guc_wq_item_append(guc, engine->guc_id, ctx_desc, - ring_tail, rq->fence.seqno); + /* Leaving stub as this function will be used in future patches */ } /* @@ -244,16 +142,12 @@ static void guc_submit(struct intel_engine_cs *engine, { struct intel_guc *guc = &engine->gt->uc.guc; - spin_lock(&guc->wq_lock); - do { struct i915_request *rq = *out++; flush_ggtt_writes(rq->ring->vma); guc_add_request(guc, rq); } while (out != end); - - spin_unlock(&guc->wq_lock); } static inline int rq_prio(const struct i915_request *rq) @@ -388,17 +282,26 @@ static void guc_reset_prepare(struct intel_engine_cs *engine) __tasklet_disable_sync_once(&execlists->tasklet); } -static void -cancel_port_requests(struct intel_engine_execlists * const execlists) +static void guc_reset_state(struct intel_context *ce, + struct intel_engine_cs *engine, + u32 head, + bool scrub) { - struct i915_request * const *port, *rq; + GEM_BUG_ON(!intel_context_is_pinned(ce)); - /* Note we are only using the inflight and not the pending queue */ + /* + * We want a simple context + ring to execute the breadcrumb update. + * We cannot rely on the context being intact across the GPU hang, + * so clear it and rebuild just what we need for the breadcrumb. + * All pending requests for this context will be zapped, and any + * future request will be after userspace has had the opportunity + * to recreate its own state. + */ + if (scrub) + lrc_init_regs(ce, engine, true); - for (port = execlists->active; (rq = *port); port++) - schedule_out(rq); - execlists->active = - memset(execlists->inflight, 0, sizeof(execlists->inflight)); + /* Rerun the request; its payload has been neutered (if guilty). */ + lrc_update_regs(ce, engine, head); } static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled) @@ -409,8 +312,6 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled) spin_lock_irqsave(&engine->active.lock, flags); - cancel_port_requests(execlists); - /* Push back any incomplete requests for replay after the reset. */ rq = execlists_unwind_incomplete_requests(execlists); if (!rq) @@ -420,7 +321,7 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled) stalled = false; __i915_request_reset(rq, stalled); - intel_lr_context_reset(engine, rq->context, rq->head, stalled); + guc_reset_state(rq->context, engine, rq->head, stalled); out_unlock: spin_unlock_irqrestore(&engine->active.lock, flags); @@ -451,9 +352,6 @@ static void guc_reset_cancel(struct intel_engine_cs *engine) */ spin_lock_irqsave(&engine->active.lock, flags); - /* Cancel the requests on the HW and clear the ELSP tracker. 
*/ - cancel_port_requests(execlists); - /* Mark all executing requests as skipped. */ list_for_each_entry(rq, &engine->active.requests, sched.link) { i915_request_set_error_once(rq, -EIO); @@ -497,12 +395,6 @@ static void guc_reset_finish(struct intel_engine_cs *engine) } /* - * Everything below here is concerned with setup & teardown, and is - * therefore not part of the somewhat time-critical batch-submission - * path of guc_submit() above. - */ - -/* * Set up the memory resources to be shared with the GuC (via the GGTT) * at firmware loading time. */ @@ -522,30 +414,12 @@ int intel_guc_submission_init(struct intel_guc *guc) */ GEM_BUG_ON(!guc->stage_desc_pool); - ret = guc_workqueue_create(guc); - if (ret) - goto err_pool; - - ret = guc_proc_desc_create(guc); - if (ret) - goto err_workqueue; - - spin_lock_init(&guc->wq_lock); - return 0; - -err_workqueue: - guc_workqueue_destroy(guc); -err_pool: - guc_stage_desc_pool_destroy(guc); - return ret; } void intel_guc_submission_fini(struct intel_guc *guc) { if (guc->stage_desc_pool) { - guc_proc_desc_destroy(guc); - guc_workqueue_destroy(guc); guc_stage_desc_pool_destroy(guc); } } @@ -576,33 +450,186 @@ static void guc_interrupts_release(struct intel_gt *gt) intel_uncore_rmw(uncore, GEN11_VCS_VECS_INTR_ENABLE, 0, dmask); } -static void guc_set_default_submission(struct intel_engine_cs *engine) +static int guc_context_alloc(struct intel_context *ce) +{ + return lrc_alloc(ce, ce->engine); +} + +static int guc_context_pre_pin(struct intel_context *ce, + struct i915_gem_ww_ctx *ww, + void **vaddr) +{ + return lrc_pre_pin(ce, ce->engine, ww, vaddr); +} + +static int guc_context_pin(struct intel_context *ce, void *vaddr) { + return lrc_pin(ce, ce->engine, vaddr); +} + +static const struct intel_context_ops guc_context_ops = { + .alloc = guc_context_alloc, + + .pre_pin = guc_context_pre_pin, + .pin = guc_context_pin, + .unpin = lrc_unpin, + .post_unpin = lrc_post_unpin, + + .enter = intel_context_enter_engine, + .exit = intel_context_exit_engine, + + .reset = lrc_reset, + .destroy = lrc_destroy, +}; + +static int guc_request_alloc(struct i915_request *request) +{ + int ret; + + GEM_BUG_ON(!intel_context_is_pinned(request->context)); + /* - * We inherit a bunch of functions from execlists that we'd like - * to keep using: - * - * engine->submit_request = execlists_submit_request; - * engine->cancel_requests = execlists_cancel_requests; - * engine->schedule = execlists_schedule; + * Flush enough space to reduce the likelihood of waiting after + * we start building the request - in which case we will just + * have to repeat work. + */ + request->reserved_space += GUC_REQUEST_SIZE; + + /* + * Note that after this point, we have committed to using + * this request as it is being used to both track the + * state of engine initialisation and liveness of the + * golden renderstate above. Think twice before you try + * to cancel/unwind this request now. + */ + + /* Unconditionally invalidate GPU caches and TLBs. 
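(Editorial aside: guc_request_alloc() below follows the driver's usual reserve/emit pattern: grow reserved_space before emitting anything so a later ring allocation cannot fail past the point of no return, then hand the reservation back once the unconditional invalidate has been emitted. A toy sketch of the pattern; the request bookkeeping here is invented purely for illustration:

#include <stdio.h>

#define GUC_REQUEST_SIZE 64 /* bytes kept back for the request tail */

struct request {
	int reserved_space; /* space the ring must always keep available */
	int emitted;
};

/* Stand-in for emit_flush(rq, EMIT_INVALIDATE). */
static int emit_flush(struct request *rq)
{
	rq->emitted += 8;
	return 0;
}

static int request_alloc(struct request *rq)
{
	int ret;

	/* Reserve before emitting: any wait can only happen up front. */
	rq->reserved_space += GUC_REQUEST_SIZE;

	ret = emit_flush(rq); /* the unconditional cache/TLB invalidate */
	if (ret)
		return ret;

	/* Committed now; give the reservation back for the request body. */
	rq->reserved_space -= GUC_REQUEST_SIZE;
	return 0;
}

int main(void)
{
	struct request rq = { 0 };

	printf("ret=%d reserved=%d\n", request_alloc(&rq), rq.reserved_space);
	return 0;
}
)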
*/ + ret = request->engine->emit_flush(request, EMIT_INVALIDATE); + if (ret) + return ret; + + request->reserved_space -= GUC_REQUEST_SIZE; + return 0; +} + +static inline void queue_request(struct intel_engine_cs *engine, + struct i915_request *rq, + int prio) +{ + GEM_BUG_ON(!list_empty(&rq->sched.link)); + list_add_tail(&rq->sched.link, + i915_sched_lookup_priolist(engine, prio)); + set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); +} + +static void guc_submit_request(struct i915_request *rq) +{ + struct intel_engine_cs *engine = rq->engine; + unsigned long flags; + + /* Will be called from irq-context when using foreign fences. */ + spin_lock_irqsave(&engine->active.lock, flags); + + queue_request(engine, rq, rq_prio(rq)); + + GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)); + GEM_BUG_ON(list_empty(&rq->sched.link)); + + tasklet_hi_schedule(&engine->execlists.tasklet); + + spin_unlock_irqrestore(&engine->active.lock, flags); +} + +static void sanitize_hwsp(struct intel_engine_cs *engine) +{ + struct intel_timeline *tl; + + list_for_each_entry(tl, &engine->status_page.timelines, engine_link) + intel_timeline_reset_seqno(tl); +} + +static void guc_sanitize(struct intel_engine_cs *engine) +{ + /* + * Poison residual state on resume, in case the suspend didn't! * - * But we need to override the actual submission backend in order - * to talk to the GuC. + * We have to assume that across suspend/resume (or other loss + * of control) that the contents of our pinned buffers has been + * lost, replaced by garbage. Since this doesn't always happen, + * let's poison such state so that we more quickly spot when + * we falsely assume it has been preserved. */ - intel_execlists_set_default_submission(engine); + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE); - engine->execlists.tasklet.func = guc_submission_tasklet; + /* + * The kernel_context HWSP is stored in the status_page. As above, + * that may be lost on resume/initialisation, and so we need to + * reset the value in the HWSP. 
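(Editorial aside: guc_submit_request() above defers the real work. Under the engine lock it files the request on a priority-sorted queue via i915_sched_lookup_priolist() and kicks the submission tasklet, which dequeues later. A self-contained sketch of that queue-then-kick shape, using a plain sorted list in place of the driver's priolist rbtree; all names here are illustrative:

#include <stdio.h>
#include <stdlib.h>

/* Toy request, kept in descending-priority order as queue_request()
 * arranges via i915_sched_lookup_priolist(). */
struct request {
	int prio;
	struct request *next;
};

static struct request *queue;	/* engine->active.lock guards this in i915 */
static int tasklet_kicked;	/* stand-in for tasklet_hi_schedule() */

static void queue_request(struct request *rq)
{
	struct request **p = &queue;

	while (*p && (*p)->prio >= rq->prio)
		p = &(*p)->next;
	rq->next = *p;
	*p = rq;

	tasklet_kicked = 1; /* actual submission happens in the tasklet */
}

int main(void)
{
	const int prios[] = { 0, 2, 1 };

	for (int i = 0; i < 3; i++) {
		struct request *rq = calloc(1, sizeof(*rq));

		rq->prio = prios[i];
		queue_request(rq);
	}
	for (const struct request *rq = queue; rq; rq = rq->next)
		printf("%d ", rq->prio); /* prints: 2 1 0 */
	printf("kicked=%d\n", tasklet_kicked);
	return 0;
}
)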
+ */ + sanitize_hwsp(engine); + + /* And scrub the dirty cachelines for the HWSP */ + clflush_cache_range(engine->status_page.addr, PAGE_SIZE); +} - /* do not use execlists park/unpark */ - engine->park = engine->unpark = NULL; +static void setup_hwsp(struct intel_engine_cs *engine) +{ + intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ + + ENGINE_WRITE_FW(engine, + RING_HWS_PGA, + i915_ggtt_offset(engine->status_page.vma)); +} + +static void start_engine(struct intel_engine_cs *engine) +{ + ENGINE_WRITE_FW(engine, + RING_MODE_GEN7, + _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE)); + + ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); + ENGINE_POSTING_READ(engine, RING_MI_MODE); +} + +static int guc_resume(struct intel_engine_cs *engine) +{ + assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); + + intel_mocs_init_engine(engine); + + intel_breadcrumbs_reset(engine->breadcrumbs); + + setup_hwsp(engine); + start_engine(engine); + + return 0; +} + +static void guc_set_default_submission(struct intel_engine_cs *engine) +{ + engine->submit_request = guc_submit_request; + engine->schedule = i915_schedule; + engine->execlists.tasklet.func = guc_submission_tasklet; engine->reset.prepare = guc_reset_prepare; engine->reset.rewind = guc_reset_rewind; engine->reset.cancel = guc_reset_cancel; engine->reset.finish = guc_reset_finish; - engine->flags &= ~I915_ENGINE_SUPPORTS_STATS; engine->flags |= I915_ENGINE_NEEDS_BREADCRUMB_TASKLET; + engine->flags |= I915_ENGINE_HAS_PREEMPTION; + + /* + * TODO: GuC supports timeslicing and semaphores as well, but they're + * handled by the firmware so some minor tweaks are required before + * enabling. + * + * engine->flags |= I915_ENGINE_HAS_TIMESLICES; + * engine->flags |= I915_ENGINE_HAS_SEMAPHORES; + */ + + engine->emit_bb_start = gen8_emit_bb_start; /* * For the breadcrumb irq to work we need the interrupts to stay @@ -613,35 +640,92 @@ static void guc_set_default_submission(struct intel_engine_cs *engine) GEM_BUG_ON(engine->irq_enable || engine->irq_disable); } -void intel_guc_submission_enable(struct intel_guc *guc) +static void guc_release(struct intel_engine_cs *engine) { - struct intel_gt *gt = guc_to_gt(guc); - struct intel_engine_cs *engine; - enum intel_engine_id id; + engine->sanitize = NULL; /* no longer in control, nothing to sanitize */ + + tasklet_kill(&engine->execlists.tasklet); + + intel_engine_cleanup_common(engine); + lrc_fini_wa_ctx(engine); +} + +static void guc_default_vfuncs(struct intel_engine_cs *engine) +{ + /* Default vfuncs which can be overridden by each engine. 
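(Editorial aside: guc_sanitize() above deliberately scribbles POISON_INUSE over the status page in debug builds, so code that wrongly trusts pre-suspend contents fails loudly instead of working by luck. A stripped-down illustration of the same debug-poisoning idea; the poison value and page size mirror the kernel's, the rest is invented:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define POISON_INUSE 0x5a /* the value linux/poison.h uses */
#define PAGE_SZ 4096

/* Debug-only scrub: anything trusting stale page contents after a
 * "resume" now sees 0x5a5a... instead of quietly appearing to work. */
static void sanitize(uint8_t *status_page, int debug_build)
{
	if (debug_build)
		memset(status_page, POISON_INUSE, PAGE_SZ);
}

int main(void)
{
	static uint8_t page[PAGE_SZ];

	sanitize(page, 1);
	assert(page[0] == POISON_INUSE && page[PAGE_SZ - 1] == POISON_INUSE);
	return 0;
}
)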
*/ + + engine->resume = guc_resume; + + engine->cops = &guc_context_ops; + engine->request_alloc = guc_request_alloc; + + engine->emit_flush = gen8_emit_flush_xcs; + engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; + engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs; + if (INTEL_GEN(engine->i915) >= 12) { + engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs; + engine->emit_flush = gen12_emit_flush_xcs; + } + engine->set_default_submission = guc_set_default_submission; +} + +static void rcs_submission_override(struct intel_engine_cs *engine) +{ + switch (INTEL_GEN(engine->i915)) { + case 12: + engine->emit_flush = gen12_emit_flush_rcs; + engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; + break; + case 11: + engine->emit_flush = gen11_emit_flush_rcs; + engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs; + break; + default: + engine->emit_flush = gen8_emit_flush_rcs; + engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; + break; + } +} + +static inline void guc_default_irqs(struct intel_engine_cs *engine) +{ + engine->irq_keep_mask = GT_RENDER_USER_INTERRUPT; +} + +int intel_guc_submission_setup(struct intel_engine_cs *engine) +{ + struct drm_i915_private *i915 = engine->i915; /* - * We're using GuC work items for submitting work through GuC. Since - * we're coalescing multiple requests from a single context into a - * single work item prior to assigning it to execlist_port, we can - * never have more work items than the total number of ports (for all - * engines). The GuC firmware is controlling the HEAD of work queue, - * and it is guaranteed that it will remove the work item from the - * queue before our request is completed. + * The setup relies on several assumptions (e.g. irqs always enabled) + * that are only valid on gen11+ */ - BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.inflight) * - sizeof(struct guc_wq_item) * - I915_NUM_ENGINES > GUC_WQ_SIZE); + GEM_BUG_ON(INTEL_GEN(i915) < 11); + + tasklet_init(&engine->execlists.tasklet, + guc_submission_tasklet, (unsigned long)engine); + + guc_default_vfuncs(engine); + guc_default_irqs(engine); - guc_proc_desc_init(guc); + if (engine->class == RENDER_CLASS) + rcs_submission_override(engine); + + lrc_init_wa_ctx(engine); + + /* Finally, take ownership and responsibility for cleanup! 
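(Editorial aside: rcs_submission_override() above layers render-specific emit vfuncs over the defaults installed by guc_default_vfuncs(): install a default function pointer, then overwrite it per generation. The shape, reduced to a standalone sketch with invented names and generation numbers used only as examples:

#include <stdio.h>

struct engine {
	int gen;
	void (*emit_flush)(void);
};

static void gen8_flush(void)  { puts("gen8 flush");  }
static void gen11_flush(void) { puts("gen11 flush"); }
static void gen12_flush(void) { puts("gen12 flush"); }

static void setup_vfuncs(struct engine *e)
{
	e->emit_flush = gen8_flush; /* default, as in guc_default_vfuncs() */

	switch (e->gen) { /* override, as in rcs_submission_override() */
	case 12: e->emit_flush = gen12_flush; break;
	case 11: e->emit_flush = gen11_flush; break;
	}
}

int main(void)
{
	struct engine e = { .gen = 12 };

	setup_vfuncs(&e);
	e.emit_flush(); /* prints "gen12 flush" */
	return 0;
}
)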
*/ + engine->sanitize = guc_sanitize; + engine->release = guc_release; + + return 0; +} + +void intel_guc_submission_enable(struct intel_guc *guc) +{ guc_stage_desc_init(guc); /* Take over from manual control of ELSP (execlists) */ - guc_interrupts_capture(gt); - - for_each_engine(engine, gt, id) { - engine->set_default_submission = guc_set_default_submission; - engine->set_default_submission(engine); - } + guc_interrupts_capture(guc_to_gt(guc)); } void intel_guc_submission_disable(struct intel_guc *guc) @@ -655,7 +739,6 @@ void intel_guc_submission_disable(struct intel_guc *guc) guc_interrupts_release(gt); guc_stage_desc_fini(guc); - guc_proc_desc_fini(guc); } static bool __guc_submission_selected(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h index 4cf9d3e50263..5f7b9e6347d0 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h @@ -19,6 +19,7 @@ void intel_guc_submission_disable(struct intel_guc *guc); void intel_guc_submission_fini(struct intel_guc *guc); int intel_guc_preempt_work_create(struct intel_guc *guc); void intel_guc_preempt_work_destroy(struct intel_guc *guc); +int intel_guc_submission_setup(struct intel_engine_cs *engine); bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine); static inline bool intel_guc_submission_is_supported(struct intel_guc *guc) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 4e6070e95fe9..6a0452815c41 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -175,19 +175,15 @@ static void guc_get_mmio_msg(struct intel_guc *guc) static void guc_handle_mmio_msg(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; - /* we need communication to be enabled to reply to GuC */ GEM_BUG_ON(!guc_communication_enabled(guc)); - if (!guc->mmio_msg) - return; - - spin_lock_irq(&i915->irq_lock); - intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1); - spin_unlock_irq(&i915->irq_lock); - - guc->mmio_msg = 0; + spin_lock_irq(&guc->irq_lock); + if (guc->mmio_msg) { + intel_guc_to_host_process_recv_msg(guc, &guc->mmio_msg, 1); + guc->mmio_msg = 0; + } + spin_unlock_irq(&guc->irq_lock); } static void guc_reset_interrupts(struct intel_guc *guc) @@ -207,7 +203,8 @@ static void guc_disable_interrupts(struct intel_guc *guc) static int guc_enable_communication(struct intel_guc *guc) { - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + struct intel_gt *gt = guc_to_gt(guc); + struct drm_i915_private *i915 = gt->i915; int ret; GEM_BUG_ON(guc_communication_enabled(guc)); @@ -227,9 +224,9 @@ static int guc_enable_communication(struct intel_guc *guc) guc_enable_interrupts(guc); /* check for CT messages received before we enabled interrupts */ - spin_lock_irq(&i915->irq_lock); + spin_lock_irq(>->irq_lock); intel_guc_ct_event_handler(&guc->ct); - spin_unlock_irq(&i915->irq_lock); + spin_unlock_irq(>->irq_lock); drm_dbg(&i915->drm, "GuC communication enabled\n"); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index 180c23e2e25e..602f1a0bc587 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -53,6 +53,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, fw_def(ELKHARTLAKE, 0, guc_def(ehl, 49, 0, 1), huc_def(ehl, 9, 0, 0)) \ fw_def(ICELAKE, 0, guc_def(icl, 49, 0, 1), 
huc_def(icl, 9, 0, 0)) \ fw_def(COMETLAKE, 5, guc_def(cml, 49, 0, 1), huc_def(cml, 4, 0, 0)) \ + fw_def(COMETLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \ fw_def(COFFEELAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \ fw_def(GEMINILAKE, 0, guc_def(glk, 49, 0, 1), huc_def(glk, 4, 0, 0)) \ fw_def(KABYLAKE, 0, guc_def(kbl, 49, 0, 1), huc_def(kbl, 4, 0, 0)) \ diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 16b582cb97ed..3fea967ee817 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -37,6 +37,7 @@ #include <linux/slab.h> #include "i915_drv.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" #include "gvt.h" #include "i915_pvinfo.h" diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 5c1fcac260d3..a15f87539657 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -164,7 +164,7 @@ static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = { /* let the virtual display support DP1.2 */ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { - 0x12, 0x014, 0x04, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 + 0x12, 0x014, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; static void emulate_monitor_status_change(struct intel_vgpu *vgpu) diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h index d62cd14605a3..84ad74b37d66 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.h +++ b/drivers/gpu/drm/i915/gvt/execlist.h @@ -182,7 +182,4 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu); int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, const struct intel_engine_cs *engine); -void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu, - intel_engine_mask_t engine_mask); - #endif /*_GVT_EXECLIST_H_*/ diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.h b/drivers/gpu/drm/i915/gvt/fb_decoder.h index 67b6ede9e707..0daa3931aef7 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.h +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.h @@ -38,6 +38,10 @@ #include <linux/types.h> +#include "display/intel_display.h" + +struct intel_vgpu; + #define _PLANE_CTL_FORMAT_SHIFT 24 #define _PLANE_CTL_TILED_SHIFT 10 #define _PIPE_V_SRCSZ_SHIFT 0 @@ -98,8 +102,6 @@ enum DDI_PORT { DDI_PORT_E = 4 }; -struct intel_gvt; - /* color space conversion and gamma correction are not included */ struct intel_vgpu_primary_plane_format { u8 enabled; /* plane is enabled */ diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index b0e173f2d990..3bf45672ef98 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -34,10 +34,19 @@ #ifndef _GVT_GTT_H_ #define _GVT_GTT_H_ -#define I915_GTT_PAGE_SHIFT 12 +#include <linux/kernel.h> +#include <linux/kref.h> +#include <linux/mutex.h> +#include <linux/radix-tree.h> + +#include "gt/intel_gtt.h" +struct intel_gvt; +struct intel_vgpu; struct intel_vgpu_mm; +#define I915_GTT_PAGE_SHIFT 12 + #define INTEL_GVT_INVALID_ADDR (~0UL) struct intel_gvt_gtt_entry { diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index cf3578e3f4dd..62a4807424bb 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -33,6 +33,10 @@ #ifndef _GVT_H_ #define _GVT_H_ +#include <uapi/linux/pci_regs.h> + +#include "i915_drv.h" + #include "debug.h" #include "hypercall.h" #include "mmio.h" diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 
aa7e75cb3e6a..0d124ced5f94 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1651,7 +1651,7 @@ static int edp_psr_imr_iir_write(struct intel_vgpu *vgpu, return 0; } -/** +/* * FixMe: * If guest fills non-priv batch buffer on ApolloLake/Broxton as Mesa i965 did: * 717e7539124d (i965: Use a WC map and memcpy for the batch instead of pwrite.) @@ -3686,14 +3686,13 @@ void intel_gvt_restore_fence(struct intel_gvt *gvt) } } -static inline int mmio_pm_restore_handler(struct intel_gvt *gvt, - u32 offset, void *data) +static int mmio_pm_restore_handler(struct intel_gvt *gvt, u32 offset, void *data) { struct intel_vgpu *vgpu = data; struct drm_i915_private *dev_priv = gvt->gt->i915; if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE) - I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset)); + intel_uncore_write(&dev_priv->uncore, _MMIO(offset), vgpu_vreg(vgpu, offset)); return 0; } diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h index fcd663811d37..287cd142629e 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.h +++ b/drivers/gpu/drm/i915/gvt/interrupt.h @@ -32,7 +32,10 @@ #ifndef _GVT_INTERRUPT_H_ #define _GVT_INTERRUPT_H_ -#include <linux/types.h> +#include <linux/hrtimer.h> +#include <linux/kernel.h> + +#include "i915_reg.h" enum intel_gvt_event_type { RCS_MI_USER_INTERRUPT = 0, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index d830b6c65284..60f1a386dd06 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -829,8 +829,10 @@ static int intel_vgpu_open(struct mdev_device *mdev) /* Take a module reference as mdev core doesn't take * a reference for vendor driver. */ - if (!try_module_get(THIS_MODULE)) + if (!try_module_get(THIS_MODULE)) { + ret = -ENODEV; goto undo_group; + } ret = kvmgt_guest_init(mdev); if (ret) diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index afe574d6b3b5..c9589e26af93 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -35,6 +35,7 @@ #include "i915_drv.h" #include "gt/intel_context.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" #include "gvt.h" #include "trace.h" diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h index 3b25e7fe32f6..b6b69777af49 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.h +++ b/drivers/gpu/drm/i915/gvt/mmio_context.h @@ -36,6 +36,18 @@ #ifndef __GVT_RENDER_H__ #define __GVT_RENDER_H__ +#include <linux/types.h> + +#include "gt/intel_engine_types.h" +#include "gt/intel_lrc_reg.h" +#include "i915_reg.h" + +struct i915_request; +struct intel_context; +struct intel_engine_cs; +struct intel_gvt; +struct intel_vgpu; + struct engine_mmio { enum intel_engine_id id; i915_reg_t reg; diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h index 6f92cde71971..550a456e936f 100644 --- a/drivers/gpu/drm/i915/gvt/mpt.h +++ b/drivers/gpu/drm/i915/gvt/mpt.h @@ -33,6 +33,8 @@ #ifndef _GVT_MPT_H_ #define _GVT_MPT_H_ +#include "gvt.h" + /** * DOC: Hypervisor Service APIs for GVT-g Core Logic * diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index aed2ef6466a2..6af5c06caee0 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -37,6 +37,8 @@ #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" +#include "gt/intel_execlists_submission.h" +#include 
"gt/intel_lrc.h" #include "gt/intel_ring.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 64e7a0b791c3..7c86984a842f 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -36,6 +36,11 @@ #ifndef _GVT_SCHEDULER_H_ #define _GVT_SCHEDULER_H_ +#include "gt/intel_engine_types.h" + +#include "execlist.h" +#include "interrupt.h" + struct intel_gvt_workload_scheduler { struct intel_vgpu *current_vgpu; struct intel_vgpu *next_vgpu; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 1c8e63f84134..e49944fde333 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -439,7 +439,8 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (IS_BROADWELL(dev_priv)) ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B); - else + /* FixMe: Re-enable APL/BXT once vfio_edid enabled */ + else if (!IS_BROXTON(dev_priv)) ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); if (ret) goto out_clean_sched_policy; diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 10a865f3dc09..ab4382841c6b 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -159,8 +159,7 @@ __active_retire(struct i915_active *ref) GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node); /* Make the cached node available for reuse with any timeline */ - if (IS_ENABLED(CONFIG_64BIT)) - ref->cache->timeline = 0; /* needs cmpxchg(u64) */ + ref->cache->timeline = 0; /* needs cmpxchg(u64) */ } spin_unlock_irqrestore(&ref->tree_lock, flags); @@ -256,7 +255,6 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx) if (cached == idx) return it; -#ifdef CONFIG_64BIT /* for cmpxchg(u64) */ /* * An unclaimed cache [.timeline=0] can only be claimed once. * @@ -267,9 +265,8 @@ static struct active_node *__active_lookup(struct i915_active *ref, u64 idx) * only the winner of that race will cmpxchg return the old * value of 0). */ - if (!cached && !cmpxchg(&it->timeline, 0, idx)) + if (!cached && !cmpxchg64(&it->timeline, 0, idx)) return it; -#endif } BUILD_BUG_ON(offsetof(typeof(*it), node)); diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 93265951fdbb..82d0f19e86df 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -26,6 +26,7 @@ */ #include "gt/intel_engine.h" +#include "gt/intel_gpu_commands.h" #include "i915_drv.h" #include "i915_memcpy.h" @@ -1166,7 +1167,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, } } if (IS_ERR(src)) { - unsigned long x, n; + unsigned long x, n, remain; void *ptr; /* @@ -1177,14 +1178,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, * We don't care about copying too much here as we only * validate up to the end of the batch. 
*/ + remain = length; if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) - length = round_up(length, + remain = round_up(remain, boot_cpu_data.x86_clflush_size); ptr = dst; x = offset_in_page(offset); - for (n = offset >> PAGE_SHIFT; length; n++) { - int len = min(length, PAGE_SIZE - x); + for (n = offset >> PAGE_SHIFT; remain; n++) { + int len = min(remain, PAGE_SIZE - x); src = kmap_atomic(i915_gem_object_get_page(src_obj, n)); if (needs_clflush) @@ -1193,13 +1195,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, kunmap_atomic(src); ptr += len; - length -= len; + remain -= len; x = 0; } } i915_gem_object_unpin_pages(src_obj); + memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32)); + /* dst_obj is returned with vmap pinned */ return dst; } @@ -1392,11 +1396,6 @@ static unsigned long *alloc_whitelist(u32 batch_length) #define LENGTH_BIAS 2 -static bool shadow_needs_clflush(struct drm_i915_gem_object *obj) -{ - return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE); -} - /** * intel_engine_cmd_parser() - parse a batch buffer for privilege violations * @engine: the engine on which the batch is to execute @@ -1538,16 +1537,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine, ret = 0; /* allow execution */ } } - - if (shadow_needs_clflush(shadow->obj)) - drm_clflush_virt_range(batch_end, 8); } - if (shadow_needs_clflush(shadow->obj)) { - void *ptr = page_mask_bits(shadow->obj->mm.mapping); - - drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr); - } + i915_gem_object_flush_map(shadow->obj); if (!IS_ERR_OR_NULL(jump_whitelist)) kfree(jump_whitelist); diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 77e76b665098..de8e0e44cfb6 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -45,6 +45,7 @@ #include "i915_debugfs.h" #include "i915_debugfs_params.h" #include "i915_irq.h" +#include "i915_scheduler.h" #include "i915_trace.h" #include "intel_pm.h" #include "intel_sideband.h" @@ -378,308 +379,6 @@ static int i915_gem_object_info(struct seq_file *m, void *data) return 0; } -static void gen8_display_interrupt_info(struct seq_file *m) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - enum pipe pipe; - - for_each_pipe(dev_priv, pipe) { - enum intel_display_power_domain power_domain; - intel_wakeref_t wakeref; - - power_domain = POWER_DOMAIN_PIPE(pipe); - wakeref = intel_display_power_get_if_enabled(dev_priv, - power_domain); - if (!wakeref) { - seq_printf(m, "Pipe %c power disabled\n", - pipe_name(pipe)); - continue; - } - seq_printf(m, "Pipe %c IMR:\t%08x\n", - pipe_name(pipe), - I915_READ(GEN8_DE_PIPE_IMR(pipe))); - seq_printf(m, "Pipe %c IIR:\t%08x\n", - pipe_name(pipe), - I915_READ(GEN8_DE_PIPE_IIR(pipe))); - seq_printf(m, "Pipe %c IER:\t%08x\n", - pipe_name(pipe), - I915_READ(GEN8_DE_PIPE_IER(pipe))); - - intel_display_power_put(dev_priv, power_domain, wakeref); - } - - seq_printf(m, "Display Engine port interrupt mask:\t%08x\n", - I915_READ(GEN8_DE_PORT_IMR)); - seq_printf(m, "Display Engine port interrupt identity:\t%08x\n", - I915_READ(GEN8_DE_PORT_IIR)); - seq_printf(m, "Display Engine port interrupt enable:\t%08x\n", - I915_READ(GEN8_DE_PORT_IER)); - - seq_printf(m, "Display Engine misc interrupt mask:\t%08x\n", - I915_READ(GEN8_DE_MISC_IMR)); - seq_printf(m, "Display Engine misc interrupt identity:\t%08x\n", - I915_READ(GEN8_DE_MISC_IIR)); - seq_printf(m, "Display Engine misc interrupt enable:\t%08x\n", - 
I915_READ(GEN8_DE_MISC_IER)); - - seq_printf(m, "PCU interrupt mask:\t%08x\n", - I915_READ(GEN8_PCU_IMR)); - seq_printf(m, "PCU interrupt identity:\t%08x\n", - I915_READ(GEN8_PCU_IIR)); - seq_printf(m, "PCU interrupt enable:\t%08x\n", - I915_READ(GEN8_PCU_IER)); -} - -static int i915_interrupt_info(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_engine_cs *engine; - intel_wakeref_t wakeref; - int i, pipe; - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - if (IS_CHERRYVIEW(dev_priv)) { - intel_wakeref_t pref; - - seq_printf(m, "Master Interrupt Control:\t%08x\n", - I915_READ(GEN8_MASTER_IRQ)); - - seq_printf(m, "Display IER:\t%08x\n", - I915_READ(VLV_IER)); - seq_printf(m, "Display IIR:\t%08x\n", - I915_READ(VLV_IIR)); - seq_printf(m, "Display IIR_RW:\t%08x\n", - I915_READ(VLV_IIR_RW)); - seq_printf(m, "Display IMR:\t%08x\n", - I915_READ(VLV_IMR)); - for_each_pipe(dev_priv, pipe) { - enum intel_display_power_domain power_domain; - - power_domain = POWER_DOMAIN_PIPE(pipe); - pref = intel_display_power_get_if_enabled(dev_priv, - power_domain); - if (!pref) { - seq_printf(m, "Pipe %c power disabled\n", - pipe_name(pipe)); - continue; - } - - seq_printf(m, "Pipe %c stat:\t%08x\n", - pipe_name(pipe), - I915_READ(PIPESTAT(pipe))); - - intel_display_power_put(dev_priv, power_domain, pref); - } - - pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - seq_printf(m, "Port hotplug:\t%08x\n", - I915_READ(PORT_HOTPLUG_EN)); - seq_printf(m, "DPFLIPSTAT:\t%08x\n", - I915_READ(VLV_DPFLIPSTAT)); - seq_printf(m, "DPINVGTT:\t%08x\n", - I915_READ(DPINVGTT)); - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref); - - for (i = 0; i < 4; i++) { - seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", - i, I915_READ(GEN8_GT_IMR(i))); - seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", - i, I915_READ(GEN8_GT_IIR(i))); - seq_printf(m, "GT Interrupt IER %d:\t%08x\n", - i, I915_READ(GEN8_GT_IER(i))); - } - - seq_printf(m, "PCU interrupt mask:\t%08x\n", - I915_READ(GEN8_PCU_IMR)); - seq_printf(m, "PCU interrupt identity:\t%08x\n", - I915_READ(GEN8_PCU_IIR)); - seq_printf(m, "PCU interrupt enable:\t%08x\n", - I915_READ(GEN8_PCU_IER)); - } else if (INTEL_GEN(dev_priv) >= 11) { - if (HAS_MASTER_UNIT_IRQ(dev_priv)) - seq_printf(m, "Master Unit Interrupt Control: %08x\n", - I915_READ(DG1_MSTR_UNIT_INTR)); - - seq_printf(m, "Master Interrupt Control: %08x\n", - I915_READ(GEN11_GFX_MSTR_IRQ)); - - seq_printf(m, "Render/Copy Intr Enable: %08x\n", - I915_READ(GEN11_RENDER_COPY_INTR_ENABLE)); - seq_printf(m, "VCS/VECS Intr Enable: %08x\n", - I915_READ(GEN11_VCS_VECS_INTR_ENABLE)); - seq_printf(m, "GUC/SG Intr Enable:\t %08x\n", - I915_READ(GEN11_GUC_SG_INTR_ENABLE)); - seq_printf(m, "GPM/WGBOXPERF Intr Enable: %08x\n", - I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE)); - seq_printf(m, "Crypto Intr Enable:\t %08x\n", - I915_READ(GEN11_CRYPTO_RSVD_INTR_ENABLE)); - seq_printf(m, "GUnit/CSME Intr Enable:\t %08x\n", - I915_READ(GEN11_GUNIT_CSME_INTR_ENABLE)); - - seq_printf(m, "Display Interrupt Control:\t%08x\n", - I915_READ(GEN11_DISPLAY_INT_CTL)); - - gen8_display_interrupt_info(m); - } else if (INTEL_GEN(dev_priv) >= 8) { - seq_printf(m, "Master Interrupt Control:\t%08x\n", - I915_READ(GEN8_MASTER_IRQ)); - - for (i = 0; i < 4; i++) { - seq_printf(m, "GT Interrupt IMR %d:\t%08x\n", - i, I915_READ(GEN8_GT_IMR(i))); - seq_printf(m, "GT Interrupt IIR %d:\t%08x\n", - i, I915_READ(GEN8_GT_IIR(i))); - seq_printf(m, "GT Interrupt IER %d:\t%08x\n", - i, 
I915_READ(GEN8_GT_IER(i))); - } - - gen8_display_interrupt_info(m); - } else if (IS_VALLEYVIEW(dev_priv)) { - intel_wakeref_t pref; - - seq_printf(m, "Display IER:\t%08x\n", - I915_READ(VLV_IER)); - seq_printf(m, "Display IIR:\t%08x\n", - I915_READ(VLV_IIR)); - seq_printf(m, "Display IIR_RW:\t%08x\n", - I915_READ(VLV_IIR_RW)); - seq_printf(m, "Display IMR:\t%08x\n", - I915_READ(VLV_IMR)); - for_each_pipe(dev_priv, pipe) { - enum intel_display_power_domain power_domain; - - power_domain = POWER_DOMAIN_PIPE(pipe); - pref = intel_display_power_get_if_enabled(dev_priv, - power_domain); - if (!pref) { - seq_printf(m, "Pipe %c power disabled\n", - pipe_name(pipe)); - continue; - } - - seq_printf(m, "Pipe %c stat:\t%08x\n", - pipe_name(pipe), - I915_READ(PIPESTAT(pipe))); - intel_display_power_put(dev_priv, power_domain, pref); - } - - seq_printf(m, "Master IER:\t%08x\n", - I915_READ(VLV_MASTER_IER)); - - seq_printf(m, "Render IER:\t%08x\n", - I915_READ(GTIER)); - seq_printf(m, "Render IIR:\t%08x\n", - I915_READ(GTIIR)); - seq_printf(m, "Render IMR:\t%08x\n", - I915_READ(GTIMR)); - - seq_printf(m, "PM IER:\t\t%08x\n", - I915_READ(GEN6_PMIER)); - seq_printf(m, "PM IIR:\t\t%08x\n", - I915_READ(GEN6_PMIIR)); - seq_printf(m, "PM IMR:\t\t%08x\n", - I915_READ(GEN6_PMIMR)); - - pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); - seq_printf(m, "Port hotplug:\t%08x\n", - I915_READ(PORT_HOTPLUG_EN)); - seq_printf(m, "DPFLIPSTAT:\t%08x\n", - I915_READ(VLV_DPFLIPSTAT)); - seq_printf(m, "DPINVGTT:\t%08x\n", - I915_READ(DPINVGTT)); - intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref); - - } else if (!HAS_PCH_SPLIT(dev_priv)) { - seq_printf(m, "Interrupt enable: %08x\n", - I915_READ(GEN2_IER)); - seq_printf(m, "Interrupt identity: %08x\n", - I915_READ(GEN2_IIR)); - seq_printf(m, "Interrupt mask: %08x\n", - I915_READ(GEN2_IMR)); - for_each_pipe(dev_priv, pipe) - seq_printf(m, "Pipe %c stat: %08x\n", - pipe_name(pipe), - I915_READ(PIPESTAT(pipe))); - } else { - seq_printf(m, "North Display Interrupt enable: %08x\n", - I915_READ(DEIER)); - seq_printf(m, "North Display Interrupt identity: %08x\n", - I915_READ(DEIIR)); - seq_printf(m, "North Display Interrupt mask: %08x\n", - I915_READ(DEIMR)); - seq_printf(m, "South Display Interrupt enable: %08x\n", - I915_READ(SDEIER)); - seq_printf(m, "South Display Interrupt identity: %08x\n", - I915_READ(SDEIIR)); - seq_printf(m, "South Display Interrupt mask: %08x\n", - I915_READ(SDEIMR)); - seq_printf(m, "Graphics Interrupt enable: %08x\n", - I915_READ(GTIER)); - seq_printf(m, "Graphics Interrupt identity: %08x\n", - I915_READ(GTIIR)); - seq_printf(m, "Graphics Interrupt mask: %08x\n", - I915_READ(GTIMR)); - } - - if (INTEL_GEN(dev_priv) >= 11) { - seq_printf(m, "RCS Intr Mask:\t %08x\n", - I915_READ(GEN11_RCS0_RSVD_INTR_MASK)); - seq_printf(m, "BCS Intr Mask:\t %08x\n", - I915_READ(GEN11_BCS_RSVD_INTR_MASK)); - seq_printf(m, "VCS0/VCS1 Intr Mask:\t %08x\n", - I915_READ(GEN11_VCS0_VCS1_INTR_MASK)); - seq_printf(m, "VCS2/VCS3 Intr Mask:\t %08x\n", - I915_READ(GEN11_VCS2_VCS3_INTR_MASK)); - seq_printf(m, "VECS0/VECS1 Intr Mask:\t %08x\n", - I915_READ(GEN11_VECS0_VECS1_INTR_MASK)); - seq_printf(m, "GUC/SG Intr Mask:\t %08x\n", - I915_READ(GEN11_GUC_SG_INTR_MASK)); - seq_printf(m, "GPM/WGBOXPERF Intr Mask: %08x\n", - I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK)); - seq_printf(m, "Crypto Intr Mask:\t %08x\n", - I915_READ(GEN11_CRYPTO_RSVD_INTR_MASK)); - seq_printf(m, "Gunit/CSME Intr Mask:\t %08x\n", - I915_READ(GEN11_GUNIT_CSME_INTR_MASK)); - - } else if 
(INTEL_GEN(dev_priv) >= 6) { - for_each_uabi_engine(engine, dev_priv) { - seq_printf(m, - "Graphics Interrupt mask (%s): %08x\n", - engine->name, ENGINE_READ(engine, RING_IMR)); - } - } - - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - -static int i915_gem_fence_regs_info(struct seq_file *m, void *data) -{ - struct drm_i915_private *i915 = node_to_i915(m->private); - unsigned int i; - - seq_printf(m, "Total fences = %d\n", i915->ggtt.num_fences); - - rcu_read_lock(); - for (i = 0; i < i915->ggtt.num_fences; i++) { - struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i]; - struct i915_vma *vma = reg->vma; - - seq_printf(m, "Fence %d, pin count = %d, object = ", - i, atomic_read(®->pin_count)); - if (!vma) - seq_puts(m, "unused"); - else - i915_debugfs_describe_obj(m, vma->obj); - seq_putc(m, '\n'); - } - rcu_read_unlock(); - - return 0; -} - #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) static ssize_t gpu_state_read(struct file *file, char __user *ubuf, size_t count, loff_t *pos) @@ -802,7 +501,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { u32 rpmodectl, freq_sts; - rpmodectl = I915_READ(GEN6_RP_CONTROL); + rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL); seq_printf(m, "Video Turbo Mode: %s\n", yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); seq_printf(m, "HW control enabled: %s\n", @@ -847,19 +546,19 @@ static int i915_frequency_info(struct seq_file *m, void *unused) u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask; int max_freq; - rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); + rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS); if (IS_GEN9_LP(dev_priv)) { - rp_state_cap = I915_READ(BXT_RP_STATE_CAP); - gt_perf_status = I915_READ(BXT_GT_PERF_STATUS); + rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP); + gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS); } else { - rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); + rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP); + gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS); } /* RPSTAT1 is in the GT power well */ intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - reqf = I915_READ(GEN6_RPNSWREQ); + reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ); if (INTEL_GEN(dev_priv) >= 9) reqf >>= 23; else { @@ -871,24 +570,24 @@ static int i915_frequency_info(struct seq_file *m, void *unused) } reqf = intel_gpu_freq(rps, reqf); - rpmodectl = I915_READ(GEN6_RP_CONTROL); - rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); - rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD); + rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL); + rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD); + rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD); - rpstat = I915_READ(GEN6_RPSTAT1); - rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; - rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; - rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; - rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; - rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; - rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; + rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1); + rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK; + 
rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK; + rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK; + rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; + rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; + rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; cagf = intel_rps_read_actual_frequency(rps); intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); if (INTEL_GEN(dev_priv) >= 11) { - pm_ier = I915_READ(GEN11_GPM_WGBOXPERF_INTR_ENABLE); - pm_imr = I915_READ(GEN11_GPM_WGBOXPERF_INTR_MASK); + pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE); + pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK); /* * The equivalent to the PM ISR & IIR cannot be read * without affecting the current state of the system @@ -896,17 +595,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused) pm_isr = 0; pm_iir = 0; } else if (INTEL_GEN(dev_priv) >= 8) { - pm_ier = I915_READ(GEN8_GT_IER(2)); - pm_imr = I915_READ(GEN8_GT_IMR(2)); - pm_isr = I915_READ(GEN8_GT_ISR(2)); - pm_iir = I915_READ(GEN8_GT_IIR(2)); + pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2)); + pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2)); + pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2)); + pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2)); } else { - pm_ier = I915_READ(GEN6_PMIER); - pm_imr = I915_READ(GEN6_PMIMR); - pm_isr = I915_READ(GEN6_PMISR); - pm_iir = I915_READ(GEN6_PMIIR); + pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER); + pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR); + pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR); + pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR); } - pm_mask = I915_READ(GEN6_PMINTRMSK); + pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK); seq_printf(m, "Video Turbo Mode: %s\n", yesno(rpmodectl & GEN6_RP_MEDIA_TURBO)); @@ -936,27 +635,27 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit); seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); seq_printf(m, "CAGF: %dMHz\n", cagf); - seq_printf(m, "RP CUR UP EI: %d (%dns)\n", + seq_printf(m, "RP CUR UP EI: %d (%lldns)\n", rpupei, intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei)); - seq_printf(m, "RP CUR UP: %d (%dun)\n", + seq_printf(m, "RP CUR UP: %d (%lldun)\n", rpcurup, intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup)); - seq_printf(m, "RP PREV UP: %d (%dns)\n", + seq_printf(m, "RP PREV UP: %d (%lldns)\n", rpprevup, intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup)); seq_printf(m, "Up threshold: %d%%\n", rps->power.up_threshold); - seq_printf(m, "RP CUR DOWN EI: %d (%dns)\n", + seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n", rpdownei, intel_gt_pm_interval_to_ns(&dev_priv->gt, rpdownei)); - seq_printf(m, "RP CUR DOWN: %d (%dns)\n", + seq_printf(m, "RP CUR DOWN: %d (%lldns)\n", rpcurdown, intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurdown)); - seq_printf(m, "RP PREV DOWN: %d (%dns)\n", + seq_printf(m, "RP PREV DOWN: %d (%lldns)\n", rpprevdown, intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevdown)); @@ -1011,111 +710,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused) return 0; } -static int i915_ring_freq_table(struct seq_file *m, void *unused) -{ - struct drm_i915_private *dev_priv = 
node_to_i915(m->private); - struct intel_rps *rps = &dev_priv->gt.rps; - unsigned int max_gpu_freq, min_gpu_freq; - intel_wakeref_t wakeref; - int gpu_freq, ia_freq; - - if (!HAS_LLC(dev_priv)) - return -ENODEV; - - min_gpu_freq = rps->min_freq; - max_gpu_freq = rps->max_freq; - if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) { - /* Convert GT frequency to 50 HZ units */ - min_gpu_freq /= GEN9_FREQ_SCALER; - max_gpu_freq /= GEN9_FREQ_SCALER; - } - - seq_puts(m, "GPU freq (MHz)\tEffective CPU freq (MHz)\tEffective Ring freq (MHz)\n"); - - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { - ia_freq = gpu_freq; - sandybridge_pcode_read(dev_priv, - GEN6_PCODE_READ_MIN_FREQ_TABLE, - &ia_freq, NULL); - seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", - intel_gpu_freq(rps, - (gpu_freq * - (IS_GEN9_BC(dev_priv) || - INTEL_GEN(dev_priv) >= 10 ? - GEN9_FREQ_SCALER : 1))), - ((ia_freq >> 0) & 0xff) * 100, - ((ia_freq >> 8) & 0xff) * 100); - } - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - -static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) -{ - seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)", - ring->space, ring->head, ring->tail, ring->emit); -} - -static int i915_context_status(struct seq_file *m, void *unused) -{ - struct drm_i915_private *i915 = node_to_i915(m->private); - struct i915_gem_context *ctx, *cn; - - spin_lock(&i915->gem.contexts.lock); - list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { - struct i915_gem_engines_iter it; - struct intel_context *ce; - - if (!kref_get_unless_zero(&ctx->ref)) - continue; - - spin_unlock(&i915->gem.contexts.lock); - - seq_puts(m, "HW context "); - if (ctx->pid) { - struct task_struct *task; - - task = get_pid_task(ctx->pid, PIDTYPE_PID); - if (task) { - seq_printf(m, "(%s [%d]) ", - task->comm, task->pid); - put_task_struct(task); - } - } else if (IS_ERR(ctx->file_priv)) { - seq_puts(m, "(deleted) "); - } else { - seq_puts(m, "(kernel) "); - } - - seq_putc(m, ctx->remap_slice ? 
'R' : 'r'); - seq_putc(m, '\n'); - - for_each_gem_engine(ce, - i915_gem_context_lock_engines(ctx), it) { - if (intel_context_pin_if_active(ce)) { - seq_printf(m, "%s: ", ce->engine->name); - if (ce->state) - i915_debugfs_describe_obj(m, ce->state->obj); - describe_ctx_ring(m, ce->ring); - seq_putc(m, '\n'); - intel_context_unpin(ce); - } - } - i915_gem_context_unlock_engines(ctx); - - seq_putc(m, '\n'); - - spin_lock(&i915->gem.contexts.lock); - list_safe_reset_next(ctx, cn, link); - i915_gem_context_put(ctx); - } - spin_unlock(&i915->gem.contexts.lock); - - return 0; -} - static const char *swizzle_string(unsigned swizzle) { switch (swizzle) { @@ -1193,20 +787,6 @@ static int i915_swizzle_info(struct seq_file *m, void *data) return 0; } -static const char *rps_power_to_str(unsigned int power) -{ - static const char * const strings[] = { - [LOW_POWER] = "low power", - [BETWEEN] = "mixed", - [HIGH_POWER] = "high power", - }; - - if (power >= ARRAY_SIZE(strings) || !strings[power]) - return "unknown"; - - return strings[power]; -} - static int i915_rps_boost_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); @@ -1231,42 +811,7 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) intel_gpu_freq(rps, rps->efficient_freq), intel_gpu_freq(rps, rps->boost_freq)); - seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); - - if (INTEL_GEN(dev_priv) >= 6 && intel_rps_is_active(rps)) { - u32 rpup, rpupei; - u32 rpdown, rpdownei; - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - rpup = I915_READ_FW(GEN6_RP_CUR_UP) & GEN6_RP_EI_MASK; - rpupei = I915_READ_FW(GEN6_RP_CUR_UP_EI) & GEN6_RP_EI_MASK; - rpdown = I915_READ_FW(GEN6_RP_CUR_DOWN) & GEN6_RP_EI_MASK; - rpdownei = I915_READ_FW(GEN6_RP_CUR_DOWN_EI) & GEN6_RP_EI_MASK; - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - - seq_printf(m, "\nRPS Autotuning (current \"%s\" window):\n", - rps_power_to_str(rps->power.mode)); - seq_printf(m, " Avg. up: %d%% [above threshold? %d%%]\n", - rpup && rpupei ? 100 * rpup / rpupei : 0, - rps->power.up_threshold); - seq_printf(m, " Avg. down: %d%% [below threshold? %d%%]\n", - rpdown && rpdownei ? 100 * rpdown / rpdownei : 0, - rps->power.down_threshold); - } else { - seq_puts(m, "\nRPS Autotuning inactive\n"); - } - - return 0; -} - -static int i915_llc(struct seq_file *m, void *data) -{ - struct drm_i915_private *dev_priv = node_to_i915(m->private); - const bool edram = INTEL_GEN(dev_priv) > 8; - - seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev_priv))); - seq_printf(m, "%s: %uMB\n", edram ? 
"eDRAM" : "eLLC", - dev_priv->edram_size_mb); + seq_printf(m, "Wait boosts: %d\n", READ_ONCE(rps->boosts)); return 0; } @@ -1280,7 +825,7 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) seq_puts(m, "Runtime power management not supported\n"); seq_printf(m, "Runtime power status: %s\n", - enableddisabled(!dev_priv->power_domains.wakeref)); + enableddisabled(!dev_priv->power_domains.init_wakeref)); seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake)); seq_printf(m, "IRQs disabled: %s\n", @@ -1306,34 +851,28 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused) static int i915_engine_info(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_i915_private *i915 = node_to_i915(m->private); struct intel_engine_cs *engine; intel_wakeref_t wakeref; struct drm_printer p; - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); - seq_printf(m, "GT awake? %s [%d]\n", - yesno(dev_priv->gt.awake), - atomic_read(&dev_priv->gt.wakeref.count)); - seq_printf(m, "CS timestamp frequency: %u Hz\n", - RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_hz); + seq_printf(m, "GT awake? %s [%d], %llums\n", + yesno(i915->gt.awake), + atomic_read(&i915->gt.wakeref.count), + ktime_to_ms(intel_gt_get_awake_time(&i915->gt))); + seq_printf(m, "CS timestamp frequency: %u Hz, %d ns\n", + i915->gt.clock_frequency, + i915->gt.clock_period_ns); p = drm_seq_file_printer(m); - for_each_uabi_engine(engine, dev_priv) + for_each_uabi_engine(engine, i915) intel_engine_dump(engine, &p, "%s\n", engine->name); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - - return 0; -} - -static int i915_shrinker_info(struct seq_file *m, void *unused) -{ - struct drm_i915_private *i915 = node_to_i915(m->private); + intel_gt_show_timelines(&i915->gt, &p, i915_request_show_with_schedule); - seq_printf(m, "seeks = %d\n", i915->mm.shrinker.seeks); - seq_printf(m, "batch = %lu\n", i915->mm.shrinker.batch); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return 0; } @@ -1411,7 +950,7 @@ i915_perf_noa_delay_set(void *data, u64 val) * This would lead to infinite waits as we're doing timestamp * difference on the CS with only 32bits. 
*/ - if (i915_cs_timestamp_ns_to_ticks(i915, val) > U32_MAX) + if (intel_gt_ns_to_clock_interval(&i915->gt, val) > U32_MAX) return -EINVAL; atomic64_set(&i915->perf.noa_programming_delay, val); @@ -1529,55 +1068,6 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_drop_caches_fops, i915_drop_caches_get, i915_drop_caches_set, "0x%08llx\n"); -static int -i915_cache_sharing_get(void *data, u64 *val) -{ - struct drm_i915_private *dev_priv = data; - intel_wakeref_t wakeref; - u32 snpcr = 0; - - if (!(IS_GEN_RANGE(dev_priv, 6, 7))) - return -ENODEV; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); - - *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; - - return 0; -} - -static int -i915_cache_sharing_set(void *data, u64 val) -{ - struct drm_i915_private *dev_priv = data; - intel_wakeref_t wakeref; - - if (!(IS_GEN_RANGE(dev_priv, 6, 7))) - return -ENODEV; - - if (val > 3) - return -EINVAL; - - drm_dbg(&dev_priv->drm, - "Manually setting uncore sharing to %llu\n", val); - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - u32 snpcr; - - /* Update the cache sharing policy here as well */ - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); - snpcr &= ~GEN6_MBC_SNPCR_MASK; - snpcr |= val << GEN6_MBC_SNPCR_SHIFT; - I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); - } - - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, - i915_cache_sharing_get, i915_cache_sharing_set, - "%llu\n"); - static int i915_sseu_status(struct seq_file *m, void *unused) { struct drm_i915_private *i915 = node_to_i915(m->private); @@ -1621,16 +1111,10 @@ static const struct file_operations i915_forcewake_fops = { static const struct drm_info_list i915_debugfs_list[] = { {"i915_capabilities", i915_capabilities, 0}, {"i915_gem_objects", i915_gem_object_info, 0}, - {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, - {"i915_gem_interrupt", i915_interrupt_info, 0}, {"i915_frequency_info", i915_frequency_info, 0}, - {"i915_ring_freq_table", i915_ring_freq_table, 0}, - {"i915_context_status", i915_context_status, 0}, {"i915_swizzle_info", i915_swizzle_info, 0}, - {"i915_llc", i915_llc, 0}, {"i915_runtime_pm_status", i915_runtime_pm_status, 0}, {"i915_engine_info", i915_engine_info, 0}, - {"i915_shrinker_info", i915_shrinker_info, 0}, {"i915_wa_registers", i915_wa_registers, 0}, {"i915_sseu_status", i915_sseu_status, 0}, {"i915_rps_boost_info", i915_rps_boost_info, 0}, @@ -1643,7 +1127,6 @@ static const struct i915_debugfs_files { } i915_debugfs_files[] = { {"i915_perf_noa_delay", &i915_perf_noa_delay_fops}, {"i915_wedged", &i915_wedged_fops}, - {"i915_cache_sharing", &i915_cache_sharing_fops}, {"i915_gem_drop_caches", &i915_drop_caches_fops}, #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) {"i915_error_state", &i915_error_state_fops}, diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 320856b665a1..f5666b44ea9d 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -410,6 +410,7 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) /* Try to make sure MCHBAR is enabled before poking at it */ intel_setup_mchbar(dev_priv); + intel_device_info_runtime_init(dev_priv); ret = intel_gt_init_mmio(&dev_priv->gt); if (ret) @@ -516,8 +517,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (i915_inject_probe_failure(dev_priv)) return -ENODEV; - intel_device_info_runtime_init(dev_priv); - if (HAS_PPGTT(dev_priv)) { if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_full_ppgtt(dev_priv)) { @@ 
-578,8 +577,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) pci_set_master(pdev); - cpu_latency_qos_add_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE); - intel_gt_init_workarounds(dev_priv); /* On the 945G/GM, the chipset reports the MSI capability on the @@ -626,7 +623,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) err_msi: if (pdev->msi_enabled) pci_disable_msi(pdev); - cpu_latency_qos_remove_request(&dev_priv->pm_qos); err_mem_regions: intel_memory_regions_driver_release(dev_priv); err_ggtt: @@ -648,8 +644,6 @@ static void i915_driver_hw_remove(struct drm_i915_private *dev_priv) if (pdev->msi_enabled) pci_disable_msi(pdev); - - cpu_latency_qos_remove_request(&dev_priv->pm_qos); } /** @@ -738,6 +732,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) * events. */ drm_kms_helper_poll_fini(&dev_priv->drm); + drm_atomic_helper_shutdown(&dev_priv->drm); intel_gt_driver_unregister(&dev_priv->gt); acpi_video_unregister(); @@ -940,8 +935,6 @@ void i915_driver_remove(struct drm_i915_private *i915) i915_gem_suspend(i915); - drm_atomic_helper_shutdown(&i915->drm); - intel_gvt_driver_remove(i915); intel_modeset_driver_remove(i915); @@ -1052,6 +1045,8 @@ static void intel_shutdown_encoders(struct drm_i915_private *dev_priv) void i915_driver_shutdown(struct drm_i915_private *i915) { + disable_rpm_wakeref_asserts(&i915->runtime_pm); + i915_gem_suspend(i915); drm_kms_helper_poll_disable(&i915->drm); @@ -1065,6 +1060,8 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_suspend_encoders(i915); intel_shutdown_encoders(i915); + + enable_rpm_wakeref_asserts(&i915->runtime_pm); } static bool suspend_to_idle(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 15be8debae54..e3d58299b323 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -79,9 +79,9 @@ #include "gem/i915_gem_shrinker.h" #include "gem/i915_gem_stolen.h" -#include "gt/intel_lrc.h" #include "gt/intel_engine.h" #include "gt/intel_gt_types.h" +#include "gt/intel_region_lmem.h" #include "gt/intel_workarounds.h" #include "gt/uc/intel_uc.h" @@ -103,7 +103,6 @@ #include "i915_vma.h" #include "i915_irq.h" -#include "intel_region_lmem.h" /* General customization: */ @@ -416,6 +415,7 @@ struct intel_fbc { u16 gen9_wa_cfb_stride; u16 interval; s8 fence_id; + bool psr2_active; } state_cache; /* @@ -891,9 +891,6 @@ struct drm_i915_private { bool display_irqs_enabled; - /* To control wakeup latency, e.g. for irq-driven dp aux transfers. 
*/ - struct pm_qos_request pm_qos; - /* Sideband mailbox protection */ struct mutex sb_lock; struct pm_qos_request sb_qos; @@ -1172,9 +1169,6 @@ struct drm_i915_private { struct i915_gem_contexts { spinlock_t lock; /* locks list */ struct list_head list; - - struct llist_head free_list; - struct work_struct free_work; } contexts; /* @@ -1572,16 +1566,30 @@ enum { TGL_REVID_D0, }; -extern const struct i915_rev_steppings tgl_uy_revids[]; -extern const struct i915_rev_steppings tgl_revids[]; +#define TGL_UY_REVIDS_SIZE 4 +#define TGL_REVIDS_SIZE 2 + +extern const struct i915_rev_steppings tgl_uy_revids[TGL_UY_REVIDS_SIZE]; +extern const struct i915_rev_steppings tgl_revids[TGL_REVIDS_SIZE]; static inline const struct i915_rev_steppings * tgl_revids_get(struct drm_i915_private *dev_priv) { - if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) - return tgl_uy_revids; - else - return tgl_revids; + u8 revid = INTEL_REVID(dev_priv); + u8 size; + const struct i915_rev_steppings *tgl_revid_tbl; + + if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) { + tgl_revid_tbl = tgl_uy_revids; + size = ARRAY_SIZE(tgl_uy_revids); + } else { + tgl_revid_tbl = tgl_revids; + size = ARRAY_SIZE(tgl_revids); + } + + revid = min_t(u8, revid, size - 1); + + return &tgl_revid_tbl[revid]; } #define IS_TGL_DISP_REVID(p, since, until) \ @@ -1591,14 +1599,14 @@ tgl_revids_get(struct drm_i915_private *dev_priv) #define IS_TGL_UY_GT_REVID(p, since, until) \ ((IS_TGL_U(p) || IS_TGL_Y(p)) && \ - tgl_uy_revids->gt_stepping >= (since) && \ - tgl_uy_revids->gt_stepping <= (until)) + tgl_revids_get(p)->gt_stepping >= (since) && \ + tgl_revids_get(p)->gt_stepping <= (until)) #define IS_TGL_GT_REVID(p, since, until) \ (IS_TIGERLAKE(p) && \ !(IS_TGL_U(p) || IS_TGL_Y(p)) && \ - tgl_revids->gt_stepping >= (since) && \ - tgl_revids->gt_stepping <= (until)) + tgl_revids_get(p)->gt_stepping >= (since) && \ + tgl_revids_get(p)->gt_stepping <= (until)) #define RKL_REVID_A0 0x0 #define RKL_REVID_B0 0x1 @@ -1649,8 +1657,6 @@ tgl_revids_get(struct drm_i915_private *dev_priv) (INTEL_INFO(dev_priv)->has_logical_ring_contexts) #define HAS_LOGICAL_RING_ELSQ(dev_priv) \ (INTEL_INFO(dev_priv)->has_logical_ring_elsq) -#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \ - (INTEL_INFO(dev_priv)->has_logical_ring_preemption) #define HAS_MASTER_UNIT_IRQ(dev_priv) (INTEL_INFO(dev_priv)->has_master_unit_irq) @@ -1970,43 +1976,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -#define __I915_REG_OP(op__, dev_priv__, ...) \ - intel_uncore_##op__(&(dev_priv__)->uncore, __VA_ARGS__) - -#define I915_READ(reg__) __I915_REG_OP(read, dev_priv, (reg__)) -#define I915_WRITE(reg__, val__) __I915_REG_OP(write, dev_priv, (reg__), (val__)) - -#define POSTING_READ(reg__) __I915_REG_OP(posting_read, dev_priv, (reg__)) - -/* These are untraced mmio-accessors that are only valid to be used inside - * critical sections, such as inside IRQ handlers, where forcewake is explicitly - * controlled. - * - * Think twice, and think again, before using these. - * - * As an example, these accessors can possibly be used between: - * - * spin_lock_irq(&dev_priv->uncore.lock); - * intel_uncore_forcewake_get__locked(); - * - * and - * - * intel_uncore_forcewake_put__locked(); - * spin_unlock_irq(&dev_priv->uncore.lock); - * - * - * Note: some registers may not need forcewake held, so - * intel_uncore_forcewake_{get,put} can be omitted, see - * intel_uncore_forcewake_for_reg(). 
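(Editorial aside: the reworked tgl_revids_get() above indexes the stepping table by INTEL_REVID() and clamps out-of-range revisions to the last entry, instead of always returning the table base as before. The clamp-and-index idiom, reduced to a standalone sketch with invented table contents:

#include <stdio.h>

struct stepping { char gt_stepping; };

/* Clamp an ID into a lookup table rather than trusting it blindly. */
static const struct stepping *
revid_to_stepping(const struct stepping *tbl, unsigned int size,
		  unsigned int revid)
{
	if (revid >= size)
		revid = size - 1; /* unknown revid: assume the newest stepping */
	return &tbl[revid];
}

int main(void)
{
	static const struct stepping tbl[] = { {'A'}, {'B'}, {'C'}, {'D'} };

	printf("%c\n", revid_to_stepping(tbl, 4, 9)->gt_stepping); /* D */
	return 0;
}
)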
- * - * Certain architectures will die if the same cacheline is concurrently accessed - * by different clients (e.g. on Ivybridge). Access to registers should - * therefore generally be serialised, by either the dev_priv->uncore.lock or - * a more localised lock guarding all access to that bank of registers. - */ -#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__)) -#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__)) - /* i915_mm.c */ int remap_io_mapping(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, unsigned long size, @@ -2029,16 +1998,4 @@ i915_coherent_map_type(struct drm_i915_private *i915) return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC; } -static inline u64 i915_cs_timestamp_ns_to_ticks(struct drm_i915_private *i915, u64 val) -{ - return DIV_ROUND_UP_ULL(val * RUNTIME_INFO(i915)->cs_timestamp_frequency_hz, - 1000000000); -} - -static inline u64 i915_cs_timestamp_ticks_to_ns(struct drm_i915_private *i915, u64 val) -{ - return div_u64(val * 1000000000, - RUNTIME_INFO(i915)->cs_timestamp_frequency_hz); -} - #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 58276694c848..17a4636ee542 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1207,8 +1207,6 @@ void i915_gem_driver_remove(struct drm_i915_private *dev_priv) void i915_gem_driver_release(struct drm_i915_private *dev_priv) { - i915_gem_driver_release__contexts(dev_priv); - intel_gt_driver_release(&dev_priv->gt); intel_wa_list_free(&dev_priv->gt_wa_list); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index c5ee1567f3d1..3ee2f682eff6 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -55,22 +55,17 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, struct sg_table *pages) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); - struct device *kdev = &dev_priv->drm.pdev->dev; - struct i915_ggtt *ggtt = &dev_priv->ggtt; + struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct i915_ggtt *ggtt = &i915->ggtt; - if (unlikely(ggtt->do_idle_maps)) { - /* XXX This does not prevent more requests being submitted! */ - if (intel_gt_retire_requests_timeout(ggtt->vm.gt, - -MAX_SCHEDULE_TIMEOUT)) { - drm_err(&dev_priv->drm, - "Failed to wait for idle; VT'd may hang.\n"); - /* Wait a bit, in hopes it avoids the hang */ - udelay(10); - } - } + /* XXX This does not prevent more requests being submitted! 
*/ + if (unlikely(ggtt->do_idle_maps)) + /* Wait a bit, in the hope it avoids the hang */ + usleep_range(100, 250); - dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL); + dma_unmap_sg(&i915->drm.pdev->dev, + pages->sgl, pages->nents, + PCI_DMA_BIDIRECTIONAL); } /** diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index f96032c60a12..75c3bfc2486e 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -154,7 +154,7 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, return -ENODEV; break; case I915_PARAM_CS_TIMESTAMP_FREQUENCY: - value = RUNTIME_INFO(i915)->cs_timestamp_frequency_hz; + value = i915->gt.clock_frequency; break; case I915_PARAM_MMAP_GTT_COHERENT: value = INTEL_INFO(i915)->has_coherent_ggtt; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index d8cac4c5881f..8b163ee1b86d 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -485,7 +485,7 @@ static void error_print_context(struct drm_i915_error_state_buf *m, const char *header, const struct i915_gem_context_coredump *ctx) { - const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns; + const u32 period = m->i915->gt.clock_period_ns; err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n", header, ctx->comm, ctx->pid, ctx->sched_attr.priority, diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index dc6febc63f1c..dd1971040bbc 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -60,6 +60,24 @@ * and related files, but that will be described in separate chapters. */ +/* + * Interrupt statistic for PMU. Increments the counter only if the + * interrupt originated from the GPU so interrupts from a device which + * shares the interrupt line are not accounted. + */ +static inline void pmu_irq_stats(struct drm_i915_private *i915, + irqreturn_t res) +{ + if (unlikely(res != IRQ_HANDLED)) + return; + + /* + * A clever compiler translates that into INC. A not so clever one + * should at least prevent store tearing.
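
For readers unfamiliar with the idiom in pmu_irq_stats(): the counter has exactly one writer (the interrupt handler), so no atomic is required, but the update must pair with READ_ONCE() on the PMU read side (see the I915_PMU_INTERRUPTS case in __i915_pmu_event_read() further down) to rule out torn accesses. A self-contained sketch of the single-writer counter pattern, with illustrative names:

        #include <linux/compiler.h>     /* READ_ONCE(), WRITE_ONCE() */

        static unsigned long counter;   /* one writer, any number of readers */

        static void producer(void)      /* e.g. the interrupt handler */
        {
                WRITE_ONCE(counter, counter + 1);       /* no store tearing */
        }

        static unsigned long consumer(void)     /* e.g. the PMU event read */
        {
                return READ_ONCE(counter);              /* no load tearing */
        }
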
+ */ + WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1); +} + typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val); typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915, enum hpd_pin pin); @@ -309,10 +327,10 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv, lockdep_assert_held(&dev_priv->irq_lock); drm_WARN_ON(&dev_priv->drm, bits & ~mask); - val = I915_READ(PORT_HOTPLUG_EN); + val = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_EN); val &= ~mask; val |= bits; - I915_WRITE(PORT_HOTPLUG_EN, val); + intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_EN, val); } /** @@ -358,8 +376,8 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv, if (new_val != dev_priv->irq_mask && !drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) { dev_priv->irq_mask = new_val; - I915_WRITE(DEIMR, dev_priv->irq_mask); - POSTING_READ(DEIMR); + intel_uncore_write(&dev_priv->uncore, DEIMR, dev_priv->irq_mask); + intel_uncore_posting_read(&dev_priv->uncore, DEIMR); } } @@ -383,15 +401,15 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv, if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) return; - old_val = I915_READ(GEN8_DE_PORT_IMR); + old_val = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); new_val = old_val; new_val &= ~interrupt_mask; new_val |= (~enabled_irq_mask & interrupt_mask); if (new_val != old_val) { - I915_WRITE(GEN8_DE_PORT_IMR, new_val); - POSTING_READ(GEN8_DE_PORT_IMR); + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IMR, new_val); + intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PORT_IMR); } } @@ -422,8 +440,8 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv, if (new_val != dev_priv->de_irq_mask[pipe]) { dev_priv->de_irq_mask[pipe] = new_val; - I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); - POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); + intel_uncore_posting_read(&dev_priv->uncore, GEN8_DE_PIPE_IMR(pipe)); } } @@ -437,7 +455,7 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, u32 interrupt_mask, u32 enabled_irq_mask) { - u32 sdeimr = I915_READ(SDEIMR); + u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR); sdeimr &= ~interrupt_mask; sdeimr |= (~enabled_irq_mask & interrupt_mask); @@ -448,8 +466,8 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv))) return; - I915_WRITE(SDEIMR, sdeimr); - POSTING_READ(SDEIMR); + intel_uncore_write(&dev_priv->uncore, SDEIMR, sdeimr); + intel_uncore_posting_read(&dev_priv->uncore, SDEIMR); } u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv, @@ -515,8 +533,8 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv, dev_priv->pipestat_irq_mask[pipe] |= status_mask; enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); - I915_WRITE(reg, enable_mask | status_mask); - POSTING_READ(reg); + intel_uncore_write(&dev_priv->uncore, reg, enable_mask | status_mask); + intel_uncore_posting_read(&dev_priv->uncore, reg); } void i915_disable_pipestat(struct drm_i915_private *dev_priv, @@ -538,8 +556,8 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv, dev_priv->pipestat_irq_mask[pipe] &= ~status_mask; enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); - I915_WRITE(reg, enable_mask | status_mask); - POSTING_READ(reg); + intel_uncore_write(&dev_priv->uncore, reg, enable_mask | 
status_mask); + intel_uncore_posting_read(&dev_priv->uncore, reg); } static bool i915_has_asle(struct drm_i915_private *dev_priv) @@ -697,7 +715,7 @@ u32 g4x_get_vblank_counter(struct drm_crtc *crtc) if (!vblank->max_vblank_count) return 0; - return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); + return intel_uncore_read(&dev_priv->uncore, PIPE_FRMCOUNT_G4X(pipe)); } /* @@ -986,9 +1004,9 @@ static void ivb_parity_work(struct work_struct *work) if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice)) goto out; - misccpctl = I915_READ(GEN7_MISCCPCTL); - I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); - POSTING_READ(GEN7_MISCCPCTL); + misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL); + intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); + intel_uncore_posting_read(&dev_priv->uncore, GEN7_MISCCPCTL); while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) { i915_reg_t reg; @@ -1002,13 +1020,13 @@ static void ivb_parity_work(struct work_struct *work) reg = GEN7_L3CDERRST1(slice); - error_status = I915_READ(reg); + error_status = intel_uncore_read(&dev_priv->uncore, reg); row = GEN7_PARITY_ERROR_ROW(error_status); bank = GEN7_PARITY_ERROR_BANK(error_status); subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); - I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); - POSTING_READ(reg); + intel_uncore_write(&dev_priv->uncore, reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE); + intel_uncore_posting_read(&dev_priv->uncore, reg); parity_event[0] = I915_L3_PARITY_UEVENT "=1"; parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); @@ -1029,7 +1047,7 @@ static void ivb_parity_work(struct work_struct *work) kfree(parity_event[1]); } - I915_WRITE(GEN7_MISCCPCTL, misccpctl); + intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl); out: drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice); @@ -1044,17 +1062,12 @@ static bool gen11_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_TC1: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC1); case HPD_PORT_TC2: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC2); case HPD_PORT_TC3: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC3); case HPD_PORT_TC4: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC4); case HPD_PORT_TC5: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC5); case HPD_PORT_TC6: - return val & GEN11_HOTPLUG_CTL_LONG_DETECT(HPD_PORT_TC6); + return val & GEN11_HOTPLUG_CTL_LONG_DETECT(pin); default: return false; } @@ -1078,13 +1091,10 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_A: - return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_A); case HPD_PORT_B: - return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_B); case HPD_PORT_C: - return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_C); case HPD_PORT_D: - return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(HPD_PORT_D); + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(pin); default: return false; } @@ -1094,17 +1104,12 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_TC1: - return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC1); case HPD_PORT_TC2: - return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC2); case HPD_PORT_TC3: - return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC3); case HPD_PORT_TC4: - return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC4); case HPD_PORT_TC5: - return val & 
ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC5); case HPD_PORT_TC6: - return val & ICP_TC_HPD_LONG_DETECT(HPD_PORT_TC6); + return val & ICP_TC_HPD_LONG_DETECT(pin); default: return false; } @@ -1319,7 +1324,7 @@ static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { display_pipe_crc_irq_handler(dev_priv, pipe, - I915_READ(PIPE_CRC_RES_1_IVB(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), 0, 0, 0, 0); } @@ -1327,11 +1332,11 @@ static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, enum pipe pipe) { display_pipe_crc_irq_handler(dev_priv, pipe, - I915_READ(PIPE_CRC_RES_1_IVB(pipe)), - I915_READ(PIPE_CRC_RES_2_IVB(pipe)), - I915_READ(PIPE_CRC_RES_3_IVB(pipe)), - I915_READ(PIPE_CRC_RES_4_IVB(pipe)), - I915_READ(PIPE_CRC_RES_5_IVB(pipe))); + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_1_IVB(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_2_IVB(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_3_IVB(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_4_IVB(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_5_IVB(pipe))); } static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, @@ -1340,19 +1345,19 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, u32 res1, res2; if (INTEL_GEN(dev_priv) >= 3) - res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); + res1 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES1_I915(pipe)); else res1 = 0; if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) - res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); + res2 = intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RES2_G4X(pipe)); else res2 = 0; display_pipe_crc_irq_handler(dev_priv, pipe, - I915_READ(PIPE_CRC_RES_RED(pipe)), - I915_READ(PIPE_CRC_RES_GREEN(pipe)), - I915_READ(PIPE_CRC_RES_BLUE(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_RED(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_GREEN(pipe)), + intel_uncore_read(&dev_priv->uncore, PIPE_CRC_RES_BLUE(pipe)), res1, res2); } @@ -1361,7 +1366,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) enum pipe pipe; for_each_pipe(dev_priv, pipe) { - I915_WRITE(PIPESTAT(pipe), + intel_uncore_write(&dev_priv->uncore, PIPESTAT(pipe), PIPESTAT_INT_STATUS_MASK | PIPE_FIFO_UNDERRUN_STATUS); @@ -1415,7 +1420,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, continue; reg = PIPESTAT(pipe); - pipe_stats[pipe] = I915_READ(reg) & status_mask; + pipe_stats[pipe] = intel_uncore_read(&dev_priv->uncore, reg) & status_mask; enable_mask = i915_pipestat_enable_mask(dev_priv, pipe); /* @@ -1428,8 +1433,8 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, * an interrupt is still pending. 
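
The pair of writes that follows is easier to read with the PIPESTAT layout in mind: as I read the surrounding code (not verified against bspec), the enable bits sit in the high half of the register and write-one-to-clear status bits in the low half, so the first write acks the latched events and the second restores the enable half. Schematically:

        u32 status = intel_uncore_read(uncore, PIPESTAT(pipe)) & status_mask;

        if (status) {
                /* W1C: ack the latched status bits */
                intel_uncore_write(uncore, PIPESTAT(pipe), status);
                /* restore the enable half of the register */
                intel_uncore_write(uncore, PIPESTAT(pipe), enable_mask);
        }
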
*/ if (pipe_stats[pipe]) { - I915_WRITE(reg, pipe_stats[pipe]); - I915_WRITE(reg, enable_mask); + intel_uncore_write(&dev_priv->uncore, reg, pipe_stats[pipe]); + intel_uncore_write(&dev_priv->uncore, reg, enable_mask); } } spin_unlock(&dev_priv->irq_lock); @@ -1545,18 +1550,18 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) * bits can itself generate a new hotplug interrupt :( */ for (i = 0; i < 10; i++) { - u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask; + u32 tmp = intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT) & hotplug_status_mask; if (tmp == 0) return hotplug_status; hotplug_status |= tmp; - I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); + intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, hotplug_status); } drm_WARN_ONCE(&dev_priv->drm, 1, "PORT_HOTPLUG_STAT did not clear (0x%08x)\n", - I915_READ(PORT_HOTPLUG_STAT)); + intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); return hotplug_status; } @@ -1605,9 +1610,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) u32 hotplug_status = 0; u32 ier = 0; - gt_iir = I915_READ(GTIIR); - pm_iir = I915_READ(GEN6_PMIIR); - iir = I915_READ(VLV_IIR); + gt_iir = intel_uncore_read(&dev_priv->uncore, GTIIR); + pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR); + iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR); if (gt_iir == 0 && pm_iir == 0 && iir == 0) break; @@ -1627,14 +1632,14 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR * bits this time around. */ - I915_WRITE(VLV_MASTER_IER, 0); - ier = I915_READ(VLV_IER); - I915_WRITE(VLV_IER, 0); + intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); + ier = intel_uncore_read(&dev_priv->uncore, VLV_IER); + intel_uncore_write(&dev_priv->uncore, VLV_IER, 0); if (gt_iir) - I915_WRITE(GTIIR, gt_iir); + intel_uncore_write(&dev_priv->uncore, GTIIR, gt_iir); if (pm_iir) - I915_WRITE(GEN6_PMIIR, pm_iir); + intel_uncore_write(&dev_priv->uncore, GEN6_PMIIR, pm_iir); if (iir & I915_DISPLAY_PORT_INTERRUPT) hotplug_status = i9xx_hpd_irq_ack(dev_priv); @@ -1652,10 +1657,10 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. */ if (iir) - I915_WRITE(VLV_IIR, iir); + intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir); - I915_WRITE(VLV_IER, ier); - I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); + intel_uncore_write(&dev_priv->uncore, VLV_IER, ier); + intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); if (gt_iir) gen6_gt_irq_handler(&dev_priv->gt, gt_iir); @@ -1668,6 +1673,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) valleyview_pipestat_irq_handler(dev_priv, pipe_stats); } while (0); + pmu_irq_stats(dev_priv, ret); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; @@ -1690,8 +1697,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) u32 hotplug_status = 0; u32 ier = 0; - master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; - iir = I915_READ(VLV_IIR); + master_ctl = intel_uncore_read(&dev_priv->uncore, GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; + iir = intel_uncore_read(&dev_priv->uncore, VLV_IIR); if (master_ctl == 0 && iir == 0) break; @@ -1711,9 +1718,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL * bits this time around. 
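
Both the valleyview and cherryview loops follow the discipline the comments above describe: gate the master interrupt and zero IER while the IIR sources are acked, so events arriving mid-handler latch into IIR again rather than being lost. Stripped to a skeleton (MASTER_IER_REG, IER_REG and MASTER_ENABLE are stand-ins for the VLV/GEN8 registers, not real identifiers):

        intel_uncore_write(uncore, MASTER_IER_REG, 0);  /* gate everything */
        ier = intel_uncore_read(uncore, IER_REG);
        intel_uncore_write(uncore, IER_REG, 0);

        /* ... read the IIR sources, ack them, handle the events ... */

        intel_uncore_write(uncore, IER_REG, ier);       /* re-arm */
        intel_uncore_write(uncore, MASTER_IER_REG, MASTER_ENABLE);
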
*/ - I915_WRITE(GEN8_MASTER_IRQ, 0); - ier = I915_READ(VLV_IER); - I915_WRITE(VLV_IER, 0); + intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0); + ier = intel_uncore_read(&dev_priv->uncore, VLV_IER); + intel_uncore_write(&dev_priv->uncore, VLV_IER, 0); gen8_gt_irq_handler(&dev_priv->gt, master_ctl); @@ -1734,10 +1741,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last. */ if (iir) - I915_WRITE(VLV_IIR, iir); + intel_uncore_write(&dev_priv->uncore, VLV_IIR, iir); - I915_WRITE(VLV_IER, ier); - I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); + intel_uncore_write(&dev_priv->uncore, VLV_IER, ier); + intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); if (hotplug_status) i9xx_hpd_irq_handler(dev_priv, hotplug_status); @@ -1745,6 +1752,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) valleyview_pipestat_irq_handler(dev_priv, pipe_stats); } while (0); + pmu_irq_stats(dev_priv, ret); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; @@ -1761,7 +1770,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, * zero. Not acking leads to "The master control interrupt lied (SDE)!" * errors. */ - dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); if (!hotplug_trigger) { u32 mask = PORTA_HOTPLUG_STATUS_MASK | PORTD_HOTPLUG_STATUS_MASK | @@ -1770,7 +1779,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, dig_hotplug_reg &= ~mask; } - I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); if (!hotplug_trigger) return; @@ -1815,7 +1824,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) for_each_pipe(dev_priv, pipe) drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), - I915_READ(FDI_RX_IIR(pipe))); + intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); } if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) @@ -1834,7 +1843,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) static void ivb_err_int_handler(struct drm_i915_private *dev_priv) { - u32 err_int = I915_READ(GEN7_ERR_INT); + u32 err_int = intel_uncore_read(&dev_priv->uncore, GEN7_ERR_INT); enum pipe pipe; if (err_int & ERR_INT_POISON) @@ -1852,12 +1861,12 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv) } } - I915_WRITE(GEN7_ERR_INT, err_int); + intel_uncore_write(&dev_priv->uncore, GEN7_ERR_INT, err_int); } static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) { - u32 serr_int = I915_READ(SERR_INT); + u32 serr_int = intel_uncore_read(&dev_priv->uncore, SERR_INT); enum pipe pipe; if (serr_int & SERR_INT_POISON) @@ -1867,7 +1876,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe)) intel_pch_fifo_underrun_irq_handler(dev_priv, pipe); - I915_WRITE(SERR_INT, serr_int); + intel_uncore_write(&dev_priv->uncore, SERR_INT, serr_int); } static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) @@ -1900,7 +1909,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) for_each_pipe(dev_priv, pipe) drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n", pipe_name(pipe), - I915_READ(FDI_RX_IIR(pipe))); + intel_uncore_read(&dev_priv->uncore, FDI_RX_IIR(pipe))); } if (pch_iir & SDE_ERROR_CPT) @@ -1916,8 +1925,8 @@ static 
void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (ddi_hotplug_trigger) { u32 dig_hotplug_reg; - dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); - I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI); + intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, dig_hotplug_reg); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, ddi_hotplug_trigger, dig_hotplug_reg, @@ -1928,8 +1937,8 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (tc_hotplug_trigger) { u32 dig_hotplug_reg; - dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); - I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC); + intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, dig_hotplug_reg); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, tc_hotplug_trigger, dig_hotplug_reg, @@ -1954,8 +1963,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (hotplug_trigger) { u32 dig_hotplug_reg; - dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); - I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, @@ -1966,8 +1975,8 @@ static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) if (hotplug2_trigger) { u32 dig_hotplug_reg; - dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2); - I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg); + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2); + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, dig_hotplug_reg); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug2_trigger, dig_hotplug_reg, @@ -1987,8 +1996,8 @@ static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv, { u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; - dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); - I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL); + intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, @@ -2029,7 +2038,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, /* check event from PCH */ if (de_iir & DE_PCH_EVENT) { - u32 pch_iir = I915_READ(SDEIIR); + u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); if (HAS_PCH_CPT(dev_priv)) cpt_irq_handler(dev_priv, pch_iir); @@ -2037,7 +2046,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, ibx_irq_handler(dev_priv, pch_iir); /* should clear PCH hotplug event before clear CPU irq */ - I915_WRITE(SDEIIR, pch_iir); + intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); } if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT) @@ -2057,10 +2066,10 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, ivb_err_int_handler(dev_priv); if (de_iir & DE_EDP_PSR_INT_HSW) { - u32 psr_iir = I915_READ(EDP_PSR_IIR); + u32 psr_iir = intel_uncore_read(&dev_priv->uncore, EDP_PSR_IIR); intel_psr_irq_handler(dev_priv, psr_iir); - I915_WRITE(EDP_PSR_IIR, psr_iir); + intel_uncore_write(&dev_priv->uncore, EDP_PSR_IIR, psr_iir); } if (de_iir & DE_AUX_CHANNEL_A_IVB) @@ -2076,12 +2085,12 @@ static void 
ivb_display_irq_handler(struct drm_i915_private *dev_priv, /* check event from PCH */ if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) { - u32 pch_iir = I915_READ(SDEIIR); + u32 pch_iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); cpt_irq_handler(dev_priv, pch_iir); /* clear PCH hotplug event before clear CPU irq */ - I915_WRITE(SDEIIR, pch_iir); + intel_uncore_write(&dev_priv->uncore, SDEIIR, pch_iir); } } @@ -2155,6 +2164,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg) if (sde_ier) raw_reg_write(regs, SDEIER, sde_ier); + pmu_irq_stats(i915, ret); + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ enable_rpm_wakeref_asserts(&i915->runtime_pm); @@ -2166,8 +2177,8 @@ static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv, { u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; - dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); - I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, dig_hotplug_reg); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, hotplug_trigger, dig_hotplug_reg, @@ -2186,8 +2197,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) if (trigger_tc) { u32 dig_hotplug_reg; - dig_hotplug_reg = I915_READ(GEN11_TC_HOTPLUG_CTL); - I915_WRITE(GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL); + intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, dig_hotplug_reg); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tc, dig_hotplug_reg, @@ -2198,8 +2209,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir) if (trigger_tbt) { u32 dig_hotplug_reg; - dig_hotplug_reg = I915_READ(GEN11_TBT_HOTPLUG_CTL); - I915_WRITE(GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); + dig_hotplug_reg = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL); + intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, dig_hotplug_reg); intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, trigger_tbt, dig_hotplug_reg, @@ -2276,8 +2287,8 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) else iir_reg = EDP_PSR_IIR; - psr_iir = I915_READ(iir_reg); - I915_WRITE(iir_reg, psr_iir); + psr_iir = intel_uncore_read(&dev_priv->uncore, iir_reg); + intel_uncore_write(&dev_priv->uncore, iir_reg, psr_iir); if (psr_iir) found = true; @@ -2301,7 +2312,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, * In case of dual link, TE comes from DSI_1 * this is to check if dual link is enabled */ - val = I915_READ(TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0)); + val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL2(TRANSCODER_DSI_0)); val &= PORT_SYNC_MODE_ENABLE; /* @@ -2313,7 +2324,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, dsi_trans = (port == PORT_A) ?
TRANSCODER_DSI_0 : TRANSCODER_DSI_1; /* Check if DSI configured in command mode */ - val = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)); + val = intel_uncore_read(&dev_priv->uncore, DSI_TRANS_FUNC_CONF(dsi_trans)); val = val & OP_MODE_MASK; if (val != CMD_MODE_NO_GATE && val != CMD_MODE_TE_GATE) { @@ -2322,7 +2333,7 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, } /* Get PIPE for handling VBLANK event */ - val = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans)); + val = intel_uncore_read(&dev_priv->uncore, TRANS_DDI_FUNC_CTL(dsi_trans)); switch (val & TRANS_DDI_EDP_INPUT_MASK) { case TRANS_DDI_EDP_INPUT_A_ON: pipe = PIPE_A; @@ -2342,8 +2353,8 @@ static void gen11_dsi_te_interrupt_handler(struct drm_i915_private *dev_priv, /* clear TE in dsi IIR */ port = (te_trigger & DSI1_TE) ? PORT_B : PORT_A; - tmp = I915_READ(DSI_INTR_IDENT_REG(port)); - I915_WRITE(DSI_INTR_IDENT_REG(port), tmp); + tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port)); + intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp); } static irqreturn_t @@ -2354,9 +2365,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) enum pipe pipe; if (master_ctl & GEN8_DE_MISC_IRQ) { - iir = I915_READ(GEN8_DE_MISC_IIR); + iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_MISC_IIR); if (iir) { - I915_WRITE(GEN8_DE_MISC_IIR, iir); + intel_uncore_write(&dev_priv->uncore, GEN8_DE_MISC_IIR, iir); ret = IRQ_HANDLED; gen8_de_misc_irq_handler(dev_priv, iir); } else { @@ -2366,9 +2377,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) } if (INTEL_GEN(dev_priv) >= 11 && (master_ctl & GEN11_DE_HPD_IRQ)) { - iir = I915_READ(GEN11_DE_HPD_IIR); + iir = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IIR); if (iir) { - I915_WRITE(GEN11_DE_HPD_IIR, iir); + intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IIR, iir); ret = IRQ_HANDLED; gen11_hpd_irq_handler(dev_priv, iir); } else { @@ -2378,11 +2389,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) } if (master_ctl & GEN8_DE_PORT_IRQ) { - iir = I915_READ(GEN8_DE_PORT_IIR); + iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PORT_IIR); if (iir) { bool found = false; - I915_WRITE(GEN8_DE_PORT_IIR, iir); + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir); ret = IRQ_HANDLED; if (iir & gen8_de_port_aux_mask(dev_priv)) { @@ -2435,7 +2446,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) continue; - iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); + iir = intel_uncore_read(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe)); if (!iir) { drm_err(&dev_priv->drm, "The master control interrupt lied (DE PIPE)!\n"); @@ -2443,7 +2454,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) } ret = IRQ_HANDLED; - I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); + intel_uncore_write(&dev_priv->uncore, GEN8_DE_PIPE_IIR(pipe), iir); if (iir & GEN8_PIPE_VBLANK) intel_handle_vblank(dev_priv, pipe); @@ -2472,9 +2483,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) * scheme also closed the SDE interrupt handling race we've seen * on older pch-split platforms. But this needs testing. 
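
Each leaf of gen8_de_irq_handler() converted above keeps the same contract: the master control register advertises a pending category, the matching IIR must then be non-zero, is written back to ack before handling so a new edge can latch, and only then is decoded. The shared skeleton, with generic identifiers rather than the per-category ones:

        if (master_ctl & CATEGORY_IRQ) {
                u32 iir = intel_uncore_read(&dev_priv->uncore, CATEGORY_IIR);

                if (iir) {
                        intel_uncore_write(&dev_priv->uncore, CATEGORY_IIR, iir);
                        ret = IRQ_HANDLED;
                        category_irq_handler(dev_priv, iir);
                } else {
                        drm_err(&dev_priv->drm,
                                "The master control interrupt lied!\n");
                }
        }
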
*/ - iir = I915_READ(SDEIIR); + iir = intel_uncore_read(&dev_priv->uncore, SDEIIR); if (iir) { - I915_WRITE(SDEIIR, iir); + intel_uncore_write(&dev_priv->uncore, SDEIIR, iir); ret = IRQ_HANDLED; if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) @@ -2541,6 +2552,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) gen8_master_intr_enable(regs); + pmu_irq_stats(dev_priv, IRQ_HANDLED); + return IRQ_HANDLED; } @@ -2636,6 +2649,8 @@ __gen11_irq_handler(struct drm_i915_private * const i915, gen11_gu_misc_irq_handler(gt, gu_misc_iir); + pmu_irq_stats(i915, IRQ_HANDLED); + return IRQ_HANDLED; } @@ -2713,7 +2728,7 @@ int i915gm_enable_vblank(struct drm_crtc *crtc) * only when vblank interrupts are actually enabled. */ if (dev_priv->vblank_enabled++ == 0) - I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); + intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); return i8xx_enable_vblank(crtc); } @@ -2770,16 +2785,16 @@ static bool gen11_dsi_configure_te(struct intel_crtc *intel_crtc, else port = PORT_A; - tmp = I915_READ(DSI_INTR_MASK_REG(port)); + tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_MASK_REG(port)); if (enable) tmp &= ~DSI_TE_EVENT; else tmp |= DSI_TE_EVENT; - I915_WRITE(DSI_INTR_MASK_REG(port), tmp); + intel_uncore_write(&dev_priv->uncore, DSI_INTR_MASK_REG(port), tmp); - tmp = I915_READ(DSI_INTR_IDENT_REG(port)); - I915_WRITE(DSI_INTR_IDENT_REG(port), tmp); + tmp = intel_uncore_read(&dev_priv->uncore, DSI_INTR_IDENT_REG(port)); + intel_uncore_write(&dev_priv->uncore, DSI_INTR_IDENT_REG(port), tmp); return true; } @@ -2841,7 +2856,7 @@ void i915gm_disable_vblank(struct drm_crtc *crtc) i8xx_disable_vblank(crtc); if (--dev_priv->vblank_enabled == 0) - I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); + intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); } void i965_disable_vblank(struct drm_crtc *crtc) @@ -2907,7 +2922,7 @@ static void ibx_irq_reset(struct drm_i915_private *dev_priv) GEN3_IRQ_RESET(uncore, SDE); if (HAS_PCH_CPT(dev_priv) || HAS_PCH_LPT(dev_priv)) - I915_WRITE(SERR_INT, 0xffffffff); + intel_uncore_write(&dev_priv->uncore, SERR_INT, 0xffffffff); } static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) @@ -2920,7 +2935,7 @@ static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) intel_uncore_write(uncore, DPINVGTT, DPINVGTT_STATUS_MASK); i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0); - intel_uncore_write(uncore, PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + intel_uncore_write(uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); i9xx_pipestat_irq_reset(dev_priv); @@ -2983,8 +2998,8 @@ static void ilk_irq_reset(struct drm_i915_private *dev_priv) static void valleyview_irq_reset(struct drm_i915_private *dev_priv) { - I915_WRITE(VLV_MASTER_IER, 0); - POSTING_READ(VLV_MASTER_IER); + intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, 0); + intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); gen5_gt_irq_reset(&dev_priv->gt); @@ -3137,8 +3152,8 @@ static void cherryview_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - I915_WRITE(GEN8_MASTER_IRQ, 0); - POSTING_READ(GEN8_MASTER_IRQ); + intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, 0); + intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); gen8_gt_irq_reset(&dev_priv->gt); @@ -3184,7 +3199,7 @@ static void 
ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) * duration to 2ms (which is the minimum in the Display Port spec). * The pulse duration bits are reserved on LPT+. */ - hotplug = I915_READ(PCH_PORT_HOTPLUG); + hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); hotplug &= ~(PORTA_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | @@ -3193,7 +3208,7 @@ static void ibx_hpd_detection_setup(struct drm_i915_private *dev_priv) PORTC_PULSE_DURATION_MASK | PORTD_PULSE_DURATION_MASK); hotplug |= intel_hpd_hotplug_enables(dev_priv, ibx_hotplug_enables); - I915_WRITE(PCH_PORT_HOTPLUG, hotplug); + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); } static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv) @@ -3242,20 +3257,20 @@ static void icp_ddi_hpd_detection_setup(struct drm_i915_private *dev_priv) { u32 hotplug; - hotplug = I915_READ(SHOTPLUG_CTL_DDI); + hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_DDI); hotplug &= ~(SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_A) | SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_B) | SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_C) | SHOTPLUG_CTL_DDI_HPD_ENABLE(HPD_PORT_D)); hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_ddi_hotplug_enables); - I915_WRITE(SHOTPLUG_CTL_DDI, hotplug); + intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_DDI, hotplug); } static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) { u32 hotplug; - hotplug = I915_READ(SHOTPLUG_CTL_TC); + hotplug = intel_uncore_read(&dev_priv->uncore, SHOTPLUG_CTL_TC); hotplug &= ~(ICP_TC_HPD_ENABLE(HPD_PORT_TC1) | ICP_TC_HPD_ENABLE(HPD_PORT_TC2) | ICP_TC_HPD_ENABLE(HPD_PORT_TC3) | @@ -3263,7 +3278,7 @@ static void icp_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) ICP_TC_HPD_ENABLE(HPD_PORT_TC5) | ICP_TC_HPD_ENABLE(HPD_PORT_TC6)); hotplug |= intel_hpd_hotplug_enables(dev_priv, icp_tc_hotplug_enables); - I915_WRITE(SHOTPLUG_CTL_TC, hotplug); + intel_uncore_write(&dev_priv->uncore, SHOTPLUG_CTL_TC, hotplug); } static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) @@ -3274,7 +3289,7 @@ static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); if (INTEL_PCH_TYPE(dev_priv) <= PCH_TGP) - I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); + intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); @@ -3302,12 +3317,12 @@ static void dg1_hpd_irq_setup(struct drm_i915_private *dev_priv) { u32 val; - val = I915_READ(SOUTH_CHICKEN1); + val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1); val |= (INVERT_DDIA_HPD | INVERT_DDIB_HPD | INVERT_DDIC_HPD | INVERT_DDID_HPD); - I915_WRITE(SOUTH_CHICKEN1, val); + intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val); icp_hpd_irq_setup(dev_priv); } @@ -3316,7 +3331,7 @@ static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) { u32 hotplug; - hotplug = I915_READ(GEN11_TC_HOTPLUG_CTL); + hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL); hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | @@ -3324,14 +3339,14 @@ static void gen11_tc_hpd_detection_setup(struct drm_i915_private *dev_priv) GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)); hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables); - I915_WRITE(GEN11_TC_HOTPLUG_CTL, hotplug); + 
intel_uncore_write(&dev_priv->uncore, GEN11_TC_HOTPLUG_CTL, hotplug); } static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) { u32 hotplug; - hotplug = I915_READ(GEN11_TBT_HOTPLUG_CTL); + hotplug = intel_uncore_read(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL); hotplug &= ~(GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC1) | GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC2) | GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC3) | @@ -3339,7 +3354,7 @@ static void gen11_tbt_hpd_detection_setup(struct drm_i915_private *dev_priv) GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC5) | GEN11_HOTPLUG_CTL_ENABLE(HPD_PORT_TC6)); hotplug |= intel_hpd_hotplug_enables(dev_priv, gen11_hotplug_enables); - I915_WRITE(GEN11_TBT_HOTPLUG_CTL, hotplug); + intel_uncore_write(&dev_priv->uncore, GEN11_TBT_HOTPLUG_CTL, hotplug); } static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) @@ -3350,11 +3365,11 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.hpd); hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.hpd); - val = I915_READ(GEN11_DE_HPD_IMR); + val = intel_uncore_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); val &= ~hotplug_irqs; val |= ~enabled_irqs & hotplug_irqs; - I915_WRITE(GEN11_DE_HPD_IMR, val); - POSTING_READ(GEN11_DE_HPD_IMR); + intel_uncore_write(&dev_priv->uncore, GEN11_DE_HPD_IMR, val); + intel_uncore_posting_read(&dev_priv->uncore, GEN11_DE_HPD_IMR); gen11_tc_hpd_detection_setup(dev_priv); gen11_tbt_hpd_detection_setup(dev_priv); @@ -3397,25 +3412,25 @@ static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) /* Display WA #1179 WaHardHangonHotPlug: cnp */ if (HAS_PCH_CNP(dev_priv)) { - val = I915_READ(SOUTH_CHICKEN1); + val = intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN1); val &= ~CHASSIS_CLK_REQ_DURATION_MASK; val |= CHASSIS_CLK_REQ_DURATION(0xf); - I915_WRITE(SOUTH_CHICKEN1, val); + intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN1, val); } /* Enable digital hotplug on the PCH */ - hotplug = I915_READ(PCH_PORT_HOTPLUG); + hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); hotplug &= ~(PORTA_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | PORTD_HOTPLUG_ENABLE); hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug_enables); - I915_WRITE(PCH_PORT_HOTPLUG, hotplug); + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); - hotplug = I915_READ(PCH_PORT_HOTPLUG2); + hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG2); hotplug &= ~PORTE_HOTPLUG_ENABLE; hotplug |= intel_hpd_hotplug_enables(dev_priv, spt_hotplug2_enables); - I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG2, hotplug); } static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) @@ -3423,7 +3438,7 @@ static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv) u32 hotplug_irqs, enabled_irqs; if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) - I915_WRITE(SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); + intel_uncore_write(&dev_priv->uncore, SHPD_FILTER_CNT, SHPD_FILTER_CNT_500_ADJ); enabled_irqs = intel_hpd_enabled_irqs(dev_priv, dev_priv->hotplug.pch_hpd); hotplug_irqs = intel_hpd_hotplug_irqs(dev_priv, dev_priv->hotplug.pch_hpd); @@ -3454,11 +3469,11 @@ static void ilk_hpd_detection_setup(struct drm_i915_private *dev_priv) * duration to 2ms (which is the minimum in the Display Port spec) * The pulse duration bits are reserved on HSW+. 
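
All of these *_hpd_detection_setup() helpers open-code the same read-modify-write. i915 also provides intel_uncore_rmw(uncore, reg, clear, set) for exactly this shape, so, assuming none of these sites relies on the write happening unconditionally, the ilk variant that follows could equivalently be condensed to:

        intel_uncore_rmw(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL,
                         DIGITAL_PORTA_HOTPLUG_ENABLE |
                         DIGITAL_PORTA_PULSE_DURATION_MASK,
                         intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables));

The patch keeps the explicit read/modify/write form, which matches the rest of the file.
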
*/ - hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); + hotplug = intel_uncore_read(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL); hotplug &= ~(DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_MASK); hotplug |= intel_hpd_hotplug_enables(dev_priv, ilk_hotplug_enables); - I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); + intel_uncore_write(&dev_priv->uncore, DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); } static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv) @@ -3508,7 +3523,7 @@ static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) { u32 hotplug; - hotplug = I915_READ(PCH_PORT_HOTPLUG); + hotplug = intel_uncore_read(&dev_priv->uncore, PCH_PORT_HOTPLUG); hotplug &= ~(PORTA_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE | @@ -3516,7 +3531,7 @@ static void bxt_hpd_detection_setup(struct drm_i915_private *dev_priv) BXT_DDIB_HPD_INVERT | BXT_DDIC_HPD_INVERT); hotplug |= intel_hpd_hotplug_enables(dev_priv, bxt_hotplug_enables); - I915_WRITE(PCH_PORT_HOTPLUG, hotplug); + intel_uncore_write(&dev_priv->uncore, PCH_PORT_HOTPLUG, hotplug); } static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv) @@ -3636,8 +3651,8 @@ static void valleyview_irq_postinstall(struct drm_i915_private *dev_priv) vlv_display_irq_postinstall(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); - POSTING_READ(VLV_MASTER_IER); + intel_uncore_write(&dev_priv->uncore, VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); + intel_uncore_posting_read(&dev_priv->uncore, VLV_MASTER_IER); } static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) @@ -3750,14 +3765,14 @@ static void gen11_irq_postinstall(struct drm_i915_private *dev_priv) GEN3_IRQ_INIT(uncore, GEN11_GU_MISC_, ~gu_misc_masked, gu_misc_masked); - I915_WRITE(GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); + intel_uncore_write(&dev_priv->uncore, GEN11_DISPLAY_INT_CTL, GEN11_DISPLAY_IRQ_ENABLE); if (HAS_MASTER_UNIT_IRQ(dev_priv)) { dg1_master_intr_enable(uncore->regs); - POSTING_READ(DG1_MSTR_UNIT_INTR); + intel_uncore_posting_read(&dev_priv->uncore, DG1_MSTR_UNIT_INTR); } else { gen11_master_intr_enable(uncore->regs); - POSTING_READ(GEN11_GFX_MSTR_IRQ); + intel_uncore_posting_read(&dev_priv->uncore, GEN11_GFX_MSTR_IRQ); } } @@ -3770,8 +3785,8 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) vlv_display_irq_postinstall(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); - POSTING_READ(GEN8_MASTER_IRQ); + intel_uncore_write(&dev_priv->uncore, GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); + intel_uncore_posting_read(&dev_priv->uncore, GEN8_MASTER_IRQ); } static void i8xx_irq_reset(struct drm_i915_private *dev_priv) @@ -3861,11 +3876,11 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, { u32 emr; - *eir = I915_READ(EIR); + *eir = intel_uncore_read(&dev_priv->uncore, EIR); - I915_WRITE(EIR, *eir); + intel_uncore_write(&dev_priv->uncore, EIR, *eir); - *eir_stuck = I915_READ(EIR); + *eir_stuck = intel_uncore_read(&dev_priv->uncore, EIR); if (*eir_stuck == 0) return; @@ -3879,9 +3894,9 @@ static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv, * (or by a GPU reset) so we mask any bit that * remains set. 
*/ - emr = I915_READ(EMR); - I915_WRITE(EMR, 0xffffffff); - I915_WRITE(EMR, emr | *eir_stuck); + emr = intel_uncore_read(&dev_priv->uncore, EMR); + intel_uncore_write(&dev_priv->uncore, EMR, 0xffffffff); + intel_uncore_write(&dev_priv->uncore, EMR, emr | *eir_stuck); } static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv, @@ -3934,6 +3949,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats); } while (0); + pmu_irq_stats(dev_priv, ret); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; @@ -3945,7 +3962,7 @@ static void i915_irq_reset(struct drm_i915_private *dev_priv) if (I915_HAS_HOTPLUG(dev_priv)) { i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); } i9xx_pipestat_irq_reset(dev_priv); @@ -3959,7 +3976,7 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) struct intel_uncore *uncore = &dev_priv->uncore; u32 enable_mask; - I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | + intel_uncore_write(&dev_priv->uncore, EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); /* Unmask the interrupts that we always want on. */ @@ -4012,7 +4029,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) u32 hotplug_status = 0; u32 iir; - iir = I915_READ(GEN2_IIR); + iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR); if (iir == 0) break; @@ -4029,7 +4046,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); - I915_WRITE(GEN2_IIR, iir); + intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]); @@ -4043,6 +4060,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) i915_pipestat_irq_handler(dev_priv, iir, pipe_stats); } while (0); + pmu_irq_stats(dev_priv, ret); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; @@ -4053,7 +4072,7 @@ static void i965_irq_reset(struct drm_i915_private *dev_priv) struct intel_uncore *uncore = &dev_priv->uncore; i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); - I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); + intel_uncore_write(&dev_priv->uncore, PORT_HOTPLUG_STAT, intel_uncore_read(&dev_priv->uncore, PORT_HOTPLUG_STAT)); i9xx_pipestat_irq_reset(dev_priv); @@ -4080,7 +4099,7 @@ static void i965_irq_postinstall(struct drm_i915_private *dev_priv) error_mask = ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH); } - I915_WRITE(EMR, error_mask); + intel_uncore_write(&dev_priv->uncore, EMR, error_mask); /* Unmask the interrupts that we always want on. 
*/ dev_priv->irq_mask = @@ -4156,7 +4175,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) u32 hotplug_status = 0; u32 iir; - iir = I915_READ(GEN2_IIR); + iir = intel_uncore_read(&dev_priv->uncore, GEN2_IIR); if (iir == 0) break; @@ -4172,7 +4191,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) if (iir & I915_MASTER_ERROR_INTERRUPT) i9xx_error_irq_ack(dev_priv, &eir, &eir_stuck); - I915_WRITE(GEN2_IIR, iir); + intel_uncore_write(&dev_priv->uncore, GEN2_IIR, iir); if (iir & I915_USER_INTERRUPT) intel_engine_signal_breadcrumbs(dev_priv->gt.engine[RCS0]); @@ -4189,6 +4208,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) i965_pipestat_irq_handler(dev_priv, iir, pipe_stats); } while (0); + pmu_irq_stats(dev_priv, IRQ_HANDLED); + enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; @@ -4242,18 +4263,21 @@ void intel_irq_init(struct drm_i915_private *dev_priv) */ dev_priv->hotplug.hpd_short_storm_enabled = !HAS_DP_MST(dev_priv); - if (HAS_PCH_DG1(dev_priv)) - dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup; - else if (INTEL_GEN(dev_priv) >= 11) - dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; - else if (IS_GEN9_LP(dev_priv)) - dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; - else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) - dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; - else if (HAS_GMCH(dev_priv) && I915_HAS_HOTPLUG(dev_priv)) - dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; - else - dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; + if (HAS_GMCH(dev_priv)) { + if (I915_HAS_HOTPLUG(dev_priv)) + dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; + } else { + if (HAS_PCH_DG1(dev_priv)) + dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup; + else if (INTEL_GEN(dev_priv) >= 11) + dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; + else if (IS_GEN9_LP(dev_priv)) + dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup; + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) + dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup; + else + dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; + } } /** diff --git a/drivers/gpu/drm/i915/i915_mitigations.c b/drivers/gpu/drm/i915/i915_mitigations.c new file mode 100644 index 000000000000..84f12598d145 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_mitigations.c @@ -0,0 +1,146 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2021 Intel Corporation + */ + +#include <linux/kernel.h> +#include <linux/moduleparam.h> +#include <linux/slab.h> +#include <linux/string.h> + +#include "i915_drv.h" +#include "i915_mitigations.h" + +static unsigned long mitigations __read_mostly = ~0UL; + +enum { + CLEAR_RESIDUALS = 0, +}; + +static const char * const names[] = { + [CLEAR_RESIDUALS] = "residuals", +}; + +bool i915_mitigate_clear_residuals(void) +{ + return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS); +} + +static int mitigations_set(const char *val, const struct kernel_param *kp) +{ + unsigned long new = ~0UL; + char *str, *sep, *tok; + bool first = true; + int err = 0; + + BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations)); + + str = kstrdup(val, GFP_KERNEL); + if (!str) + return -ENOMEM; + + for (sep = str; (tok = strsep(&sep, ","));) { + bool enable = true; + int i; + + /* Be tolerant of leading/trailing whitespace */ + tok = strim(tok); + + if (first) { + first = false; + + if (!strcmp(tok, "auto")) + continue; + + new = 0; + if (!strcmp(tok, "off")) + continue; + } + + if (*tok == '!') { + enable = !enable; + tok++; + } + + if (!strncmp(tok, "no", 2)) { + enable = 
!enable; + tok += 2; + } + + if (*tok == '\0') + continue; + + for (i = 0; i < ARRAY_SIZE(names); i++) { + if (!strcmp(tok, names[i])) { + if (enable) + new |= BIT(i); + else + new &= ~BIT(i); + break; + } + } + if (i == ARRAY_SIZE(names)) { + pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n", + DRIVER_NAME, val, tok); + err = -EINVAL; + break; + } + } + kfree(str); + if (err) + return err; + + WRITE_ONCE(mitigations, new); + return 0; +} + +static int mitigations_get(char *buffer, const struct kernel_param *kp) +{ + unsigned long local = READ_ONCE(mitigations); + int count, i; + bool enable; + + if (!local) + return scnprintf(buffer, PAGE_SIZE, "%s\n", "off"); + + if (local & BIT(BITS_PER_LONG - 1)) { + count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto"); + enable = false; + } else { + enable = true; + count = 0; + } + + for (i = 0; i < ARRAY_SIZE(names); i++) { + if ((local & BIT(i)) != enable) + continue; + + count += scnprintf(buffer + count, PAGE_SIZE - count, + "%s%s,", enable ? "" : "!", names[i]); + } + + buffer[count - 1] = '\n'; + return count; +} + +static const struct kernel_param_ops ops = { + .set = mitigations_set, + .get = mitigations_get, +}; + +module_param_cb_unsafe(mitigations, &ops, NULL, 0600); +MODULE_PARM_DESC(mitigations, +"Selectively enable security mitigations for all Intel® GPUs in the system.\n" +"\n" +" auto -- enables all mitigations required for the platform [default]\n" +" off -- disables all mitigations\n" +"\n" +"Individual mitigations can be enabled by passing a comma-separated string,\n" +"e.g. mitigations=residuals to enable only clearing residuals or\n" +"mitigations=auto,noresiduals to disable only the clear residual mitigation.\n" +"Either '!' or 'no' may be used to switch from enabling the mitigation to\n" +"disabling it.\n" +"\n" +"Active mitigations for Ivybridge, Baytrail, Haswell:\n" +" residuals -- clear all thread-local registers between contexts" +); diff --git a/drivers/gpu/drm/i915/i915_mitigations.h b/drivers/gpu/drm/i915/i915_mitigations.h new file mode 100644 index 000000000000..1359d8135287 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_mitigations.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_MITIGATIONS_H__ +#define __I915_MITIGATIONS_H__ + +#include <linux/types.h> + +bool i915_mitigate_clear_residuals(void); + +#endif /* __I915_MITIGATIONS_H__ */ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 11fe790b1969..39608381b4a4 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -639,7 +639,6 @@ static const struct intel_device_info chv_info = { GEN8_FEATURES, \ GEN(9), \ GEN9_DEFAULT_PAGE_SIZES, \ - .has_logical_ring_preemption = 1, \ .display.has_csr = 1, \ .has_gt_uc = 1, \ .display.has_hdcp = 1, \ @@ -700,7 +699,6 @@ static const struct intel_device_info skl_gt4_info = { .has_rps = true, \ .display.has_dp_mst = 1, \ .has_logical_ring_contexts = 1, \ - .has_logical_ring_preemption = 1, \ .has_gt_uc = 1, \ .dma_mask_size = 39, \ .ppgtt_type = INTEL_PPGTT_FULL, \ diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 3b12c8ff7182..112ba5f2ce90 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -198,8 +198,11 @@ #include "gem/i915_gem_context.h" #include "gt/intel_engine_pm.h" #include "gt/intel_engine_user.h" +#include "gt/intel_execlists_submission.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" -#include 
"gt/intel_lrc_reg.h" +#include "gt/intel_gt_clock_utils.h" +#include "gt/intel_lrc.h" #include "gt/intel_ring.h" #include "i915_drv.h" @@ -914,7 +917,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream, intel_uncore_rmw(uncore, oastatus_reg, GEN8_OASTATUS_COUNTER_OVERFLOW | GEN8_OASTATUS_REPORT_LOST, - IS_GEN_RANGE(uncore->i915, 8, 10) ? + IS_GEN_RANGE(uncore->i915, 8, 11) ? (GEN8_OASTATUS_HEAD_POINTER_WRAP | GEN8_OASTATUS_TAIL_POINTER_WRAP) : 0); } @@ -1635,7 +1638,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream) struct drm_i915_gem_object *bo; struct i915_vma *vma; const u64 delay_ticks = 0xffffffffffffffff - - i915_cs_timestamp_ns_to_ticks(i915, atomic64_read(&stream->perf->noa_programming_delay)); + intel_gt_ns_to_clock_interval(stream->perf->i915->ggtt.vm.gt, + atomic64_read(&stream->perf->noa_programming_delay)); const u32 base = stream->engine->mmio_base; #define CS_GPR(x) GEN8_RING_CS_GPR(base, x) u32 *batch, *ts0, *cs, *jump; @@ -3516,7 +3520,8 @@ err: static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent) { - return i915_cs_timestamp_ticks_to_ns(perf->i915, 2ULL << exponent); + return intel_gt_clock_interval_to_ns(perf->i915->ggtt.vm.gt, + 2ULL << exponent); } /** @@ -4370,11 +4375,11 @@ void i915_perf_init(struct drm_i915_private *i915) if (perf->ops.enable_metric_set) { mutex_init(&perf->lock); - oa_sample_rate_hard_limit = - RUNTIME_INFO(i915)->cs_timestamp_frequency_hz / 2; + /* Choose a representative limit */ + oa_sample_rate_hard_limit = i915->gt.clock_frequency / 2; mutex_init(&perf->metrics_lock); - idr_init(&perf->metrics_idr); + idr_init_base(&perf->metrics_idr, 1); /* We set up some ratelimit state to potentially throttle any * _NOTES about spurious, invalid OA reports which we don't diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index cd786ad12be7..2b88c0baa1bf 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -4,7 +4,6 @@ * Copyright © 2017-2018 Intel Corporation */ -#include <linux/irq.h> #include <linux/pm_runtime.h> #include "gt/intel_engine.h" @@ -27,8 +26,6 @@ BIT(I915_SAMPLE_WAIT) | \ BIT(I915_SAMPLE_SEMA)) -#define ENGINE_SAMPLE_BITS (1 << I915_PMU_SAMPLE_BITS) - static cpumask_t i915_pmu_cpumask; static unsigned int i915_pmu_target_cpu = -1; @@ -57,17 +54,42 @@ static bool is_engine_config(u64 config) return config < __I915_PMU_OTHER(0); } -static unsigned int config_enabled_bit(u64 config) +static unsigned int other_bit(const u64 config) +{ + unsigned int val; + + switch (config) { + case I915_PMU_ACTUAL_FREQUENCY: + val = __I915_PMU_ACTUAL_FREQUENCY_ENABLED; + break; + case I915_PMU_REQUESTED_FREQUENCY: + val = __I915_PMU_REQUESTED_FREQUENCY_ENABLED; + break; + case I915_PMU_RC6_RESIDENCY: + val = __I915_PMU_RC6_RESIDENCY_ENABLED; + break; + default: + /* + * Events that do not require sampling, or tracking state + * transitions between enabled and disabled can be ignored. 
+ */ + return -1; + } + + return I915_ENGINE_SAMPLE_COUNT + val; +} + +static unsigned int config_bit(const u64 config) { if (is_engine_config(config)) return engine_config_sample(config); else - return ENGINE_SAMPLE_BITS + (config - __I915_PMU_OTHER(0)); + return other_bit(config); } -static u64 config_enabled_mask(u64 config) +static u64 config_mask(u64 config) { - return BIT_ULL(config_enabled_bit(config)); + return BIT_ULL(config_bit(config)); } static bool is_engine_event(struct perf_event *event) @@ -75,15 +97,15 @@ static bool is_engine_event(struct perf_event *event) return is_engine_config(event->attr.config); } -static unsigned int event_enabled_bit(struct perf_event *event) +static unsigned int event_bit(struct perf_event *event) { - return config_enabled_bit(event->attr.config); + return config_bit(event->attr.config); } static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) { struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); - u64 enable; + u32 enable; /* * Only some counters need the sampling timer. @@ -96,8 +118,8 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) * Mask out all the ones which do not need the timer, or in * other words keep all the ones that could need the timer. */ - enable &= config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) | - config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY) | + enable &= config_mask(I915_PMU_ACTUAL_FREQUENCY) | + config_mask(I915_PMU_REQUESTED_FREQUENCY) | ENGINE_SAMPLE_MASK; /* @@ -138,11 +160,9 @@ static u64 __get_rc6(struct intel_gt *gt) return val; } -#if IS_ENABLED(CONFIG_PM) - -static inline s64 ktime_since(const ktime_t kt) +static inline s64 ktime_since_raw(const ktime_t kt) { - return ktime_to_ns(ktime_sub(ktime_get(), kt)); + return ktime_to_ns(ktime_sub(ktime_get_raw(), kt)); } static u64 get_rc6(struct intel_gt *gt) @@ -171,7 +191,7 @@ static u64 get_rc6(struct intel_gt *gt) * on top of the last known real value, as the approximated RC6 * counter value. 
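
While the GT is parked the driver does not wake it just to read the RC6 counter; instead the value is approximated as the last real sample plus the raw monotonic time spent asleep, presumably switched to the raw clock so clock adjustments do not skew the estimate. A userspace sketch of that estimate, using CLOCK_MONOTONIC_RAW as a stand-in for ktime_get_raw():

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical state mirroring pmu->sample[__I915_SAMPLE_RC6].cur and
 * pmu->sleep_last in the patch. */
static uint64_t rc6_at_park_ns;   /* last HW sample, taken when parking */
static uint64_t sleep_last_ns;    /* raw timestamp taken when parking */

static uint64_t now_raw_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* A parked GPU is assumed to sit fully in RC6, so the counter advances
 * at wall-clock rate from the last real sample. */
static uint64_t get_rc6_estimate(void)
{
	return rc6_at_park_ns + (now_raw_ns() - sleep_last_ns);
}

int main(void)
{
	sleep_last_ns = now_raw_ns();	/* "park" now */
	printf("estimated rc6: %llu ns\n",
	       (unsigned long long)get_rc6_estimate());
	return 0;
}
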
*/ - val = ktime_since(pmu->sleep_last); + val = ktime_since_raw(pmu->sleep_last); val += pmu->sample[__I915_SAMPLE_RC6].cur; } @@ -185,26 +205,26 @@ static u64 get_rc6(struct intel_gt *gt) return val; } -static void park_rc6(struct drm_i915_private *i915) +static void init_rc6(struct i915_pmu *pmu) { - struct i915_pmu *pmu = &i915->pmu; + struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); + intel_wakeref_t wakeref; - if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY)) + with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) { pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); - - pmu->sleep_last = ktime_get(); + pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = + pmu->sample[__I915_SAMPLE_RC6].cur; + pmu->sleep_last = ktime_get_raw(); + } } -#else - -static u64 get_rc6(struct intel_gt *gt) +static void park_rc6(struct drm_i915_private *i915) { - return __get_rc6(gt); -} - -static void park_rc6(struct drm_i915_private *i915) {} + struct i915_pmu *pmu = &i915->pmu; -#endif + pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); + pmu->sleep_last = ktime_get_raw(); +} static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) { @@ -344,8 +364,8 @@ add_sample_mult(struct i915_pmu_sample *sample, u32 val, u32 mul) static bool frequency_sampling_enabled(struct i915_pmu *pmu) { return pmu->enable & - (config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY) | - config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)); + (config_mask(I915_PMU_ACTUAL_FREQUENCY) | + config_mask(I915_PMU_REQUESTED_FREQUENCY)); } static void @@ -363,7 +383,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns) if (!intel_gt_pm_get_if_awake(gt)) return; - if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) { + if (pmu->enable & config_mask(I915_PMU_ACTUAL_FREQUENCY)) { u32 val; /* @@ -385,7 +405,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns) intel_gpu_freq(rps, val), period_ns / 1000); } - if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) { + if (pmu->enable & config_mask(I915_PMU_REQUESTED_FREQUENCY)) { add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ], intel_gpu_freq(rps, rps->cur_freq), period_ns / 1000); @@ -424,22 +444,6 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer) return HRTIMER_RESTART; } -static u64 count_interrupts(struct drm_i915_private *i915) -{ - /* open-coded kstat_irqs() */ - struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq); - u64 sum = 0; - int cpu; - - if (!desc || !desc->kstat_irqs) - return 0; - - for_each_possible_cpu(cpu) - sum += *per_cpu_ptr(desc->kstat_irqs, cpu); - - return sum; -} - static void i915_pmu_event_destroy(struct perf_event *event) { struct drm_i915_private *i915 = @@ -488,6 +492,8 @@ config_status(struct drm_i915_private *i915, u64 config) if (!HAS_RC6(i915)) return -ENODEV; break; + case I915_PMU_SOFTWARE_GT_AWAKE_TIME: + break; default: return -ENOENT; } @@ -590,11 +596,14 @@ static u64 __i915_pmu_event_read(struct perf_event *event) USEC_PER_SEC /* to MHz */); break; case I915_PMU_INTERRUPTS: - val = count_interrupts(i915); + val = READ_ONCE(pmu->irq_count); break; case I915_PMU_RC6_RESIDENCY: val = get_rc6(&i915->gt); break; + case I915_PMU_SOFTWARE_GT_AWAKE_TIME: + val = ktime_to_ns(intel_gt_get_awake_time(&i915->gt)); + break; } } @@ -627,12 +636,14 @@ static void i915_pmu_enable(struct perf_event *event) { struct drm_i915_private *i915 = container_of(event->pmu, typeof(*i915), pmu.base); - unsigned int bit = event_enabled_bit(event); struct 
i915_pmu *pmu = &i915->pmu; - intel_wakeref_t wakeref; unsigned long flags; + unsigned int bit; + + bit = event_bit(event); + if (bit == -1) + goto update; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); spin_lock_irqsave(&pmu->lock, flags); /* @@ -643,13 +654,6 @@ static void i915_pmu_enable(struct perf_event *event) GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); GEM_BUG_ON(pmu->enable_count[bit] == ~0); - if (pmu->enable_count[bit] == 0 && - config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) { - pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0; - pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); - pmu->sleep_last = ktime_get(); - } - pmu->enable |= BIT_ULL(bit); pmu->enable_count[bit]++; @@ -684,24 +688,26 @@ static void i915_pmu_enable(struct perf_event *event) spin_unlock_irqrestore(&pmu->lock, flags); +update: /* * Store the current counter value so we can report the correct delta * for all listeners. Even when the event was already enabled and has * an existing non-zero value. */ local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); - - intel_runtime_pm_put(&i915->runtime_pm, wakeref); } static void i915_pmu_disable(struct perf_event *event) { struct drm_i915_private *i915 = container_of(event->pmu, typeof(*i915), pmu.base); - unsigned int bit = event_enabled_bit(event); + unsigned int bit = event_bit(event); struct i915_pmu *pmu = &i915->pmu; unsigned long flags; + if (bit == -1) + return; + spin_lock_irqsave(&pmu->lock, flags); if (is_engine_event(event)) { @@ -898,6 +904,7 @@ create_event_attributes(struct i915_pmu *pmu) __event(I915_PMU_REQUESTED_FREQUENCY, "requested-frequency", "M"), __event(I915_PMU_INTERRUPTS, "interrupts", NULL), __event(I915_PMU_RC6_RESIDENCY, "rc6-residency", "ns"), + __event(I915_PMU_SOFTWARE_GT_AWAKE_TIME, "software-gt-awake-time", "ns"), }; static const struct { enum drm_i915_pmu_engine_sample sample; @@ -1147,6 +1154,7 @@ void i915_pmu_register(struct drm_i915_private *i915) hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu->timer.function = i915_sample; pmu->cpuhp.cpu = -1; + init_rc6(pmu); if (!is_igp(i915)) { pmu->name = kasprintf(GFP_KERNEL, diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index a24885ab415c..60f9595f902c 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -14,6 +14,21 @@ struct drm_i915_private; +/** + * Non-engine events that we need to track enabled-disabled transition and + * current state. + */ +enum i915_pmu_tracked_events { + __I915_PMU_ACTUAL_FREQUENCY_ENABLED = 0, + __I915_PMU_REQUESTED_FREQUENCY_ENABLED, + __I915_PMU_RC6_RESIDENCY_ENABLED, + __I915_PMU_TRACKED_EVENT_COUNT, /* count marker */ +}; + +/** + * Slots used from the sampling timer (non-engine events) with some extras for + * convenience. + */ enum { __I915_SAMPLE_FREQ_ACT = 0, __I915_SAMPLE_FREQ_REQ, @@ -28,8 +43,7 @@ enum { * It is also used to know to needed number of event reference counters. */ #define I915_PMU_MASK_BITS \ - ((1 << I915_PMU_SAMPLE_BITS) + \ - (I915_PMU_LAST + 1 - __I915_PMU_OTHER(0))) + (I915_ENGINE_SAMPLE_COUNT + __I915_PMU_TRACKED_EVENT_COUNT) #define I915_ENGINE_SAMPLE_COUNT (I915_SAMPLE_SEMA + 1) @@ -66,18 +80,17 @@ struct i915_pmu { */ struct hrtimer timer; /** - * @enable: Bitmask of all currently enabled events. + * @enable: Bitmask of specific enabled events. + * + * For some events we need to track their state and do some internal + * house keeping. 
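
i915_pmu_enable()/i915_pmu_disable() above keep a per-bit reference count so a bit in pmu->enable only clears when the last listener for that event goes away. The core bookkeeping, reduced to a standalone sketch (in the driver this runs under pmu->lock; the sketch omits locking):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define MASK_BITS 16
static uint32_t enable;			/* bitmask of live events */
static unsigned int enable_count[MASK_BITS];

static void event_enable(unsigned int bit)
{
	assert(bit < MASK_BITS);
	enable |= 1u << bit;
	enable_count[bit]++;
}

static void event_disable(unsigned int bit)
{
	assert(bit < MASK_BITS && enable_count[bit] > 0);
	if (--enable_count[bit] == 0)
		enable &= ~(1u << bit);	/* last listener gone */
}

int main(void)
{
	event_enable(5);
	event_enable(5);	/* second perf listener, same event */
	event_disable(5);
	printf("%#x\n", enable);	/* still 0x20: one listener left */
	event_disable(5);
	printf("%#x\n", enable);	/* 0x0 */
	return 0;
}
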
* - * Bits are derived from uAPI event numbers in a way that low 16 bits - * correspond to engine event _sample_ _type_ (I915_SAMPLE_QUEUED is - * bit 0), and higher bits correspond to other events (for instance - * I915_PMU_ACTUAL_FREQUENCY is bit 16 etc). + * Each engine event sampler type and event listed in enum + * i915_pmu_tracked_events gets a bit in this field. * - * In other words, low 16 bits are not per engine but per engine - * sampler type, while the upper bits are directly mapped to other - * event types. + * Low bits are engine samplers and other events continue from there. */ - u64 enable; + u32 enable; /** * @timer_last: @@ -112,6 +125,14 @@ struct i915_pmu { */ ktime_t sleep_last; /** + * @irq_count: Number of interrupts + * + * Intentionally unsigned long to avoid atomics or heuristics on 32bit. + * 4e9 interrupts are a lot and postprocessing can really deal with an + * occasional wraparound easily. It's 32bit after all. + */ + unsigned long irq_count; + /** * @events_attr_group: Device events attribute group. */ struct attribute_group events_attr_group; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5375b219cc3b..1d8ba10847ca 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2928,8 +2928,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define HDPORT_STATE _MMIO(0x45050) #define HDPORT_DPLL_USED_MASK REG_GENMASK(14, 12) -#define HDPORT_PHY_USED_DP(phy) REG_BIT(2 * (phy) + 2) -#define HDPORT_PHY_USED_HDMI(phy) REG_BIT(2 * (phy) + 1) +#define HDPORT_DDI_USED(phy) REG_BIT(2 * (phy) + 1) #define HDPORT_ENABLED REG_BIT(0) /* Make render/texture TLB fetches lower priorty than associated data @@ -4352,6 +4351,7 @@ enum { #define VRR_CTL_IGN_MAX_SHIFT REG_BIT(30) #define VRR_CTL_FLIP_LINE_EN REG_BIT(29) #define VRR_CTL_LINE_COUNT_MASK REG_GENMASK(10, 3) +#define VRR_CTL_LINE_COUNT(x) REG_FIELD_PREP(VRR_CTL_LINE_COUNT_MASK, (x)) #define VRR_CTL_SW_FULLLINE_COUNT REG_BIT(0) #define _TRANS_VRR_VMAX_A 0x60424 @@ -10851,8 +10851,10 @@ enum skl_power_gate { #define CNL_DRAM_RANK_3 (0x2 << 9) #define CNL_DRAM_RANK_4 (0x3 << 9) -/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, - * since on HSW we can't write to it using I915_WRITE. */ +/* + * Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, + * since on HSW we can't write to it using intel_uncore_write. 
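
The new VRR_CTL_LINE_COUNT(x) above is simply REG_FIELD_PREP() applied to the existing mask: shift the value into the field and clamp it there. A minimal model of that macro pair (simplified re-implementations for illustration, not the kernel's bitfield.h):

#include <stdint.h>
#include <stdio.h>

/* Simplified takes on GENMASK()/FIELD_PREP() for 32-bit registers. */
#define GENMASK32(h, l) \
	((~0u >> (31 - (h))) & (~0u << (l)))
#define FIELD_PREP32(mask, val) \
	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

#define VRR_CTL_LINE_COUNT_MASK	GENMASK32(10, 3)
#define VRR_CTL_LINE_COUNT(x)	FIELD_PREP32(VRR_CTL_LINE_COUNT_MASK, (x))

int main(void)
{
	/* A line count of 5 lands in bits 10:3 -> 5 << 3 == 0x28. */
	printf("%#x\n", VRR_CTL_LINE_COUNT(5));
	return 0;
}
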
+ */ #define D_COMP_HSW _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5F0C) #define D_COMP_BDW _MMIO(0x138144) #define D_COMP_RCOMP_IN_PROGRESS (1 << 9) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 0e813819b041..0b1a46a0d866 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -33,6 +33,7 @@ #include "gem/i915_gem_context.h" #include "gt/intel_breadcrumbs.h" #include "gt/intel_context.h" +#include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" #include "gt/intel_rps.h" @@ -197,7 +198,7 @@ __notify_execute_cb(struct i915_request *rq, bool (*fn)(struct irq_work *wrk)) llist_for_each_entry_safe(cb, cn, llist_del_all(&rq->execute_cb), - work.llnode) + work.node.llist) fn(&cb->work); } @@ -306,10 +307,8 @@ bool i915_request_retire(struct i915_request *rq) spin_unlock_irq(&rq->lock); } - if (i915_request_has_waitboost(rq)) { - GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters)); + if (test_and_set_bit(I915_FENCE_FLAG_BOOST, &rq->fence.flags)) atomic_dec(&rq->engine->gt->rps.num_waiters); - } /* * We only loosely track inflight requests across preemption, @@ -321,7 +320,8 @@ bool i915_request_retire(struct i915_request *rq) * after removing the breadcrumb and signaling it, so that we do not * inadvertently attach the breadcrumb to a completed request. */ - remove_from_engine(rq); + if (!list_empty(&rq->sched.link)) + remove_from_engine(rq); GEM_BUG_ON(!llist_empty(&rq->execute_cb)); __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */ @@ -460,7 +460,7 @@ __await_execution(struct i915_request *rq, * callback first, then checking the ACTIVE bit, we serialise with * the completed/retired request. */ - if (llist_add(&cb->work.llnode, &signal->execute_cb)) { + if (llist_add(&cb->work.node.llist, &signal->execute_cb)) { if (i915_request_is_active(signal) || __request_in_flight(signal)) __notify_execute_cb_imm(signal); @@ -488,6 +488,8 @@ void __i915_request_skip(struct i915_request *rq) if (rq->infix == rq->postfix) return; + RQ_TRACE(rq, "error: %d\n", rq->fence.error); + /* * As this request likely depends on state from the lost * context, clear out all the user operations leaving the @@ -513,6 +515,17 @@ void i915_request_set_error_once(struct i915_request *rq, int error) } while (!try_cmpxchg(&rq->fence.error, &old, error)); } +void i915_request_mark_eio(struct i915_request *rq) +{ + if (__i915_request_is_complete(rq)) + return; + + GEM_BUG_ON(i915_request_signaled(rq)); + + i915_request_set_error_once(rq, -EIO); + i915_request_mark_complete(rq); +} + bool __i915_request_submit(struct i915_request *request) { struct intel_engine_cs *engine = request->engine; @@ -542,10 +555,6 @@ bool __i915_request_submit(struct i915_request *request) if (i915_request_completed(request)) goto xfer; - if (unlikely(intel_context_is_closed(request->context) && - !intel_engine_has_heartbeat(engine))) - intel_context_set_banned(request->context); - if (unlikely(intel_context_is_banned(request->context))) i915_request_set_error_once(request, -EIO); @@ -1582,6 +1591,12 @@ struct i915_request *__i915_request_commit(struct i915_request *rq) return __i915_request_add_to_timeline(rq); } +void __i915_request_queue_bh(struct i915_request *rq) +{ + i915_sw_fence_commit(&rq->semaphore); + i915_sw_fence_commit(&rq->submit); +} + void __i915_request_queue(struct i915_request *rq, const struct i915_sched_attr *attr) { @@ -1598,8 +1613,10 @@ void __i915_request_queue(struct i915_request *rq, */ if (attr && rq->engine->schedule) 
rq->engine->schedule(rq, attr); - i915_sw_fence_commit(&rq->semaphore); - i915_sw_fence_commit(&rq->submit); + + local_bh_disable(); + __i915_request_queue_bh(rq); + local_bh_enable(); /* kick tasklets */ } void i915_request_add(struct i915_request *rq) @@ -1823,7 +1840,7 @@ long i915_request_wait(struct i915_request *rq, * for unhappy HW. */ if (i915_request_is_ready(rq)) - intel_engine_flush_submission(rq->engine); + __intel_engine_flush_submission(rq->engine, false); for (;;) { set_current_state(state); @@ -1855,6 +1872,106 @@ out: return timeout; } +static int print_sched_attr(const struct i915_sched_attr *attr, + char *buf, int x, int len) +{ + if (attr->priority == I915_PRIORITY_INVALID) + return x; + + x += snprintf(buf + x, len - x, + " prio=%d", attr->priority); + + return x; +} + +static char queue_status(const struct i915_request *rq) +{ + if (i915_request_is_active(rq)) + return 'E'; + + if (i915_request_is_ready(rq)) + return intel_engine_is_virtual(rq->engine) ? 'V' : 'R'; + + return 'U'; +} + +static const char *run_status(const struct i915_request *rq) +{ + if (i915_request_completed(rq)) + return "!"; + + if (i915_request_started(rq)) + return "*"; + + if (!i915_sw_fence_signaled(&rq->semaphore)) + return "&"; + + return ""; +} + +static const char *fence_status(const struct i915_request *rq) +{ + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) + return "+"; + + if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) + return "-"; + + return ""; +} + +void i915_request_show(struct drm_printer *m, + const struct i915_request *rq, + const char *prefix, + int indent) +{ + const char *name = rq->fence.ops->get_timeline_name((struct dma_fence *)&rq->fence); + char buf[80] = ""; + int x = 0; + + /* + * The prefix is used to show the queue status, for which we use + * the following flags: + * + * U [Unready] + * - initial status upon being submitted by the user + * + * - the request is not ready for execution as it is waiting + * for external fences + * + * R [Ready] + * - all fences the request was waiting on have been signaled, + * and the request is now ready for execution and will be + * in a backend queue + * + * - a ready request may still need to wait on semaphores + * [internal fences] + * + * V [Ready/virtual] + * - same as ready, but queued over multiple backends + * + * E [Executing] + * - the request has been transferred from the backend queue and + * submitted for execution on HW + * + * - a completed request may still be regarded as executing, its + * status may not be updated until it is retired and removed + * from the lists + */ + + x = print_sched_attr(&rq->sched.attr, buf, x, sizeof(buf)); + + drm_printf(m, "%s%.*s%c %llx:%lld%s%s %s @ %dms: %s\n", + prefix, indent, " ", + queue_status(rq), + rq->fence.context, rq->fence.seqno, + run_status(rq), + fence_status(rq), + buf, + jiffies_to_msecs(jiffies - rq->emitted_jiffies), + name); +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_request.c" #include "selftests/i915_request.c" diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 16b721080195..1bfe214a47e9 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -43,6 +43,7 @@ struct drm_file; struct drm_i915_gem_object; +struct drm_printer; struct i915_request; struct i915_capture_list { @@ -176,7 +177,9 @@ struct i915_request { struct intel_context *context; struct intel_ring *ring; struct intel_timeline __rcu *timeline; + struct list_head 
signal_link; + struct llist_node signal_node; /* * The rcu epoch of when this request was allocated. Used to judiciously @@ -306,12 +309,14 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp); struct i915_request * __must_check i915_request_create(struct intel_context *ce); -void i915_request_set_error_once(struct i915_request *rq, int error); void __i915_request_skip(struct i915_request *rq); +void i915_request_set_error_once(struct i915_request *rq, int error); +void i915_request_mark_eio(struct i915_request *rq); struct i915_request *__i915_request_commit(struct i915_request *request); void __i915_request_queue(struct i915_request *rq, const struct i915_sched_attr *attr); +void __i915_request_queue_bh(struct i915_request *rq); bool i915_request_retire(struct i915_request *rq); void i915_request_retire_upto(struct i915_request *rq); @@ -369,6 +374,11 @@ long i915_request_wait(struct i915_request *rq, #define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */ #define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */ +void i915_request_show(struct drm_printer *m, + const struct i915_request *rq, + const char *prefix, + int indent); + static inline bool i915_request_signaled(const struct i915_request *rq) { /* The request may live longer than its HWSP, so check flags first! */ @@ -432,7 +442,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq) static inline bool __i915_request_has_started(const struct i915_request *rq) { - return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1); + return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1); } /** @@ -463,11 +473,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq) */ static inline bool i915_request_started(const struct i915_request *rq) { + bool result; + if (i915_request_signaled(rq)) return true; - /* Remember: started but may have since been preempted! */ - return __i915_request_has_started(rq); + result = true; + rcu_read_lock(); /* the HWSP may be freed at runtime */ + if (likely(!i915_request_signaled(rq))) + /* Remember: started but may have since been preempted! 
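
Both __i915_request_has_started() and __i915_request_is_complete() lean on i915_seqno_passed(), which compares sequence numbers through a signed subtraction so the test stays correct across u32 wraparound. A self-contained illustration of that trick:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a is at or after b" for 32-bit seqnos; the same
 * signed-difference comparison i915_seqno_passed() uses. */
static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(2, 0xfffffffeu));	/* 1: passed the wrap */
	printf("%d\n", seqno_passed(0xfffffffeu, 2));	/* 0: still behind */
	return 0;
}
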
*/ + result = __i915_request_has_started(rq); + rcu_read_unlock(); + + return result; } /** @@ -480,10 +498,16 @@ static inline bool i915_request_started(const struct i915_request *rq) */ static inline bool i915_request_is_running(const struct i915_request *rq) { + bool result; + if (!i915_request_is_active(rq)) return false; - return __i915_request_has_started(rq); + rcu_read_lock(); + result = __i915_request_has_started(rq) && i915_request_is_active(rq); + rcu_read_unlock(); + + return result; } /** @@ -507,12 +531,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq) return !list_empty(&rq->sched.link); } +static inline bool __i915_request_is_complete(const struct i915_request *rq) +{ + return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno); +} + static inline bool i915_request_completed(const struct i915_request *rq) { + bool result; + if (i915_request_signaled(rq)) return true; - return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno); + result = true; + rcu_read_lock(); /* the HWSP may be freed at runtime */ + if (likely(!i915_request_signaled(rq))) + result = __i915_request_is_complete(rq); + rcu_read_unlock(); + + return result; } static inline void i915_request_mark_complete(struct i915_request *rq) diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index cbb880b10c65..318e359bf5c3 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -458,14 +458,10 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node, if (!dep) return -ENOMEM; - local_bh_disable(); - if (!__i915_sched_node_add_dependency(node, signal, dep, flags | I915_DEPENDENCY_ALLOC)) i915_dependency_free(dep); - local_bh_enable(); /* kick submission tasklet */ - return 0; } @@ -504,6 +500,34 @@ void i915_sched_node_fini(struct i915_sched_node *node) spin_unlock_irq(&schedule_lock); } +void i915_request_show_with_schedule(struct drm_printer *m, + const struct i915_request *rq, + const char *prefix, + int indent) +{ + struct i915_dependency *dep; + + i915_request_show(m, rq, prefix, indent); + if (i915_request_completed(rq)) + return; + + rcu_read_lock(); + for_each_signaler(dep, rq) { + const struct i915_request *signaler = + node_to_request(dep->signaler); + + /* Dependencies along the same timeline are expected. 
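
i915_request_show_with_schedule() above prints a request and then, under rcu_read_lock(), its still-relevant signalers: same-timeline and already-completed dependencies are skipped, survivors are indented two extra columns. The filtering logic, modelled without RCU on a plain NULL-terminated array (the structure and names here are invented for the sketch):

#include <stdbool.h>
#include <stdio.h>

struct req {
	const char *name;
	int timeline;
	bool completed;
	const struct req *signalers[4];	/* NULL-terminated, simplified */
};

static void show_with_schedule(const struct req *rq, int indent)
{
	printf("%*s%s\n", indent, "", rq->name);
	if (rq->completed)
		return;
	for (int i = 0; rq->signalers[i]; i++) {
		const struct req *s = rq->signalers[i];

		if (s->timeline == rq->timeline)	/* expected, skip */
			continue;
		if (s->completed)
			continue;
		printf("%*s%s\n", indent + 2, "", s->name);
	}
}

int main(void)
{
	const struct req dep = { "deadbeef:10", 2, false, { NULL } };
	const struct req rq = { "cafef00d:4", 1, false, { &dep, NULL } };

	show_with_schedule(&rq, 0);
	return 0;
}
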
*/ + if (signaler->timeline == rq->timeline) + continue; + + if (i915_request_completed(signaler)) + continue; + + i915_request_show(m, signaler, prefix, indent + 2); + } + rcu_read_unlock(); +} + static void i915_global_scheduler_shrink(void) { kmem_cache_shrink(global.slab_dependencies); diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index 6f0bf00fc569..4501e5ac2637 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -13,6 +13,8 @@ #include "i915_scheduler_types.h" +struct drm_printer; + #define priolist_for_each_request(it, plist, idx) \ for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \ list_for_each_entry(it, &(plist)->requests[idx], sched.link) @@ -54,4 +56,9 @@ static inline void i915_priolist_free(struct i915_priolist *p) __i915_priolist_free(p); } +void i915_request_show_with_schedule(struct drm_printer *m, + const struct i915_request *rq, + const char *prefix, + int indent); + #endif /* _I915_SCHEDULER_H_ */ diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h index f72e6c397b08..343ed44d5ed4 100644 --- a/drivers/gpu/drm/i915/i915_scheduler_types.h +++ b/drivers/gpu/drm/i915/i915_scheduler_types.h @@ -81,4 +81,14 @@ struct i915_dependency { #define I915_DEPENDENCY_WEAK BIT(2) }; +#define for_each_waiter(p__, rq__) \ + list_for_each_entry_lockless(p__, \ + &(rq__)->sched.waiters_list, \ + wait_link) + +#define for_each_signaler(p__, rq__) \ + list_for_each_entry_rcu(p__, \ + &(rq__)->sched.signalers_list, \ + signal_link) + #endif /* _I915_SCHEDULER_TYPES_H_ */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index db2111fc809e..63212df33c9e 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -24,6 +24,7 @@ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ +#include "display/intel_de.h" #include "display/intel_fbc.h" #include "display/intel_gmbus.h" #include "display/intel_vga.h" @@ -39,21 +40,21 @@ static void intel_save_swf(struct drm_i915_private *dev_priv) /* Scratch space */ if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) { for (i = 0; i < 7; i++) { - dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i)); - dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i)); + dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i)); + dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i)); } for (i = 0; i < 3; i++) - dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i)); + dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i)); } else if (IS_GEN(dev_priv, 2)) { for (i = 0; i < 7; i++) - dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i)); + dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i)); } else if (HAS_GMCH(dev_priv)) { for (i = 0; i < 16; i++) { - dev_priv->regfile.saveSWF0[i] = I915_READ(SWF0(i)); - dev_priv->regfile.saveSWF1[i] = I915_READ(SWF1(i)); + dev_priv->regfile.saveSWF0[i] = intel_de_read(dev_priv, SWF0(i)); + dev_priv->regfile.saveSWF1[i] = intel_de_read(dev_priv, SWF1(i)); } for (i = 0; i < 3; i++) - dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i)); + dev_priv->regfile.saveSWF3[i] = intel_de_read(dev_priv, SWF3(i)); } } @@ -64,21 +65,21 @@ static void intel_restore_swf(struct drm_i915_private *dev_priv) /* Scratch space */ if (IS_GEN(dev_priv, 2) && IS_MOBILE(dev_priv)) { for (i = 0; i < 7; i++) { - I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]); - I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]); + intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]); + intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]); } for (i = 0; i < 3; i++) - I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); + intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]); } else if (IS_GEN(dev_priv, 2)) { for (i = 0; i < 7; i++) - I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]); + intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]); } else if (HAS_GMCH(dev_priv)) { for (i = 0; i < 16; i++) { - I915_WRITE(SWF0(i), dev_priv->regfile.saveSWF0[i]); - I915_WRITE(SWF1(i), dev_priv->regfile.saveSWF1[i]); + intel_de_write(dev_priv, SWF0(i), dev_priv->regfile.saveSWF0[i]); + intel_de_write(dev_priv, SWF1(i), dev_priv->regfile.saveSWF1[i]); } for (i = 0; i < 3; i++) - I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); + intel_de_write(dev_priv, SWF3(i), dev_priv->regfile.saveSWF3[i]); } } @@ -88,7 +89,7 @@ void i915_save_display(struct drm_i915_private *dev_priv) /* Display arbitration control */ if (INTEL_GEN(dev_priv) <= 4) - dev_priv->regfile.saveDSPARB = I915_READ(DSPARB); + dev_priv->regfile.saveDSPARB = intel_de_read(dev_priv, DSPARB); if (IS_GEN(dev_priv, 4)) pci_read_config_word(pdev, GCDGMBUS, @@ -109,7 +110,7 @@ void i915_restore_display(struct drm_i915_private *dev_priv) /* Display arbitration */ if (INTEL_GEN(dev_priv) <= 4) - I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB); + intel_de_write(dev_priv, DSPARB, dev_priv->regfile.saveDSPARB); /* only restore FBC info on the platform that supports FBC*/ intel_fbc_global_disable(dev_priv); diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c index 038d4c6884c5..2744558f3050 100644 --- a/drivers/gpu/drm/i915/i915_sw_fence.c +++ b/drivers/gpu/drm/i915/i915_sw_fence.c @@ -18,10 +18,15 @@ #define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr) #endif -#define I915_SW_FENCE_FLAG_ALLOC 
BIT(3) /* after WQ_FLAG_* for safety */ - static DEFINE_SPINLOCK(i915_sw_fence_lock); +#define WQ_FLAG_BITS \ + BITS_PER_TYPE(typeof_member(struct wait_queue_entry, flags)) + +/* after WQ_FLAG_* for safety */ +#define I915_SW_FENCE_FLAG_FENCE BIT(WQ_FLAG_BITS - 1) +#define I915_SW_FENCE_FLAG_ALLOC BIT(WQ_FLAG_BITS - 2) + enum { DEBUG_FENCE_IDLE = 0, DEBUG_FENCE_NOTIFY, @@ -154,10 +159,10 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence, spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation); if (continuation) { list_for_each_entry_safe(pos, next, &x->head, entry) { - if (pos->func == autoremove_wake_function) - pos->func(pos, TASK_NORMAL, 0, continuation); - else + if (pos->flags & I915_SW_FENCE_FLAG_FENCE) list_move_tail(&pos->entry, continuation); + else + pos->func(pos, TASK_NORMAL, 0, continuation); } } else { LIST_HEAD(extra); @@ -166,9 +171,9 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence, list_for_each_entry_safe(pos, next, &x->head, entry) { int wake_flags; - wake_flags = fence->error; - if (pos->func == autoremove_wake_function) - wake_flags = 0; + wake_flags = 0; + if (pos->flags & I915_SW_FENCE_FLAG_FENCE) + wake_flags = fence->error; pos->func(pos, TASK_NORMAL, wake_flags, &extra); } @@ -332,8 +337,8 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, struct i915_sw_fence *signaler, wait_queue_entry_t *wq, gfp_t gfp) { + unsigned int pending; unsigned long flags; - int pending; debug_fence_assert(fence); might_sleep_if(gfpflags_allow_blocking(gfp)); @@ -349,7 +354,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence, if (unlikely(i915_sw_fence_check_if_after(fence, signaler))) return -EINVAL; - pending = 0; + pending = I915_SW_FENCE_FLAG_FENCE; if (!wq) { wq = kmalloc(sizeof(*wq), gfp); if (!wq) { diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index 4c305d838016..f9e780dee9de 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -87,7 +87,7 @@ bool i915_error_injected(void) void cancel_timer(struct timer_list *t) { - if (!READ_ONCE(t->expires)) + if (!timer_active(t)) return; del_timer(t); diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 54773371e6bd..abd4dcd9f79c 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -438,9 +438,14 @@ static inline void __add_taint_for_CI(unsigned int taint) void cancel_timer(struct timer_list *t); void set_timer_ms(struct timer_list *t, unsigned long timeout); +static inline bool timer_active(const struct timer_list *t) +{ + return READ_ONCE(t->expires); +} + static inline bool timer_expired(const struct timer_list *t) { - return READ_ONCE(t->expires) && !timer_pending(t); + return timer_active(t) && !timer_pending(t); } /* diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index e67cec8fa2aa..f2d5ae59081e 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -104,7 +104,7 @@ void intel_device_info_print_static(const struct intel_device_info *info, drm_printf(p, "ppgtt-type: %d\n", info->ppgtt_type); drm_printf(p, "dma_mask_size: %u\n", info->dma_mask_size); -#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name)); +#define PRINT_FLAG(name) drm_printf(p, "%s: %s\n", #name, yesno(info->name)) DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG); #undef PRINT_FLAG @@ -117,150 +117,6 @@ void 
intel_device_info_print_runtime(const struct intel_runtime_info *info, struct drm_printer *p) { drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq); - drm_printf(p, "CS timestamp frequency: %u Hz\n", - info->cs_timestamp_frequency_hz); -} - -static u32 read_reference_ts_freq(struct drm_i915_private *dev_priv) -{ - u32 ts_override = intel_uncore_read(&dev_priv->uncore, - GEN9_TIMESTAMP_OVERRIDE); - u32 base_freq, frac_freq; - - base_freq = ((ts_override & GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK) >> - GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_SHIFT) + 1; - base_freq *= 1000000; - - frac_freq = ((ts_override & - GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_MASK) >> - GEN9_TIMESTAMP_OVERRIDE_US_COUNTER_DENOMINATOR_SHIFT); - frac_freq = 1000000 / (frac_freq + 1); - - return base_freq + frac_freq; -} - -static u32 gen10_get_crystal_clock_freq(struct drm_i915_private *dev_priv, - u32 rpm_config_reg) -{ - u32 f19_2_mhz = 19200000; - u32 f24_mhz = 24000000; - u32 crystal_clock = (rpm_config_reg & - GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >> - GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; - - switch (crystal_clock) { - case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: - return f19_2_mhz; - case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: - return f24_mhz; - default: - MISSING_CASE(crystal_clock); - return 0; - } -} - -static u32 gen11_get_crystal_clock_freq(struct drm_i915_private *dev_priv, - u32 rpm_config_reg) -{ - u32 f19_2_mhz = 19200000; - u32 f24_mhz = 24000000; - u32 f25_mhz = 25000000; - u32 f38_4_mhz = 38400000; - u32 crystal_clock = (rpm_config_reg & - GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >> - GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT; - - switch (crystal_clock) { - case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ: - return f24_mhz; - case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ: - return f19_2_mhz; - case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_38_4_MHZ: - return f38_4_mhz; - case GEN11_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_25_MHZ: - return f25_mhz; - default: - MISSING_CASE(crystal_clock); - return 0; - } -} - -static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u32 f12_5_mhz = 12500000; - u32 f19_2_mhz = 19200000; - u32 f24_mhz = 24000000; - - if (INTEL_GEN(dev_priv) <= 4) { - /* PRMs say: - * - * "The value in this register increments once every 16 - * hclks." (through the “Clocking Configuration” - * (“CLKCFG”) MCHBAR register) - */ - return RUNTIME_INFO(dev_priv)->rawclk_freq * 1000 / 16; - } else if (INTEL_GEN(dev_priv) <= 8) { - /* PRMs say: - * - * "The PCU TSC counts 10ns increments; this timestamp - * reflects bits 38:3 of the TSC (i.e. 80ns granularity, - * rolling over every 1.5 hours). - */ - return f12_5_mhz; - } else if (INTEL_GEN(dev_priv) <= 9) { - u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); - u32 freq = 0; - - if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { - freq = read_reference_ts_freq(dev_priv); - } else { - freq = IS_GEN9_LP(dev_priv) ? f19_2_mhz : f24_mhz; - - /* Now figure out how the command stream's timestamp - * register increments from this frequency (it might - * increment only every few clock cycle). - */ - freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >> - CTC_SHIFT_PARAMETER_SHIFT); - } - - return freq; - } else if (INTEL_GEN(dev_priv) <= 12) { - u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE); - u32 freq = 0; - - /* First figure out the reference frequency. 
There are 2 ways - * we can compute the frequency, either through the - * TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE - * tells us which one we should use. - */ - if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) { - freq = read_reference_ts_freq(dev_priv); - } else { - u32 rpm_config_reg = intel_uncore_read(uncore, RPM_CONFIG0); - - if (INTEL_GEN(dev_priv) <= 10) - freq = gen10_get_crystal_clock_freq(dev_priv, - rpm_config_reg); - else - freq = gen11_get_crystal_clock_freq(dev_priv, - rpm_config_reg); - - /* Now figure out how the command stream's timestamp - * register increments from this frequency (it might - * increment only every few clock cycle). - */ - freq >>= 3 - ((rpm_config_reg & - GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >> - GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT); - } - - return freq; - } - - MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n"); - return 0; } #undef INTEL_VGA_DEVICE @@ -505,19 +361,6 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) runtime->rawclk_freq = intel_read_rawclk(dev_priv); drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq); - /* Initialize command stream timestamp frequency */ - runtime->cs_timestamp_frequency_hz = - read_timestamp_frequency(dev_priv); - if (runtime->cs_timestamp_frequency_hz) { - runtime->cs_timestamp_period_ns = - i915_cs_timestamp_ticks_to_ns(dev_priv, 1); - drm_dbg(&dev_priv->drm, - "CS timestamp wraparound in %lldms\n", - div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns, - S32_MAX), - USEC_PER_SEC)); - } - if (!HAS_DISPLAY(dev_priv)) { dev_priv->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index d92fa041c700..cf2d528c6e9b 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -123,7 +123,6 @@ enum intel_ppgtt_type { func(has_llc); \ func(has_logical_ring_contexts); \ func(has_logical_ring_elsq); \ - func(has_logical_ring_preemption); \ func(has_master_unit_irq); \ func(has_pooled_eu); \ func(has_rc6); \ @@ -224,9 +223,6 @@ struct intel_runtime_info { u8 num_scalers[I915_MAX_PIPES]; u32 rawclk_freq; - - u32 cs_timestamp_frequency_hz; - u32 cs_timestamp_period_ns; }; struct intel_driver_caps { diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c index b326993a1026..1bfcdd89b241 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.c +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -10,7 +10,7 @@ #define REGION_MAP(type, inst) \ BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst) -const u32 intel_region_map[] = { +static const u32 intel_region_map[] = { [INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0), [INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0), [INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0), diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h index 232490d89a83..6590d55df6cb 100644 --- a/drivers/gpu/drm/i915/intel_memory_region.h +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -51,11 +51,6 @@ enum intel_region_id { for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \ for_each_if((mr) = (i915)->mm.regions[id]) -/** - * Memory regions encoded as type | instance - */ -extern const u32 intel_region_map[]; - struct intel_memory_region_ops { unsigned int flags; diff --git a/drivers/gpu/drm/i915/intel_pm.c 
b/drivers/gpu/drm/i915/intel_pm.c index a20b5051f18c..bbc73df7f753 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -82,24 +82,24 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) * Must match Sampler, Pixel Back End, and Media. See * WaCompressedResourceSamplerPbeMediaNewHashMode. */ - I915_WRITE(CHICKEN_PAR1_1, - I915_READ(CHICKEN_PAR1_1) | + intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, + intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_DE_COMPRESSED_HASH_MODE); } /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */ - I915_WRITE(CHICKEN_PAR1_1, - I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); + intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, + intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); /* WaEnableChickenDCPR:skl,bxt,kbl,glk,cfl */ - I915_WRITE(GEN8_CHICKEN_DCPR_1, - I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); + intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, + intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); /* * WaFbcWakeMemOn:skl,bxt,kbl,glk,cfl * Display WA #0859: skl,bxt,kbl,glk,cfl */ - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | DISP_FBC_MEMORY_WAKE); } @@ -108,21 +108,21 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) gen9_init_clock_gating(dev_priv); /* WaDisableSDEUnitClockGating:bxt */ - I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | + intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); /* * FIXME: * GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ applies on 3x6 GT SKUs only. */ - I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | + intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); /* * Wa: Backlight PWM may stop in the asserted state, causing backlight * to stay fully on. */ - I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | + intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) | PWM1_GATING_DIS | PWM2_GATING_DIS); /* @@ -131,20 +131,20 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) * is off and a MMIO access is attempted by any privilege * application, using batch buffers or any other means. */ - I915_WRITE(RM_TIMEOUT, MMIO_TIMEOUT_US(950)); + intel_uncore_write(&dev_priv->uncore, RM_TIMEOUT, MMIO_TIMEOUT_US(950)); /* * WaFbcTurnOffFbcWatermark:bxt * Display WA #0562: bxt */ - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | DISP_FBC_WM_DIS); /* * WaFbcHighMemBwCorruptionAvoidance:bxt * Display WA #0883: bxt */ - I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | ILK_DPFC_DISABLE_DUMMY0); } @@ -157,7 +157,7 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv) * Backlight PWM may stop in the asserted state, causing backlight * to stay fully on. 
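
The mechanical I915_READ()/I915_WRITE() conversions above keep the same read-modify-write shape, just routed through intel_uncore_read()/intel_uncore_write(). That shape, reduced to a tiny model (the driver's intel_uncore_rmw() helper applies the equivalent (old & ~clear) | set):

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[4];	/* stand-in for MMIO space */

/* Read-modify-write on a register; returning the old value is handy
 * for callers that want to know what they overwrote. */
static uint32_t reg_rmw(unsigned int idx, uint32_t clear, uint32_t set)
{
	uint32_t old = regs[idx];

	regs[idx] = (old & ~clear) | set;
	return old;
}

int main(void)
{
	regs[0] = 0x0f;
	reg_rmw(0, 0x03, 0x30);		/* clear bits 1:0, set bits 5:4 */
	printf("%#x\n", regs[0]);	/* 0x3c */
	return 0;
}
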
*/ - I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) | + intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_0, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_0) | PWM1_GATING_DIS | PWM2_GATING_DIS); } @@ -165,7 +165,7 @@ static void pnv_get_mem_freq(struct drm_i915_private *dev_priv) { u32 tmp; - tmp = I915_READ(CLKCFG); + tmp = intel_uncore_read(&dev_priv->uncore, CLKCFG); switch (tmp & CLKCFG_FSB_MASK) { case CLKCFG_FSB_533: @@ -195,7 +195,7 @@ static void pnv_get_mem_freq(struct drm_i915_private *dev_priv) } /* detect pineview DDR3 setting */ - tmp = I915_READ(CSHRDDR3CTL); + tmp = intel_uncore_read(&dev_priv->uncore, CSHRDDR3CTL); dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0; } @@ -366,39 +366,39 @@ static bool _intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enabl u32 val; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - was_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; - I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); - POSTING_READ(FW_BLC_SELF_VLV); + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF_VLV); } else if (IS_G4X(dev_priv) || IS_I965GM(dev_priv)) { - was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; - I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); - POSTING_READ(FW_BLC_SELF); + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); } else if (IS_PINEVIEW(dev_priv)) { - val = I915_READ(DSPFW3); + val = intel_uncore_read(&dev_priv->uncore, DSPFW3); was_enabled = val & PINEVIEW_SELF_REFRESH_EN; if (enable) val |= PINEVIEW_SELF_REFRESH_EN; else val &= ~PINEVIEW_SELF_REFRESH_EN; - I915_WRITE(DSPFW3, val); - POSTING_READ(DSPFW3); + intel_uncore_write(&dev_priv->uncore, DSPFW3, val); + intel_uncore_posting_read(&dev_priv->uncore, DSPFW3); } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) { - was_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; + was_enabled = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) : _MASKED_BIT_DISABLE(FW_BLC_SELF_EN); - I915_WRITE(FW_BLC_SELF, val); - POSTING_READ(FW_BLC_SELF); + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, val); + intel_uncore_posting_read(&dev_priv->uncore, FW_BLC_SELF); } else if (IS_I915GM(dev_priv)) { /* * FIXME can't find a bit like this for 915G, and * and yet it does have the related watermark in * FW_BLC_SELF. What's going on? */ - was_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN; + was_enabled = intel_uncore_read(&dev_priv->uncore, INSTPM) & INSTPM_SELF_EN; val = enable ? 
_MASKED_BIT_ENABLE(INSTPM_SELF_EN) : _MASKED_BIT_DISABLE(INSTPM_SELF_EN); - I915_WRITE(INSTPM, val); - POSTING_READ(INSTPM); + intel_uncore_write(&dev_priv->uncore, INSTPM, val); + intel_uncore_posting_read(&dev_priv->uncore, INSTPM); } else { return false; } @@ -494,20 +494,20 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) switch (pipe) { case PIPE_A: - dsparb = I915_READ(DSPARB); - dsparb2 = I915_READ(DSPARB2); + dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); break; case PIPE_B: - dsparb = I915_READ(DSPARB); - dsparb2 = I915_READ(DSPARB2); + dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); break; case PIPE_C: - dsparb2 = I915_READ(DSPARB2); - dsparb3 = I915_READ(DSPARB3); + dsparb2 = intel_uncore_read(&dev_priv->uncore, DSPARB2); + dsparb3 = intel_uncore_read(&dev_priv->uncore, DSPARB3); sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); break; @@ -525,7 +525,7 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state) static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { - u32 dsparb = I915_READ(DSPARB); + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); int size; size = dsparb & 0x7f; @@ -541,7 +541,7 @@ static int i9xx_get_fifo_size(struct drm_i915_private *dev_priv, static int i830_get_fifo_size(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { - u32 dsparb = I915_READ(DSPARB); + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); int size; size = dsparb & 0x1ff; @@ -558,7 +558,7 @@ static int i830_get_fifo_size(struct drm_i915_private *dev_priv, static int i845_get_fifo_size(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { - u32 dsparb = I915_READ(DSPARB); + u32 dsparb = intel_uncore_read(&dev_priv->uncore, DSPARB); int size; size = dsparb & 0x7f; @@ -911,38 +911,38 @@ static void pnv_update_wm(struct intel_crtc *unused_crtc) wm = intel_calculate_wm(clock, &pnv_display_wm, pnv_display_wm.fifo_size, cpp, latency->display_sr); - reg = I915_READ(DSPFW1); + reg = intel_uncore_read(&dev_priv->uncore, DSPFW1); reg &= ~DSPFW_SR_MASK; reg |= FW_WM(wm, SR); - I915_WRITE(DSPFW1, reg); + intel_uncore_write(&dev_priv->uncore, DSPFW1, reg); drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); /* cursor SR */ wm = intel_calculate_wm(clock, &pnv_cursor_wm, pnv_display_wm.fifo_size, 4, latency->cursor_sr); - reg = I915_READ(DSPFW3); + reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); reg &= ~DSPFW_CURSOR_SR_MASK; reg |= FW_WM(wm, CURSOR_SR); - I915_WRITE(DSPFW3, reg); + intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); /* Display HPLL off SR */ wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm, pnv_display_hplloff_wm.fifo_size, cpp, latency->display_hpll_disable); - reg = I915_READ(DSPFW3); + reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); reg &= ~DSPFW_HPLL_SR_MASK; reg |= FW_WM(wm, HPLL_SR); - I915_WRITE(DSPFW3, reg); + intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); /* cursor HPLL off SR */ wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm, pnv_display_hplloff_wm.fifo_size, 4, latency->cursor_hpll_disable); - reg = 
I915_READ(DSPFW3); + reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); reg &= ~DSPFW_HPLL_CURSOR_MASK; reg |= FW_WM(wm, HPLL_CURSOR); - I915_WRITE(DSPFW3, reg); + intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); drm_dbg_kms(&dev_priv->drm, "DSPFW3 register is %x\n", reg); intel_set_memory_cxsr(dev_priv, true); @@ -976,25 +976,25 @@ static void g4x_write_wm_values(struct drm_i915_private *dev_priv, for_each_pipe(dev_priv, pipe) trace_g4x_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm); - I915_WRITE(DSPFW1, + intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(wm->sr.plane, SR) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); - I915_WRITE(DSPFW2, + intel_uncore_write(&dev_priv->uncore, DSPFW2, (wm->fbc_en ? DSPFW_FBC_SR_EN : 0) | FW_WM(wm->sr.fbc, FBC_SR) | FW_WM(wm->hpll.fbc, FBC_HPLL_SR) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEB) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); - I915_WRITE(DSPFW3, + intel_uncore_write(&dev_priv->uncore, DSPFW3, (wm->hpll_en ? DSPFW_HPLL_SR_EN : 0) | FW_WM(wm->sr.cursor, CURSOR_SR) | FW_WM(wm->hpll.cursor, HPLL_CURSOR) | FW_WM(wm->hpll.plane, HPLL_SR)); - POSTING_READ(DSPFW1); + intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); } #define FW_WM_VLV(value, plane) \ @@ -1008,7 +1008,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv, for_each_pipe(dev_priv, pipe) { trace_vlv_wm(intel_get_crtc_for_pipe(dev_priv, pipe), wm); - I915_WRITE(VLV_DDL(pipe), + intel_uncore_write(&dev_priv->uncore, VLV_DDL(pipe), (wm->ddl[pipe].plane[PLANE_CURSOR] << DDL_CURSOR_SHIFT) | (wm->ddl[pipe].plane[PLANE_SPRITE1] << DDL_SPRITE_SHIFT(1)) | (wm->ddl[pipe].plane[PLANE_SPRITE0] << DDL_SPRITE_SHIFT(0)) | @@ -1020,35 +1020,35 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv, * high order bits so that there are no out of bounds values * present in the registers during the reprogramming. 
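
Zeroing DSPHOWM/DSPHOWM1 first matters because the VLV/CHV watermark values are split across registers: the low bits live in the DSPFWx fields, the overflow bits in the high-order registers, so a stale overflow bit must never pair with a freshly written low field mid-update. A little pack/round-trip model of that split (field width simplified to 8 bits; sr.plane actually carries 9 low bits):

#include <stdint.h>
#include <stdio.h>

static void split_wm(uint32_t wm, uint32_t *lo, uint32_t *hi)
{
	*lo = wm & 0xff;	/* legacy 8-bit DSPFWx field */
	*hi = wm >> 8;		/* overflow bit(s), kept in DSPHOWM */
}

static uint32_t join_wm(uint32_t lo, uint32_t hi)
{
	return (hi << 8) | lo;
}

int main(void)
{
	uint32_t lo, hi;

	split_wm(0x1a5, &lo, &hi);
	printf("lo=%#x hi=%#x joined=%#x\n", lo, hi, join_wm(lo, hi));
	return 0;
}
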
*/ - I915_WRITE(DSPHOWM, 0); - I915_WRITE(DSPHOWM1, 0); - I915_WRITE(DSPFW4, 0); - I915_WRITE(DSPFW5, 0); - I915_WRITE(DSPFW6, 0); + intel_uncore_write(&dev_priv->uncore, DSPHOWM, 0); + intel_uncore_write(&dev_priv->uncore, DSPHOWM1, 0); + intel_uncore_write(&dev_priv->uncore, DSPFW4, 0); + intel_uncore_write(&dev_priv->uncore, DSPFW5, 0); + intel_uncore_write(&dev_priv->uncore, DSPFW6, 0); - I915_WRITE(DSPFW1, + intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(wm->sr.plane, SR) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_CURSOR], CURSORB) | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_PRIMARY], PLANEB) | FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_PRIMARY], PLANEA)); - I915_WRITE(DSPFW2, + intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE1], SPRITEB) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_CURSOR], CURSORA) | FW_WM_VLV(wm->pipe[PIPE_A].plane[PLANE_SPRITE0], SPRITEA)); - I915_WRITE(DSPFW3, + intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(wm->sr.cursor, CURSOR_SR)); if (IS_CHERRYVIEW(dev_priv)) { - I915_WRITE(DSPFW7_CHV, + intel_uncore_write(&dev_priv->uncore, DSPFW7_CHV, FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); - I915_WRITE(DSPFW8_CHV, + intel_uncore_write(&dev_priv->uncore, DSPFW8_CHV, FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE1], SPRITEF) | FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_SPRITE0], SPRITEE)); - I915_WRITE(DSPFW9_CHV, + intel_uncore_write(&dev_priv->uncore, DSPFW9_CHV, FW_WM_VLV(wm->pipe[PIPE_C].plane[PLANE_PRIMARY], PLANEC) | FW_WM(wm->pipe[PIPE_C].plane[PLANE_CURSOR], CURSORC)); - I915_WRITE(DSPHOWM, + intel_uncore_write(&dev_priv->uncore, DSPHOWM, FW_WM(wm->sr.plane >> 9, SR_HI) | FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE1] >> 8, SPRITEF_HI) | FW_WM(wm->pipe[PIPE_C].plane[PLANE_SPRITE0] >> 8, SPRITEE_HI) | @@ -1060,10 +1060,10 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv, FW_WM(wm->pipe[PIPE_A].plane[PLANE_SPRITE0] >> 8, SPRITEA_HI) | FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); } else { - I915_WRITE(DSPFW7, + intel_uncore_write(&dev_priv->uncore, DSPFW7, FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE1], SPRITED) | FW_WM_VLV(wm->pipe[PIPE_B].plane[PLANE_SPRITE0], SPRITEC)); - I915_WRITE(DSPHOWM, + intel_uncore_write(&dev_priv->uncore, DSPHOWM, FW_WM(wm->sr.plane >> 9, SR_HI) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE1] >> 8, SPRITED_HI) | FW_WM(wm->pipe[PIPE_B].plane[PLANE_SPRITE0] >> 8, SPRITEC_HI) | @@ -1073,7 +1073,7 @@ static void vlv_write_wm_values(struct drm_i915_private *dev_priv, FW_WM(wm->pipe[PIPE_A].plane[PLANE_PRIMARY] >> 8, PLANEA_HI)); } - POSTING_READ(DSPFW1); + intel_uncore_posting_read(&dev_priv->uncore, DSPFW1); } #undef FW_WM_VLV @@ -2310,14 +2310,14 @@ static void i965_update_wm(struct intel_crtc *unused_crtc) srwm); /* 965 has limitations... 
*/ - I915_WRITE(DSPFW1, FW_WM(srwm, SR) | + intel_uncore_write(&dev_priv->uncore, DSPFW1, FW_WM(srwm, SR) | FW_WM(8, CURSORB) | FW_WM(8, PLANEB) | FW_WM(8, PLANEA)); - I915_WRITE(DSPFW2, FW_WM(8, CURSORA) | + intel_uncore_write(&dev_priv->uncore, DSPFW2, FW_WM(8, CURSORA) | FW_WM(8, PLANEC_OLD)); /* update cursor SR watermark */ - I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); + intel_uncore_write(&dev_priv->uncore, DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); if (cxsr_enabled) intel_set_memory_cxsr(dev_priv, true); @@ -2447,10 +2447,10 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) srwm = 1; if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) - I915_WRITE(FW_BLC_SELF, + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); else - I915_WRITE(FW_BLC_SELF, srwm & 0x3f); + intel_uncore_write(&dev_priv->uncore, FW_BLC_SELF, srwm & 0x3f); } drm_dbg_kms(&dev_priv->drm, @@ -2464,8 +2464,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc) fwater_lo = fwater_lo | (1 << 24) | (1 << 8); fwater_hi = fwater_hi | (1 << 8); - I915_WRITE(FW_BLC, fwater_lo); - I915_WRITE(FW_BLC2, fwater_hi); + intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); + intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi); if (enabled) intel_set_memory_cxsr(dev_priv, true); @@ -2488,13 +2488,13 @@ static void i845_update_wm(struct intel_crtc *unused_crtc) &i845_wm_info, dev_priv->display.get_fifo_size(dev_priv, PLANE_A), 4, pessimal_latency_ns); - fwater_lo = I915_READ(FW_BLC) & ~0xfff; + fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff; fwater_lo |= (3<<8) | planea_wm; drm_dbg_kms(&dev_priv->drm, "Setting FIFO watermarks - A: %d\n", planea_wm); - I915_WRITE(FW_BLC, fwater_lo); + intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); } /* latency must be in 0.1us units. 
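
Callers like i845_update_wm() above derive the watermark from how many FIFO entries the display drains during the memory latency window (note the units vary: some helpers take latency in 0.1 us steps, others in ns). A rough model of that arithmetic, with ns latency; constants and rounding are illustrative only, not intel_calculate_wm()'s exact formula:

#include <stdio.h>

/* fifo_size in entries, line_size in bytes, latency in ns. */
static int calc_wm(int clock_khz, int cpp, int latency_ns,
		   int fifo_size, int line_size, int guard)
{
	long bytes = (long)(clock_khz / 1000) * cpp * latency_ns / 1000;
	int entries = (int)((bytes + line_size - 1) / line_size);
	int wm = fifo_size - entries - guard;

	return wm < 0 ? 0 : wm;	/* illustrative; see intel_calculate_wm() */
}

int main(void)
{
	/* 148.5 MHz pixel clock, 4 bpp, 200 ns latency, 96-entry FIFO. */
	printf("wm = %d\n", calc_wm(148500, 4, 200, 96, 64, 2));
	return 0;
}
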
*/ @@ -3534,17 +3534,17 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv, if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] & WM1_LP_SR_EN) { previous->wm_lp[2] &= ~WM1_LP_SR_EN; - I915_WRITE(WM3_LP_ILK, previous->wm_lp[2]); + intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, previous->wm_lp[2]); changed = true; } if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] & WM1_LP_SR_EN) { previous->wm_lp[1] &= ~WM1_LP_SR_EN; - I915_WRITE(WM2_LP_ILK, previous->wm_lp[1]); + intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, previous->wm_lp[1]); changed = true; } if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] & WM1_LP_SR_EN) { previous->wm_lp[0] &= ~WM1_LP_SR_EN; - I915_WRITE(WM1_LP_ILK, previous->wm_lp[0]); + intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, previous->wm_lp[0]); changed = true; } @@ -3574,56 +3574,56 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv, _ilk_disable_lp_wm(dev_priv, dirty); if (dirty & WM_DIRTY_PIPE(PIPE_A)) - I915_WRITE(WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]); + intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_A), results->wm_pipe[0]); if (dirty & WM_DIRTY_PIPE(PIPE_B)) - I915_WRITE(WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]); + intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_B), results->wm_pipe[1]); if (dirty & WM_DIRTY_PIPE(PIPE_C)) - I915_WRITE(WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]); + intel_uncore_write(&dev_priv->uncore, WM0_PIPE_ILK(PIPE_C), results->wm_pipe[2]); if (dirty & WM_DIRTY_DDB) { if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - val = I915_READ(WM_MISC); + val = intel_uncore_read(&dev_priv->uncore, WM_MISC); if (results->partitioning == INTEL_DDB_PART_1_2) val &= ~WM_MISC_DATA_PARTITION_5_6; else val |= WM_MISC_DATA_PARTITION_5_6; - I915_WRITE(WM_MISC, val); + intel_uncore_write(&dev_priv->uncore, WM_MISC, val); } else { - val = I915_READ(DISP_ARB_CTL2); + val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2); if (results->partitioning == INTEL_DDB_PART_1_2) val &= ~DISP_DATA_PARTITION_5_6; else val |= DISP_DATA_PARTITION_5_6; - I915_WRITE(DISP_ARB_CTL2, val); + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val); } } if (dirty & WM_DIRTY_FBC) { - val = I915_READ(DISP_ARB_CTL); + val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL); if (results->enable_fbc_wm) val &= ~DISP_FBC_WM_DIS; else val |= DISP_FBC_WM_DIS; - I915_WRITE(DISP_ARB_CTL, val); + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, val); } if (dirty & WM_DIRTY_LP(1) && previous->wm_lp_spr[0] != results->wm_lp_spr[0]) - I915_WRITE(WM1S_LP_ILK, results->wm_lp_spr[0]); + intel_uncore_write(&dev_priv->uncore, WM1S_LP_ILK, results->wm_lp_spr[0]); if (INTEL_GEN(dev_priv) >= 7) { if (dirty & WM_DIRTY_LP(2) && previous->wm_lp_spr[1] != results->wm_lp_spr[1]) - I915_WRITE(WM2S_LP_IVB, results->wm_lp_spr[1]); + intel_uncore_write(&dev_priv->uncore, WM2S_LP_IVB, results->wm_lp_spr[1]); if (dirty & WM_DIRTY_LP(3) && previous->wm_lp_spr[2] != results->wm_lp_spr[2]) - I915_WRITE(WM3S_LP_IVB, results->wm_lp_spr[2]); + intel_uncore_write(&dev_priv->uncore, WM3S_LP_IVB, results->wm_lp_spr[2]); } if (dirty & WM_DIRTY_LP(1) && previous->wm_lp[0] != results->wm_lp[0]) - I915_WRITE(WM1_LP_ILK, results->wm_lp[0]); + intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, results->wm_lp[0]); if (dirty & WM_DIRTY_LP(2) && previous->wm_lp[1] != results->wm_lp[1]) - I915_WRITE(WM2_LP_ILK, results->wm_lp[1]); + intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, results->wm_lp[1]); if (dirty & WM_DIRTY_LP(3) && previous->wm_lp[2] != 
results->wm_lp[2]) - I915_WRITE(WM3_LP_ILK, results->wm_lp[2]); + intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, results->wm_lp[2]); dev_priv->wm.hw = *results; } @@ -3640,7 +3640,7 @@ u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv) u8 enabled_slices_mask = 0; for (i = 0; i < max_slices; i++) { - if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE) + if (intel_uncore_read(&dev_priv->uncore, DBUF_CTL_S(i)) & DBUF_POWER_STATE) enabled_slices_mask |= BIT(i); } @@ -4300,12 +4300,12 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, /* Cursor doesn't support NV12/planar, so no extra calculation needed */ if (plane_id == PLANE_CURSOR) { - val = I915_READ(CUR_BUF_CFG(pipe)); + val = intel_uncore_read(&dev_priv->uncore, CUR_BUF_CFG(pipe)); skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); return; } - val = I915_READ(PLANE_CTL(pipe, plane_id)); + val = intel_uncore_read(&dev_priv->uncore, PLANE_CTL(pipe, plane_id)); /* No DDB allocated for disabled planes */ if (val & PLANE_CTL_ENABLE) @@ -4314,11 +4314,11 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, val & PLANE_CTL_ALPHA_MASK); if (INTEL_GEN(dev_priv) >= 11) { - val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); + val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id)); skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); } else { - val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); - val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); + val = intel_uncore_read(&dev_priv->uncore, PLANE_BUF_CFG(pipe, plane_id)); + val2 = intel_uncore_read(&dev_priv->uncore, PLANE_NV12_BUF_CFG(pipe, plane_id)); if (fourcc && drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc))) @@ -6231,9 +6231,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, for (level = 0; level <= max_level; level++) { if (plane_id != PLANE_CURSOR) - val = I915_READ(PLANE_WM(pipe, plane_id, level)); + val = intel_uncore_read(&dev_priv->uncore, PLANE_WM(pipe, plane_id, level)); else - val = I915_READ(CUR_WM(pipe, level)); + val = intel_uncore_read(&dev_priv->uncore, CUR_WM(pipe, level)); skl_wm_level_from_reg_val(val, &wm->wm[level]); } @@ -6242,9 +6242,9 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc, wm->sagv_wm0 = wm->wm[0]; if (plane_id != PLANE_CURSOR) - val = I915_READ(PLANE_WM_TRANS(pipe, plane_id)); + val = intel_uncore_read(&dev_priv->uncore, PLANE_WM_TRANS(pipe, plane_id)); else - val = I915_READ(CUR_WM_TRANS(pipe)); + val = intel_uncore_read(&dev_priv->uncore, CUR_WM_TRANS(pipe)); skl_wm_level_from_reg_val(val, &wm->trans_wm); } @@ -6280,7 +6280,7 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc) struct intel_pipe_wm *active = &crtc_state->wm.ilk.optimal; enum pipe pipe = crtc->pipe; - hw->wm_pipe[pipe] = I915_READ(WM0_PIPE_ILK(pipe)); + hw->wm_pipe[pipe] = intel_uncore_read(&dev_priv->uncore, WM0_PIPE_ILK(pipe)); memset(active, 0, sizeof(*active)); @@ -6324,13 +6324,13 @@ static void g4x_read_wm_values(struct drm_i915_private *dev_priv, { u32 tmp; - tmp = I915_READ(DSPFW1); + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); wm->sr.plane = _FW_WM(tmp, SR); wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEB); wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM(tmp, PLANEA); - tmp = I915_READ(DSPFW2); + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); wm->fbc_en = tmp & DSPFW_FBC_SR_EN; wm->sr.fbc = _FW_WM(tmp, FBC_SR); wm->hpll.fbc = _FW_WM(tmp, FBC_HPLL_SR); @@ -6338,7 +6338,7 @@ static void 
g4x_read_wm_values(struct drm_i915_private *dev_priv, wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM(tmp, SPRITEA); - tmp = I915_READ(DSPFW3); + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); wm->hpll_en = tmp & DSPFW_HPLL_SR_EN; wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); wm->hpll.cursor = _FW_WM(tmp, HPLL_CURSOR); @@ -6352,7 +6352,7 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv, u32 tmp; for_each_pipe(dev_priv, pipe) { - tmp = I915_READ(VLV_DDL(pipe)); + tmp = intel_uncore_read(&dev_priv->uncore, VLV_DDL(pipe)); wm->ddl[pipe].plane[PLANE_PRIMARY] = (tmp >> DDL_PLANE_SHIFT) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); @@ -6364,34 +6364,34 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv, (tmp >> DDL_SPRITE_SHIFT(1)) & (DDL_PRECISION_HIGH | DRAIN_LATENCY_MASK); } - tmp = I915_READ(DSPFW1); + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW1); wm->sr.plane = _FW_WM(tmp, SR); wm->pipe[PIPE_B].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORB); wm->pipe[PIPE_B].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEB); wm->pipe[PIPE_A].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEA); - tmp = I915_READ(DSPFW2); + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW2); wm->pipe[PIPE_A].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEB); wm->pipe[PIPE_A].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORA); wm->pipe[PIPE_A].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEA); - tmp = I915_READ(DSPFW3); + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW3); wm->sr.cursor = _FW_WM(tmp, CURSOR_SR); if (IS_CHERRYVIEW(dev_priv)) { - tmp = I915_READ(DSPFW7_CHV); + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7_CHV); wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); - tmp = I915_READ(DSPFW8_CHV); + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW8_CHV); wm->pipe[PIPE_C].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITEF); wm->pipe[PIPE_C].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEE); - tmp = I915_READ(DSPFW9_CHV); + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW9_CHV); wm->pipe[PIPE_C].plane[PLANE_PRIMARY] = _FW_WM_VLV(tmp, PLANEC); wm->pipe[PIPE_C].plane[PLANE_CURSOR] = _FW_WM(tmp, CURSORC); - tmp = I915_READ(DSPHOWM); + tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; wm->pipe[PIPE_C].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITEF_HI) << 8; wm->pipe[PIPE_C].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEE_HI) << 8; @@ -6403,11 +6403,11 @@ static void vlv_read_wm_values(struct drm_i915_private *dev_priv, wm->pipe[PIPE_A].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEA_HI) << 8; wm->pipe[PIPE_A].plane[PLANE_PRIMARY] |= _FW_WM(tmp, PLANEA_HI) << 8; } else { - tmp = I915_READ(DSPFW7); + tmp = intel_uncore_read(&dev_priv->uncore, DSPFW7); wm->pipe[PIPE_B].plane[PLANE_SPRITE1] = _FW_WM_VLV(tmp, SPRITED); wm->pipe[PIPE_B].plane[PLANE_SPRITE0] = _FW_WM_VLV(tmp, SPRITEC); - tmp = I915_READ(DSPHOWM); + tmp = intel_uncore_read(&dev_priv->uncore, DSPHOWM); wm->sr.plane |= _FW_WM(tmp, SR_HI) << 9; wm->pipe[PIPE_B].plane[PLANE_SPRITE1] |= _FW_WM(tmp, SPRITED_HI) << 8; wm->pipe[PIPE_B].plane[PLANE_SPRITE0] |= _FW_WM(tmp, SPRITEC_HI) << 8; @@ -6428,7 +6428,7 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv) g4x_read_wm_values(dev_priv, wm); - wm->cxsr = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; + wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF) & FW_BLC_SELF_EN; for_each_intel_crtc(&dev_priv->drm, crtc) { struct 
intel_crtc_state *crtc_state = @@ -6572,7 +6572,7 @@ void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv) vlv_read_wm_values(dev_priv, wm); - wm->cxsr = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; + wm->cxsr = intel_uncore_read(&dev_priv->uncore, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN; wm->level = VLV_WM_LEVEL_PM2; if (IS_CHERRYVIEW(dev_priv)) { @@ -6719,9 +6719,9 @@ void vlv_wm_sanitize(struct drm_i915_private *dev_priv) */ static void ilk_init_lp_watermarks(struct drm_i915_private *dev_priv) { - I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); - I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); - I915_WRITE(WM1_LP_ILK, I915_READ(WM1_LP_ILK) & ~WM1_LP_SR_EN); + intel_uncore_write(&dev_priv->uncore, WM3_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK) & ~WM1_LP_SR_EN); + intel_uncore_write(&dev_priv->uncore, WM2_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK) & ~WM1_LP_SR_EN); + intel_uncore_write(&dev_priv->uncore, WM1_LP_ILK, intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK) & ~WM1_LP_SR_EN); /* * Don't touch WM1S_LP_EN here. @@ -6739,25 +6739,25 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv) for_each_intel_crtc(&dev_priv->drm, crtc) ilk_pipe_wm_get_hw_state(crtc); - hw->wm_lp[0] = I915_READ(WM1_LP_ILK); - hw->wm_lp[1] = I915_READ(WM2_LP_ILK); - hw->wm_lp[2] = I915_READ(WM3_LP_ILK); + hw->wm_lp[0] = intel_uncore_read(&dev_priv->uncore, WM1_LP_ILK); + hw->wm_lp[1] = intel_uncore_read(&dev_priv->uncore, WM2_LP_ILK); + hw->wm_lp[2] = intel_uncore_read(&dev_priv->uncore, WM3_LP_ILK); - hw->wm_lp_spr[0] = I915_READ(WM1S_LP_ILK); + hw->wm_lp_spr[0] = intel_uncore_read(&dev_priv->uncore, WM1S_LP_ILK); if (INTEL_GEN(dev_priv) >= 7) { - hw->wm_lp_spr[1] = I915_READ(WM2S_LP_IVB); - hw->wm_lp_spr[2] = I915_READ(WM3S_LP_IVB); + hw->wm_lp_spr[1] = intel_uncore_read(&dev_priv->uncore, WM2S_LP_IVB); + hw->wm_lp_spr[2] = intel_uncore_read(&dev_priv->uncore, WM3S_LP_IVB); } if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - hw->partitioning = (I915_READ(WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? + hw->partitioning = (intel_uncore_read(&dev_priv->uncore, WM_MISC) & WM_MISC_DATA_PARTITION_5_6) ? INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; else if (IS_IVYBRIDGE(dev_priv)) - hw->partitioning = (I915_READ(DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? + hw->partitioning = (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2) & DISP_DATA_PARTITION_5_6) ? INTEL_DDB_PART_5_6 : INTEL_DDB_PART_1_2; hw->enable_fbc_wm = - !(I915_READ(DISP_ARB_CTL) & DISP_FBC_WM_DIS); + !(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS); } /** @@ -6808,14 +6808,14 @@ void intel_enable_ipc(struct drm_i915_private *dev_priv) if (!HAS_IPC(dev_priv)) return; - val = I915_READ(DISP_ARB_CTL2); + val = intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL2); if (dev_priv->ipc_enabled) val |= DISP_IPC_ENABLE; else val &= ~DISP_IPC_ENABLE; - I915_WRITE(DISP_ARB_CTL2, val); + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL2, val); } static bool intel_can_enable_ipc(struct drm_i915_private *dev_priv) @@ -6850,7 +6850,7 @@ static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) * gating for the panel power sequencer or it will fail to * start up when no ports are active. 
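The IPC toggle in intel_enable_ipc() above is a plain read-modify-write; the same single-bit flip can also be expressed with the intel_uncore_rmw() helper this patch uses in the icl hunk further down. A hedged sketch of the equivalence (example_set_ipc() is illustrative only):

static void example_set_ipc(struct intel_uncore *uncore, bool enable)
{
	u32 val;

	/* open-coded read-modify-write, as in intel_enable_ipc() above */
	val = intel_uncore_read(uncore, DISP_ARB_CTL2);
	if (enable)
		val |= DISP_IPC_ENABLE;
	else
		val &= ~DISP_IPC_ENABLE;
	intel_uncore_write(uncore, DISP_ARB_CTL2, val);

	/* the same flip via the helper: clear the bit, then set it if needed */
	intel_uncore_rmw(uncore, DISP_ARB_CTL2,
			 DISP_IPC_ENABLE, enable ? DISP_IPC_ENABLE : 0);
}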
*/ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); + intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); } static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) @@ -6858,12 +6858,12 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) enum pipe pipe; for_each_pipe(dev_priv, pipe) { - I915_WRITE(DSPCNTR(pipe), - I915_READ(DSPCNTR(pipe)) | + intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe), + intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) | DISPPLANE_TRICKLE_FEED_DISABLE); - I915_WRITE(DSPSURF(pipe), I915_READ(DSPSURF(pipe))); - POSTING_READ(DSPSURF(pipe)); + intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe))); + intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe)); } } @@ -6879,10 +6879,10 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) ILK_DPFCUNIT_CLOCK_GATE_DISABLE | ILK_DPFDUNIT_CLOCK_GATE_ENABLE; - I915_WRITE(PCH_3DCGDIS0, + intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS0, MARIUNIT_CLOCK_GATE_DISABLE | SVSMUNIT_CLOCK_GATE_DISABLE); - I915_WRITE(PCH_3DCGDIS1, + intel_uncore_write(&dev_priv->uncore, PCH_3DCGDIS1, VFMUNIT_CLOCK_GATE_DISABLE); /* @@ -6892,12 +6892,12 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) * The bit 5 of 0x42020 * The bit 15 of 0x45000 */ - I915_WRITE(ILK_DISPLAY_CHICKEN2, - (I915_READ(ILK_DISPLAY_CHICKEN2) | + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, + (intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); dspclk_gate |= ILK_DPARBUNIT_CLOCK_GATE_ENABLE; - I915_WRITE(DISP_ARB_CTL, - (I915_READ(DISP_ARB_CTL) | + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, + (intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | DISP_FBC_WM_DIS)); /* @@ -6909,18 +6909,18 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) */ if (IS_IRONLAKE_M(dev_priv)) { /* WaFbcAsynchFlipDisableFbcQueue:ilk */ - I915_WRITE(ILK_DISPLAY_CHICKEN1, - I915_READ(ILK_DISPLAY_CHICKEN1) | + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1, + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS); - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE); } - I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); + intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate); - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) | ILK_ELPIN_409_SELECT); g4x_disable_trickle_feed(dev_priv); @@ -6938,27 +6938,27 @@ static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) * gating for the panel power sequencer or it will fail to * start up when no ports are active. 
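POSTING_READ() in g4x_disable_trickle_feed() above becomes intel_uncore_posting_read(); the read-back forces the preceding posted MMIO writes to actually reach the hardware before the loop moves to the next pipe. A sketch of the idiom, mirroring the converted hunk:

static void example_disable_trickle_feed(struct intel_uncore *uncore,
					 enum pipe pipe)
{
	/* set the control bit ... */
	intel_uncore_write(uncore, DSPCNTR(pipe),
			   intel_uncore_read(uncore, DSPCNTR(pipe)) |
			   DISPPLANE_TRICKLE_FEED_DISABLE);

	/* ... rewrite the surface address to latch the change ... */
	intel_uncore_write(uncore, DSPSURF(pipe),
			   intel_uncore_read(uncore, DSPSURF(pipe)));

	/* ... and read back to flush the posted writes out to the device */
	intel_uncore_posting_read(uncore, DSPSURF(pipe));
}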
*/ - I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | + intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE | PCH_DPLUNIT_CLOCK_GATE_DISABLE | PCH_CPUNIT_CLOCK_GATE_DISABLE); - I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | + intel_uncore_write(&dev_priv->uncore, SOUTH_CHICKEN2, intel_uncore_read(&dev_priv->uncore, SOUTH_CHICKEN2) | DPLS_EDP_PPS_FIX_DIS); /* The below fixes the weird display corruption, a few pixels shifted * downward, on (only) LVDS of some HP laptops with IVY. */ for_each_pipe(dev_priv, pipe) { - val = I915_READ(TRANS_CHICKEN2(pipe)); + val = intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN2(pipe)); val |= TRANS_CHICKEN2_TIMING_OVERRIDE; val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; if (dev_priv->vbt.fdi_rx_polarity_inverted) val |= TRANS_CHICKEN2_FDI_POLARITY_REVERSED; val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER; val &= ~TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH; - I915_WRITE(TRANS_CHICKEN2(pipe), val); + intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN2(pipe), val); } /* WADP0ClockGatingDisable */ for_each_pipe(dev_priv, pipe) { - I915_WRITE(TRANS_CHICKEN1(pipe), + intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(pipe), TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); } } @@ -6967,7 +6967,7 @@ static void gen6_check_mch_setup(struct drm_i915_private *dev_priv) { u32 tmp; - tmp = I915_READ(MCH_SSKPD); + tmp = intel_uncore_read(&dev_priv->uncore, MCH_SSKPD); if ((tmp & MCH_SSKPD_WM0_MASK) != MCH_SSKPD_WM0_VAL) drm_dbg_kms(&dev_priv->drm, "Wrong MCH_SSKPD value: 0x%08x This can cause underruns.\n", @@ -6978,14 +6978,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) { u32 dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); + intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, dspclk_gate); - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) | ILK_ELPIN_409_SELECT); - I915_WRITE(GEN6_UCGCTL1, - I915_READ(GEN6_UCGCTL1) | + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, + intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_BLBUNIT_CLOCK_GATE_DISABLE | GEN6_CSUNIT_CLOCK_GATE_DISABLE); @@ -7002,7 +7002,7 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) * WaDisableRCCUnitClockGating:snb * WaDisableRCPBUnitClockGating:snb */ - I915_WRITE(GEN6_UCGCTL2, + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2, GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); @@ -7017,14 +7017,14 @@ static void gen6_init_clock_gating(struct drm_i915_private *dev_priv) * * WaFbcAsynchFlipDisableFbcQueue:snb */ - I915_WRITE(ILK_DISPLAY_CHICKEN1, - I915_READ(ILK_DISPLAY_CHICKEN1) | + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1, + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); - I915_WRITE(ILK_DISPLAY_CHICKEN2, - I915_READ(ILK_DISPLAY_CHICKEN2) | + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2, + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN2) | ILK_DPARB_GATE | ILK_VSDPFD_FULL); - I915_WRITE(ILK_DSPCLK_GATE_D, - I915_READ(ILK_DSPCLK_GATE_D) | + intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, + intel_uncore_read(&dev_priv->uncore, ILK_DSPCLK_GATE_D) | ILK_DPARBUNIT_CLOCK_GATE_ENABLE | ILK_DPFDUNIT_CLOCK_GATE_ENABLE); @@ -7042,23 +7042,23 @@ static void lpt_init_clock_gating(struct 
drm_i915_private *dev_priv) * disabled when not needed anymore in order to save power. */ if (HAS_PCH_LPT_LP(dev_priv)) - I915_WRITE(SOUTH_DSPCLK_GATE_D, - I915_READ(SOUTH_DSPCLK_GATE_D) | + intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, + intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) | PCH_LP_PARTITION_LEVEL_DISABLE); /* WADPOClockGatingDisable:hsw */ - I915_WRITE(TRANS_CHICKEN1(PIPE_A), - I915_READ(TRANS_CHICKEN1(PIPE_A)) | + intel_uncore_write(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A), + intel_uncore_read(&dev_priv->uncore, TRANS_CHICKEN1(PIPE_A)) | TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); } static void lpt_suspend_hw(struct drm_i915_private *dev_priv) { if (HAS_PCH_LPT_LP(dev_priv)) { - u32 val = I915_READ(SOUTH_DSPCLK_GATE_D); + u32 val = intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D); val &= ~PCH_LP_PARTITION_LEVEL_DISABLE; - I915_WRITE(SOUTH_DSPCLK_GATE_D, val); + intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, val); } } @@ -7070,33 +7070,33 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, u32 val; /* WaTempDisableDOPClkGating:bdw */ - misccpctl = I915_READ(GEN7_MISCCPCTL); - I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); + misccpctl = intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL); + intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); - val = I915_READ(GEN8_L3SQCREG1); + val = intel_uncore_read(&dev_priv->uncore, GEN8_L3SQCREG1); val &= ~L3_PRIO_CREDITS_MASK; val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits); val |= L3_HIGH_PRIO_CREDITS(high_prio_credits); - I915_WRITE(GEN8_L3SQCREG1, val); + intel_uncore_write(&dev_priv->uncore, GEN8_L3SQCREG1, val); /* * Wait at least 100 clocks before re-enabling clock gating. * See the definition of L3SQCREG1 in BSpec. */ - POSTING_READ(GEN8_L3SQCREG1); + intel_uncore_posting_read(&dev_priv->uncore, GEN8_L3SQCREG1); udelay(1); - I915_WRITE(GEN7_MISCCPCTL, misccpctl); + intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, misccpctl); } static void icl_init_clock_gating(struct drm_i915_private *dev_priv) { /* Wa_1409120013:icl,ehl */ - I915_WRITE(ILK_DPFC_CHICKEN, + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); /* This is not an Wa. 
Enable to reduce Sampler power */ - I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN, - I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE); + intel_uncore_write(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN, + intel_uncore_read(&dev_priv->uncore, GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE); /*Wa_14010594013:icl, ehl */ intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, @@ -7106,12 +7106,12 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) { /* Wa_1409120013:tgl */ - I915_WRITE(ILK_DPFC_CHICKEN, + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); /* Wa_1409825376:tgl (pre-prod)*/ if (IS_TGL_DISP_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_B1)) - I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) | + intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) | TGL_VRH_GATING_DIS); /* Wa_14011059788:tgl */ @@ -7123,7 +7123,7 @@ static void dg1_init_clock_gating(struct drm_i915_private *dev_priv) { /* Wa_1409836686:dg1[a0] */ if (IS_DG1_REVID(dev_priv, DG1_REVID_A0, DG1_REVID_A0)) - I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) | + intel_uncore_write(&dev_priv->uncore, GEN9_CLKGATE_DIS_3, intel_uncore_read(&dev_priv->uncore, GEN9_CLKGATE_DIS_3) | DPT_GATING_DIS); } @@ -7133,7 +7133,7 @@ static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) return; /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */ - I915_WRITE(SOUTH_DSPCLK_GATE_D, I915_READ(SOUTH_DSPCLK_GATE_D) | + intel_uncore_write(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D, intel_uncore_read(&dev_priv->uncore, SOUTH_DSPCLK_GATE_D) | CNP_PWM_CGE_GATING_DISABLE); } @@ -7143,35 +7143,35 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv) cnp_init_clock_gating(dev_priv); /* This is not an Wa. 
Enable for better image quality */ - I915_WRITE(_3D_CHICKEN3, + intel_uncore_write(&dev_priv->uncore, _3D_CHICKEN3, _MASKED_BIT_ENABLE(_3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE)); /* WaEnableChickenDCPR:cnl */ - I915_WRITE(GEN8_CHICKEN_DCPR_1, - I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); + intel_uncore_write(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, + intel_uncore_read(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM); /* * WaFbcWakeMemOn:cnl * Display WA #0859: cnl */ - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | DISP_FBC_MEMORY_WAKE); - val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE); + val = intel_uncore_read(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE); /* ReadHitWriteOnlyDisable:cnl */ val |= RCCUNIT_CLKGATE_DIS; - I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val); + intel_uncore_write(&dev_priv->uncore, SLICE_UNIT_LEVEL_CLKGATE, val); /* Wa_2201832410:cnl */ - val = I915_READ(SUBSLICE_UNIT_LEVEL_CLKGATE); + val = intel_uncore_read(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE); val |= GWUNIT_CLKGATE_DIS; - I915_WRITE(SUBSLICE_UNIT_LEVEL_CLKGATE, val); + intel_uncore_write(&dev_priv->uncore, SUBSLICE_UNIT_LEVEL_CLKGATE, val); /* WaDisableVFclkgate:cnl */ /* WaVFUnitClockGatingDisable:cnl */ - val = I915_READ(UNSLICE_UNIT_LEVEL_CLKGATE); + val = intel_uncore_read(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE); val |= VFUNIT_CLKGATE_DIS; - I915_WRITE(UNSLICE_UNIT_LEVEL_CLKGATE, val); + intel_uncore_write(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE, val); } static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7180,21 +7180,21 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) gen9_init_clock_gating(dev_priv); /* WAC6entrylatency:cfl */ - I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | + intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) | FBC_LLC_FULLY_OPEN); /* * WaFbcTurnOffFbcWatermark:cfl * Display WA #0562: cfl */ - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | DISP_FBC_WM_DIS); /* * WaFbcNukeOnHostModify:cfl * Display WA #0873: cfl */ - I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); } @@ -7203,31 +7203,31 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) gen9_init_clock_gating(dev_priv); /* WAC6entrylatency:kbl */ - I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | + intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) | FBC_LLC_FULLY_OPEN); /* WaDisableSDEUnitClockGating:kbl */ if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0)) - I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | + intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); /* WaDisableGamClockGating:kbl */ if (IS_KBL_GT_REVID(dev_priv, 0, KBL_REVID_B0)) - I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_GAMUNIT_CLOCK_GATE_DISABLE); /* * WaFbcTurnOffFbcWatermark:kbl * Display WA #0562: kbl */ - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | 
+ intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | DISP_FBC_WM_DIS); /* * WaFbcNukeOnHostModify:kbl * Display WA #0873: kbl */ - I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); } @@ -7236,32 +7236,32 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv) gen9_init_clock_gating(dev_priv); /* WaDisableDopClockGating:skl */ - I915_WRITE(GEN7_MISCCPCTL, I915_READ(GEN7_MISCCPCTL) & + intel_uncore_write(&dev_priv->uncore, GEN7_MISCCPCTL, intel_uncore_read(&dev_priv->uncore, GEN7_MISCCPCTL) & ~GEN7_DOP_CLOCK_GATE_ENABLE); /* WAC6entrylatency:skl */ - I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) | + intel_uncore_write(&dev_priv->uncore, FBC_LLC_READ_CTRL, intel_uncore_read(&dev_priv->uncore, FBC_LLC_READ_CTRL) | FBC_LLC_FULLY_OPEN); /* * WaFbcTurnOffFbcWatermark:skl * Display WA #0562: skl */ - I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) | + intel_uncore_write(&dev_priv->uncore, DISP_ARB_CTL, intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) | DISP_FBC_WM_DIS); /* * WaFbcNukeOnHostModify:skl * Display WA #0873: skl */ - I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | ILK_DPFC_NUKE_ON_ANY_MODIFICATION); /* * WaFbcHighMemBwCorruptionAvoidance:skl * Display WA #0883: skl */ - I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | ILK_DPFC_DISABLE_DUMMY0); } @@ -7270,42 +7270,42 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) enum pipe pipe; /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ - I915_WRITE(CHICKEN_PIPESL_1(PIPE_A), - I915_READ(CHICKEN_PIPESL_1(PIPE_A)) | + intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), + intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) | HSW_FBCQ_DIS); /* WaSwitchSolVfFArbitrationPriority:bdw */ - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); + intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); /* WaPsrDPAMaskVBlankInSRD:bdw */ - I915_WRITE(CHICKEN_PAR1_1, - I915_READ(CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); + intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, + intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | DPA_MASK_VBLANK_SRD); /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ for_each_pipe(dev_priv, pipe) { - I915_WRITE(CHICKEN_PIPESL_1(pipe), - I915_READ(CHICKEN_PIPESL_1(pipe)) | + intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), + intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) | BDW_DPRS_MASK_VBLANK_SRD); } /* WaVSRefCountFullforceMissDisable:bdw */ /* WaDSRefCountFullforceMissDisable:bdw */ - I915_WRITE(GEN7_FF_THREAD_MODE, - I915_READ(GEN7_FF_THREAD_MODE) & + intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE, + intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) & ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); - I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, + intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL, _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); /* WaDisableSDEUnitClockGating:bdw */ - I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | + 
intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); /* WaProgramL3SqcReg1Default:bdw */ gen8_set_l3sqc_credits(dev_priv, 30, 2); /* WaKVMNotificationOnConfigChange:bdw */ - I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1) + intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR2_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR2_1) | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT); lpt_init_clock_gating(dev_priv); @@ -7315,24 +7315,24 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) * Also see the CHICKEN2 write in bdw_init_workarounds() to disable DOP * clock gating. */ - I915_WRITE(GEN6_UCGCTL1, - I915_READ(GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, + intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE); } static void hsw_init_clock_gating(struct drm_i915_private *dev_priv) { /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ - I915_WRITE(CHICKEN_PIPESL_1(PIPE_A), - I915_READ(CHICKEN_PIPESL_1(PIPE_A)) | + intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A), + intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) | HSW_FBCQ_DIS); /* This is required by WaCatErrorRejectionIssue:hsw */ - I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, - I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); /* WaSwitchSolVfFArbitrationPriority:hsw */ - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); + intel_uncore_write(&dev_priv->uncore, GAM_ECOCHK, intel_uncore_read(&dev_priv->uncore, GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL); lpt_init_clock_gating(dev_priv); } @@ -7341,26 +7341,26 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) { u32 snpcr; - I915_WRITE(ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); + intel_uncore_write(&dev_priv->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); /* WaFbcAsynchFlipDisableFbcQueue:ivb */ - I915_WRITE(ILK_DISPLAY_CHICKEN1, - I915_READ(ILK_DISPLAY_CHICKEN1) | + intel_uncore_write(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1, + intel_uncore_read(&dev_priv->uncore, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS); /* WaDisableBackToBackFlipFix:ivb */ - I915_WRITE(IVB_CHICKEN3, + intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3, CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | CHICKEN3_DGMG_DONE_FIX_DISABLE); if (IS_IVB_GT1(dev_priv)) - I915_WRITE(GEN7_ROW_CHICKEN2, + intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2, _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); else { /* must write both registers */ - I915_WRITE(GEN7_ROW_CHICKEN2, + intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2, _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); - I915_WRITE(GEN7_ROW_CHICKEN2_GT2, + intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2_GT2, _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); } @@ -7368,20 +7368,20 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) * According to the spec, bit 13 (RCZUNIT) must be set on IVB. * This implements the WaDisableRCZUnitClockGating:ivb workaround. 
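Several writes in the bdw/hsw/ivb hunks above (GEN6_RC_SLEEP_PSMI_CONTROL, GEN7_ROW_CHICKEN2) go through _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() rather than a read-modify-write. Those registers are "masked": the upper 16 bits of the written value select which of the lower 16 bits take effect. A sketch with re-derived macros; the EXAMPLE_* definitions are this editor's assumption of the i915 helpers, not part of this patch:

/* assumed equivalents of the i915 _MASKED_BIT_* helpers */
#define EXAMPLE_MASKED_BIT_ENABLE(a)	(((a) << 16) | (a))
#define EXAMPLE_MASKED_BIT_DISABLE(a)	((a) << 16)

static void example_masked_write(struct intel_uncore *uncore)
{
	/*
	 * Only the bit named in the high half is updated; the other
	 * writable bits are untouched, so no read is needed first.
	 */
	intel_uncore_write(uncore, GEN6_RC_SLEEP_PSMI_CONTROL,
			   EXAMPLE_MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
}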
*/ - I915_WRITE(GEN6_UCGCTL2, + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); /* This is required by WaCatErrorRejectionIssue:ivb */ - I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, - I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); g4x_disable_trickle_feed(dev_priv); - snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); + snpcr = intel_uncore_read(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR); snpcr &= ~GEN6_MBC_SNPCR_MASK; snpcr |= GEN6_MBC_SNPCR_MED; - I915_WRITE(GEN6_MBCUNIT_SNPCR, snpcr); + intel_uncore_write(&dev_priv->uncore, GEN6_MBCUNIT_SNPCR, snpcr); if (!HAS_PCH_NOP(dev_priv)) cpt_init_clock_gating(dev_priv); @@ -7392,58 +7392,58 @@ static void ivb_init_clock_gating(struct drm_i915_private *dev_priv) static void vlv_init_clock_gating(struct drm_i915_private *dev_priv) { /* WaDisableBackToBackFlipFix:vlv */ - I915_WRITE(IVB_CHICKEN3, + intel_uncore_write(&dev_priv->uncore, IVB_CHICKEN3, CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE | CHICKEN3_DGMG_DONE_FIX_DISABLE); /* WaDisableDopClockGating:vlv */ - I915_WRITE(GEN7_ROW_CHICKEN2, + intel_uncore_write(&dev_priv->uncore, GEN7_ROW_CHICKEN2, _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); /* This is required by WaCatErrorRejectionIssue:vlv */ - I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, - I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | + intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, + intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); /* * According to the spec, bit 13 (RCZUNIT) must be set on IVB. * This implements the WaDisableRCZUnitClockGating:vlv workaround. */ - I915_WRITE(GEN6_UCGCTL2, + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE); /* WaDisableL3Bank2xClockGate:vlv * Disabling L3 clock gating- MMIO 940c[25] = 1 * Set bit 25, to disable L3_BANK_2x_CLK_GATING */ - I915_WRITE(GEN7_UCGCTL4, - I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); + intel_uncore_write(&dev_priv->uncore, GEN7_UCGCTL4, + intel_uncore_read(&dev_priv->uncore, GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); /* * WaDisableVLVClockGating_VBIIssue:vlv * Disable clock gating on th GCFG unit to prevent a delay * in the reporting of vblank events. 
*/ - I915_WRITE(VLV_GUNIT_CLOCK_GATE, GCFG_DIS); + intel_uncore_write(&dev_priv->uncore, VLV_GUNIT_CLOCK_GATE, GCFG_DIS); } static void chv_init_clock_gating(struct drm_i915_private *dev_priv) { /* WaVSRefCountFullforceMissDisable:chv */ /* WaDSRefCountFullforceMissDisable:chv */ - I915_WRITE(GEN7_FF_THREAD_MODE, - I915_READ(GEN7_FF_THREAD_MODE) & + intel_uncore_write(&dev_priv->uncore, GEN7_FF_THREAD_MODE, + intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) & ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); /* WaDisableSemaphoreAndSyncFlipWait:chv */ - I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL, + intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL, _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); /* WaDisableCSUnitClockGating:chv */ - I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) | + intel_uncore_write(&dev_priv->uncore, GEN6_UCGCTL1, intel_uncore_read(&dev_priv->uncore, GEN6_UCGCTL1) | GEN6_CSUNIT_CLOCK_GATE_DISABLE); /* WaDisableSDEUnitClockGating:chv */ - I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | + intel_uncore_write(&dev_priv->uncore, GEN8_UCGCTL6, intel_uncore_read(&dev_priv->uncore, GEN8_UCGCTL6) | GEN8_SDEUNIT_CLOCK_GATE_DISABLE); /* @@ -7458,17 +7458,17 @@ static void g4x_init_clock_gating(struct drm_i915_private *dev_priv) { u32 dspclk_gate; - I915_WRITE(RENCLK_GATE_D1, 0); - I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | + intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, 0); + intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | GS_UNIT_CLOCK_GATE_DISABLE | CL_UNIT_CLOCK_GATE_DISABLE); - I915_WRITE(RAMCLK_GATE_D, 0); + intel_uncore_write(&dev_priv->uncore, RAMCLK_GATE_D, 0); dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | OVRUNIT_CLOCK_GATE_DISABLE | OVCUNIT_CLOCK_GATE_DISABLE; if (IS_GM45(dev_priv)) dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; - I915_WRITE(DSPCLK_GATE_D, dspclk_gate); + intel_uncore_write(&dev_priv->uncore, DSPCLK_GATE_D, dspclk_gate); g4x_disable_trickle_feed(dev_priv); } @@ -7489,49 +7489,49 @@ static void i965gm_init_clock_gating(struct drm_i915_private *dev_priv) static void i965g_init_clock_gating(struct drm_i915_private *dev_priv) { - I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | + intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | I965_RCC_CLOCK_GATE_DISABLE | I965_RCPB_CLOCK_GATE_DISABLE | I965_ISC_CLOCK_GATE_DISABLE | I965_FBC_CLOCK_GATE_DISABLE); - I915_WRITE(RENCLK_GATE_D2, 0); - I915_WRITE(MI_ARB_STATE, + intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D2, 0); + intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); } static void gen3_init_clock_gating(struct drm_i915_private *dev_priv) { - u32 dstate = I915_READ(D_STATE); + u32 dstate = intel_uncore_read(&dev_priv->uncore, D_STATE); dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | DSTATE_DOT_CLOCK_GATING; - I915_WRITE(D_STATE, dstate); + intel_uncore_write(&dev_priv->uncore, D_STATE, dstate); if (IS_PINEVIEW(dev_priv)) - I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); + intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); /* IIR "flip pending" means done if this bit is set */ - I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); + intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); /* interrupts should cause a wake up from C3 */ - I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); + intel_uncore_write(&dev_priv->uncore, 
INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ - I915_WRITE(MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); + intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE)); - I915_WRITE(MI_ARB_STATE, + intel_uncore_write(&dev_priv->uncore, MI_ARB_STATE, _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE)); } static void i85x_init_clock_gating(struct drm_i915_private *dev_priv) { - I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); + intel_uncore_write(&dev_priv->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); /* interrupts should cause a wake up from C3 */ - I915_WRITE(MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | + intel_uncore_write(&dev_priv->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) | _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE)); - I915_WRITE(MEM_MODE, + intel_uncore_write(&dev_priv->uncore, MEM_MODE, _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE)); /* @@ -7541,13 +7541,13 @@ static void i85x_init_clock_gating(struct drm_i915_private *dev_priv) * abosultely nothing) would not allow FBC to recompress * until a 2D blit occurs. */ - I915_WRITE(SCPD0, + intel_uncore_write(&dev_priv->uncore, SCPD0, _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D)); } static void i830_init_clock_gating(struct drm_i915_private *dev_priv) { - I915_WRITE(MEM_MODE, + intel_uncore_write(&dev_priv->uncore, MEM_MODE, _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) | _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE)); } diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c index 02ebf5a04a9b..0ec0cf191955 100644 --- a/drivers/gpu/drm/i915/intel_sideband.c +++ b/drivers/gpu/drm/i915/intel_sideband.c @@ -404,8 +404,8 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915, lockdep_assert_held(&i915->sb_lock); /* - * GEN6_PCODE_* are outside of the forcewake domain, we can - * use te fw I915_READ variants to reduce the amount of work + * GEN6_PCODE_* are outside of the forcewake domain, we can use + * intel_uncore_read/write_fw variants to reduce the amount of work * required when reading/writing. */ diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 1c14a07eba7d..9ac501bcfdad 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -2070,7 +2070,7 @@ int i915_reg_read_ioctl(struct drm_device *dev, * This routine waits until the target register @reg contains the expected * @value after applying the @mask, i.e. it waits until :: * - * (I915_READ_FW(reg) & mask) == value + * (intel_uncore_read_fw(uncore, reg) & mask) == value * * Otherwise, the wait will timeout after @slow_timeout_ms milliseconds. * For atomic context @slow_timeout_ms must be zero and @fast_timeout_us @@ -2126,7 +2126,7 @@ int __intel_wait_for_register_fw(struct intel_uncore *uncore, * This routine waits until the target register @reg contains the expected * @value after applying the @mask, i.e. it waits until :: * - * (I915_READ(reg) & mask) == value + * (intel_uncore_read(uncore, reg) & mask) == value * * Otherwise, the wait will timeout after @timeout_ms milliseconds. 
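A usage sketch for the wait helper documented above. The intel_wait_for_register() wrapper name is assumed from intel_uncore.h, and the DBUF power-state bits are borrowed from the dbuf hunk earlier in this file, so treat the exact signature as an assumption:

static int example_wait_dbuf_power(struct intel_uncore *uncore, int slice)
{
	/*
	 * Returns 0 once (intel_uncore_read(uncore, reg) & mask) == value,
	 * or an error after the 10 ms timeout expires.
	 */
	return intel_wait_for_register(uncore,
				       DBUF_CTL_S(slice), DBUF_POWER_STATE,
				       DBUF_POWER_STATE, 10);
}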
* diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index bd2467284295..59f0da8f1fbb 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -216,7 +216,7 @@ void intel_uncore_forcewake_flush(struct intel_uncore *uncore, /* * Like above but the caller must manage the uncore.lock itself. - * Must be used with I915_READ_FW and friends. + * Must be used with intel_uncore_read_fw() and friends. */ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore, enum forcewake_domains domains); @@ -318,8 +318,8 @@ __uncore_write(write_notrace, 32, l, false) * will be implemented using 2 32-bit writes in an arbitrary order with * an arbitrary delay between them. This can cause the hardware to * act upon the intermediate value, possibly leading to corruption and - * machine death. For this reason we do not support I915_WRITE64, or - * uncore->funcs.mmio_writeq. + * machine death. For this reason we do not support intel_uncore_write64, + * or uncore->funcs.mmio_writeq. * * When reading a 64-bit value as two 32-bit values, the delay may cause * the two reads to mismatch, e.g. a timestamp overflowing. Also note that diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 23a6132c5f4e..412e21604a05 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -211,8 +211,8 @@ static int igt_gem_ww_ctx(void *arg) return PTR_ERR(obj); obj2 = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); + if (IS_ERR(obj2)) { + err = PTR_ERR(obj2); goto put1; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index f88473d396f4..3512bb8433cf 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -442,28 +442,22 @@ static int igt_evict_contexts(void *arg) /* Overfill the GGTT with context objects and so try to evict one. 
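The igt_evict_contexts() hunk that follows drops the mock-file/GEM-context plumbing in favour of bare intel_context objects. A sketch of the resulting request-allocation pattern, built only from calls the new code itself uses (the example_* wrapper is illustrative):

static int example_submit_request(struct intel_engine_cs *engine)
{
	struct intel_context *ce;
	struct i915_request *rq;

	ce = intel_context_create(engine);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	rq = intel_context_create_request(ce);
	intel_context_put(ce);	/* the request pins the context itself */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_add(rq);
	return 0;
}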
*/ for_each_engine(engine, gt, id) { struct i915_sw_fence fence; - struct file *file; - - file = mock_file(i915); - if (IS_ERR(file)) { - err = PTR_ERR(file); - break; - } count = 0; onstack_fence_init(&fence); do { + struct intel_context *ce; struct i915_request *rq; - struct i915_gem_context *ctx; - ctx = live_context(i915, file); - if (IS_ERR(ctx)) + ce = intel_context_create(engine); + if (IS_ERR(ce)) break; /* We will need some GGTT space for the rq's context */ igt_evict_ctl.fail_if_busy = true; - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); igt_evict_ctl.fail_if_busy = false; + intel_context_put(ce); if (IS_ERR(rq)) { /* When full, fail_if_busy will trigger EBUSY */ @@ -490,8 +484,6 @@ static int igt_evict_contexts(void *arg) onstack_fence_fini(&fence); pr_info("Submitted %lu contexts/requests on %s\n", count, engine->name); - - fput(file); if (err) break; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index c53a222e3dec..70e07e9b78c2 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -28,6 +28,7 @@ #include "gem/i915_gem_context.h" #include "gem/selftests/mock_context.h" #include "gt/intel_context.h" +#include "gt/intel_gpu_commands.h" #include "i915_random.h" #include "i915_selftest.h" diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c index debbac660519..e9d86dab8677 100644 --- a/drivers/gpu/drm/i915/selftests/i915_perf.c +++ b/drivers/gpu/drm/i915/selftests/i915_perf.c @@ -262,7 +262,7 @@ static int live_noa_delay(void *arg) delay = intel_read_status_page(stream->engine, 0x102); delay -= intel_read_status_page(stream->engine, 0x100); - delay = i915_cs_timestamp_ticks_to_ns(i915, delay); + delay = intel_gt_clock_interval_to_ns(stream->engine->gt, delay); pr_info("GPU delay: %uns, expected %lluns\n", delay, expected); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 64bbb8288249..d2a678a2497e 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -33,6 +33,7 @@ #include "gt/intel_engine_pm.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_clock_utils.h" #include "gt/intel_gt_requests.h" #include "gt/selftest_engine_heartbeat.h" @@ -1560,7 +1561,7 @@ static u32 trifilter(u32 *a) static u64 cycles_to_ns(struct intel_engine_cs *engine, u32 cycles) { - u64 ns = i915_cs_timestamp_ticks_to_ns(engine->i915, cycles); + u64 ns = intel_gt_clock_interval_to_ns(engine->gt, cycles); return DIV_ROUND_CLOSEST(ns, 1 << TF_BIAS); } @@ -1932,9 +1933,7 @@ static int measure_inter_request(struct intel_context *ce) intel_ring_advance(rq, cs); i915_request_add(rq); } - local_bh_disable(); i915_sw_fence_commit(submit); - local_bh_enable(); intel_engine_flush_submission(ce->engine); heap_fence_put(submit); @@ -2220,11 +2219,9 @@ static int measure_completion(struct intel_context *ce) intel_ring_advance(rq, cs); dma_fence_add_callback(&rq->fence, &cb.base, signal_cb); - - local_bh_disable(); i915_request_add(rq); - local_bh_enable(); + intel_engine_flush_submission(ce->engine); if (wait_for(READ_ONCE(sema[i]) == -1, 50)) { err = -EIO; goto err; @@ -2293,8 +2290,10 @@ static int perf_request_latency(void *arg) struct intel_context *ce; ce = intel_context_create(engine); - if (IS_ERR(ce)) + if (IS_ERR(ce)) { + err = PTR_ERR(ce); goto out; + } err = 
intel_context_pin(ce); if (err) { @@ -2467,8 +2466,10 @@ static int perf_series_engines(void *arg) struct intel_context *ce; ce = intel_context_create(engine); - if (IS_ERR(ce)) + if (IS_ERR(ce)) { + err = PTR_ERR(ce); goto out; + } err = intel_context_pin(ce); if (err) { diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index ec0ecb4e4ca6..83f6e5f31fb3 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -3,6 +3,7 @@ * * Copyright © 2018 Intel Corporation */ +#include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gem/selftests/igt_gem_utils.h" @@ -219,6 +220,9 @@ void igt_spinner_fini(struct igt_spinner *spin) bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq) { + if (i915_request_is_ready(rq)) + intel_engine_flush_submission(rq->engine); + return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq), rq->fence.seqno), 100) && diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c index 0aeba8e3af28..ce7adfa3bca0 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -129,6 +129,21 @@ static void igt_object_release(struct drm_i915_gem_object *obj) i915_gem_object_put(obj); } +static bool is_contiguous(struct drm_i915_gem_object *obj) +{ + struct scatterlist *sg; + dma_addr_t addr = -1; + + for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) { + if (addr != -1 && sg_dma_address(sg) != addr) + return false; + + addr = sg_dma_address(sg) + sg_dma_len(sg); + } + + return true; +} + static int igt_mock_contiguous(void *arg) { struct intel_memory_region *mem = arg; @@ -150,8 +165,8 @@ static int igt_mock_contiguous(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); - if (obj->mm.pages->nents != 1) { - pr_err("%s min object spans multiple sg entries\n", __func__); + if (!is_contiguous(obj)) { + pr_err("%s min object spans disjoint sg entries\n", __func__); err = -EINVAL; goto err_close_objects; } @@ -163,8 +178,8 @@ static int igt_mock_contiguous(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); - if (obj->mm.pages->nents != 1) { - pr_err("%s max object spans multiple sg entries\n", __func__); + if (!is_contiguous(obj)) { + pr_err("%s max object spans disjoint sg entries\n", __func__); err = -EINVAL; goto err_close_objects; } @@ -189,8 +204,8 @@ static int igt_mock_contiguous(void *arg) goto err_close_objects; } - if (obj->mm.pages->nents != 1) { - pr_err("%s object spans multiple sg entries\n", __func__); + if (!is_contiguous(obj)) { + pr_err("%s object spans disjoint sg entries\n", __func__); err = -EINVAL; goto err_close_objects; } @@ -337,6 +352,68 @@ out_put: return err; } +#ifndef SZ_8G +#define SZ_8G BIT_ULL(33) +#endif + +static int igt_mock_max_segment(void *arg) +{ + const unsigned int max_segment = i915_sg_segment_size(); + struct intel_memory_region *mem = arg; + struct drm_i915_private *i915 = mem->i915; + struct drm_i915_gem_object *obj; + struct i915_buddy_block *block; + struct scatterlist *sg; + LIST_HEAD(objects); + u64 size; + int err = 0; + + /* + * While we may create very large contiguous blocks, we may need + * to break those down for consumption elsewhere. In particular, + * dma-mapping with scatterlist elements have an implicit limit of + * UINT_MAX on each element. 
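The nents != 1 check replaced in igt_mock_contiguous() above was too strict: dma mapping may split one physically contiguous allocation into several capped segments. A sketch of the adjacency walk (the same logic as the is_contiguous() helper added above), with hypothetical addresses in the comment:

/*
 * Hypothetical table for one contiguous 3 MiB object split into 1 MiB
 * dma segments: {0x10000000, SZ_1M}, {0x10100000, SZ_1M},
 * {0x10200000, SZ_1M}. nents != 1 would reject it; the walk below
 * accepts it because every entry starts where the previous one ended.
 */
static bool example_is_contiguous(struct sg_table *st)
{
	struct scatterlist *sg;
	dma_addr_t next = -1;

	for (sg = st->sgl; sg; sg = sg_next(sg)) {
		if (next != (dma_addr_t)-1 && sg_dma_address(sg) != next)
			return false;

		next = sg_dma_address(sg) + sg_dma_len(sg);
	}

	return true;
}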
+ */ + + size = SZ_8G; + mem = mock_region_create(i915, 0, size, PAGE_SIZE, 0); + if (IS_ERR(mem)) + return PTR_ERR(mem); + + obj = igt_object_create(mem, &objects, size, 0); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_put; + } + + size = 0; + list_for_each_entry(block, &obj->mm.blocks, link) { + if (i915_buddy_block_size(&mem->mm, block) > size) + size = i915_buddy_block_size(&mem->mm, block); + } + if (size < max_segment) { + pr_err("%s: Failed to create a huge contiguous block [> %u], largest block %lld\n", + __func__, max_segment, size); + err = -EINVAL; + goto out_close; + } + + for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) { + if (sg->length > max_segment) { + pr_err("%s: Created an oversized scatterlist entry, %u > %u\n", + __func__, sg->length, max_segment); + err = -EINVAL; + goto out_close; + } + } + +out_close: + close_objects(mem, &objects); +out_put: + intel_memory_region_put(mem); + return err; +} + static int igt_gpu_write_dw(struct intel_context *ce, struct i915_vma *vma, u32 dword, @@ -775,14 +852,22 @@ static int _perf_memcpy(struct intel_memory_region *src_mr, } sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL); + if (t[0] <= 0) { + /* ignore the impossible to protect our sanity */ + pr_debug("Skipping %s src(%s, %s) -> dst(%s, %s) %14s %4lluKiB copy, unstable measurement [%lld, %lld]\n", + __func__, + src_mr->name, repr_type(src_type), + dst_mr->name, repr_type(dst_type), + tests[i].name, size >> 10, + t[0], t[4]); + continue; + } + pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n", __func__, - src_mr->name, - repr_type(src_type), - dst_mr->name, - repr_type(dst_type), - tests[i].name, - size >> 10, + src_mr->name, repr_type(src_type), + dst_mr->name, repr_type(dst_type), + tests[i].name, size >> 10, div64_u64(mul_u32_u32(4 * size, 1000 * 1000 * 1000), t[1] + 2 * t[2] + t[3]) >> 20); @@ -848,6 +933,7 @@ int intel_memory_region_mock_selftests(void) SUBTEST(igt_mock_fill), SUBTEST(igt_mock_contiguous), SUBTEST(igt_mock_splintered_region), + SUBTEST(igt_mock_max_segment), }; struct intel_memory_region *mem; struct drm_i915_private *i915; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index e946bd2087d8..0188f877cab2 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -64,8 +64,6 @@ static void mock_device_release(struct drm_device *dev) mock_device_flush(i915); intel_gt_driver_remove(&i915->gt); - i915_gem_driver_release__contexts(i915); - i915_gem_drain_workqueue(i915); i915_gem_drain_freed_objects(i915); diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 6231048aa5aa..b5fa0e45a839 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -28,6 +28,7 @@ config DRM_IMX_TVE config DRM_IMX_LDB tristate "Support for LVDS displays" depends on DRM_IMX && MFD_SYSCON + depends on COMMON_CLK select DRM_PANEL help Choose this to enable the internal LVDS Display Bridge (LDB) @@ -36,7 +37,7 @@ config DRM_IMX_LDB config DRM_IMX_HDMI tristate "Freescale i.MX DRM HDMI" select DRM_DW_HDMI - depends on DRM_IMX + depends on DRM_IMX && OF help Choose this if you want to use HDMI on i.MX6. 
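For reference, the throughput line in the _perf_memcpy() hunk above reduces to a trimmed, median-weighted average: five sorted samples, drop the fastest and slowest, count the median twice. A sketch restating the arithmetic (the t[0] <= 0 guard added above keeps the divisor sane):

static u64 example_mib_per_s(const ktime_t t[5], u32 size)
{
	/*
	 * t[] is sorted; t[1] + 2*t[2] + t[3] is four samples' worth of
	 * nanoseconds for copying `size` bytes each, so bytes * 1e9 / ns
	 * gives bytes per second, and >> 20 scales that to MiB/s.
	 */
	return div64_u64(mul_u32_u32(4 * size, 1000 * 1000 * 1000),
			 t[1] + 2 * t[2] + t[3]) >> 20;
}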
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.h b/drivers/gpu/drm/imx/dcss/dcss-dev.h index c642ae17837f..1e582270c6ea 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-dev.h +++ b/drivers/gpu/drm/imx/dcss/dcss-dev.h @@ -7,6 +7,7 @@ #define __DCSS_PRV_H__ #include <drm/drm_fourcc.h> +#include <drm/drm_plane.h> #include <linux/io.h> #include <video/videomode.h> @@ -165,6 +166,8 @@ void dcss_ss_sync_set(struct dcss_ss *ss, struct videomode *vm, /* SCALER */ int dcss_scaler_init(struct dcss_dev *dcss, unsigned long scaler_base); void dcss_scaler_exit(struct dcss_scaler *scl); +void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num, + enum drm_scaling_filter scaling_filter); void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num, const struct drm_format_info *format, int src_xres, int src_yres, int dst_xres, int dst_yres, diff --git a/drivers/gpu/drm/imx/dcss/dcss-plane.c b/drivers/gpu/drm/imx/dcss/dcss-plane.c index e13652e3a115..03ba88f7f995 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-plane.c +++ b/drivers/gpu/drm/imx/dcss/dcss-plane.c @@ -103,15 +103,15 @@ static bool dcss_plane_can_rotate(const struct drm_format_info *format, bool mod_present, u64 modifier, unsigned int rotation) { - bool linear_format = !mod_present || - (mod_present && modifier == DRM_FORMAT_MOD_LINEAR); + bool linear_format = !mod_present || modifier == DRM_FORMAT_MOD_LINEAR; u32 supported_rotation = DRM_MODE_ROTATE_0; if (!format->is_yuv && linear_format) supported_rotation = DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | DRM_MODE_REFLECT_MASK; else if (!format->is_yuv && - modifier == DRM_FORMAT_MOD_VIVANTE_TILED) + (modifier == DRM_FORMAT_MOD_VIVANTE_TILED || + modifier == DRM_FORMAT_MOD_VIVANTE_SUPER_TILED)) supported_rotation = DRM_MODE_ROTATE_MASK | DRM_MODE_REFLECT_MASK; else if (format->is_yuv && linear_format && @@ -257,7 +257,8 @@ static bool dcss_plane_needs_setup(struct drm_plane_state *state, state->src_h != old_state->src_h || fb->format->format != old_fb->format->format || fb->modifier != old_fb->modifier || - state->rotation != old_state->rotation; + state->rotation != old_state->rotation || + state->scaling_filter != old_state->scaling_filter; } static void dcss_plane_atomic_update(struct drm_plane *plane, @@ -272,6 +273,7 @@ static void dcss_plane_atomic_update(struct drm_plane *plane, u32 src_w, src_h, dst_w, dst_h; struct drm_rect src, dst; bool enable = true; + bool is_rotation_90_or_270; if (!fb || !state->crtc || !state->visible) return; @@ -309,8 +311,16 @@ static void dcss_plane_atomic_update(struct drm_plane *plane, dcss_plane_atomic_set_base(dcss_plane); + is_rotation_90_or_270 = state->rotation & (DRM_MODE_ROTATE_90 | + DRM_MODE_ROTATE_270); + + dcss_scaler_set_filter(dcss->scaler, dcss_plane->ch_num, + state->scaling_filter); + dcss_scaler_setup(dcss->scaler, dcss_plane->ch_num, - state->fb->format, src_w, src_h, + state->fb->format, + is_rotation_90_or_270 ? src_h : src_w, + is_rotation_90_or_270 ? 
src_w : src_h, dst_w, dst_h, drm_mode_vrefresh(&crtc_state->mode)); @@ -388,6 +398,10 @@ struct dcss_plane *dcss_plane_init(struct drm_device *drm, if (ret) return ERR_PTR(ret); + drm_plane_create_scaling_filter_property(&dcss_plane->base, + BIT(DRM_SCALING_FILTER_DEFAULT) | + BIT(DRM_SCALING_FILTER_NEAREST_NEIGHBOR)); + drm_plane_create_rotation_property(&dcss_plane->base, DRM_MODE_ROTATE_0, DRM_MODE_ROTATE_0 | diff --git a/drivers/gpu/drm/imx/dcss/dcss-scaler.c b/drivers/gpu/drm/imx/dcss/dcss-scaler.c index cd21905de580..47852b9dd5ea 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-scaler.c +++ b/drivers/gpu/drm/imx/dcss/dcss-scaler.c @@ -77,6 +77,8 @@ struct dcss_scaler_ch { u32 c_vstart; u32 c_hstart; + + bool use_nn_interpolation; }; struct dcss_scaler { @@ -243,6 +245,17 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps, } } +static void dcss_scaler_nearest_neighbor_filter(bool use_5_taps, + int coef[][PSC_NUM_TAPS]) +{ + int i, j; + + for (i = 0; i < PSC_STORED_PHASES; i++) + for (j = 0; j < PSC_NUM_TAPS; j++) + coef[i][j] = j == PSC_NUM_TAPS >> 1 ? + (1 << PSC_COEFF_PRECISION) : 0; +} + /** * dcss_scaler_filter_design() - Compute filter coefficients using * Gaussian filter. @@ -253,7 +266,8 @@ static void dcss_scaler_gaussian_filter(int fc_q, bool use_5_taps, */ static void dcss_scaler_filter_design(int src_length, int dst_length, bool use_5_taps, bool phase0_identity, - int coef[][PSC_NUM_TAPS]) + int coef[][PSC_NUM_TAPS], + bool nn_interpolation) { int fc_q; @@ -263,8 +277,11 @@ static void dcss_scaler_filter_design(int src_length, int dst_length, else fc_q = div_q(dst_length, src_length * PSC_NUM_PHASES); - /* compute gaussian filter coefficients */ - dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef); + if (nn_interpolation) + dcss_scaler_nearest_neighbor_filter(use_5_taps, coef); + else + /* compute gaussian filter coefficients */ + dcss_scaler_gaussian_filter(fc_q, use_5_taps, phase0_identity, coef); } static void dcss_scaler_write(struct dcss_scaler_ch *ch, u32 val, u32 ofs) @@ -653,12 +670,14 @@ static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch, /* horizontal luma */ dcss_scaler_filter_design(src_xres, dst_xres, false, - src_xres == dst_xres, coef); + src_xres == dst_xres, coef, + ch->use_nn_interpolation); dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef); /* vertical luma */ dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps, - src_yres == dst_yres, coef); + src_yres == dst_yres, coef, + ch->use_nn_interpolation); if (program_5_taps) dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); @@ -678,14 +697,14 @@ static void dcss_scaler_yuv_coef_set(struct dcss_scaler_ch *ch, /* horizontal chroma */ dcss_scaler_filter_design(src_xres, dst_xres, false, (src_xres == dst_xres) && (ch->c_hstart == 0), - coef); + coef, ch->use_nn_interpolation); dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HCHR, coef); /* vertical chroma */ dcss_scaler_filter_design(src_yres, dst_yres, program_5_taps, (src_yres == dst_yres) && (ch->c_vstart == 0), - coef); + coef, ch->use_nn_interpolation); if (program_5_taps) dcss_scaler_program_5_coef_set(ch, DCSS_SCALER_COEF_VCHR, coef); else @@ -700,12 +719,14 @@ static void dcss_scaler_rgb_coef_set(struct dcss_scaler_ch *ch, /* horizontal RGB */ dcss_scaler_filter_design(src_xres, dst_xres, false, - src_xres == dst_xres, coef); + src_xres == dst_xres, coef, + ch->use_nn_interpolation); dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_HLUM, coef); /* vertical RGB */ 
dcss_scaler_filter_design(src_yres, dst_yres, false, - src_yres == dst_yres, coef); + src_yres == dst_yres, coef, + ch->use_nn_interpolation); dcss_scaler_program_7_coef_set(ch, DCSS_SCALER_COEF_VLUM, coef); } @@ -751,6 +772,14 @@ static void dcss_scaler_set_rgb10_order(struct dcss_scaler_ch *ch, ch->sdata_ctrl |= a2r10g10b10_format << A2R10G10B10_FORMAT_POS; } +void dcss_scaler_set_filter(struct dcss_scaler *scl, int ch_num, + enum drm_scaling_filter scaling_filter) +{ + struct dcss_scaler_ch *ch = &scl->ch[ch_num]; + + ch->use_nn_interpolation = scaling_filter == DRM_SCALING_FILTER_NEAREST_NEIGHBOR; +} + void dcss_scaler_setup(struct dcss_scaler *scl, int ch_num, const struct drm_format_info *format, int src_xres, int src_yres, int dst_xres, int dst_yres, diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c index d07b39b8afd2..87428fb23d9f 100644 --- a/drivers/gpu/drm/imx/dw_hdmi-imx.c +++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c @@ -15,23 +15,32 @@ #include <drm/bridge/dw_hdmi.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_encoder.h> +#include <drm/drm_managed.h> #include <drm/drm_of.h> #include <drm/drm_simple_kms_helper.h> #include "imx-drm.h" +struct imx_hdmi; + +struct imx_hdmi_encoder { + struct drm_encoder encoder; + struct imx_hdmi *hdmi; +}; + struct imx_hdmi { struct device *dev; - struct drm_encoder encoder; + struct drm_bridge *bridge; struct dw_hdmi *hdmi; struct regmap *regmap; }; static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e) { - return container_of(e, struct imx_hdmi, encoder); + return container_of(e, struct imx_hdmi_encoder, encoder)->hdmi; } static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = { @@ -98,19 +107,6 @@ static const struct dw_hdmi_phy_config imx_phy_config[] = { { ~0UL, 0x0000, 0x0000, 0x0000} }; -static int dw_hdmi_imx_parse_dt(struct imx_hdmi *hdmi) -{ - struct device_node *np = hdmi->dev->of_node; - - hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr"); - if (IS_ERR(hdmi->regmap)) { - dev_err(hdmi->dev, "Unable to get gpr\n"); - return PTR_ERR(hdmi->regmap); - } - - return 0; -} - static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder) { struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder); @@ -195,65 +191,36 @@ MODULE_DEVICE_TABLE(of, dw_hdmi_imx_dt_ids); static int dw_hdmi_imx_bind(struct device *dev, struct device *master, void *data) { - struct platform_device *pdev = to_platform_device(dev); - const struct dw_hdmi_plat_data *plat_data; - const struct of_device_id *match; struct drm_device *drm = data; + struct imx_hdmi_encoder *hdmi_encoder; struct drm_encoder *encoder; - struct imx_hdmi *hdmi; int ret; - if (!pdev->dev.of_node) - return -ENODEV; + hdmi_encoder = drmm_simple_encoder_alloc(drm, struct imx_hdmi_encoder, + encoder, DRM_MODE_ENCODER_TMDS); + if (IS_ERR(hdmi_encoder)) + return PTR_ERR(hdmi_encoder); - hdmi = dev_get_drvdata(dev); - memset(hdmi, 0, sizeof(*hdmi)); - - match = of_match_node(dw_hdmi_imx_dt_ids, pdev->dev.of_node); - plat_data = match->data; - hdmi->dev = &pdev->dev; - encoder = &hdmi->encoder; + hdmi_encoder->hdmi = dev_get_drvdata(dev); + encoder = &hdmi_encoder->encoder; ret = imx_drm_encoder_parse_of(drm, encoder, dev->of_node); if (ret) return ret; - ret = dw_hdmi_imx_parse_dt(hdmi); - if (ret < 0) - return ret; - drm_encoder_helper_add(encoder, &dw_hdmi_imx_encoder_helper_funcs); - drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); - - hdmi->hdmi = dw_hdmi_bind(pdev, encoder, 
plat_data); - /* - * If dw_hdmi_bind() fails we'll never call dw_hdmi_unbind(), - * which would have called the encoder cleanup. Do it manually. - */ - if (IS_ERR(hdmi->hdmi)) { - ret = PTR_ERR(hdmi->hdmi); - drm_encoder_cleanup(encoder); - } - - return ret; -} - -static void dw_hdmi_imx_unbind(struct device *dev, struct device *master, - void *data) -{ - struct imx_hdmi *hdmi = dev_get_drvdata(dev); - - dw_hdmi_unbind(hdmi->hdmi); + return drm_bridge_attach(encoder, hdmi_encoder->hdmi->bridge, NULL, 0); } static const struct component_ops dw_hdmi_imx_ops = { .bind = dw_hdmi_imx_bind, - .unbind = dw_hdmi_imx_unbind, }; static int dw_hdmi_imx_probe(struct platform_device *pdev) { + struct device_node *np = pdev->dev.of_node; + const struct of_device_id *match = of_match_node(dw_hdmi_imx_dt_ids, np); struct imx_hdmi *hdmi; hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL); @@ -261,13 +228,33 @@ static int dw_hdmi_imx_probe(struct platform_device *pdev) return -ENOMEM; platform_set_drvdata(pdev, hdmi); + hdmi->dev = &pdev->dev; + + hdmi->regmap = syscon_regmap_lookup_by_phandle(np, "gpr"); + if (IS_ERR(hdmi->regmap)) { + dev_err(hdmi->dev, "Unable to get gpr\n"); + return PTR_ERR(hdmi->regmap); + } + + hdmi->hdmi = dw_hdmi_probe(pdev, match->data); + if (IS_ERR(hdmi->hdmi)) + return PTR_ERR(hdmi->hdmi); + + hdmi->bridge = of_drm_find_bridge(np); + if (!hdmi->bridge) { + dev_err(hdmi->dev, "Unable to find bridge\n"); + return -ENODEV; + } return component_add(&pdev->dev, &dw_hdmi_imx_ops); } static int dw_hdmi_imx_remove(struct platform_device *pdev) { + struct imx_hdmi *hdmi = platform_get_drvdata(pdev); + component_del(&pdev->dev, &dw_hdmi_imx_ops); + dw_hdmi_remove(hdmi->hdmi); return 0; } diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 41e2978cb1eb..dbfe39e2f7f6 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -22,6 +22,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> @@ -47,12 +48,18 @@ #define LDB_DI1_VS_POL_ACT_LOW (1 << 10) #define LDB_BGREF_RMODE_INT (1 << 15) +struct imx_ldb_channel; + +struct imx_ldb_encoder { + struct drm_connector connector; + struct drm_encoder encoder; + struct imx_ldb_channel *channel; +}; + struct imx_ldb; struct imx_ldb_channel { struct imx_ldb *ldb; - struct drm_connector connector; - struct drm_encoder encoder; /* Defines what is connected to the ldb, only one at a time */ struct drm_panel *panel; @@ -70,12 +77,12 @@ struct imx_ldb_channel { static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c) { - return container_of(c, struct imx_ldb_channel, connector); + return container_of(c, struct imx_ldb_encoder, connector)->channel; } static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e) { - return container_of(e, struct imx_ldb_channel, encoder); + return container_of(e, struct imx_ldb_encoder, encoder)->channel; } struct bus_mux { @@ -411,9 +418,20 @@ static int imx_ldb_register(struct drm_device *drm, struct imx_ldb_channel *imx_ldb_ch) { struct imx_ldb *ldb = imx_ldb_ch->ldb; - struct drm_encoder *encoder = &imx_ldb_ch->encoder; + struct imx_ldb_encoder *ldb_encoder; + struct drm_connector *connector; + struct drm_encoder *encoder; int ret; + ldb_encoder = drmm_simple_encoder_alloc(drm, struct imx_ldb_encoder, + encoder, DRM_MODE_ENCODER_LVDS); + if (IS_ERR(ldb_encoder)) + return 
PTR_ERR(ldb_encoder); + + ldb_encoder->channel = imx_ldb_ch; + connector = &ldb_encoder->connector; + encoder = &ldb_encoder->encoder; + ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child); if (ret) return ret; @@ -429,11 +447,9 @@ static int imx_ldb_register(struct drm_device *drm, } drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs); - drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_LVDS); if (imx_ldb_ch->bridge) { - ret = drm_bridge_attach(&imx_ldb_ch->encoder, - imx_ldb_ch->bridge, NULL, 0); + ret = drm_bridge_attach(encoder, imx_ldb_ch->bridge, NULL, 0); if (ret) { DRM_ERROR("Failed to initialize bridge with drm\n"); return ret; @@ -445,13 +461,13 @@ static int imx_ldb_register(struct drm_device *drm, * historical reasons, the ldb driver can also work without * a panel. */ - drm_connector_helper_add(&imx_ldb_ch->connector, - &imx_ldb_connector_helper_funcs); - drm_connector_init_with_ddc(drm, &imx_ldb_ch->connector, + drm_connector_helper_add(connector, + &imx_ldb_connector_helper_funcs); + drm_connector_init_with_ddc(drm, connector, &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS, imx_ldb_ch->ddc); - drm_connector_attach_encoder(&imx_ldb_ch->connector, encoder); + drm_connector_attach_encoder(connector, encoder); } return 0; @@ -559,17 +575,42 @@ static int imx_ldb_panel_ddc(struct device *dev, static int imx_ldb_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; + struct imx_ldb *imx_ldb = dev_get_drvdata(dev); + int ret; + int i; + + for (i = 0; i < 2; i++) { + struct imx_ldb_channel *channel = &imx_ldb->channel[i]; + + if (!channel->ldb) + break; + + ret = imx_ldb_register(drm, channel); + if (ret) + return ret; + } + + return 0; +} + +static const struct component_ops imx_ldb_ops = { + .bind = imx_ldb_bind, +}; + +static int imx_ldb_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; - const struct of_device_id *of_id = - of_match_device(imx_ldb_dt_ids, dev); + const struct of_device_id *of_id = of_match_device(imx_ldb_dt_ids, dev); struct device_node *child; struct imx_ldb *imx_ldb; int dual; int ret; int i; - imx_ldb = dev_get_drvdata(dev); - memset(imx_ldb, 0, sizeof(*imx_ldb)); + imx_ldb = devm_kzalloc(dev, sizeof(*imx_ldb), GFP_KERNEL); + if (!imx_ldb) + return -ENOMEM; imx_ldb->regmap = syscon_regmap_lookup_by_phandle(np, "gpr"); if (IS_ERR(imx_ldb->regmap)) { @@ -669,25 +710,20 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data) } channel->bus_format = bus_format; channel->child = child; - - ret = imx_ldb_register(drm, channel); - if (ret) { - channel->child = NULL; - goto free_child; - } } - return 0; + platform_set_drvdata(pdev, imx_ldb); + + return component_add(&pdev->dev, &imx_ldb_ops); free_child: of_node_put(child); return ret; } -static void imx_ldb_unbind(struct device *dev, struct device *master, - void *data) +static int imx_ldb_remove(struct platform_device *pdev) { - struct imx_ldb *imx_ldb = dev_get_drvdata(dev); + struct imx_ldb *imx_ldb = platform_get_drvdata(pdev); int i; for (i = 0; i < 2; i++) { @@ -696,28 +732,7 @@ static void imx_ldb_unbind(struct device *dev, struct device *master, kfree(channel->edid); i2c_put_adapter(channel->ddc); } -} - -static const struct component_ops imx_ldb_ops = { - .bind = imx_ldb_bind, - .unbind = imx_ldb_unbind, -}; - -static int imx_ldb_probe(struct platform_device *pdev) -{ - struct imx_ldb *imx_ldb; - - imx_ldb = devm_kzalloc(&pdev->dev, sizeof(*imx_ldb), 
GFP_KERNEL); - if (!imx_ldb) - return -ENOMEM; - - platform_set_drvdata(pdev, imx_ldb); - - return component_add(&pdev->dev, &imx_ldb_ops); -} -static int imx_ldb_remove(struct platform_device *pdev) -{ component_del(&pdev->dev, &imx_ldb_ops); return 0; } diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 2a8d2e32e7b4..bc8c3f802a15 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -19,6 +19,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> @@ -99,9 +100,13 @@ enum { TVE_MODE_VGA, }; -struct imx_tve { +struct imx_tve_encoder { struct drm_connector connector; struct drm_encoder encoder; + struct imx_tve *tve; +}; + +struct imx_tve { struct device *dev; int mode; int di_hsync_pin; @@ -118,12 +123,12 @@ struct imx_tve { static inline struct imx_tve *con_to_tve(struct drm_connector *c) { - return container_of(c, struct imx_tve, connector); + return container_of(c, struct imx_tve_encoder, connector)->tve; } static inline struct imx_tve *enc_to_tve(struct drm_encoder *e) { - return container_of(e, struct imx_tve, encoder); + return container_of(e, struct imx_tve_encoder, encoder)->tve; } static void tve_enable(struct imx_tve *tve) @@ -418,7 +423,7 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem *base) init.parent_names = (const char **)&tve_di_parent; tve->clk_hw_di.init = &init; - tve->di_clk = clk_register(tve->dev, &tve->clk_hw_di); + tve->di_clk = devm_clk_register(tve->dev, &tve->clk_hw_di); if (IS_ERR(tve->di_clk)) { dev_err(tve->dev, "failed to register TVE output clock: %ld\n", PTR_ERR(tve->di_clk)); @@ -428,33 +433,6 @@ static int tve_clk_init(struct imx_tve *tve, void __iomem *base) return 0; } -static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve) -{ - int encoder_type; - int ret; - - encoder_type = tve->mode == TVE_MODE_VGA ? - DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC; - - ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node); - if (ret) - return ret; - - drm_encoder_helper_add(&tve->encoder, &imx_tve_encoder_helper_funcs); - drm_simple_encoder_init(drm, &tve->encoder, encoder_type); - - drm_connector_helper_add(&tve->connector, - &imx_tve_connector_helper_funcs); - drm_connector_init_with_ddc(drm, &tve->connector, - &imx_tve_connector_funcs, - DRM_MODE_CONNECTOR_VGA, - tve->ddc); - - drm_connector_attach_encoder(&tve->connector, &tve->encoder); - - return 0; -} - static void imx_tve_disable_regulator(void *data) { struct imx_tve *tve = data; @@ -502,8 +480,49 @@ static int of_get_tve_mode(struct device_node *np) static int imx_tve_bind(struct device *dev, struct device *master, void *data) { - struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = data; + struct imx_tve *tve = dev_get_drvdata(dev); + struct imx_tve_encoder *tvee; + struct drm_encoder *encoder; + struct drm_connector *connector; + int encoder_type; + int ret; + + encoder_type = tve->mode == TVE_MODE_VGA ? 
+ DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC; + + tvee = drmm_simple_encoder_alloc(drm, struct imx_tve_encoder, encoder, + encoder_type); + if (IS_ERR(tvee)) + return PTR_ERR(tvee); + + tvee->tve = tve; + encoder = &tvee->encoder; + connector = &tvee->connector; + + ret = imx_drm_encoder_parse_of(drm, encoder, tve->dev->of_node); + if (ret) + return ret; + + drm_encoder_helper_add(encoder, &imx_tve_encoder_helper_funcs); + + drm_connector_helper_add(connector, &imx_tve_connector_helper_funcs); + ret = drm_connector_init_with_ddc(drm, connector, + &imx_tve_connector_funcs, + DRM_MODE_CONNECTOR_VGA, tve->ddc); + if (ret) + return ret; + + return drm_connector_attach_encoder(connector, encoder); +} + +static const struct component_ops imx_tve_ops = { + .bind = imx_tve_bind, +}; + +static int imx_tve_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct device_node *ddc_node; struct imx_tve *tve; @@ -513,8 +532,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) int irq; int ret; - tve = dev_get_drvdata(dev); - memset(tve, 0, sizeof(*tve)); + tve = devm_kzalloc(dev, sizeof(*tve), GFP_KERNEL); + if (!tve) + return -ENOMEM; tve->dev = dev; @@ -621,28 +641,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - ret = imx_tve_register(drm, tve); - if (ret) - return ret; - - return 0; -} - -static const struct component_ops imx_tve_ops = { - .bind = imx_tve_bind, -}; - -static int imx_tve_probe(struct platform_device *pdev) -{ - struct imx_tve *tve; - - tve = devm_kzalloc(&pdev->dev, sizeof(*tve), GFP_KERNEL); - if (!tve) - return -ENOMEM; - platform_set_drvdata(pdev, tve); - return component_add(&pdev->dev, &imx_tve_ops); + return component_add(dev, &imx_tve_ops); } static int imx_tve_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 7ebd99ee3240..e6431a227feb 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -20,6 +20,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -163,7 +164,6 @@ static void ipu_disable_vblank(struct drm_crtc *crtc) static const struct drm_crtc_funcs ipu_crtc_funcs = { .set_config = drm_atomic_helper_set_config, - .destroy = drm_crtc_cleanup, .page_flip = drm_atomic_helper_page_flip, .reset = imx_drm_crtc_reset, .atomic_duplicate_state = imx_drm_crtc_duplicate_state, @@ -322,73 +322,73 @@ static const struct drm_crtc_helper_funcs ipu_helper_funcs = { .atomic_enable = ipu_crtc_atomic_enable, }; -static void ipu_put_resources(struct ipu_crtc *ipu_crtc) +static void ipu_put_resources(struct drm_device *dev, void *ptr) { + struct ipu_crtc *ipu_crtc = ptr; + if (!IS_ERR_OR_NULL(ipu_crtc->dc)) ipu_dc_put(ipu_crtc->dc); if (!IS_ERR_OR_NULL(ipu_crtc->di)) ipu_di_put(ipu_crtc->di); } -static int ipu_get_resources(struct ipu_crtc *ipu_crtc, - struct ipu_client_platformdata *pdata) +static int ipu_get_resources(struct drm_device *dev, struct ipu_crtc *ipu_crtc, + struct ipu_client_platformdata *pdata) { struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); int ret; ipu_crtc->dc = ipu_dc_get(ipu, pdata->dc); - if (IS_ERR(ipu_crtc->dc)) { - ret = PTR_ERR(ipu_crtc->dc); - goto err_out; - } + if (IS_ERR(ipu_crtc->dc)) + return PTR_ERR(ipu_crtc->dc); + + ret = 
drmm_add_action_or_reset(dev, ipu_put_resources, ipu_crtc); + if (ret) + return ret; ipu_crtc->di = ipu_di_get(ipu, pdata->di); - if (IS_ERR(ipu_crtc->di)) { - ret = PTR_ERR(ipu_crtc->di); - goto err_out; - } + if (IS_ERR(ipu_crtc->di)) + return PTR_ERR(ipu_crtc->di); return 0; -err_out: - ipu_put_resources(ipu_crtc); - - return ret; } -static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, - struct ipu_client_platformdata *pdata, struct drm_device *drm) +static int ipu_drm_bind(struct device *dev, struct device *master, void *data) { - struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); - struct drm_crtc *crtc = &ipu_crtc->base; + struct ipu_client_platformdata *pdata = dev->platform_data; + struct ipu_soc *ipu = dev_get_drvdata(dev->parent); + struct drm_device *drm = data; + struct ipu_plane *primary_plane; + struct ipu_crtc *ipu_crtc; + struct drm_crtc *crtc; int dp = -EINVAL; int ret; - ret = ipu_get_resources(ipu_crtc, pdata); - if (ret) { - dev_err(ipu_crtc->dev, "getting resources failed with %d.\n", - ret); - return ret; - } - if (pdata->dp >= 0) dp = IPU_DP_FLOW_SYNC_BG; - ipu_crtc->plane[0] = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0, - DRM_PLANE_TYPE_PRIMARY); - if (IS_ERR(ipu_crtc->plane[0])) { - ret = PTR_ERR(ipu_crtc->plane[0]); - goto err_put_resources; - } + primary_plane = ipu_plane_init(drm, ipu, pdata->dma[0], dp, 0, + DRM_PLANE_TYPE_PRIMARY); + if (IS_ERR(primary_plane)) + return PTR_ERR(primary_plane); + + ipu_crtc = drmm_crtc_alloc_with_planes(drm, struct ipu_crtc, base, + &primary_plane->base, NULL, + &ipu_crtc_funcs, NULL); + if (IS_ERR(ipu_crtc)) + return PTR_ERR(ipu_crtc); + ipu_crtc->dev = dev; + ipu_crtc->plane[0] = primary_plane; + + crtc = &ipu_crtc->base; crtc->port = pdata->of_node; drm_crtc_helper_add(crtc, &ipu_helper_funcs); - drm_crtc_init_with_planes(drm, crtc, &ipu_crtc->plane[0]->base, NULL, - &ipu_crtc_funcs, NULL); - ret = ipu_plane_get_resources(ipu_crtc->plane[0]); + ret = ipu_get_resources(drm, ipu_crtc, pdata); if (ret) { - dev_err(ipu_crtc->dev, "getting plane 0 resources failed with %d.\n", + dev_err(ipu_crtc->dev, "getting resources failed with %d.\n", ret); - goto err_put_resources; + return ret; } /* If this crtc is using the DP, add an overlay plane */ @@ -397,16 +397,8 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, IPU_DP_FLOW_SYNC_FG, drm_crtc_mask(&ipu_crtc->base), DRM_PLANE_TYPE_OVERLAY); - if (IS_ERR(ipu_crtc->plane[1])) { + if (IS_ERR(ipu_crtc->plane[1])) ipu_crtc->plane[1] = NULL; - } else { - ret = ipu_plane_get_resources(ipu_crtc->plane[1]); - if (ret) { - dev_err(ipu_crtc->dev, "getting plane 1 " - "resources failed with %d.\n", ret); - goto err_put_plane0_res; - } - } } ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]); @@ -414,58 +406,21 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc, "imx_drm", ipu_crtc); if (ret < 0) { dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); - goto err_put_plane1_res; + return ret; } /* Only enable IRQ when we actually need it to trigger work. 
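The ipuv3-crtc and ipuv3-plane hunks here replace hand-rolled error unwinding (the err_put_* labels removed further down) with DRM-managed release actions: every successful acquisition immediately registers a callback that the DRM core invokes when the drm_device is released. A minimal sketch of the pattern, using a hypothetical acquire_thing()/release_thing() pair and struct my_state rather than the real IPU resource calls:

#include <drm/drm_managed.h>

struct my_state {
	struct thing *thing;	/* hypothetical resource handle */
};

static void my_release(struct drm_device *drm, void *ptr)
{
	struct my_state *s = ptr;

	/* runs automatically when the drm_device is torn down */
	release_thing(s->thing);
}

static int my_get(struct drm_device *drm, struct my_state *s)
{
	s->thing = acquire_thing();
	if (IS_ERR(s->thing))
		return PTR_ERR(s->thing);

	/* "or_reset": if registration fails, the release runs right away */
	return drmm_add_action_or_reset(drm, my_release, s);
}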
*/ disable_irq(ipu_crtc->irq); return 0; - -err_put_plane1_res: - if (ipu_crtc->plane[1]) - ipu_plane_put_resources(ipu_crtc->plane[1]); -err_put_plane0_res: - ipu_plane_put_resources(ipu_crtc->plane[0]); -err_put_resources: - ipu_put_resources(ipu_crtc); - - return ret; -} - -static int ipu_drm_bind(struct device *dev, struct device *master, void *data) -{ - struct ipu_client_platformdata *pdata = dev->platform_data; - struct drm_device *drm = data; - struct ipu_crtc *ipu_crtc; - - ipu_crtc = dev_get_drvdata(dev); - memset(ipu_crtc, 0, sizeof(*ipu_crtc)); - - ipu_crtc->dev = dev; - - return ipu_crtc_init(ipu_crtc, pdata, drm); -} - -static void ipu_drm_unbind(struct device *dev, struct device *master, - void *data) -{ - struct ipu_crtc *ipu_crtc = dev_get_drvdata(dev); - - ipu_put_resources(ipu_crtc); - if (ipu_crtc->plane[1]) - ipu_plane_put_resources(ipu_crtc->plane[1]); - ipu_plane_put_resources(ipu_crtc->plane[0]); } static const struct component_ops ipu_crtc_ops = { .bind = ipu_drm_bind, - .unbind = ipu_drm_unbind, }; static int ipu_drm_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct ipu_crtc *ipu_crtc; int ret; if (!dev->platform_data) @@ -475,12 +430,6 @@ static int ipu_drm_probe(struct platform_device *pdev) if (ret) return ret; - ipu_crtc = devm_kzalloc(dev, sizeof(*ipu_crtc), GFP_KERNEL); - if (!ipu_crtc) - return -ENOMEM; - - dev_set_drvdata(dev, ipu_crtc); - return component_add(dev, &ipu_crtc_ops); } diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 8a4235d9d9f1..075508051b5f 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -11,6 +11,7 @@ #include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_plane_helper.h> #include <video/imx-ipu-v3.h> @@ -142,8 +143,10 @@ drm_plane_state_to_vbo(struct drm_plane_state *state) fb->format->cpp[2] * x - eba; } -void ipu_plane_put_resources(struct ipu_plane *ipu_plane) +static void ipu_plane_put_resources(struct drm_device *dev, void *ptr) { + struct ipu_plane *ipu_plane = ptr; + if (!IS_ERR_OR_NULL(ipu_plane->dp)) ipu_dp_put(ipu_plane->dp); if (!IS_ERR_OR_NULL(ipu_plane->dmfc)) @@ -154,7 +157,8 @@ void ipu_plane_put_resources(struct ipu_plane *ipu_plane) ipu_idmac_put(ipu_plane->alpha_ch); } -int ipu_plane_get_resources(struct ipu_plane *ipu_plane) +static int ipu_plane_get_resources(struct drm_device *dev, + struct ipu_plane *ipu_plane) { int ret; int alpha_ch; @@ -166,6 +170,10 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane) return ret; } + ret = drmm_add_action_or_reset(dev, ipu_plane_put_resources, ipu_plane); + if (ret) + return ret; + alpha_ch = ipu_channel_alpha_channel(ipu_plane->dma); if (alpha_ch >= 0) { ipu_plane->alpha_ch = ipu_idmac_get(ipu_plane->ipu, alpha_ch); @@ -181,7 +189,7 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane) if (IS_ERR(ipu_plane->dmfc)) { ret = PTR_ERR(ipu_plane->dmfc); DRM_ERROR("failed to get dmfc: ret %d\n", ret); - goto err_out; + return ret; } if (ipu_plane->dp_flow >= 0) { @@ -189,15 +197,11 @@ int ipu_plane_get_resources(struct ipu_plane *ipu_plane) if (IS_ERR(ipu_plane->dp)) { ret = PTR_ERR(ipu_plane->dp); DRM_ERROR("failed to get dp flow: %d\n", ret); - goto err_out; + return ret; } } return 0; -err_out: - ipu_plane_put_resources(ipu_plane); - - return ret; } static bool ipu_plane_separate_alpha(struct ipu_plane *ipu_plane) @@ -262,16 +266,6 @@ void 
ipu_plane_disable_deferred(struct drm_plane *plane) } EXPORT_SYMBOL_GPL(ipu_plane_disable_deferred); -static void ipu_plane_destroy(struct drm_plane *plane) -{ - struct ipu_plane *ipu_plane = to_ipu_plane(plane); - - DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); - - drm_plane_cleanup(plane); - kfree(ipu_plane); -} - static void ipu_plane_state_reset(struct drm_plane *plane) { unsigned int zpos = (plane->type == DRM_PLANE_TYPE_PRIMARY) ? 0 : 1; @@ -336,7 +330,6 @@ static bool ipu_plane_format_mod_supported(struct drm_plane *plane, static const struct drm_plane_funcs ipu_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = ipu_plane_destroy, .reset = ipu_plane_state_reset, .atomic_duplicate_state = ipu_plane_duplicate_state, .atomic_destroy_state = ipu_plane_destroy_state, @@ -834,10 +827,15 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, DRM_DEBUG_KMS("channel %d, dp flow %d, possible_crtcs=0x%x\n", dma, dp, possible_crtcs); - ipu_plane = kzalloc(sizeof(*ipu_plane), GFP_KERNEL); - if (!ipu_plane) { - DRM_ERROR("failed to allocate plane\n"); - return ERR_PTR(-ENOMEM); + ipu_plane = drmm_universal_plane_alloc(dev, struct ipu_plane, base, + possible_crtcs, &ipu_plane_funcs, + ipu_plane_formats, + ARRAY_SIZE(ipu_plane_formats), + modifiers, type, NULL); + if (IS_ERR(ipu_plane)) { + DRM_ERROR("failed to allocate and initialize %s plane\n", + zpos ? "overlay" : "primary"); + return ipu_plane; } ipu_plane->ipu = ipu; @@ -847,22 +845,23 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, if (ipu_prg_present(ipu)) modifiers = pre_format_modifiers; - ret = drm_universal_plane_init(dev, &ipu_plane->base, possible_crtcs, - &ipu_plane_funcs, ipu_plane_formats, - ARRAY_SIZE(ipu_plane_formats), - modifiers, type, NULL); - if (ret) { - DRM_ERROR("failed to initialize plane\n"); - kfree(ipu_plane); - return ERR_PTR(ret); - } - drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs); if (dp == IPU_DP_FLOW_SYNC_BG || dp == IPU_DP_FLOW_SYNC_FG) - drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0, 1); + ret = drm_plane_create_zpos_property(&ipu_plane->base, zpos, 0, + 1); else - drm_plane_create_zpos_immutable_property(&ipu_plane->base, 0); + ret = drm_plane_create_zpos_immutable_property(&ipu_plane->base, + 0); + if (ret) + return ERR_PTR(ret); + + ret = ipu_plane_get_resources(dev, ipu_plane); + if (ret) { + DRM_ERROR("failed to get %s plane resources: %pe\n", + zpos ? 
"overlay" : "primary", &ret); + return ERR_PTR(ret); + } return ipu_plane; } diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h index ffacbcdd2f98..6d544e6ce63f 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.h +++ b/drivers/gpu/drm/imx/ipuv3-plane.h @@ -41,9 +41,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc, uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h, bool interlaced); -int ipu_plane_get_resources(struct ipu_plane *plane); -void ipu_plane_put_resources(struct ipu_plane *plane); - int ipu_plane_irq(struct ipu_plane *plane); void ipu_plane_disable(struct ipu_plane *ipu_plane, bool disable_dp_channel); diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 2eb8df4697df..e0412e694fd9 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -15,6 +15,7 @@ #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> @@ -22,10 +23,14 @@ #include "imx-drm.h" -struct imx_parallel_display { +struct imx_parallel_display_encoder { struct drm_connector connector; struct drm_encoder encoder; struct drm_bridge bridge; + struct imx_parallel_display *pd; +}; + +struct imx_parallel_display { struct device *dev; void *edid; u32 bus_format; @@ -37,12 +42,12 @@ struct imx_parallel_display { static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c) { - return container_of(c, struct imx_parallel_display, connector); + return container_of(c, struct imx_parallel_display_encoder, connector)->pd; } static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b) { - return container_of(b, struct imx_parallel_display, bridge); + return container_of(b, struct imx_parallel_display_encoder, bridge)->pd; } static int imx_pd_connector_get_modes(struct drm_connector *connector) @@ -74,7 +79,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector) return ret; drm_mode_copy(mode, &imxpd->mode); - mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, + mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); num_modes++; } @@ -253,12 +258,26 @@ static const struct drm_bridge_funcs imx_pd_bridge_funcs = { .atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts, }; -static int imx_pd_register(struct drm_device *drm, - struct imx_parallel_display *imxpd) +static int imx_pd_bind(struct device *dev, struct device *master, void *data) { - struct drm_encoder *encoder = &imxpd->encoder; + struct drm_device *drm = data; + struct imx_parallel_display *imxpd = dev_get_drvdata(dev); + struct imx_parallel_display_encoder *imxpd_encoder; + struct drm_connector *connector; + struct drm_encoder *encoder; + struct drm_bridge *bridge; int ret; + imxpd_encoder = drmm_simple_encoder_alloc(drm, struct imx_parallel_display_encoder, + encoder, DRM_MODE_ENCODER_NONE); + if (IS_ERR(imxpd_encoder)) + return PTR_ERR(imxpd_encoder); + + imxpd_encoder->pd = imxpd; + connector = &imxpd_encoder->connector; + encoder = &imxpd_encoder->encoder; + bridge = &imxpd_encoder->bridge; + ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node); if (ret) return ret; @@ -268,39 +287,37 @@ static int imx_pd_register(struct drm_device *drm, * immediately since the current state is ON * at this point. 
*/ - imxpd->connector.dpms = DRM_MODE_DPMS_OFF; - - drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE); - - imxpd->bridge.funcs = &imx_pd_bridge_funcs; - drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0); + connector->dpms = DRM_MODE_DPMS_OFF; - if (!imxpd->next_bridge) { - drm_connector_helper_add(&imxpd->connector, - &imx_pd_connector_helper_funcs); - drm_connector_init(drm, &imxpd->connector, - &imx_pd_connector_funcs, - DRM_MODE_CONNECTOR_DPI); - } + bridge->funcs = &imx_pd_bridge_funcs; + drm_bridge_attach(encoder, bridge, NULL, 0); if (imxpd->next_bridge) { - ret = drm_bridge_attach(encoder, imxpd->next_bridge, - &imxpd->bridge, 0); + ret = drm_bridge_attach(encoder, imxpd->next_bridge, bridge, 0); if (ret < 0) { dev_err(imxpd->dev, "failed to attach bridge: %d\n", ret); return ret; } } else { - drm_connector_attach_encoder(&imxpd->connector, encoder); + drm_connector_helper_add(connector, + &imx_pd_connector_helper_funcs); + drm_connector_init(drm, connector, &imx_pd_connector_funcs, + DRM_MODE_CONNECTOR_DPI); + + drm_connector_attach_encoder(connector, encoder); } return 0; } -static int imx_pd_bind(struct device *dev, struct device *master, void *data) +static const struct component_ops imx_pd_ops = { + .bind = imx_pd_bind, +}; + +static int imx_pd_probe(struct platform_device *pdev) { - struct drm_device *drm = data; + struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; const u8 *edidp; struct imx_parallel_display *imxpd; @@ -309,8 +326,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) u32 bus_format = 0; const char *fmt; - imxpd = dev_get_drvdata(dev); - memset(imxpd, 0, sizeof(*imxpd)); + imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL); + if (!imxpd) + return -ENOMEM; /* port@1 is the output port */ ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, @@ -337,28 +355,9 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) imxpd->dev = dev; - ret = imx_pd_register(drm, imxpd); - if (ret) - return ret; - - return 0; -} - -static const struct component_ops imx_pd_ops = { - .bind = imx_pd_bind, -}; - -static int imx_pd_probe(struct platform_device *pdev) -{ - struct imx_parallel_display *imxpd; - - imxpd = devm_kzalloc(&pdev->dev, sizeof(*imxpd), GFP_KERNEL); - if (!imxpd) - return -ENOMEM; - platform_set_drvdata(pdev, imxpd); - return component_add(&pdev->dev, &imx_pd_ops); + return component_add(dev, &imx_pd_ops); } static int imx_pd_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig index 477d5387e43e..3b57f8be007c 100644 --- a/drivers/gpu/drm/ingenic/Kconfig +++ b/drivers/gpu/drm/ingenic/Kconfig @@ -4,6 +4,7 @@ config DRM_INGENIC depends on DRM depends on CMA depends on OF + depends on COMMON_CLK select DRM_BRIDGE select DRM_PANEL_BRIDGE select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 368bfef8b340..7bb31fbee29d 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -14,6 +14,7 @@ #include <linux/of_device.h> #include <linux/of_reserved_mem.h> #include <linux/platform_device.h> +#include <linux/pm.h> #include <linux/regmap.h> #include <drm/drm_atomic.h> @@ -190,15 +191,15 @@ static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv, { unsigned int vpe, vds, vde, vt, hpe, hds, hde, ht; - vpe = mode->vsync_end - mode->vsync_start; - vds = mode->vtotal - mode->vsync_start; - 
vde = vds + mode->vdisplay; - vt = vde + mode->vsync_start - mode->vdisplay; + vpe = mode->crtc_vsync_end - mode->crtc_vsync_start; + vds = mode->crtc_vtotal - mode->crtc_vsync_start; + vde = vds + mode->crtc_vdisplay; + vt = vde + mode->crtc_vsync_start - mode->crtc_vdisplay; - hpe = mode->hsync_end - mode->hsync_start; - hds = mode->htotal - mode->hsync_start; - hde = hds + mode->hdisplay; - ht = hde + mode->hsync_start - mode->hdisplay; + hpe = mode->crtc_hsync_end - mode->crtc_hsync_start; + hds = mode->crtc_htotal - mode->crtc_hsync_start; + hde = hds + mode->crtc_hdisplay; + ht = hde + mode->crtc_hsync_start - mode->crtc_hdisplay; regmap_write(priv->map, JZ_REG_LCD_VSYNC, 0 << JZ_LCD_VSYNC_VPS_OFFSET | @@ -333,7 +334,7 @@ static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_pending_vblank_event *event = crtc_state->event; if (drm_atomic_crtc_needs_modeset(crtc_state)) { - ingenic_drm_crtc_update_timings(priv, &crtc_state->mode); + ingenic_drm_crtc_update_timings(priv, &crtc_state->adjusted_mode); priv->update_clk_rate = true; } @@ -589,7 +590,7 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode = &crtc_state->adjusted_mode; struct drm_connector *conn = conn_state->connector; struct drm_display_info *info = &conn->display_info; - unsigned int cfg; + unsigned int cfg, rgbcfg = 0; priv->panel_is_sharp = info->bus_flags & DRM_BUS_FLAG_SHARP_SIGNALS; @@ -626,6 +627,9 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder, case MEDIA_BUS_FMT_RGB888_1X24: cfg |= JZ_LCD_CFG_MODE_GENERIC_24BIT; break; + case MEDIA_BUS_FMT_RGB888_3X8_DELTA: + rgbcfg = JZ_LCD_RGBC_EVEN_GBR | JZ_LCD_RGBC_ODD_RGB; + fallthrough; case MEDIA_BUS_FMT_RGB888_3X8: cfg |= JZ_LCD_CFG_MODE_8BIT_SERIAL; break; @@ -636,6 +640,7 @@ static void ingenic_drm_encoder_atomic_mode_set(struct drm_encoder *encoder, } regmap_write(priv->map, JZ_REG_LCD_CFG, cfg); + regmap_write(priv->map, JZ_REG_LCD_RGBC, rgbcfg); } static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder, @@ -643,6 +648,7 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder, struct drm_connector_state *conn_state) { struct drm_display_info *info = &conn_state->connector->display_info; + struct drm_display_mode *mode = &crtc_state->adjusted_mode; if (info->num_bus_formats != 1) return -EINVAL; @@ -651,10 +657,23 @@ static int ingenic_drm_encoder_atomic_check(struct drm_encoder *encoder, return 0; switch (*info->bus_formats) { + case MEDIA_BUS_FMT_RGB888_3X8: + case MEDIA_BUS_FMT_RGB888_3X8_DELTA: + /* + * The LCD controller expects timing values in dot-clock ticks, + * which is 3x the timing values in pixels when using a 3x8-bit + * display; but it will count the display area size in pixels + * either way. Go figure. 
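The crtc_* assignments just below are easier to follow with numbers plugged in. A worked example for a hypothetical 320x240 panel on the 3x8-bit serial bus (horizontal timings in pixels: hdisplay 320, hsync_start 336, hsync_end 338, htotal 352):

	crtc_clock       = clock * 3                /* dot clock ticks 3x per pixel  */
	crtc_hdisplay    = 320                      /* active area stays in pixels   */
	crtc_hsync_start = 336 * 3 - 320 * 2 = 368  /* 16 px front porch -> 48 ticks */
	crtc_hsync_end   = 338 * 3 - 320 * 2 = 374  /*  2 px sync width  ->  6 ticks */
	crtc_htotal      = 352 * 3 - 320 * 2 = 416  /* 14 px back porch  -> 42 ticks */

Subtracting hdisplay * 2 converts the blanking intervals to dot-clock ticks while leaving the display area itself counted in pixels, exactly as the comment above describes.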
+ */ + mode->crtc_clock = mode->clock * 3; + mode->crtc_hsync_start = mode->hsync_start * 3 - mode->hdisplay * 2; + mode->crtc_hsync_end = mode->hsync_end * 3 - mode->hdisplay * 2; + mode->crtc_hdisplay = mode->hdisplay; + mode->crtc_htotal = mode->htotal * 3 - mode->hdisplay * 2; + return 0; case MEDIA_BUS_FMT_RGB565_1X16: case MEDIA_BUS_FMT_RGB666_1X18: case MEDIA_BUS_FMT_RGB888_1X24: - case MEDIA_BUS_FMT_RGB888_3X8: return 0; default: return -EINVAL; @@ -755,8 +774,6 @@ static const struct drm_crtc_funcs ingenic_drm_crtc_funcs = { .enable_vblank = ingenic_drm_enable_vblank, .disable_vblank = ingenic_drm_disable_vblank, - - .gamma_set = drm_atomic_helper_legacy_gamma_set, }; static const struct drm_plane_helper_funcs ingenic_drm_plane_helper_funcs = { @@ -1167,6 +1184,22 @@ static int ingenic_drm_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused ingenic_drm_suspend(struct device *dev) +{ + struct ingenic_drm *priv = dev_get_drvdata(dev); + + return drm_mode_config_helper_suspend(&priv->drm); +} + +static int __maybe_unused ingenic_drm_resume(struct device *dev) +{ + struct ingenic_drm *priv = dev_get_drvdata(dev); + + return drm_mode_config_helper_resume(&priv->drm); +} + +static SIMPLE_DEV_PM_OPS(ingenic_drm_pm_ops, ingenic_drm_suspend, ingenic_drm_resume); + static const u32 jz4740_formats[] = { DRM_FORMAT_XRGB1555, DRM_FORMAT_RGB565, @@ -1246,6 +1279,7 @@ MODULE_DEVICE_TABLE(of, ingenic_drm_of_match); static struct platform_driver ingenic_drm_driver = { .driver = { .name = "ingenic-drm", + .pm = pm_ptr(&ingenic_drm_pm_ops), .of_match_table = of_match_ptr(ingenic_drm_of_match), }, .probe = ingenic_drm_probe, diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.h b/drivers/gpu/drm/ingenic/ingenic-drm.h index 9b48ce02803d..1b4347f7f084 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.h +++ b/drivers/gpu/drm/ingenic/ingenic-drm.h @@ -31,6 +31,7 @@ #define JZ_REG_LCD_SA1 0x54 #define JZ_REG_LCD_FID1 0x58 #define JZ_REG_LCD_CMD1 0x5C +#define JZ_REG_LCD_RGBC 0x90 #define JZ_REG_LCD_OSDC 0x100 #define JZ_REG_LCD_OSDCTRL 0x104 #define JZ_REG_LCD_OSDS 0x108 @@ -138,6 +139,19 @@ #define JZ_LCD_STATE_SOF_IRQ BIT(4) #define JZ_LCD_STATE_DISABLED BIT(0) +#define JZ_LCD_RGBC_ODD_RGB (0x0 << 4) +#define JZ_LCD_RGBC_ODD_RBG (0x1 << 4) +#define JZ_LCD_RGBC_ODD_GRB (0x2 << 4) +#define JZ_LCD_RGBC_ODD_GBR (0x3 << 4) +#define JZ_LCD_RGBC_ODD_BRG (0x4 << 4) +#define JZ_LCD_RGBC_ODD_BGR (0x5 << 4) +#define JZ_LCD_RGBC_EVEN_RGB (0x0 << 0) +#define JZ_LCD_RGBC_EVEN_RBG (0x1 << 0) +#define JZ_LCD_RGBC_EVEN_GRB (0x2 << 0) +#define JZ_LCD_RGBC_EVEN_GBR (0x3 << 0) +#define JZ_LCD_RGBC_EVEN_BRG (0x4 << 0) +#define JZ_LCD_RGBC_EVEN_BGR (0x5 << 0) + #define JZ_LCD_OSDC_OSDEN BIT(0) #define JZ_LCD_OSDC_F0EN BIT(3) #define JZ_LCD_OSDC_F1EN BIT(4) diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index a31a840ce634..f64e06e1067d 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -400,7 +400,7 @@ static void kmb_irq_reset(struct drm_device *drm) DEFINE_DRM_GEM_CMA_FOPS(fops); -static struct drm_driver kmb_driver = { +static const struct drm_driver kmb_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, .irq_handler = kmb_isr, @@ -556,7 +556,7 @@ MODULE_DEVICE_TABLE(of, kmb_of_match); static int __maybe_unused kmb_pm_suspend(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); - struct kmb_drm_private *kmb = drm ? 
to_kmb(drm) : NULL; + struct kmb_drm_private *kmb = to_kmb(drm); drm_kms_helper_poll_disable(drm); diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c index 8448d1edb553..be8eea3830c1 100644 --- a/drivers/gpu/drm/kmb/kmb_plane.c +++ b/drivers/gpu/drm/kmb/kmb_plane.c @@ -114,6 +114,9 @@ static void kmb_plane_atomic_disable(struct drm_plane *plane, kmb = to_kmb(plane->dev); + if (WARN_ON(plane_id >= KMB_MAX_PLANES)) + return; + switch (plane_id) { case LAYER_0: kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL1_ENABLE; diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c index da7099d20bd5..5686ad4aaf7c 100644 --- a/drivers/gpu/drm/lima/lima_devfreq.c +++ b/drivers/gpu/drm/lima/lima_devfreq.c @@ -102,15 +102,10 @@ void lima_devfreq_fini(struct lima_device *ldev) dev_pm_opp_of_remove_table(ldev->dev); - if (devfreq->regulators_opp_table) { - dev_pm_opp_put_regulators(devfreq->regulators_opp_table); - devfreq->regulators_opp_table = NULL; - } - - if (devfreq->clkname_opp_table) { - dev_pm_opp_put_clkname(devfreq->clkname_opp_table); - devfreq->clkname_opp_table = NULL; - } + dev_pm_opp_put_regulators(devfreq->regulators_opp_table); + dev_pm_opp_put_clkname(devfreq->clkname_opp_table); + devfreq->regulators_opp_table = NULL; + devfreq->clkname_opp_table = NULL; } int lima_devfreq_init(struct lima_device *ldev) diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 832e5280a6ed..de62966243cd 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -225,7 +225,7 @@ struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t siz mutex_init(&bo->lock); INIT_LIST_HEAD(&bo->va); - + bo->base.map_wc = true; bo->base.base.funcs = &lima_gem_funcs; return &bo->base.base; diff --git a/drivers/gpu/drm/mcde/Kconfig b/drivers/gpu/drm/mcde/Kconfig index b3990126562c..71c689b573c9 100644 --- a/drivers/gpu/drm/mcde/Kconfig +++ b/drivers/gpu/drm/mcde/Kconfig @@ -4,6 +4,7 @@ config DRM_MCDE depends on CMA depends on ARM || COMPILE_TEST depends on OF + depends on COMMON_CLK select MFD_SYSCON select DRM_MIPI_DSI select DRM_BRIDGE diff --git a/drivers/gpu/drm/mcde/Makefile b/drivers/gpu/drm/mcde/Makefile index fe28f4e0fe46..15d9c89a3273 100644 --- a/drivers/gpu/drm/mcde/Makefile +++ b/drivers/gpu/drm/mcde/Makefile @@ -1,3 +1,3 @@ -mcde_drm-y += mcde_drv.o mcde_dsi.o mcde_display.o +mcde_drm-y += mcde_drv.o mcde_dsi.o mcde_clk_div.o mcde_display.o obj-$(CONFIG_DRM_MCDE) += mcde_drm.o diff --git a/drivers/gpu/drm/mcde/mcde_clk_div.c b/drivers/gpu/drm/mcde/mcde_clk_div.c new file mode 100644 index 000000000000..038821d2ef80 --- /dev/null +++ b/drivers/gpu/drm/mcde/mcde_clk_div.c @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/clk-provider.h> +#include <linux/regulator/consumer.h> + +#include "mcde_drm.h" +#include "mcde_display_regs.h" + +/* The MCDE internal clock dividers for FIFO A and B */ +struct mcde_clk_div { + struct clk_hw hw; + struct mcde *mcde; + u32 cr; + u32 cr_div; +}; + +static int mcde_clk_div_enable(struct clk_hw *hw) +{ + struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw); + struct mcde *mcde = cdiv->mcde; + u32 val; + + spin_lock(&mcde->fifo_crx1_lock); + val = readl(mcde->regs + cdiv->cr); + /* + * Select the PLL72 (LCD) clock as parent + * FIXME: implement other parents. 
+ */ + val &= ~MCDE_CRX1_CLKSEL_MASK; + val |= MCDE_CRX1_CLKSEL_CLKPLL72 << MCDE_CRX1_CLKSEL_SHIFT; + /* Internal clock */ + val |= MCDE_CRA1_CLKTYPE_TVXCLKSEL1; + + /* Clear then set the divider */ + val &= ~(MCDE_CRX1_BCD | MCDE_CRX1_PCD_MASK); + val |= cdiv->cr_div; + + writel(val, mcde->regs + cdiv->cr); + spin_unlock(&mcde->fifo_crx1_lock); + + return 0; +} + +static int mcde_clk_div_choose_div(struct clk_hw *hw, unsigned long rate, + unsigned long *prate, bool set_parent) +{ + int best_div = 1, div; + struct clk_hw *parent = clk_hw_get_parent(hw); + unsigned long best_prate = 0; + unsigned long best_diff = ~0ul; + int max_div = (1 << MCDE_CRX1_PCD_BITS) - 1; + + for (div = 1; div < max_div; div++) { + unsigned long this_prate, div_rate, diff; + + if (set_parent) + this_prate = clk_hw_round_rate(parent, rate * div); + else + this_prate = *prate; + div_rate = DIV_ROUND_UP_ULL(this_prate, div); + diff = abs(rate - div_rate); + + if (diff < best_diff) { + best_div = div; + best_diff = diff; + best_prate = this_prate; + } + } + + *prate = best_prate; + return best_div; +} + +static long mcde_clk_div_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + int div = mcde_clk_div_choose_div(hw, rate, prate, true); + + return DIV_ROUND_UP_ULL(*prate, div); +} + +static unsigned long mcde_clk_div_recalc_rate(struct clk_hw *hw, + unsigned long prate) +{ + struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw); + struct mcde *mcde = cdiv->mcde; + u32 cr; + int div; + + /* + * If the MCDE is not powered we can't access registers. + * It will come up with 0 in the divider register bits, which + * means "divide by 2". + */ + if (!regulator_is_enabled(mcde->epod)) + return DIV_ROUND_UP_ULL(prate, 2); + + cr = readl(mcde->regs + cdiv->cr); + if (cr & MCDE_CRX1_BCD) + return prate; + + /* 0 in the PCD means "divide by 2", 1 means "divide by 3" etc */ + div = cr & MCDE_CRX1_PCD_MASK; + div += 2; + + return DIV_ROUND_UP_ULL(prate, div); +} + +static int mcde_clk_div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long prate) +{ + struct mcde_clk_div *cdiv = container_of(hw, struct mcde_clk_div, hw); + int div = mcde_clk_div_choose_div(hw, rate, &prate, false); + u32 cr = 0; + + /* + * We cache the CR bits to set the divide in the state so that + * we can call this before we can even write to the hardware. 
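The CRX1 divider encoding that set_rate() caches here, and that recalc_rate() above decodes, is worth spelling out. A sketch distilled from the logic in this file (helper names are illustrative, not part of the driver):

	/* div == 1 is expressed by the bypass bit, otherwise PCD = div - 2 */
	static u32 mcde_crx1_encode_div(int div)
	{
		if (div == 1)
			return MCDE_CRX1_BCD;

		return (div - 2) & MCDE_CRX1_PCD_MASK;
	}

	static int mcde_crx1_decode_div(u32 cr)
	{
		if (cr & MCDE_CRX1_BCD)
			return 1;

		/* PCD 0 means divide by 2, PCD 1 divide by 3, and so on */
		return (cr & MCDE_CRX1_PCD_MASK) + 2;
	}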
+ */ + if (div == 1) { + /* Bypass clock divider */ + cr |= MCDE_CRX1_BCD; + } else { + div -= 2; + cr |= div & MCDE_CRX1_PCD_MASK; + } + cdiv->cr_div = cr; + + return 0; +} + +static const struct clk_ops mcde_clk_div_ops = { + .enable = mcde_clk_div_enable, + .recalc_rate = mcde_clk_div_recalc_rate, + .round_rate = mcde_clk_div_round_rate, + .set_rate = mcde_clk_div_set_rate, +}; + +int mcde_init_clock_divider(struct mcde *mcde) +{ + struct device *dev = mcde->dev; + struct mcde_clk_div *fifoa; + struct mcde_clk_div *fifob; + const char *parent_name; + struct clk_init_data fifoa_init = { + .name = "fifoa", + .ops = &mcde_clk_div_ops, + .parent_names = &parent_name, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }; + struct clk_init_data fifob_init = { + .name = "fifob", + .ops = &mcde_clk_div_ops, + .parent_names = &parent_name, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + }; + int ret; + + spin_lock_init(&mcde->fifo_crx1_lock); + parent_name = __clk_get_name(mcde->lcd_clk); + + /* Allocate 2 clocks */ + fifoa = devm_kzalloc(dev, sizeof(*fifoa), GFP_KERNEL); + if (!fifoa) + return -ENOMEM; + fifob = devm_kzalloc(dev, sizeof(*fifob), GFP_KERNEL); + if (!fifob) + return -ENOMEM; + + fifoa->mcde = mcde; + fifoa->cr = MCDE_CRA1; + fifoa->hw.init = &fifoa_init; + ret = devm_clk_hw_register(dev, &fifoa->hw); + if (ret) { + dev_err(dev, "error registering FIFO A clock divider\n"); + return ret; + } + mcde->fifoa_clk = fifoa->hw.clk; + + fifob->mcde = mcde; + fifob->cr = MCDE_CRB1; + fifob->hw.init = &fifob_init; + ret = devm_clk_hw_register(dev, &fifob->hw); + if (ret) { + dev_err(dev, "error registering FIFO B clock divider\n"); + return ret; + } + mcde->fifob_clk = fifob->hw.clk; + + return 0; +} diff --git a/drivers/gpu/drm/mcde/mcde_display.c b/drivers/gpu/drm/mcde/mcde_display.c index c271e5bf042e..7c2e0b865441 100644 --- a/drivers/gpu/drm/mcde/mcde_display.c +++ b/drivers/gpu/drm/mcde/mcde_display.c @@ -8,6 +8,7 @@ #include <linux/delay.h> #include <linux/dma-buf.h> #include <linux/regulator/consumer.h> +#include <linux/media-bus-format.h> #include <drm/drm_device.h> #include <drm/drm_fb_cma_helper.h> @@ -16,6 +17,7 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_simple_kms_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_vblank.h> #include <video/mipi_display.h> @@ -57,10 +59,15 @@ enum mcde_overlay { MCDE_OVERLAY_5, }; -enum mcde_dsi_formatter { +enum mcde_formatter { MCDE_DSI_FORMATTER_0 = 0, MCDE_DSI_FORMATTER_1, MCDE_DSI_FORMATTER_2, + MCDE_DSI_FORMATTER_3, + MCDE_DSI_FORMATTER_4, + MCDE_DSI_FORMATTER_5, + MCDE_DPI_FORMATTER_0, + MCDE_DPI_FORMATTER_1, }; void mcde_display_irq(struct mcde *mcde) @@ -81,7 +88,7 @@ void mcde_display_irq(struct mcde *mcde) * * TODO: Currently only one DSI link is supported. */ - if (mcde_dsi_irq(mcde->mdsi)) { + if (!mcde->dpi_output && mcde_dsi_irq(mcde->mdsi)) { u32 val; /* @@ -243,73 +250,70 @@ static int mcde_configure_extsrc(struct mcde *mcde, enum mcde_extsrc src, val = 0 << MCDE_EXTSRCXCONF_BUF_ID_SHIFT; val |= 1 << MCDE_EXTSRCXCONF_BUF_NB_SHIFT; val |= 0 << MCDE_EXTSRCXCONF_PRI_OVLID_SHIFT; - /* - * MCDE has inverse semantics from DRM on RBG/BGR which is why - * all the modes are inversed here. 
- */ + switch (format) { case DRM_FORMAT_ARGB8888: val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 << MCDE_EXTSRCXCONF_BPP_SHIFT; - val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_ABGR8888: val |= MCDE_EXTSRCXCONF_BPP_ARGB8888 << MCDE_EXTSRCXCONF_BPP_SHIFT; + val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_XRGB8888: val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 << MCDE_EXTSRCXCONF_BPP_SHIFT; - val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_XBGR8888: val |= MCDE_EXTSRCXCONF_BPP_XRGB8888 << MCDE_EXTSRCXCONF_BPP_SHIFT; + val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_RGB888: val |= MCDE_EXTSRCXCONF_BPP_RGB888 << MCDE_EXTSRCXCONF_BPP_SHIFT; - val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_BGR888: val |= MCDE_EXTSRCXCONF_BPP_RGB888 << MCDE_EXTSRCXCONF_BPP_SHIFT; + val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_ARGB4444: val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 << MCDE_EXTSRCXCONF_BPP_SHIFT; - val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_ABGR4444: val |= MCDE_EXTSRCXCONF_BPP_ARGB4444 << MCDE_EXTSRCXCONF_BPP_SHIFT; + val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_XRGB4444: val |= MCDE_EXTSRCXCONF_BPP_RGB444 << MCDE_EXTSRCXCONF_BPP_SHIFT; - val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_XBGR4444: val |= MCDE_EXTSRCXCONF_BPP_RGB444 << MCDE_EXTSRCXCONF_BPP_SHIFT; + val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_XRGB1555: val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 << MCDE_EXTSRCXCONF_BPP_SHIFT; - val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_XBGR1555: val |= MCDE_EXTSRCXCONF_BPP_IRGB1555 << MCDE_EXTSRCXCONF_BPP_SHIFT; + val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_RGB565: val |= MCDE_EXTSRCXCONF_BPP_RGB565 << MCDE_EXTSRCXCONF_BPP_SHIFT; - val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_BGR565: val |= MCDE_EXTSRCXCONF_BPP_RGB565 << MCDE_EXTSRCXCONF_BPP_SHIFT; + val |= MCDE_EXTSRCXCONF_BGR; break; case DRM_FORMAT_YUV422: val |= MCDE_EXTSRCXCONF_BPP_YCBCR422 << @@ -556,6 +560,7 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch, << MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_SHIFT; break; case MCDE_VIDEO_FORMATTER_FLOW: + case MCDE_DPI_FORMATTER_FLOW: val = MCDE_CHNLXSYNCHMOD_SRC_SYNCH_HARDWARE << MCDE_CHNLXSYNCHMOD_SRC_SYNCH_SHIFT; val |= MCDE_CHNLXSYNCHMOD_OUT_SYNCH_SRC_FORMATTER @@ -564,7 +569,7 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch, default: dev_err(mcde->dev, "unknown flow mode %d\n", mcde->flow_mode); - break; + return; } writel(val, mcde->regs + sync); @@ -594,10 +599,35 @@ static void mcde_configure_channel(struct mcde *mcde, enum mcde_channel ch, mcde->regs + mux); break; } + + /* + * If using DPI configure the sync event. + * TODO: this is for LCD only, it does not cover TV out. 
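The DPI sync-event programming in the hunk that follows is easiest to sanity-check with a concrete mode. For a hypothetical 800x480 DPI panel:

	stripwidth = 0xF000 / (vdisplay * 4) = 61440 / 1920 = 32
	HWREQVCNT  = hdisplay - 1 - stripwidth = 800 - 1 - 32 = 767
	SWINTVCNT  = 767	/* same expression */

so both the hardware request and the software interrupt events are tied to active video and fire near the end of each 800-pixel line, one strip's width before it completes.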
+ */ + if (mcde->dpi_output) { + u32 stripwidth; + + stripwidth = 0xF000 / (mode->vdisplay * 4); + dev_info(mcde->dev, "stripwidth: %d\n", stripwidth); + + val = MCDE_SYNCHCONF_HWREQVEVENT_ACTIVE_VIDEO | + (mode->hdisplay - 1 - stripwidth) << MCDE_SYNCHCONF_HWREQVCNT_SHIFT | + MCDE_SYNCHCONF_SWINTVEVENT_ACTIVE_VIDEO | + (mode->hdisplay - 1 - stripwidth) << MCDE_SYNCHCONF_SWINTVCNT_SHIFT; + + switch (fifo) { + case MCDE_FIFO_A: + writel(val, mcde->regs + MCDE_SYNCHCONFA); + break; + case MCDE_FIFO_B: + writel(val, mcde->regs + MCDE_SYNCHCONFB); + break; + } + } } static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo, - enum mcde_dsi_formatter fmt, + enum mcde_formatter fmt, int fifo_wtrmrk) { u32 val; @@ -618,12 +648,49 @@ static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo, } val = fifo_wtrmrk << MCDE_CTRLX_FIFOWTRMRK_SHIFT; - /* We only support DSI formatting for now */ - val |= MCDE_CTRLX_FORMTYPE_DSI << - MCDE_CTRLX_FORMTYPE_SHIFT; - /* Select the formatter to use for this FIFO */ - val |= fmt << MCDE_CTRLX_FORMID_SHIFT; + /* + * Select the formatter to use for this FIFO + * + * The register definitions imply that different IDs should be used + * by the DSI formatters depending on if they are in VID or CMD + * mode, and the manual says they are dedicated but identical. + * The vendor code uses them as it seems fit. + */ + switch (fmt) { + case MCDE_DSI_FORMATTER_0: + val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT; + val |= MCDE_CTRLX_FORMID_DSI0VID << MCDE_CTRLX_FORMID_SHIFT; + break; + case MCDE_DSI_FORMATTER_1: + val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT; + val |= MCDE_CTRLX_FORMID_DSI0CMD << MCDE_CTRLX_FORMID_SHIFT; + break; + case MCDE_DSI_FORMATTER_2: + val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT; + val |= MCDE_CTRLX_FORMID_DSI1VID << MCDE_CTRLX_FORMID_SHIFT; + break; + case MCDE_DSI_FORMATTER_3: + val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT; + val |= MCDE_CTRLX_FORMID_DSI1CMD << MCDE_CTRLX_FORMID_SHIFT; + break; + case MCDE_DSI_FORMATTER_4: + val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT; + val |= MCDE_CTRLX_FORMID_DSI2VID << MCDE_CTRLX_FORMID_SHIFT; + break; + case MCDE_DSI_FORMATTER_5: + val |= MCDE_CTRLX_FORMTYPE_DSI << MCDE_CTRLX_FORMTYPE_SHIFT; + val |= MCDE_CTRLX_FORMID_DSI2CMD << MCDE_CTRLX_FORMID_SHIFT; + break; + case MCDE_DPI_FORMATTER_0: + val |= MCDE_CTRLX_FORMTYPE_DPITV << MCDE_CTRLX_FORMTYPE_SHIFT; + val |= MCDE_CTRLX_FORMID_DPIA << MCDE_CTRLX_FORMID_SHIFT; + break; + case MCDE_DPI_FORMATTER_1: + val |= MCDE_CTRLX_FORMTYPE_DPITV << MCDE_CTRLX_FORMTYPE_SHIFT; + val |= MCDE_CTRLX_FORMID_DPIB << MCDE_CTRLX_FORMID_SHIFT; + break; + } writel(val, mcde->regs + ctrl); /* Blend source with Alpha 0xff on FIFO */ @@ -631,17 +698,54 @@ static void mcde_configure_fifo(struct mcde *mcde, enum mcde_fifo fifo, 0xff << MCDE_CRX0_ALPHABLEND_SHIFT; writel(val, mcde->regs + cr0); - /* Set-up from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */ + spin_lock(&mcde->fifo_crx1_lock); + val = readl(mcde->regs + cr1); + /* + * Set-up from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() + * FIXME: a different clock needs to be selected for TV out. 
+ */ + if (mcde->dpi_output) { + struct drm_connector *connector = drm_panel_bridge_connector(mcde->bridge); + u32 bus_format; - /* Use the MCDE clock for this FIFO */ - val = MCDE_CRX1_CLKSEL_MCDECLK << MCDE_CRX1_CLKSEL_SHIFT; + /* Assume RGB888 24 bit if we have no further info */ + if (!connector->display_info.num_bus_formats) { + dev_info(mcde->dev, "panel does not specify bus format, assume RGB888\n"); + bus_format = MEDIA_BUS_FMT_RGB888_1X24; + } else { + bus_format = connector->display_info.bus_formats[0]; + } - /* TODO: when adding DPI support add OUTBPP etc here */ + /* + * Set up the CDWIN and OUTBPP for the LCD + * + * FIXME: fill this in if you know the correspondence between the MIPI + * DPI specification and the media bus formats. + */ + val &= ~MCDE_CRX1_CDWIN_MASK; + val &= ~MCDE_CRX1_OUTBPP_MASK; + switch (bus_format) { + case MEDIA_BUS_FMT_RGB888_1X24: + val |= MCDE_CRX1_CDWIN_24BPP << MCDE_CRX1_CDWIN_SHIFT; + val |= MCDE_CRX1_OUTBPP_24BPP << MCDE_CRX1_OUTBPP_SHIFT; + break; + default: + dev_err(mcde->dev, "unknown bus format, assume RGB888\n"); + val |= MCDE_CRX1_CDWIN_24BPP << MCDE_CRX1_CDWIN_SHIFT; + val |= MCDE_CRX1_OUTBPP_24BPP << MCDE_CRX1_OUTBPP_SHIFT; + break; + } + } else { + /* Use the MCDE clock for DSI */ + val &= ~MCDE_CRX1_CLKSEL_MASK; + val |= MCDE_CRX1_CLKSEL_MCDECLK << MCDE_CRX1_CLKSEL_SHIFT; + } writel(val, mcde->regs + cr1); + spin_unlock(&mcde->fifo_crx1_lock); }; static void mcde_configure_dsi_formatter(struct mcde *mcde, - enum mcde_dsi_formatter fmt, + enum mcde_formatter fmt, u32 formatter_frame, int pkt_size) { @@ -681,6 +785,9 @@ static void mcde_configure_dsi_formatter(struct mcde *mcde, delay0 = MCDE_DSIVID2DELAY0; delay1 = MCDE_DSIVID2DELAY1; break; + default: + dev_err(mcde->dev, "tried to configure a non-DSI formatter as DSI\n"); + return; } /* @@ -700,7 +807,9 @@ static void mcde_configure_dsi_formatter(struct mcde *mcde, MCDE_DSICONF0_PACKING_SHIFT; break; case MIPI_DSI_FMT_RGB666_PACKED: - val |= MCDE_DSICONF0_PACKING_RGB666_PACKED << + dev_err(mcde->dev, + "we cannot handle the packed RGB666 format\n"); + val |= MCDE_DSICONF0_PACKING_RGB666 << MCDE_DSICONF0_PACKING_SHIFT; break; case MIPI_DSI_FMT_RGB565: @@ -860,73 +969,140 @@ static int mcde_dsi_get_pkt_div(int ppl, int fifo_size) return 1; } -static void mcde_display_enable(struct drm_simple_display_pipe *pipe, - struct drm_crtc_state *cstate, - struct drm_plane_state *plane_state) +static void mcde_setup_dpi(struct mcde *mcde, const struct drm_display_mode *mode, + int *fifo_wtrmrk_lvl) { - struct drm_crtc *crtc = &pipe->crtc; - struct drm_plane *plane = &pipe->plane; - struct drm_device *drm = crtc->dev; - struct mcde *mcde = to_mcde(drm); - const struct drm_display_mode *mode = &cstate->mode; - struct drm_framebuffer *fb = plane->state->fb; - u32 format = fb->format->format; - u32 formatter_ppl = mode->hdisplay; /* pixels per line */ - u32 formatter_lpf = mode->vdisplay; /* lines per frame */ - int pkt_size, fifo_wtrmrk; - int cpp = fb->format->cpp[0]; - int formatter_cpp; - struct drm_format_name_buf tmp; - u32 formatter_frame; - u32 pkt_div; + struct drm_connector *connector = drm_panel_bridge_connector(mcde->bridge); + u32 hsw, hfp, hbp; + u32 vsw, vfp, vbp; u32 val; - int ret; - /* This powers up the entire MCDE block and the DSI hardware */ - ret = regulator_enable(mcde->epod); - if (ret) { - dev_err(drm->dev, "can't re-enable EPOD regulator\n"); - return; - } + /* FIXME: we only support LCD, implement TV out */ + hsw = mode->hsync_end - mode->hsync_start; + hfp = 
mode->hsync_start - mode->hdisplay; + hbp = mode->htotal - mode->hsync_end; + vsw = mode->vsync_end - mode->vsync_start; + vfp = mode->vsync_start - mode->vdisplay; + vbp = mode->vtotal - mode->vsync_end; - dev_info(drm->dev, "enable MCDE, %d x %d format %s\n", - mode->hdisplay, mode->vdisplay, - drm_get_format_name(format, &tmp)); - if (!mcde->mdsi) { - /* TODO: deal with this for non-DSI output */ - dev_err(drm->dev, "no DSI master attached!\n"); - return; - } + dev_info(mcde->dev, "output on DPI LCD from channel A\n"); + /* Display actual values */ + dev_info(mcde->dev, "HSW: %d, HFP: %d, HBP: %d, VSW: %d, VFP: %d, VBP: %d\n", + hsw, hfp, hbp, vsw, vfp, vbp); + + /* + * The pixel fetcher is 128 64-bit words deep = 1024 bytes. + * One overlay of 32bpp (4 cpp) assumed, fetch 160 pixels. + * 160 * 4 = 640 bytes. + */ + *fifo_wtrmrk_lvl = 640; /* Set up the main control, watermark level at 7 */ val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT; - /* 24 bits DPI: connect LSB Ch B to D[0:7] */ - val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT; - /* TV out: connect LSB Ch B to D[8:15] */ - val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT; + + /* + * This sets up the internal silicon muxing of the DPI + * lines. This is how the silicon connects out to the + * external pins, then the pins need to be further + * configured into "alternate functions" using pin control + * to actually get the signals out. + * + * FIXME: this is hardcoded to the only setting found in + * the wild. If we need to use different settings for + * different DPI displays, make this parameterizable from + * the device tree. + */ + /* 24 bits DPI: connect Ch A LSB to D[0:7] */ + val |= 0 << MCDE_CONF0_OUTMUX0_SHIFT; + /* 24 bits DPI: connect Ch A MID to D[8:15] */ + val |= 1 << MCDE_CONF0_OUTMUX1_SHIFT; /* Don't care about this muxing */ val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT; - /* 24 bits DPI: connect MID Ch B to D[24:31] */ - val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT; - /* 5: 24 bits DPI: connect MSB Ch B to D[32:39] */ - val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT; - /* Syncmux bits zero: DPI channel A and B on output pins A and B resp */ + /* Don't care about this muxing */ + val |= 0 << MCDE_CONF0_OUTMUX3_SHIFT; + /* 24 bits DPI: connect Ch A MSB to D[32:39] */ + val |= 2 << MCDE_CONF0_OUTMUX4_SHIFT; + /* Syncmux bits zero: DPI channel A */ writel(val, mcde->regs + MCDE_CONF0); - /* Clear any pending interrupts */ - mcde_display_disable_irqs(mcde); - writel(0, mcde->regs + MCDE_IMSCERR); - writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR); + /* This hammers us into LCD mode */ + writel(0, mcde->regs + MCDE_TVCRA); + + /* Front porch and sync width */ + val = (vsw << MCDE_TVBL1_BEL1_SHIFT); + val |= (vfp << MCDE_TVBL1_BSL1_SHIFT); + writel(val, mcde->regs + MCDE_TVBL1A); + /* The vendor driver sets the same value into TVBL2A */ + writel(val, mcde->regs + MCDE_TVBL2A); + + /* Vertical back porch */ + val = (vbp << MCDE_TVDVO_DVO1_SHIFT); + /* The vendor driver sets the same value into TVDVOA */ + val |= (vbp << MCDE_TVDVO_DVO2_SHIFT); + writel(val, mcde->regs + MCDE_TVDVOA); + + /* Horizontal back porch, as 0 = 1 cycle we need to subtract 1 */ + writel((hbp - 1), mcde->regs + MCDE_TVTIM1A); + + /* Horizontal sync width and horizontal front porch, 0 = 1 cycle */ + val = ((hsw - 1) << MCDE_TVLBALW_LBW_SHIFT); + val |= ((hfp - 1) << MCDE_TVLBALW_ALW_SHIFT); + writel(val, mcde->regs + MCDE_TVLBALWA); - dev_info(drm->dev, "output in %s mode, format %dbpp\n", + /* Blank some TV registers we don't use */ + writel(0, mcde->regs + MCDE_TVISLA); + writel(0, mcde->regs + 
MCDE_TVBLUA); + + /* Set up sync inversion etc */ + val = 0; + if (mode->flags & DRM_MODE_FLAG_NHSYNC) + val |= MCDE_LCDTIM1B_IHS; + if (mode->flags & DRM_MODE_FLAG_NVSYNC) + val |= MCDE_LCDTIM1B_IVS; + if (connector->display_info.bus_flags & DRM_BUS_FLAG_DE_LOW) + val |= MCDE_LCDTIM1B_IOE; + if (connector->display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) + val |= MCDE_LCDTIM1B_IPC; + writel(val, mcde->regs + MCDE_LCDTIM1A); +} + +static void mcde_setup_dsi(struct mcde *mcde, const struct drm_display_mode *mode, + int cpp, int *fifo_wtrmrk_lvl, int *dsi_formatter_frame, + int *dsi_pkt_size) +{ + u32 formatter_ppl = mode->hdisplay; /* pixels per line */ + u32 formatter_lpf = mode->vdisplay; /* lines per frame */ + int formatter_frame; + int formatter_cpp; + int fifo_wtrmrk; + u32 pkt_div; + int pkt_size; + u32 val; + + dev_info(mcde->dev, "output in %s mode, format %dbpp\n", (mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO) ? "VIDEO" : "CMD", mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format)); formatter_cpp = mipi_dsi_pixel_format_to_bpp(mcde->mdsi->format) / 8; - dev_info(drm->dev, "overlay CPP %d bytes, DSI CPP %d bytes\n", - cpp, - formatter_cpp); + dev_info(mcde->dev, "Overlay CPP: %d bytes, DSI formatter CPP %d bytes\n", + cpp, formatter_cpp); + + /* Set up the main control, watermark level at 7 */ + val = 7 << MCDE_CONF0_IFIFOCTRLWTRMRKLVL_SHIFT; + + /* + * This is the internal silicon muxing of the DPI + * (parallel display) lines. Since we are not using + * this at all (we are using DSI) these are just + * dummy values from the vendor tree. + */ + val |= 3 << MCDE_CONF0_OUTMUX0_SHIFT; + val |= 3 << MCDE_CONF0_OUTMUX1_SHIFT; + val |= 0 << MCDE_CONF0_OUTMUX2_SHIFT; + val |= 4 << MCDE_CONF0_OUTMUX3_SHIFT; + val |= 5 << MCDE_CONF0_OUTMUX4_SHIFT; + writel(val, mcde->regs + MCDE_CONF0); /* Calculations from mcde_fmtr_dsi.c, fmtr_dsi_enable_video() */ @@ -948,9 +1124,9 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe, /* The FIFO is 640 entries deep on this v3 hardware */ pkt_div = mcde_dsi_get_pkt_div(mode->hdisplay, 640); } - dev_dbg(drm->dev, "FIFO watermark after flooring: %d bytes\n", + dev_dbg(mcde->dev, "FIFO watermark after flooring: %d bytes\n", fifo_wtrmrk); - dev_dbg(drm->dev, "Packet divisor: %d bytes\n", pkt_div); + dev_dbg(mcde->dev, "Packet divisor: %d bytes\n", pkt_div); /* NOTE: pkt_div is 1 for video mode */ pkt_size = (formatter_ppl * formatter_cpp) / pkt_div; @@ -958,16 +1134,64 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe, if (!(mcde->mdsi->mode_flags & MIPI_DSI_MODE_VIDEO)) pkt_size++; - dev_dbg(drm->dev, "DSI packet size: %d * %d bytes per line\n", + dev_dbg(mcde->dev, "DSI packet size: %d * %d bytes per line\n", pkt_size, pkt_div); - dev_dbg(drm->dev, "Overlay frame size: %u bytes\n", + dev_dbg(mcde->dev, "Overlay frame size: %u bytes\n", mode->hdisplay * mode->vdisplay * cpp); - mcde->stride = mode->hdisplay * cpp; - dev_dbg(drm->dev, "Overlay line stride: %u bytes\n", - mcde->stride); /* NOTE: pkt_div is 1 for video mode */ formatter_frame = pkt_size * pkt_div * formatter_lpf; - dev_dbg(drm->dev, "Formatter frame size: %u bytes\n", formatter_frame); + dev_dbg(mcde->dev, "Formatter frame size: %u bytes\n", formatter_frame); + + *fifo_wtrmrk_lvl = fifo_wtrmrk; + *dsi_pkt_size = pkt_size; + *dsi_formatter_frame = formatter_frame; +} + +static void mcde_display_enable(struct drm_simple_display_pipe *pipe, + struct drm_crtc_state *cstate, + struct drm_plane_state *plane_state) +{ + struct drm_crtc *crtc = 
&pipe->crtc; + struct drm_plane *plane = &pipe->plane; + struct drm_device *drm = crtc->dev; + struct mcde *mcde = to_mcde(drm); + const struct drm_display_mode *mode = &cstate->mode; + struct drm_framebuffer *fb = plane->state->fb; + u32 format = fb->format->format; + int dsi_pkt_size; + int fifo_wtrmrk; + int cpp = fb->format->cpp[0]; + struct drm_format_name_buf tmp; + u32 dsi_formatter_frame; + u32 val; + int ret; + + /* This powers up the entire MCDE block and the DSI hardware */ + ret = regulator_enable(mcde->epod); + if (ret) { + dev_err(drm->dev, "can't re-enable EPOD regulator\n"); + return; + } + + dev_info(drm->dev, "enable MCDE, %d x %d format %s\n", + mode->hdisplay, mode->vdisplay, + drm_get_format_name(format, &tmp)); + + + /* Clear any pending interrupts */ + mcde_display_disable_irqs(mcde); + writel(0, mcde->regs + MCDE_IMSCERR); + writel(0xFFFFFFFF, mcde->regs + MCDE_RISERR); + + if (mcde->dpi_output) + mcde_setup_dpi(mcde, mode, &fifo_wtrmrk); + else + mcde_setup_dsi(mcde, mode, cpp, &fifo_wtrmrk, + &dsi_formatter_frame, &dsi_pkt_size); + + mcde->stride = mode->hdisplay * cpp; + dev_dbg(drm->dev, "Overlay line stride: %u bytes\n", + mcde->stride); /* Drain the FIFO A + channel 0 pipe so we have a clean slate */ mcde_drain_pipe(mcde, MCDE_FIFO_A, MCDE_CHANNEL_0); @@ -995,29 +1219,47 @@ static void mcde_display_enable(struct drm_simple_display_pipe *pipe, */ mcde_configure_channel(mcde, MCDE_CHANNEL_0, MCDE_FIFO_A, mode); - /* Configure FIFO A to use DSI formatter 0 */ - mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0, - fifo_wtrmrk); + if (mcde->dpi_output) { + unsigned long lcd_freq; - /* - * This brings up the DSI bridge which is tightly connected - * to the MCDE DSI formatter. - * - * FIXME: if we want to use another formatter, such as DPI, - * we need to be more elaborate here and select the appropriate - * bridge. - */ - mcde_dsi_enable(mcde->bridge); + /* Configure FIFO A to use DPI formatter 0 */ + mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DPI_FORMATTER_0, + fifo_wtrmrk); + + /* Set up and enable the LCD clock */ + lcd_freq = clk_round_rate(mcde->fifoa_clk, mode->clock * 1000); + ret = clk_set_rate(mcde->fifoa_clk, lcd_freq); + if (ret) + dev_err(mcde->dev, "failed to set LCD clock rate %lu Hz\n", + lcd_freq); + ret = clk_prepare_enable(mcde->fifoa_clk); + if (ret) { + dev_err(mcde->dev, "failed to enable FIFO A DPI clock\n"); + return; + } + dev_info(mcde->dev, "LCD FIFO A clk rate %lu Hz\n", + clk_get_rate(mcde->fifoa_clk)); + } else { + /* Configure FIFO A to use DSI formatter 0 */ + mcde_configure_fifo(mcde, MCDE_FIFO_A, MCDE_DSI_FORMATTER_0, + fifo_wtrmrk); + + /* + * This brings up the DSI bridge which is tightly connected + * to the MCDE DSI formatter. 
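+ * (In the DPI case this branch is not taken; the pixel clock then + * comes from the FIFO A clock divider set up in the branch above.)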
+ */ + mcde_dsi_enable(mcde->bridge); - /* Configure the DSI formatter 0 for the DSI panel output */ - mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0, - formatter_frame, pkt_size); + /* Configure the DSI formatter 0 for the DSI panel output */ + mcde_configure_dsi_formatter(mcde, MCDE_DSI_FORMATTER_0, + dsi_formatter_frame, dsi_pkt_size); + } switch (mcde->flow_mode) { case MCDE_COMMAND_TE_FLOW: case MCDE_COMMAND_BTA_TE_FLOW: case MCDE_VIDEO_TE_FLOW: - /* We are using TE in some comination */ + /* We are using TE in some combination */ if (mode->flags & DRM_MODE_FLAG_NVSYNC) val = MCDE_VSCRC_VSPOL; else @@ -1069,8 +1311,12 @@ static void mcde_display_disable(struct drm_simple_display_pipe *pipe) /* Disable FIFO A flow */ mcde_disable_fifo(mcde, MCDE_FIFO_A, true); - /* This disables the DSI bridge */ - mcde_dsi_disable(mcde->bridge); + if (mcde->dpi_output) { + clk_disable_unprepare(mcde->fifoa_clk); + } else { + /* This disables the DSI bridge */ + mcde_dsi_disable(mcde->bridge); + } event = crtc->state->event; if (event) { @@ -1261,6 +1507,10 @@ int mcde_display_init(struct drm_device *drm) DRM_FORMAT_YUV422, }; + ret = mcde_init_clock_divider(mcde); + if (ret) + return ret; + ret = drm_simple_display_pipe_init(drm, &mcde->pipe, &mcde_display_funcs, formats, ARRAY_SIZE(formats), diff --git a/drivers/gpu/drm/mcde/mcde_display_regs.h b/drivers/gpu/drm/mcde/mcde_display_regs.h index d3ac7ef5ff9a..2ad78c59d627 100644 --- a/drivers/gpu/drm/mcde/mcde_display_regs.h +++ b/drivers/gpu/drm/mcde/mcde_display_regs.h @@ -215,6 +215,80 @@ #define MCDE_OVLXCOMP_Z_SHIFT 27 #define MCDE_OVLXCOMP_Z_MASK 0x78000000 +/* DPI/TV configuration registers, channel A and B */ +#define MCDE_TVCRA 0x00000838 +#define MCDE_TVCRB 0x00000A38 +#define MCDE_TVCR_MOD_TV BIT(0) /* 0 = LCD mode */ +#define MCDE_TVCR_INTEREN BIT(1) +#define MCDE_TVCR_IFIELD BIT(2) +#define MCDE_TVCR_TVMODE_SDTV_656P (0 << 3) +#define MCDE_TVCR_TVMODE_SDTV_656P_LE (3 << 3) +#define MCDE_TVCR_TVMODE_SDTV_656P_BE (4 << 3) +#define MCDE_TVCR_SDTVMODE_Y0CBY1CR (0 << 6) +#define MCDE_TVCR_SDTVMODE_CBY0CRY1 (1 << 6) +#define MCDE_TVCR_AVRGEN BIT(8) +#define MCDE_TVCR_CKINV BIT(9) + +/* TV blanking control register 1, channel A and B */ +#define MCDE_TVBL1A 0x0000083C +#define MCDE_TVBL1B 0x00000A3C +#define MCDE_TVBL1_BEL1_SHIFT 0 /* VFP vertical front porch 11 bits */ +#define MCDE_TVBL1_BSL1_SHIFT 16 /* VSW vertical sync pulse width 11 bits */ + +/* Pixel processing TV start line, channel A and B */ +#define MCDE_TVISLA 0x00000840 +#define MCDE_TVISLB 0x00000A40 +#define MCDE_TVISL_FSL1_SHIFT 0 /* Field 1 identification start line 11 bits */ +#define MCDE_TVISL_FSL2_SHIFT 16 /* Field 2 identification start line 11 bits */ + +/* Pixel processing TV DVO offset */ +#define MCDE_TVDVOA 0x00000844 +#define MCDE_TVDVOB 0x00000A44 +#define MCDE_TVDVO_DVO1_SHIFT 0 /* VBP vertical back porch 0 = 0 */ +#define MCDE_TVDVO_DVO2_SHIFT 16 + +/* + * Pixel processing TV Timing 1 + * HBP horizontal back porch 11 bits horizontal offset + * 0 = 1 pixel HBP, 255 = 256 pixels, so actual value - 1 + */ +#define MCDE_TVTIM1A 0x0000084C +#define MCDE_TVTIM1B 0x00000A4C + +/* Pixel processing TV LBALW */ +/* 0 = 1 clock cycle, 255 = 256 clock cycles */ +#define MCDE_TVLBALWA 0x00000850 +#define MCDE_TVLBALWB 0x00000A50 +#define MCDE_TVLBALW_LBW_SHIFT 0 /* HSW horizontal sync width, line blanking width 11 bits */ +#define MCDE_TVLBALW_ALW_SHIFT 16 /* HFP horizontal front porch, active line width 11 bits */
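Several of these horizontal fields use the convention that a register value of 0 means one cycle, which is why the driver code above subtracts one before writing. A worked example with an illustrative mode; the numbers are not taken from the patch:

/*
 * hdisplay = 800, hsync_start = 810, hsync_end = 820, htotal = 850
 *
 * hsw = hsync_end - hsync_start = 10  ->  LBW field   = hsw - 1 = 9
 * hfp = hsync_start - hdisplay  = 10  ->  ALW field   = hfp - 1 = 9
 * hbp = htotal - hsync_end      = 30  ->  MCDE_TVTIM1A = hbp - 1 = 29
 */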
+/* TV blanking control register 2, channel A and B */ +#define MCDE_TVBL2A 0x00000854 +#define MCDE_TVBL2B 0x00000A54 +#define MCDE_TVBL2_BEL2_SHIFT 0 /* Field 2 blanking end line 11 bits */ +#define MCDE_TVBL2_BSL2_SHIFT 16 /* Field 2 blanking start line 11 bits */ + +/* Pixel processing TV background */ +#define MCDE_TVBLUA 0x00000858 +#define MCDE_TVBLUB 0x00000A58 +#define MCDE_TVBLU_TVBLU_SHIFT 0 /* 8 bits luminance */ +#define MCDE_TVBLU_TVBCB_SHIFT 8 /* 8 bits Cb chrominance */ +#define MCDE_TVBLU_TVBCR_SHIFT 16 /* 8 bits Cr chrominance */ + +/* Pixel processing LCD timing 1 */ +#define MCDE_LCDTIM1A 0x00000860 +#define MCDE_LCDTIM1B 0x00000A60 +/* inverted vertical sync pulse for HRTFT 0 = active low, 1 = active high */ +#define MCDE_LCDTIM1B_IVP BIT(19) +/* inverted vertical sync, 0 = active high (the normal), 1 = active low */ +#define MCDE_LCDTIM1B_IVS BIT(20) +/* inverted horizontal sync, 0 = active high (the normal), 1 = active low */ +#define MCDE_LCDTIM1B_IHS BIT(21) +/* inverted panel clock 0 = rising edge data out, 1 = falling edge data out */ +#define MCDE_LCDTIM1B_IPC BIT(22) +/* invert output enable 0 = active high, 1 = active low */ +#define MCDE_LCDTIM1B_IOE BIT(23) + #define MCDE_CRC 0x00000C00 #define MCDE_CRC_C1EN BIT(2) #define MCDE_CRC_C2EN BIT(3) @@ -360,6 +434,7 @@ #define MCDE_CRB1 0x00000A04 #define MCDE_CRX1_PCD_SHIFT 0 #define MCDE_CRX1_PCD_MASK 0x000003FF +#define MCDE_CRX1_PCD_BITS 10 #define MCDE_CRX1_CLKSEL_SHIFT 10 #define MCDE_CRX1_CLKSEL_MASK 0x00001C00 #define MCDE_CRX1_CLKSEL_CLKPLL72 0 @@ -421,8 +496,20 @@ #define MCDE_ROTACONF 0x0000087C #define MCDE_ROTBCONF 0x00000A7C +/* Synchronization event configuration */ #define MCDE_SYNCHCONFA 0x00000880 #define MCDE_SYNCHCONFB 0x00000A80 +#define MCDE_SYNCHCONF_HWREQVEVENT_SHIFT 0 +#define MCDE_SYNCHCONF_HWREQVEVENT_VSYNC (0 << 0) +#define MCDE_SYNCHCONF_HWREQVEVENT_BACK_PORCH (1 << 0) +#define MCDE_SYNCHCONF_HWREQVEVENT_ACTIVE_VIDEO (2 << 0) +#define MCDE_SYNCHCONF_HWREQVEVENT_FRONT_PORCH (3 << 0) +#define MCDE_SYNCHCONF_HWREQVCNT_SHIFT 2 /* 14 bits */ +#define MCDE_SYNCHCONF_SWINTVEVENT_VSYNC (0 << 16) +#define MCDE_SYNCHCONF_SWINTVEVENT_BACK_PORCH (1 << 16) +#define MCDE_SYNCHCONF_SWINTVEVENT_ACTIVE_VIDEO (2 << 16) +#define MCDE_SYNCHCONF_SWINTVEVENT_FRONT_PORCH (3 << 16) +#define MCDE_SYNCHCONF_SWINTVCNT_SHIFT 18 /* 14 bits */ /* Channel A+B control registers */ #define MCDE_CTRLA 0x00000884 @@ -465,8 +552,8 @@ #define MCDE_DSICONF0_PACKING_MASK 0x00700000 #define MCDE_DSICONF0_PACKING_RGB565 0 #define MCDE_DSICONF0_PACKING_RGB666 1 -#define MCDE_DSICONF0_PACKING_RGB666_PACKED 2 -#define MCDE_DSICONF0_PACKING_RGB888 3 +#define MCDE_DSICONF0_PACKING_RGB888 2 +#define MCDE_DSICONF0_PACKING_BGR888 3 #define MCDE_DSICONF0_PACKING_HDTV 4 #define MCDE_DSIVID0FRAME 0x00000E04 diff --git a/drivers/gpu/drm/mcde/mcde_drm.h b/drivers/gpu/drm/mcde/mcde_drm.h index 8253e2f9993e..ecb70b4b737c 100644 --- a/drivers/gpu/drm/mcde/mcde_drm.h +++ b/drivers/gpu/drm/mcde/mcde_drm.h @@ -62,6 +62,8 @@ enum mcde_flow_mode { MCDE_VIDEO_TE_FLOW, /* Video mode with the formatter itself as sync source */ MCDE_VIDEO_FORMATTER_FLOW, + /* DPI video with the formatter itself as sync source */ + MCDE_DPI_FORMATTER_FLOW, }; struct mcde { @@ -72,6 +74,7 @@ struct mcde { struct drm_connector *connector; struct drm_simple_display_pipe pipe; struct mipi_dsi_device *mdsi; + bool dpi_output; s16 stride; enum mcde_flow_mode flow_mode; unsigned int flow_active; @@ -82,6 +85,11 @@ struct mcde { struct clk *mcde_clk; struct clk *lcd_clk; struct clk *hdmi_clk; + /* Handles to the clock dividers for 
FIFO A and B */ + struct clk *fifoa_clk; + struct clk *fifob_clk; + /* Locks the MCDE FIFO control registers A and B */ + spinlock_t fifo_crx1_lock; struct regulator *epod; struct regulator *vana; @@ -105,4 +113,6 @@ void mcde_display_irq(struct mcde *mcde); void mcde_display_disable_irqs(struct mcde *mcde); int mcde_display_init(struct drm_device *drm); +int mcde_init_clock_divider(struct mcde *mcde); + #endif /* _MCDE_DRM_H_ */ diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 870626e04ec0..e60566a5739c 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -22,13 +22,13 @@ * The hardware has four display pipes, and the layout is a little * bit like this:: * - * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI - * External 0..5 0..3 A,B, 3 x DSI bridge + * Memory -> Overlay -> Channel -> FIFO -> 8 formatters -> DSI/DPI + * External 0..5 0..3 A,B, 6 x DSI bridge + * source 0..9 C0,C1 2 x DPI * * FIFOs A and B are for LCD and HDMI while FIFO C0/C1 are for * panels with embedded buffer. - * 3 of the formatters are for DSI. + * 6 of the formatters are for DSI, 3 pairs for VID/CMD respectively. * 2 of the formatters are for DPI. * * Behind the formatters are the DSI or DPI ports that route to @@ -130,9 +130,37 @@ static int mcde_modeset_init(struct drm_device *drm) struct mcde *mcde = to_mcde(drm); int ret; + /* + * If no other bridge was found, check if we have a DPI panel or + * any other bridge connected directly to the MCDE DPI output. + * If a DSI bridge is found, DSI will take precedence. + * + * TODO: more elaborate bridge selection if we have more than one + * thing attached to the system. + */ if (!mcde->bridge) { - dev_err(drm->dev, "no display output bridge yet\n"); - return -EPROBE_DEFER; + struct drm_panel *panel; + struct drm_bridge *bridge; + + ret = drm_of_find_panel_or_bridge(drm->dev->of_node, + 0, 0, &panel, &bridge); + if (ret) { + dev_err(drm->dev, + "Could not locate any output bridge or panel\n"); + return ret; + } + if (panel) { + bridge = drm_panel_bridge_add_typed(panel, + DRM_MODE_CONNECTOR_DPI); + if (IS_ERR(bridge)) { + dev_err(drm->dev, + "Could not connect panel bridge\n"); + return PTR_ERR(bridge); + } + } + mcde->dpi_output = true; + mcde->bridge = bridge; + mcde->flow_mode = MCDE_DPI_FORMATTER_FLOW; } mode_config = &drm->mode_config; @@ -156,13 +184,7 @@ static int mcde_modeset_init(struct drm_device *drm) return ret; } - /* - * Attach the DSI bridge - * - * TODO: when adding support for the DPI bridge or several DSI bridges, - * we selectively connect the bridge(s) here instead of this simple - * attachment. - */ + /* Attach the bridge. 
*/ ret = drm_simple_display_pipe_attach_bridge(&mcde->pipe, mcde->bridge); if (ret) { @@ -413,7 +435,13 @@ static int mcde_probe(struct platform_device *pdev) match); if (ret) { dev_err(dev, "failed to add component master\n"); - goto clk_disable; + /* + * The EPOD regulator is already disabled at this point so some + * special error path code is needed + */ + clk_disable_unprepare(mcde->mcde_clk); + regulator_disable(mcde->vana); + return ret; } return 0; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index bfe994230543..584dc26affc1 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -619,7 +619,6 @@ static const struct drm_crtc_funcs mtk_crtc_funcs = { .reset = mtk_drm_crtc_reset, .atomic_duplicate_state = mtk_drm_crtc_duplicate_state, .atomic_destroy_state = mtk_drm_crtc_destroy_state, - .gamma_set = drm_atomic_helper_legacy_gamma_set, .enable_vblank = mtk_drm_crtc_enable_vblank, .disable_vblank = mtk_drm_crtc_disable_vblank, }; @@ -829,8 +828,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, #if IS_REACHABLE(CONFIG_MTK_CMDQ) mtk_crtc->cmdq_client = cmdq_mbox_create(mtk_crtc->mmsys_dev, - drm_crtc_index(&mtk_crtc->base), - 2000); + drm_crtc_index(&mtk_crtc->base)); if (IS_ERR(mtk_crtc->cmdq_client)) { dev_dbg(dev, "mtk_crtc %d failed to create mailbox client, writing register by CPU now\n", drm_crtc_index(&mtk_crtc->base)); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 1d9e00b69462..5aa52b7afeec 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -7,6 +7,7 @@ #define MTK_DRM_DDP_COMP_H #include <linux/io.h> +#include <linux/soc/mediatek/mtk-mmsys.h> struct device; struct device_node; @@ -35,39 +36,6 @@ enum mtk_ddp_comp_type { MTK_DDP_COMP_TYPE_MAX, }; -enum mtk_ddp_comp_id { - DDP_COMPONENT_AAL0, - DDP_COMPONENT_AAL1, - DDP_COMPONENT_BLS, - DDP_COMPONENT_CCORR, - DDP_COMPONENT_COLOR0, - DDP_COMPONENT_COLOR1, - DDP_COMPONENT_DITHER, - DDP_COMPONENT_DPI0, - DDP_COMPONENT_DPI1, - DDP_COMPONENT_DSI0, - DDP_COMPONENT_DSI1, - DDP_COMPONENT_DSI2, - DDP_COMPONENT_DSI3, - DDP_COMPONENT_GAMMA, - DDP_COMPONENT_OD0, - DDP_COMPONENT_OD1, - DDP_COMPONENT_OVL0, - DDP_COMPONENT_OVL_2L0, - DDP_COMPONENT_OVL_2L1, - DDP_COMPONENT_OVL1, - DDP_COMPONENT_PWM0, - DDP_COMPONENT_PWM1, - DDP_COMPONENT_PWM2, - DDP_COMPONENT_RDMA0, - DDP_COMPONENT_RDMA1, - DDP_COMPONENT_RDMA2, - DDP_COMPONENT_UFOE, - DDP_COMPONENT_WDMA0, - DDP_COMPONENT_WDMA1, - DDP_COMPONENT_ID_MAX, -}; - struct mtk_ddp_comp; struct cmdq_pkt; struct mtk_ddp_comp_funcs { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index 28a2ee1336ef..280ea0d5e840 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -260,7 +260,7 @@ int mtk_drm_gem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) return -ENOMEM; } - drm_prime_sg_to_page_addr_arrays(sgt, mtk_gem->pages, NULL, npages); + drm_prime_sg_to_page_array(sgt, mtk_gem->pages, npages); mtk_gem->kvaddr = vmap(mtk_gem->pages, npages, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 4a188a942c38..65fd99c528af 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -444,7 +444,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) u32 horizontal_sync_active_byte;
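The mtk_dsi hunk below collapses the duplicated burst/non-burst branches into a single proportional porch adjustment. A worked example with illustrative numbers, assuming 4 lanes, 24bpp (3 bytes per pixel), sync-pulse mode, hfp = 40 px, hbp = 20 px and a resulting data_phy_cycles of 30:

/*
 * horizontal_frontporch_byte = 40 * 3      = 120
 * horizontal_backporch_byte  = 20 * 3 - 10 = 50
 * horizontal_front_back_byte = 120 + 50    = 170
 * data_phy_cycles_byte       = 30 * 4 + 12 = 132   (delta = 12, non-burst)
 *
 * 170 > 132, so both porches shrink in proportion (integer division):
 * horizontal_frontporch_byte -= 132 * 120 / 170  ->  120 - 93 = 27
 * horizontal_backporch_byte  -= 132 * 50 / 170   ->  50 - 38  = 12
 */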
u32 horizontal_backporch_byte; u32 horizontal_frontporch_byte; + u32 horizontal_front_back_byte; + u32 data_phy_cycles_byte; u32 dsi_tmp_buf_bpp, data_phy_cycles; + u32 delta; struct mtk_phy_timing *timing = &dsi->phy_timing; struct videomode *vm = &dsi->vm; @@ -466,50 +469,30 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10); if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) - horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp; + horizontal_backporch_byte = vm->hback_porch * dsi_tmp_buf_bpp - 10; else horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) * - dsi_tmp_buf_bpp; + dsi_tmp_buf_bpp - 10; data_phy_cycles = timing->lpx + timing->da_hs_prepare + - timing->da_hs_zero + timing->da_hs_exit; + timing->da_hs_zero + timing->da_hs_exit + 3; - if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { - if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > - data_phy_cycles * dsi->lanes + 18) { - horizontal_frontporch_byte = - vm->hfront_porch * dsi_tmp_buf_bpp - - (data_phy_cycles * dsi->lanes + 18) * - vm->hfront_porch / - (vm->hfront_porch + vm->hback_porch); + delta = dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST ? 18 : 12; - horizontal_backporch_byte = - horizontal_backporch_byte - - (data_phy_cycles * dsi->lanes + 18) * - vm->hback_porch / - (vm->hfront_porch + vm->hback_porch); - } else { - DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); - horizontal_frontporch_byte = vm->hfront_porch * - dsi_tmp_buf_bpp; - } + horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp; + horizontal_front_back_byte = horizontal_frontporch_byte + horizontal_backporch_byte; + data_phy_cycles_byte = data_phy_cycles * dsi->lanes + delta; + + if (horizontal_front_back_byte > data_phy_cycles_byte) { + horizontal_frontporch_byte -= data_phy_cycles_byte * + horizontal_frontporch_byte / + horizontal_front_back_byte; + + horizontal_backporch_byte -= data_phy_cycles_byte * + horizontal_backporch_byte / + horizontal_front_back_byte; } else { - if ((vm->hfront_porch + vm->hback_porch) * dsi_tmp_buf_bpp > - data_phy_cycles * dsi->lanes + 12) { - horizontal_frontporch_byte = - vm->hfront_porch * dsi_tmp_buf_bpp - - (data_phy_cycles * dsi->lanes + 12) * - vm->hfront_porch / - (vm->hfront_porch + vm->hback_porch); - horizontal_backporch_byte = horizontal_backporch_byte - - (data_phy_cycles * dsi->lanes + 12) * - vm->hback_porch / - (vm->hfront_porch + vm->hback_porch); - } else { - DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); - horizontal_frontporch_byte = vm->hfront_porch * - dsi_tmp_buf_bpp; - } + DRM_WARN("HFP + HBP less than d-phy, FPS will be under 60Hz\n"); } writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC); diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 7f8eea494147..aad75a22dc33 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -145,8 +145,6 @@ struct meson_dw_hdmi { struct reset_control *hdmitx_apb; struct reset_control *hdmitx_ctrl; struct reset_control *hdmitx_phy; - struct clk *hdmi_pclk; - struct clk *venci_clk; struct regulator *hdmi_supply; u32 irq_stat; struct dw_hdmi *hdmi; @@ -946,6 +944,29 @@ static void meson_disable_regulator(void *data) regulator_disable(data); } +static void meson_disable_clk(void *data) +{ + clk_disable_unprepare(data); +} + +static int meson_enable_clk(struct device *dev, char *name) +{ + struct clk *clk; + int ret; + 
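+ /* + * devm_add_action_or_reset() below registers meson_disable_clk() as a + * managed cleanup action: it runs automatically on driver detach, and + * if the registration itself fails the action is invoked immediately, + * so the clock is never left enabled on an error path. + */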
+ clk = devm_clk_get(dev, name); + if (IS_ERR(clk)) { + dev_err(dev, "Unable to get %s pclk\n", name); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (!ret) + ret = devm_add_action_or_reset(dev, meson_disable_clk, clk); + + return ret; +} + static int meson_dw_hdmi_bind(struct device *dev, struct device *master, void *data) { @@ -1026,19 +1047,17 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, if (IS_ERR(meson_dw_hdmi->hdmitx)) return PTR_ERR(meson_dw_hdmi->hdmitx); - meson_dw_hdmi->hdmi_pclk = devm_clk_get(dev, "isfr"); - if (IS_ERR(meson_dw_hdmi->hdmi_pclk)) { - dev_err(dev, "Unable to get HDMI pclk\n"); - return PTR_ERR(meson_dw_hdmi->hdmi_pclk); - } - clk_prepare_enable(meson_dw_hdmi->hdmi_pclk); + ret = meson_enable_clk(dev, "isfr"); + if (ret) + return ret; - meson_dw_hdmi->venci_clk = devm_clk_get(dev, "venci"); - if (IS_ERR(meson_dw_hdmi->venci_clk)) { - dev_err(dev, "Unable to get venci clk\n"); - return PTR_ERR(meson_dw_hdmi->venci_clk); - } - clk_prepare_enable(meson_dw_hdmi->venci_clk); + ret = meson_enable_clk(dev, "iahb"); + if (ret) + return ret; + + ret = meson_enable_clk(dev, "venci"); + if (ret) + return ret; dw_plat_data->regm = devm_regmap_init(dev, NULL, meson_dw_hdmi, &meson_dw_hdmi_regmap_config); @@ -1071,6 +1090,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, encoder->possible_crtcs = BIT(0); + meson_dw_hdmi_init(meson_dw_hdmi); + DRM_DEBUG_DRIVER("encoder initialized\n"); /* Bridge / Connector */ @@ -1095,8 +1116,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, if (IS_ERR(meson_dw_hdmi->hdmi)) return PTR_ERR(meson_dw_hdmi->hdmi); - meson_dw_hdmi_init(meson_dw_hdmi); - next_bridge = of_drm_find_bridge(pdev->dev.of_node); if (next_bridge) drm_bridge_attach(encoder, next_bridge, diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c index 6ccd270789c6..4fd4de16cd32 100644 --- a/drivers/gpu/drm/mga/mga_ioc32.c +++ b/drivers/gpu/drm/mga/mga_ioc32.c @@ -1,4 +1,4 @@ -/** +/* * \file mga_ioc32.c * * 32-bit ioctl compatibility routines for the MGA DRM. @@ -159,13 +159,13 @@ static struct { }; /** - * Called whenever a 32-bit process running under a 64-bit kernel - * performs an ioctl on /dev/dri/card<n>. + * mga_compat_ioctl - Called whenever a 32-bit process running under + * a 64-bit kernel performs an ioctl on /dev/dri/card<n>. * - * \param filp file pointer. - * \param cmd command. - * \param arg user argument. - * \return zero on success or negative number on failure. + * @filp: file pointer. + * @cmd: command. + * @arg: user argument. + * Return: zero on success or negative number on failure. 
*/ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 0f07f259503d..4e4c105f9a50 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -37,7 +37,6 @@ static const struct drm_driver mgag200_driver = { .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, - .gem_create_object = drm_gem_shmem_create_object_cached, DRM_GEM_SHMEM_DRIVER_OPS, }; @@ -48,10 +47,11 @@ static const struct drm_driver mgag200_driver = { static bool mgag200_has_sgram(struct mga_device *mdev) { struct drm_device *dev = &mdev->base; + struct pci_dev *pdev = to_pci_dev(dev->dev); u32 option; int ret; - ret = pci_read_config_dword(dev->pdev, PCI_MGA_OPTION, &option); + ret = pci_read_config_dword(pdev, PCI_MGA_OPTION, &option); if (drm_WARN(dev, ret, "failed to read PCI config dword: %d\n", ret)) return false; @@ -61,6 +61,7 @@ static bool mgag200_has_sgram(struct mga_device *mdev) static int mgag200_regs_init(struct mga_device *mdev) { struct drm_device *dev = &mdev->base; + struct pci_dev *pdev = to_pci_dev(dev->dev); u32 option, option2; u8 crtcext3; @@ -100,13 +101,13 @@ static int mgag200_regs_init(struct mga_device *mdev) } if (option) - pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option); + pci_write_config_dword(pdev, PCI_MGA_OPTION, option); if (option2) - pci_write_config_dword(dev->pdev, PCI_MGA_OPTION2, option2); + pci_write_config_dword(pdev, PCI_MGA_OPTION2, option2); /* BAR 1 contains registers */ - mdev->rmmio_base = pci_resource_start(dev->pdev, 1); - mdev->rmmio_size = pci_resource_len(dev->pdev, 1); + mdev->rmmio_base = pci_resource_start(pdev, 1); + mdev->rmmio_size = pci_resource_len(pdev, 1); if (!devm_request_mem_region(dev->dev, mdev->rmmio_base, mdev->rmmio_size, "mgadrmfb_mmio")) { @@ -114,7 +115,7 @@ static int mgag200_regs_init(struct mga_device *mdev) return -ENOMEM; } - mdev->rmmio = pcim_iomap(dev->pdev, 1, 0); + mdev->rmmio = pcim_iomap(pdev, 1, 0); if (mdev->rmmio == NULL) return -ENOMEM; @@ -219,6 +220,7 @@ static void mgag200_g200_interpret_bios(struct mga_device *mdev, static void mgag200_g200_init_refclk(struct mga_device *mdev) { struct drm_device *dev = &mdev->base; + struct pci_dev *pdev = to_pci_dev(dev->dev); unsigned char __iomem *rom; unsigned char *bios; size_t size; @@ -227,7 +229,7 @@ static void mgag200_g200_init_refclk(struct mga_device *mdev) mdev->model.g200.pclk_max = 230000; mdev->model.g200.ref_clk = 27050; - rom = pci_map_rom(dev->pdev, &size); + rom = pci_map_rom(pdev, &size); if (!rom) return; @@ -245,7 +247,7 @@ static void mgag200_g200_init_refclk(struct mga_device *mdev) vfree(bios); out: - pci_unmap_rom(dev->pdev, rom); + pci_unmap_rom(pdev, rom); } static void mgag200_g200se_init_unique_id(struct mga_device *mdev) @@ -302,7 +304,6 @@ mgag200_device_create(struct pci_dev *pdev, unsigned long flags) return mdev; dev = &mdev->base; - dev->pdev = pdev; pci_set_drvdata(pdev, dev); ret = mgag200_device_init(mdev, flags); diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c index 09731e614e46..ac8e34eef513 100644 --- a/drivers/gpu/drm/mgag200/mgag200_i2c.c +++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c @@ -126,7 +126,7 @@ struct mga_i2c_chan *mgag200_i2c_create(struct drm_device *dev) i2c->clock = clock; i2c->adapter.owner = THIS_MODULE; i2c->adapter.class = I2C_CLASS_DDC; - i2c->adapter.dev.parent = &dev->pdev->dev; + 
i2c->adapter.dev.parent = dev->dev; i2c->dev = dev; i2c_set_adapdata(&i2c->adapter, i2c); snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), "mga i2c"); diff --git a/drivers/gpu/drm/mgag200/mgag200_mm.c b/drivers/gpu/drm/mgag200/mgag200_mm.c index 641f1aa992be..b667371b69a4 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mm.c +++ b/drivers/gpu/drm/mgag200/mgag200_mm.c @@ -78,11 +78,12 @@ static size_t mgag200_probe_vram(struct mga_device *mdev, void __iomem *mem, static void mgag200_mm_release(struct drm_device *dev, void *ptr) { struct mga_device *mdev = to_mga_device(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); mdev->vram_fb_available = 0; iounmap(mdev->vram); - arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), - pci_resource_len(dev->pdev, 0)); + arch_io_free_memtype_wc(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); arch_phys_wc_del(mdev->fb_mtrr); mdev->fb_mtrr = 0; } @@ -90,6 +91,7 @@ static void mgag200_mm_release(struct drm_device *dev, void *ptr) int mgag200_mm_init(struct mga_device *mdev) { struct drm_device *dev = &mdev->base; + struct pci_dev *pdev = to_pci_dev(dev->dev); u8 misc; resource_size_t start, len; int ret; @@ -102,8 +104,8 @@ int mgag200_mm_init(struct mga_device *mdev) WREG8(MGA_MISC_OUT, misc); /* BAR 0 is VRAM */ - start = pci_resource_start(dev->pdev, 0); - len = pci_resource_len(dev->pdev, 0); + start = pci_resource_start(pdev, 0); + len = pci_resource_len(pdev, 0); if (!devm_request_mem_region(dev->dev, start, len, "mgadrmfb_vram")) { drm_err(dev, "can't reserve VRAM\n"); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 82cbaf337b50..45e23125dfdb 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -211,10 +211,8 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj, * address_space (so unmap_mapping_range does what we want, * in particular in the case of mmap'd dmabufs) */ - fput(vma->vm_file); - get_file(obj->filp); vma->vm_pgoff = 0; - vma->vm_file = obj->filp; + vma_set_file(vma, obj->filp); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); } @@ -1213,7 +1211,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, goto fail; } - ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages); + ret = drm_prime_sg_to_page_array(sgt, msm_obj->pages, npages); if (ret) { msm_gem_unlock(obj); goto fail; diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 22ac7c692a81..50d881794758 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -139,7 +139,6 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather, static const struct iommu_flush_ops null_tlb_ops = { .tlb_flush_all = msm_iommu_tlb_flush_all, .tlb_flush_walk = msm_iommu_tlb_flush_walk, - .tlb_flush_leaf = msm_iommu_tlb_flush_walk, .tlb_add_page = msm_iommu_tlb_add_page, }; diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 6faf17b6408d..6da93551e2e5 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -134,11 +134,8 @@ static int mxsfb_attach_bridge(struct mxsfb_drm_private *mxsfb) return -ENODEV; ret = drm_bridge_attach(&mxsfb->encoder, bridge, NULL, 0); - if (ret) { - DRM_DEV_ERROR(drm->dev, - "failed to attach bridge: %d\n", ret); - return ret; - } + if (ret) + return dev_err_probe(drm->dev, ret, "Failed to attach bridge\n"); mxsfb->bridge = bridge; @@ -212,7 +209,8 @@ static int mxsfb_load(struct drm_device *drm, ret = 
mxsfb_attach_bridge(mxsfb); if (ret) { - dev_err(drm->dev, "Cannot connect bridge: %d\n", ret); + if (ret != -EPROBE_DEFER) + dev_err(drm->dev, "Cannot connect bridge: %d\n", ret); goto err_vblank; } diff --git a/drivers/gpu/drm/mxsfb/mxsfb_kms.c b/drivers/gpu/drm/mxsfb/mxsfb_kms.c index a6b3d6e84c52..3e1bb0aefb87 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_kms.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_kms.c @@ -22,6 +22,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fourcc.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane.h> #include <drm/drm_plane_helper.h> #include <drm/drm_vblank.h> @@ -494,11 +495,13 @@ static bool mxsfb_format_mod_supported(struct drm_plane *plane, } static const struct drm_plane_helper_funcs mxsfb_plane_primary_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = mxsfb_plane_atomic_check, .atomic_update = mxsfb_plane_primary_atomic_update, }; static const struct drm_plane_helper_funcs mxsfb_plane_overlay_helper_funcs = { + .prepare_fb = drm_gem_fb_prepare_fb, .atomic_check = mxsfb_plane_atomic_check, .atomic_update = mxsfb_plane_overlay_atomic_update, }; diff --git a/drivers/gpu/drm/nouveau/dispnv04/arb.c b/drivers/gpu/drm/nouveau/dispnv04/arb.c index 9d4a2d97507e..1d3542d6006b 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/arb.c +++ b/drivers/gpu/drm/nouveau/dispnv04/arb.c @@ -200,16 +200,17 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp, int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY); int NVClk = nouveau_hw_get_clock(dev, PLL_CORE); uint32_t cfg1 = nvif_rd32(device, NV04_PFB_CFG1); + struct pci_dev *pdev = to_pci_dev(dev->dev); sim_data.pclk_khz = VClk; sim_data.mclk_khz = MClk; sim_data.nvclk_khz = NVClk; sim_data.bpp = bpp; sim_data.two_heads = nv_two_heads(dev); - if ((dev->pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ || - (dev->pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) { + if ((pdev->device & 0xffff) == 0x01a0 /*CHIPSET_NFORCE*/ || + (pdev->device & 0xffff) == 0x01f0 /*CHIPSET_NFORCE2*/) { uint32_t type; - int domain = pci_domain_nr(dev->pdev->bus); + int domain = pci_domain_nr(pdev->bus); pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 1), 0x7c, &type); @@ -251,11 +252,12 @@ void nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm) { struct nouveau_drm *drm = nouveau_drm(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN) nv04_update_arb(dev, vclk, bpp, burst, lwm); - else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || - (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { + else if ((pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ || + (pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) { *burst = 128; *lwm = 0x0480; } else diff --git a/drivers/gpu/drm/nouveau/dispnv04/dfp.c b/drivers/gpu/drm/nouveau/dispnv04/dfp.c index 42687ea2a4ca..ce3d8c6ef000 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/dfp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/dfp.c @@ -488,12 +488,13 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode) #ifdef __powerpc__ struct drm_device *dev = encoder->dev; struct nvif_object *device = &nouveau_drm(dev)->client.device.object; + struct pci_dev *pdev = to_pci_dev(dev->dev); /* BIOS scripts usually take care of the backlight, thanks * Apple for your consistency. 
*/ - if (dev->pdev->device == 0x0174 || dev->pdev->device == 0x0179 || - dev->pdev->device == 0x0189 || dev->pdev->device == 0x0329) { + if (pdev->device == 0x0174 || pdev->device == 0x0179 || + pdev->device == 0x0189 || pdev->device == 0x0329) { if (mode == DRM_MODE_DPMS_ON) { nvif_mask(device, NV_PBUS_DEBUG_DUALHEAD_CTL, 1 << 31, 1 << 31); nvif_mask(device, NV_PCRTC_GPIO_EXT, 3, 1); diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index 5ace5e906949..f0a24126641a 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h @@ -130,7 +130,7 @@ static inline bool nv_two_heads(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - const int impl = dev->pdev->device & 0x0ff0; + const int impl = to_pci_dev(dev->dev)->device & 0x0ff0; if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 && impl != 0x0150 && impl != 0x01a0 && impl != 0x0200) @@ -142,14 +142,14 @@ nv_two_heads(struct drm_device *dev) static inline bool nv_gf4_disp_arch(struct drm_device *dev) { - return nv_two_heads(dev) && (dev->pdev->device & 0x0ff0) != 0x0110; + return nv_two_heads(dev) && (to_pci_dev(dev->dev)->device & 0x0ff0) != 0x0110; } static inline bool nv_two_reg_pll(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - const int impl = dev->pdev->device & 0x0ff0; + const int impl = to_pci_dev(dev->dev)->device & 0x0ff0; if (impl == 0x0310 || impl == 0x0340 || drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE) return true; @@ -160,9 +160,11 @@ static inline bool nv_match_device(struct drm_device *dev, unsigned device, unsigned sub_vendor, unsigned sub_device) { - return dev->pdev->device == device && - dev->pdev->subsystem_vendor == sub_vendor && - dev->pdev->subsystem_device == sub_device; + struct pci_dev *pdev = to_pci_dev(dev->dev); + + return pdev->device == device && + pdev->subsystem_vendor == sub_vendor && + pdev->subsystem_device == sub_device; } #include <subdev/bios/init.h> diff --git a/drivers/gpu/drm/nouveau/dispnv04/hw.c b/drivers/gpu/drm/nouveau/dispnv04/hw.c index b674d68ef28a..f7d35657aa64 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/hw.c +++ b/drivers/gpu/drm/nouveau/dispnv04/hw.c @@ -214,14 +214,15 @@ nouveau_hw_pllvals_to_clk(struct nvkm_pll_vals *pv) int nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) { + struct pci_dev *pdev = to_pci_dev(dev->dev); struct nvkm_pll_vals pllvals; int ret; int domain; - domain = pci_domain_nr(dev->pdev->bus); + domain = pci_domain_nr(pdev->bus); if (plltype == PLL_MEMORY && - (dev->pdev->device & 0x0ff0) == CHIPSET_NFORCE) { + (pdev->device & 0x0ff0) == CHIPSET_NFORCE) { uint32_t mpllP; pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 3), 0x6c, &mpllP); @@ -232,7 +233,7 @@ nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype) return 400000 / mpllP; } else if (plltype == PLL_MEMORY && - (dev->pdev->device & 0xff0) == CHIPSET_NFORCE2) { + (pdev->device & 0xff0) == CHIPSET_NFORCE2) { uint32_t clock; pci_read_config_dword(pci_get_domain_bus_and_slot(domain, 0, 5), @@ -309,6 +310,7 @@ void nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save) { struct nouveau_drm *drm = nouveau_drm(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); uint8_t misc, gr4, gr5, gr6, seq2, seq4; bool graphicsmode; unsigned plane; @@ -327,7 +329,7 @@ nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save) NV_INFO(drm, "%sing VGA fonts\n", save ? 
"Sav" : "Restor"); /* map first 64KiB of VRAM, holds VGA fonts etc */ - iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536); + iovram = ioremap(pci_resource_start(pdev, 1), 65536); if (!iovram) { NV_ERROR(drm, "Failed to map VRAM, " "cannot save/restore VGA fonts.\n"); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index b111fe24a06b..33fff388dd83 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -32,6 +32,7 @@ #include <linux/hdmi.h> #include <linux/component.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_dp_helper.h> #include <drm/drm_edid.h> @@ -455,7 +456,7 @@ nv50_outp_get_old_connector(struct nouveau_encoder *outp, * DAC *****************************************************************************/ static void -nv50_dac_disable(struct drm_encoder *encoder) +nv50_dac_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nv50_core *core = nv50_disp(encoder->dev)->core; @@ -467,7 +468,7 @@ nv50_dac_disable(struct drm_encoder *encoder) } static void -nv50_dac_enable(struct drm_encoder *encoder) +nv50_dac_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); @@ -525,8 +526,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) static const struct drm_encoder_helper_funcs nv50_dac_help = { .atomic_check = nv50_outp_atomic_check, - .enable = nv50_dac_enable, - .disable = nv50_dac_disable, + .atomic_enable = nv50_dac_enable, + .atomic_disable = nv50_dac_disable, .detect = nv50_dac_detect }; @@ -1055,7 +1056,7 @@ nv50_dp_bpc_to_depth(unsigned int bpc) } static void -nv50_msto_enable(struct drm_encoder *encoder) +nv50_msto_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nv50_head *head = nv50_head(encoder->crtc); struct nv50_head_atom *armh = nv50_head_atom(head->base.base.state); @@ -1101,7 +1102,7 @@ nv50_msto_enable(struct drm_encoder *encoder) } static void -nv50_msto_disable(struct drm_encoder *encoder) +nv50_msto_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nv50_msto *msto = nv50_msto(encoder); struct nv50_mstc *mstc = msto->mstc; @@ -1118,8 +1119,8 @@ nv50_msto_disable(struct drm_encoder *encoder) static const struct drm_encoder_helper_funcs nv50_msto_help = { - .disable = nv50_msto_disable, - .enable = nv50_msto_enable, + .atomic_disable = nv50_msto_disable, + .atomic_enable = nv50_msto_enable, .atomic_check = nv50_msto_atomic_check, }; @@ -1161,8 +1162,10 @@ nv50_msto_new(struct drm_device *dev, struct nv50_head *head, int id) static struct drm_encoder * nv50_mstc_atomic_best_encoder(struct drm_connector *connector, - struct drm_connector_state *connector_state) + struct drm_atomic_state *state) { + struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, + connector); struct nv50_mstc *mstc = nv50_mstc(connector); struct drm_crtc *crtc = connector_state->crtc; @@ -1645,8 +1648,7 @@ nv50_sor_disable(struct drm_encoder *encoder, } static void -nv50_sor_enable(struct drm_encoder *encoder, - struct drm_atomic_state *state) +nv50_sor_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = 
nouveau_crtc(encoder->crtc); @@ -1873,7 +1875,7 @@ nv50_pior_atomic_check(struct drm_encoder *encoder, } static void -nv50_pior_disable(struct drm_encoder *encoder) +nv50_pior_disable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nv50_core *core = nv50_disp(encoder->dev)->core; @@ -1885,7 +1887,7 @@ nv50_pior_disable(struct drm_encoder *encoder) } static void -nv50_pior_enable(struct drm_encoder *encoder) +nv50_pior_enable(struct drm_encoder *encoder, struct drm_atomic_state *state) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); @@ -1921,14 +1923,14 @@ nv50_pior_enable(struct drm_encoder *encoder) } core->func->pior->ctrl(core, nv_encoder->or, ctrl, asyh); - nv_encoder->crtc = encoder->crtc; + nv_encoder->crtc = &nv_crtc->base; } static const struct drm_encoder_helper_funcs nv50_pior_help = { .atomic_check = nv50_pior_atomic_check, - .enable = nv50_pior_enable, - .disable = nv50_pior_disable, + .atomic_enable = nv50_pior_enable, + .atomic_disable = nv50_pior_disable, }; static void diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c index 537c1ef2e464..ec361d17e900 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head.c @@ -503,7 +503,6 @@ nv50_head_destroy(struct drm_crtc *crtc) static const struct drm_crtc_funcs nv50_head_func = { .reset = nv50_head_reset, - .gamma_set = drm_atomic_helper_legacy_gamma_set, .destroy = nv50_head_destroy, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, @@ -518,7 +517,6 @@ nv50_head_func = { static const struct drm_crtc_funcs nvd9_head_func = { .reset = nv50_head_reset, - .gamma_set = drm_atomic_helper_legacy_gamma_set, .destroy = nv50_head_destroy, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index 9a5be6f32424..f08b31d84d4d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -181,6 +181,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) struct nvif_device *device = &drm->client.device; struct nvkm_gr *gr = nvxx_gr(device); struct drm_nouveau_getparam *getparam = data; + struct pci_dev *pdev = to_pci_dev(dev->dev); switch (getparam->param) { case NOUVEAU_GETPARAM_CHIPSET_ID: @@ -188,13 +189,13 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) break; case NOUVEAU_GETPARAM_PCI_VENDOR: if (device->info.platform != NV_DEVICE_INFO_V0_SOC) - getparam->value = dev->pdev->vendor; + getparam->value = pdev->vendor; else getparam->value = 0; break; case NOUVEAU_GETPARAM_PCI_DEVICE: if (device->info.platform != NV_DEVICE_INFO_V0_SOC) - getparam->value = dev->pdev->device; + getparam->value = pdev->device; else getparam->value = 0; break; @@ -205,7 +206,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS) case NV_DEVICE_INFO_V0_PCIE: getparam->value = 2; break; case NV_DEVICE_INFO_V0_SOC : getparam->value = 3; break; case NV_DEVICE_INFO_V0_IGP : - if (!pci_is_pcie(dev->pdev)) + if (!pci_is_pcie(pdev)) getparam->value = 1; else getparam->value = 2; diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c index 69a84d0197d0..7c15f6448428 100644 --- a/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -377,7 +377,7 @@ 
nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) return NULL; } - handle = ACPI_HANDLE(&dev->pdev->dev); + handle = ACPI_HANDLE(dev->dev); if (!handle) return NULL; diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index d204ea8a5618..7cc683b8dc7a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c @@ -110,6 +110,9 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp struct nvbios *bios = &drm->vbios; uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & DCB_OUTPUT_C ? 1 : 0); uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]); +#ifdef __powerpc__ + struct pci_dev *pdev = to_pci_dev(dev->dev); +#endif if (!bios->fp.xlated_entry || !sub || !scriptofs) return -EINVAL; @@ -123,8 +126,8 @@ static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_outp #ifdef __powerpc__ /* Powerbook specific quirks */ if (script == LVDS_RESET && - (dev->pdev->device == 0x0179 || dev->pdev->device == 0x0189 || - dev->pdev->device == 0x0329)) + (pdev->device == 0x0179 || pdev->device == 0x0189 || + pdev->device == 0x0329)) nv_write_tmds(dev, dcbent->or, 0, 0x02, 0x72); #endif @@ -2080,11 +2083,13 @@ nouveau_bios_init(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); struct nvbios *bios = &drm->vbios; + struct pci_dev *pdev; int ret; /* only relevant for PCI devices */ - if (!dev->pdev) + if (!dev_is_pci(dev->dev)) return 0; + pdev = to_pci_dev(dev->dev); if (!NVInitVBIOS(dev)) return -ENODEV; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 62a4fdffd0ae..33dc886d1d6d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -473,10 +473,10 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig) switch (bo->mem.mem_type) { case TTM_PL_VRAM: - drm->gem.vram_available -= bo->mem.size; + drm->gem.vram_available -= bo->base.size; break; case TTM_PL_TT: - drm->gem.gart_available -= bo->mem.size; + drm->gem.gart_available -= bo->base.size; break; default: break; @@ -504,10 +504,10 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) if (!nvbo->bo.pin_count) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: - drm->gem.vram_available += bo->mem.size; + drm->gem.vram_available += bo->base.size; break; case TTM_PL_TT: - drm->gem.gart_available += bo->mem.size; + drm->gem.gart_available += bo->base.size; break; default: break; @@ -774,7 +774,10 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, return ret; } - mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); + if (drm_drv_uses_atomic_modeset(drm->dev)) + mutex_lock(&cli->mutex); + else + mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING); ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible); if (ret == 0) { ret = drm->ttm.move(chan, bo, &bo->mem, new_reg); @@ -910,7 +913,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_resource *new_reg, return 0; if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) { - *new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size, + *new_tile = nv10_bo_set_tiling(dev, offset, bo->base.size, nvbo->mode, nvbo->zeta); } @@ -942,16 +945,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, struct nouveau_drm_tile *new_tile = NULL; int ret = 0; - if ((old_reg->mem_type == TTM_PL_SYSTEM && - new_reg->mem_type == TTM_PL_VRAM) || - 
(old_reg->mem_type == TTM_PL_VRAM && - new_reg->mem_type == TTM_PL_SYSTEM)) { - hop->fpfn = 0; - hop->lpfn = 0; - hop->mem_type = TTM_PL_TT; - hop->flags = 0; - return -EMULTIHOP; - } if (new_reg->mem_type == TTM_PL_TT) { ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); @@ -995,14 +988,25 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, /* Hardware assisted copy. */ if (drm->ttm.move) { + if ((old_reg->mem_type == TTM_PL_SYSTEM && + new_reg->mem_type == TTM_PL_VRAM) || + (old_reg->mem_type == TTM_PL_VRAM && + new_reg->mem_type == TTM_PL_SYSTEM)) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = 0; + return -EMULTIHOP; + } ret = nouveau_bo_move_m2mf(bo, evict, ctx, new_reg); - if (!ret) - goto out; - } + } else + ret = -ENODEV; - /* Fallback to software copy. */ - ret = ttm_bo_move_memcpy(bo, ctx, new_reg); + if (ret) { + /* Fallback to software copy. */ + ret = ttm_bo_move_memcpy(bo, ctx, new_reg); + } out: if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { @@ -1131,8 +1135,8 @@ retry: } reg->bus.offset = handle; - ret = 0; } + ret = 0; break; default: ret = -EINVAL; @@ -1231,9 +1235,8 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev, return 0; if (slave && ttm->sg) { - /* make userspace faulting work */ - drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, - ttm_dma->dma_address, ttm->num_pages); + drm_prime_sg_to_dma_addr_array(ttm->sg, ttm_dma->dma_address, + ttm->num_pages); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 6f21f36719fc..14c29e68db8f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -411,6 +411,7 @@ static struct nouveau_encoder * nouveau_connector_ddc_detect(struct drm_connector *connector) { struct drm_device *dev = connector->dev; + struct pci_dev *pdev = to_pci_dev(dev->dev); struct nouveau_encoder *nv_encoder = NULL, *found = NULL; struct drm_encoder *encoder; int ret; @@ -438,11 +439,11 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) break; if (switcheroo_ddc) - vga_switcheroo_lock_ddc(dev->pdev); + vga_switcheroo_lock_ddc(pdev); if (nvkm_probe_i2c(nv_encoder->i2c, 0x50)) found = nv_encoder; if (switcheroo_ddc) - vga_switcheroo_unlock_ddc(dev->pdev); + vga_switcheroo_unlock_ddc(pdev); break; } @@ -490,6 +491,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector, struct nouveau_connector *nv_connector = nouveau_connector(connector); struct nouveau_drm *drm = nouveau_drm(connector->dev); struct drm_device *dev = connector->dev; + struct pci_dev *pdev = to_pci_dev(dev->dev); if (nv_connector->detected_encoder == nv_encoder) return; @@ -511,8 +513,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector, connector->doublescan_allowed = true; if (drm->client.device.info.family == NV_DEVICE_INFO_V0_KELVIN || (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && - (dev->pdev->device & 0x0ff0) != 0x0100 && - (dev->pdev->device & 0x0ff0) != 0x0150)) + (pdev->device & 0x0ff0) != 0x0100 && + (pdev->device & 0x0ff0) != 0x0150)) /* HW is broken */ connector->interlace_allowed = false; else @@ -532,11 +534,13 @@ static void nouveau_connector_set_edid(struct nouveau_connector *nv_connector, struct edid *edid) { - struct edid *old_edid = nv_connector->edid; + if (nv_connector->edid != edid) { + struct edid *old_edid = nv_connector->edid; - drm_connector_update_edid_property(&nv_connector->base, edid); - kfree(old_edid); - 
nv_connector->edid = edid; + drm_connector_update_edid_property(&nv_connector->base, edid); + kfree(old_edid); + nv_connector->edid = edid; + } } static enum drm_connector_status @@ -669,8 +673,10 @@ nouveau_connector_detect_lvds(struct drm_connector *connector, bool force) /* Try retrieving EDID via DDC */ if (!drm->vbios.fp_no_ddc) { status = nouveau_connector_detect(connector, force); - if (status == connector_status_connected) + if (status == connector_status_connected) { + edid = nv_connector->edid; goto out; + } } /* On some laptops (Sony, i'm looking at you) there appears to diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index bceb48a2dfca..17831ee897ea 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -286,11 +286,11 @@ nouveau_check_bl_size(struct nouveau_drm *drm, struct nouveau_bo *nvbo, bl_size = bw * bh * (1 << tile_mode) * gob_size; - DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%lu\n", + DRM_DEBUG_KMS("offset=%u stride=%u h=%u tile_mode=0x%02x bw=%u bh=%u gob_size=%u bl_size=%llu size=%zu\n", offset, stride, h, tile_mode, bw, bh, gob_size, bl_size, - nvbo->bo.mem.size); + nvbo->bo.base.size); - if (bl_size + offset > nvbo->bo.mem.size) + if (bl_size + offset > nvbo->bo.base.size) return -ERANGE; return 0; @@ -363,7 +363,7 @@ nouveau_framebuffer_new(struct drm_device *dev, } else { uint32_t size = mode_cmd->pitches[i] * height; - if (size + mode_cmd->offsets[i] > nvbo->bo.mem.size) + if (size + mode_cmd->offsets[i] > nvbo->bo.base.size) return -ERANGE; } } diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index d141a5f004af..1b2169e9c295 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -115,8 +115,8 @@ nouveau_platform_name(struct platform_device *platformdev) static u64 nouveau_name(struct drm_device *dev) { - if (dev->pdev) - return nouveau_pci_name(dev->pdev); + if (dev_is_pci(dev->dev)) + return nouveau_pci_name(to_pci_dev(dev->dev)); else return nouveau_platform_name(to_platform_device(dev->dev)); } @@ -760,7 +760,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev, if (ret) goto fail_drm; - drm_dev->pdev = pdev; pci_set_drvdata(pdev, drm_dev); ret = nouveau_drm_device_init(drm_dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 9d04d1b36434..c802d3d1ba39 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h @@ -55,7 +55,6 @@ #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> #include <drm/ttm/ttm_memory.h> -#include <drm/ttm/ttm_module.h> #include <drm/drm_audio_component.h> diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 24ec5339efb4..4fc0fa696461 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -396,7 +396,9 @@ nouveau_fbcon_create(struct drm_fb_helper *helper, NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n", fb->width, fb->height, nvbo->offset, nvbo); - vga_switcheroo_client_fb_set(dev->pdev, info); + if (dev_is_pci(dev->dev)) + vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info); + return 0; out_unlock: @@ -548,7 +550,7 @@ nouveau_fbcon_init(struct drm_device *dev) int ret; if (!dev->mode_config.num_crtc || - (dev->pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA) + (to_pci_dev(dev->dev)->class >> 8) 
!= PCI_CLASS_DISPLAY_VGA) return 0; fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 787d05eefd9c..c88cbb85f101 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -572,8 +572,10 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan, NV_PRINTK(err, cli, "validating bo list\n"); validate_fini(op, chan, NULL, NULL); return ret; + } else if (ret > 0) { + *apply_relocs = true; } - *apply_relocs = ret; + return 0; } @@ -676,7 +678,6 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data); } - u_free(reloc); return ret; } @@ -886,9 +887,10 @@ out: break; } } - u_free(reloc); } out_prevalid: + if (!IS_ERR(reloc)) + u_free(reloc); u_free(bo); u_free(push); diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index 2f16b5249283..347488685f74 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -30,9 +30,9 @@ struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj) { struct nouveau_bo *nvbo = nouveau_gem_object(obj); - int npages = nvbo->bo.num_pages; - return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, npages); + return drm_prime_pages_to_sg(obj->dev, nvbo->bo.ttm->pages, + nvbo->bo.ttm->num_pages); } struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev, diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index a2e23fd4906c..1cf52635ea74 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -84,7 +84,7 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags) if (!nvbe) return NULL; - if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags, caching)) { + if (ttm_sg_tt_init(&nvbe->ttm, bo, page_flags, caching)) { kfree(nvbe); return NULL; } diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c index c85dd8afa3c3..7c4b374b3eca 100644 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c @@ -87,18 +87,20 @@ nouveau_vga_init(struct nouveau_drm *drm) { struct drm_device *dev = drm->dev; bool runtime = nouveau_pmops_runtime(); + struct pci_dev *pdev; /* only relevant for PCI devices */ - if (!dev->pdev) + if (!dev_is_pci(dev->dev)) return; + pdev = to_pci_dev(dev->dev); - vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode); + vga_client_register(pdev, dev, NULL, nouveau_vga_set_decode); /* don't register Thunderbolt eGPU with vga_switcheroo */ - if (pci_is_thunderbolt_attached(dev->pdev)) + if (pci_is_thunderbolt_attached(pdev)) return; - vga_switcheroo_register_client(dev->pdev, &nouveau_switcheroo_ops, runtime); + vga_switcheroo_register_client(pdev, &nouveau_switcheroo_ops, runtime); if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain); @@ -109,17 +111,19 @@ nouveau_vga_fini(struct nouveau_drm *drm) { struct drm_device *dev = drm->dev; bool runtime = nouveau_pmops_runtime(); + struct pci_dev *pdev; /* only relevant for PCI devices */ - if (!dev->pdev) + if (!dev_is_pci(dev->dev)) return; + pdev = to_pci_dev(dev->dev); - vga_client_register(dev->pdev, NULL, NULL, NULL); + vga_client_register(pdev, NULL, NULL, NULL); - if (pci_is_thunderbolt_attached(dev->pdev)) + if 
(pci_is_thunderbolt_attached(pdev)) return; - vga_switcheroo_unregister_client(dev->pdev); + vga_switcheroo_unregister_client(pdev); if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus()) vga_switcheroo_fini_domain_pm_ops(drm->dev->dev); } diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 1253fdec712d..b1cd8d7dd87d 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c @@ -80,7 +80,7 @@ nv17_fence_context_new(struct nouveau_channel *chan) struct nv10_fence_chan *fctx; struct ttm_resource *reg = &priv->bo->bo.mem; u32 start = reg->start * PAGE_SIZE; - u32 limit = start + reg->size - 1; + u32 limit = start + priv->bo->bo.base.size - 1; int ret = 0; fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index 447238e3cbe7..1625826505f6 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c @@ -39,7 +39,7 @@ nv50_fence_context_new(struct nouveau_channel *chan) struct nv10_fence_chan *fctx; struct ttm_resource *reg = &priv->bo->bo.mem; u32 start = reg->start * PAGE_SIZE; - u32 limit = start + reg->size - 1; + u32 limit = start + priv->bo->bo.base.size - 1; int ret; fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig index 5417e7a47072..e7281da5bc6a 100644 --- a/drivers/gpu/drm/omapdrm/Kconfig +++ b/drivers/gpu/drm/omapdrm/Kconfig @@ -5,13 +5,129 @@ config DRM_OMAP depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM select OMAP2_DSS select DRM_KMS_HELPER + select VIDEOMODE_HELPERS + select HDMI default n help DRM display driver for OMAP2/3/4 based boards. if DRM_OMAP -source "drivers/gpu/drm/omapdrm/dss/Kconfig" -source "drivers/gpu/drm/omapdrm/displays/Kconfig" +config OMAP2_DSS_DEBUG + bool "Debug support" + default n + help + This enables printing of debug messages. Alternatively, debug messages + can also be enabled by setting CONFIG_DYNAMIC_DEBUG and then setting + appropriate flags in <debugfs>/dynamic_debug/control. + +config OMAP2_DSS_DEBUGFS + bool "Debugfs filesystem support" + depends on DEBUG_FS + default n + help + This enables debugfs for OMAPDSS at <debugfs>/omapdss. This enables + querying about clock configuration and register configuration of dss, + dispc, dsi, hdmi and rfbi. + +config OMAP2_DSS_COLLECT_IRQ_STATS + bool "Collect DSS IRQ statistics" + depends on OMAP2_DSS_DEBUGFS + default n + help + Collect DSS IRQ statistics, printable via debugfs. + + The statistics can be found from + <debugfs>/omapdss/dispc_irq for DISPC interrupts, and + <debugfs>/omapdss/dsi_irq for DSI interrupts. + +config OMAP2_DSS_DPI + bool "DPI support" + default y + help + DPI Interface. This is the Parallel Display Interface. + +config OMAP2_DSS_VENC + bool "VENC support" + default y + help + OMAP Video Encoder support for S-Video and composite TV-out. + +config OMAP2_DSS_HDMI_COMMON + bool + +config OMAP4_DSS_HDMI + bool "HDMI support for OMAP4" + default y + select OMAP2_DSS_HDMI_COMMON + help + HDMI support for OMAP4 based SoCs. + +config OMAP4_DSS_HDMI_CEC + bool "Enable HDMI CEC support for OMAP4" + depends on OMAP4_DSS_HDMI + select CEC_CORE + default y + help + When selected the HDMI transmitter will support the CEC feature. + +config OMAP5_DSS_HDMI + bool "HDMI support for OMAP5" + default n + select OMAP2_DSS_HDMI_COMMON + help + HDMI Interface for OMAP5 and similar cores. 
This adds the High + Definition Multimedia Interface. See http://www.hdmi.org/ for HDMI + specification. + +config OMAP2_DSS_SDI + bool "SDI support" + default n + help + SDI (Serial Display Interface) support. + + SDI is a high speed one-way display serial bus between the host + processor and a display. + +config OMAP2_DSS_DSI + bool "DSI support" + default n + select DRM_MIPI_DSI + help + MIPI DSI (Display Serial Interface) support. + + DSI is a high speed half-duplex serial interface between the host + processor and a peripheral, such as a display or a framebuffer chip. + + See http://www.mipi.org/ for DSI specifications. + +config OMAP2_DSS_MIN_FCK_PER_PCK + int "Minimum FCK/PCK ratio (for scaling)" + range 0 32 + default 0 + help + This can be used to adjust the minimum FCK/PCK ratio. + + With this you can make sure that DISPC FCK is at least + n x PCK. Video plane scaling requires higher FCK than + normally. + + If this is set to 0, there's no extra constraint on the + DISPC FCK. However, the FCK will at minimum be + 2xPCK (if active matrix) or 3xPCK (if passive matrix). + + Max FCK is 173MHz, so this doesn't work if your PCK + is very high. + +config OMAP2_DSS_SLEEP_AFTER_VENC_RESET + bool "Sleep 20ms after VENC reset" + default y + help + There is a 20ms sleep after VENC reset which seemed to fix the + reset. The reason for the bug is unclear, and it's also unclear + on what platforms this happens. + + This option enables the sleep, and is enabled by default. You can + disable the sleep if it doesn't cause problems on your platform. endif diff --git a/drivers/gpu/drm/omapdrm/Makefile b/drivers/gpu/drm/omapdrm/Makefile index f115253115c5..21e8277ff88f 100644 --- a/drivers/gpu/drm/omapdrm/Makefile +++ b/drivers/gpu/drm/omapdrm/Makefile @@ -4,16 +4,12 @@ # Direct Rendering Infrastructure (DRI) # -obj-y += dss/ -obj-y += displays/ - omapdrm-y := omap_drv.o \ omap_irq.o \ omap_debugfs.o \ omap_crtc.o \ omap_plane.o \ omap_encoder.o \ - omap_connector.o \ omap_fb.o \ omap_gem.o \ omap_gem_dmabuf.o \ @@ -22,4 +18,17 @@ omapdrm-y := omap_drv.o \ omapdrm-$(CONFIG_DRM_FBDEV_EMULATION) += omap_fbdev.o -obj-$(CONFIG_DRM_OMAP) += omapdrm.o +omapdrm-y += dss/base.o dss/output.o dss/dss.o dss/dispc.o \ + dss/dispc_coefs.o dss/pll.o dss/video-pll.o +omapdrm-$(CONFIG_OMAP2_DSS_DPI) += dss/dpi.o +omapdrm-$(CONFIG_OMAP2_DSS_VENC) += dss/venc.o +omapdrm-$(CONFIG_OMAP2_DSS_SDI) += dss/sdi.o +omapdrm-$(CONFIG_OMAP2_DSS_DSI) += dss/dsi.o +omapdrm-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += dss/hdmi_common.o dss/hdmi_wp.o \ + dss/hdmi_pll.o dss/hdmi_phy.o +omapdrm-$(CONFIG_OMAP4_DSS_HDMI) += dss/hdmi4.o dss/hdmi4_core.o +omapdrm-$(CONFIG_OMAP4_DSS_HDMI_CEC) += dss/hdmi4_cec.o +omapdrm-$(CONFIG_OMAP5_DSS_HDMI) += dss/hdmi5.o dss/hdmi5_core.o +ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG + +obj-$(CONFIG_DRM_OMAP) += omapdrm.o diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig deleted file mode 100644 index f2be594c7eff..000000000000 --- a/drivers/gpu/drm/omapdrm/displays/Kconfig +++ /dev/null @@ -1,10 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -menu "OMAPDRM External Display Device Drivers" - -config DRM_OMAP_PANEL_DSI_CM - tristate "Generic DSI Command Mode Panel" - depends on BACKLIGHT_CLASS_DEVICE - help - Driver for generic DSI command mode panels. 
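/*
 * Worked example (not part of the patch): the OMAP2_DSS_MIN_FCK_PER_PCK help
 * text above encodes an arithmetic constraint: DISPC's functional clock must
 * be at least ratio x PCK (2x for active matrix, 3x for passive matrix when
 * the option is 0), and FCK tops out at 173 MHz. A self-contained check under
 * those numbers; pck_achievable() is a hypothetical helper, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

#define DISPC_MAX_FCK	173000000UL	/* 173 MHz ceiling from the help text */

static bool pck_achievable(unsigned long pck, unsigned long min_ratio,
			   bool active_matrix)
{
	unsigned long floor = active_matrix ? 2 : 3;

	if (min_ratio > floor)
		floor = min_ratio;	/* CONFIG-supplied FCK/PCK floor */

	return pck * floor <= DISPC_MAX_FCK;
}

int main(void)
{
	/* 148.5 MHz (1080p60) needs >= 297 MHz FCK: not achievable */
	printf("%d\n", pck_achievable(148500000UL, 0, true));
	/* 74.25 MHz (720p60) needs >= 148.5 MHz FCK: fits */
	printf("%d\n", pck_achievable(74250000UL, 0, true));
	return 0;
}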
- -endmenu diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile deleted file mode 100644 index 488ddf153613..000000000000 --- a/drivers/gpu/drm/omapdrm/displays/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c deleted file mode 100644 index e39ce0c0c9a9..000000000000 --- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c +++ /dev/null @@ -1,1385 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Generic DSI Command Mode panel driver - * - * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ - * Author: Tomi Valkeinen <[email protected]> - */ - -/* #define DEBUG */ - -#include <linux/backlight.h> -#include <linux/delay.h> -#include <linux/gpio/consumer.h> -#include <linux/interrupt.h> -#include <linux/jiffies.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/sched/signal.h> -#include <linux/slab.h> -#include <linux/workqueue.h> -#include <linux/of_device.h> -#include <linux/regulator/consumer.h> - -#include <drm/drm_connector.h> - -#include <video/mipi_display.h> -#include <video/of_display_timing.h> - -#include "../dss/omapdss.h" - -/* DSI Virtual channel. Hardcoded for now. */ -#define TCH 0 - -#define DCS_READ_NUM_ERRORS 0x05 -#define DCS_BRIGHTNESS 0x51 -#define DCS_CTRL_DISPLAY 0x53 -#define DCS_GET_ID1 0xda -#define DCS_GET_ID2 0xdb -#define DCS_GET_ID3 0xdc - -struct panel_drv_data { - struct omap_dss_device dssdev; - struct omap_dss_device *src; - - struct videomode vm; - - struct platform_device *pdev; - - struct mutex lock; - - struct backlight_device *bldev; - struct backlight_device *extbldev; - - unsigned long hw_guard_end; /* next value of jiffies when we can - * issue the next sleep in/out command - */ - unsigned long hw_guard_wait; /* max guard time in jiffies */ - - /* panel HW configuration from DT or platform data */ - struct gpio_desc *reset_gpio; - struct gpio_desc *ext_te_gpio; - - struct regulator *vpnl; - struct regulator *vddi; - - bool use_dsi_backlight; - - int width_mm; - int height_mm; - - struct omap_dsi_pin_config pin_config; - - /* runtime variables */ - bool enabled; - - bool te_enabled; - - atomic_t do_update; - int channel; - - struct delayed_work te_timeout_work; - - bool intro_printed; - - struct workqueue_struct *workqueue; - - bool ulps_enabled; - unsigned int ulps_timeout; - struct delayed_work ulps_work; -}; - -#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) - -static irqreturn_t dsicm_te_isr(int irq, void *data); -static void dsicm_te_timeout_work_callback(struct work_struct *work); -static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable); - -static int dsicm_panel_reset(struct panel_drv_data *ddata); - -static void dsicm_ulps_work(struct work_struct *work); - -static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable) -{ - struct backlight_device *backlight; - - if (ddata->bldev) - backlight = ddata->bldev; - else if (ddata->extbldev) - backlight = ddata->extbldev; - else - return; - - if (enable) { - backlight->props.fb_blank = FB_BLANK_UNBLANK; - backlight->props.state = ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED); - backlight->props.power = FB_BLANK_UNBLANK; - } else { - backlight->props.fb_blank = FB_BLANK_NORMAL; - backlight->props.power = FB_BLANK_POWERDOWN; - backlight->props.state |= BL_CORE_FBBLANK | 
BL_CORE_SUSPENDED; - } - - backlight_update_status(backlight); -} - -static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec) -{ - ddata->hw_guard_wait = msecs_to_jiffies(guard_msec); - ddata->hw_guard_end = jiffies + ddata->hw_guard_wait; -} - -static void hw_guard_wait(struct panel_drv_data *ddata) -{ - unsigned long wait = ddata->hw_guard_end - jiffies; - - if ((long)wait > 0 && wait <= ddata->hw_guard_wait) { - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(wait); - } -} - -static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) -{ - struct omap_dss_device *src = ddata->src; - int r; - u8 buf[1]; - - r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf, 1); - - if (r < 0) - return r; - - *data = buf[0]; - - return 0; -} - -static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd) -{ - struct omap_dss_device *src = ddata->src; - - return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1); -} - -static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param) -{ - struct omap_dss_device *src = ddata->src; - u8 buf[2] = { dcs_cmd, param }; - - return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2); -} - -static int dsicm_sleep_in(struct panel_drv_data *ddata) - -{ - struct omap_dss_device *src = ddata->src; - u8 cmd; - int r; - - hw_guard_wait(ddata); - - cmd = MIPI_DCS_ENTER_SLEEP_MODE; - r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, &cmd, 1); - if (r) - return r; - - hw_guard_start(ddata, 120); - - usleep_range(5000, 10000); - - return 0; -} - -static int dsicm_sleep_out(struct panel_drv_data *ddata) -{ - int r; - - hw_guard_wait(ddata); - - r = dsicm_dcs_write_0(ddata, MIPI_DCS_EXIT_SLEEP_MODE); - if (r) - return r; - - hw_guard_start(ddata, 120); - - usleep_range(5000, 10000); - - return 0; -} - -static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3) -{ - int r; - - r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1); - if (r) - return r; - r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2); - if (r) - return r; - r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3); - if (r) - return r; - - return 0; -} - -static int dsicm_set_update_window(struct panel_drv_data *ddata, - u16 x, u16 y, u16 w, u16 h) -{ - struct omap_dss_device *src = ddata->src; - int r; - u16 x1 = x; - u16 x2 = x + w - 1; - u16 y1 = y; - u16 y2 = y + h - 1; - - u8 buf[5]; - buf[0] = MIPI_DCS_SET_COLUMN_ADDRESS; - buf[1] = (x1 >> 8) & 0xff; - buf[2] = (x1 >> 0) & 0xff; - buf[3] = (x2 >> 8) & 0xff; - buf[4] = (x2 >> 0) & 0xff; - - r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf)); - if (r) - return r; - - buf[0] = MIPI_DCS_SET_PAGE_ADDRESS; - buf[1] = (y1 >> 8) & 0xff; - buf[2] = (y1 >> 0) & 0xff; - buf[3] = (y2 >> 8) & 0xff; - buf[4] = (y2 >> 0) & 0xff; - - r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf)); - if (r) - return r; - - src->ops->dsi.bta_sync(src, ddata->channel); - - return r; -} - -static void dsicm_queue_ulps_work(struct panel_drv_data *ddata) -{ - if (ddata->ulps_timeout > 0) - queue_delayed_work(ddata->workqueue, &ddata->ulps_work, - msecs_to_jiffies(ddata->ulps_timeout)); -} - -static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata) -{ - cancel_delayed_work(&ddata->ulps_work); -} - -static int dsicm_enter_ulps(struct panel_drv_data *ddata) -{ - struct omap_dss_device *src = ddata->src; - int r; - - if (ddata->ulps_enabled) - return 0; - - dsicm_cancel_ulps_work(ddata); - - r = _dsicm_enable_te(ddata, false); - if (r) - goto 
err; - - if (ddata->ext_te_gpio) - disable_irq(gpiod_to_irq(ddata->ext_te_gpio)); - - src->ops->dsi.disable(src, false, true); - - ddata->ulps_enabled = true; - - return 0; - -err: - dev_err(&ddata->pdev->dev, "enter ULPS failed"); - dsicm_panel_reset(ddata); - - ddata->ulps_enabled = false; - - dsicm_queue_ulps_work(ddata); - - return r; -} - -static int dsicm_exit_ulps(struct panel_drv_data *ddata) -{ - struct omap_dss_device *src = ddata->src; - int r; - - if (!ddata->ulps_enabled) - return 0; - - src->ops->enable(src); - src->ops->dsi.enable_hs(src, ddata->channel, true); - - r = _dsicm_enable_te(ddata, true); - if (r) { - dev_err(&ddata->pdev->dev, "failed to re-enable TE"); - goto err2; - } - - if (ddata->ext_te_gpio) - enable_irq(gpiod_to_irq(ddata->ext_te_gpio)); - - dsicm_queue_ulps_work(ddata); - - ddata->ulps_enabled = false; - - return 0; - -err2: - dev_err(&ddata->pdev->dev, "failed to exit ULPS"); - - r = dsicm_panel_reset(ddata); - if (!r) { - if (ddata->ext_te_gpio) - enable_irq(gpiod_to_irq(ddata->ext_te_gpio)); - ddata->ulps_enabled = false; - } - - dsicm_queue_ulps_work(ddata); - - return r; -} - -static int dsicm_wake_up(struct panel_drv_data *ddata) -{ - if (ddata->ulps_enabled) - return dsicm_exit_ulps(ddata); - - dsicm_cancel_ulps_work(ddata); - dsicm_queue_ulps_work(ddata); - return 0; -} - -static int dsicm_bl_update_status(struct backlight_device *dev) -{ - struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); - struct omap_dss_device *src = ddata->src; - int r = 0; - int level; - - if (dev->props.fb_blank == FB_BLANK_UNBLANK && - dev->props.power == FB_BLANK_UNBLANK) - level = dev->props.brightness; - else - level = 0; - - dev_dbg(&ddata->pdev->dev, "update brightness to %d\n", level); - - mutex_lock(&ddata->lock); - - if (ddata->enabled) { - src->ops->dsi.bus_lock(src); - - r = dsicm_wake_up(ddata); - if (!r) - r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level); - - src->ops->dsi.bus_unlock(src); - } - - mutex_unlock(&ddata->lock); - - return r; -} - -static int dsicm_bl_get_intensity(struct backlight_device *dev) -{ - if (dev->props.fb_blank == FB_BLANK_UNBLANK && - dev->props.power == FB_BLANK_UNBLANK) - return dev->props.brightness; - - return 0; -} - -static const struct backlight_ops dsicm_bl_ops = { - .get_brightness = dsicm_bl_get_intensity, - .update_status = dsicm_bl_update_status, -}; - -static ssize_t dsicm_num_errors_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct panel_drv_data *ddata = dev_get_drvdata(dev); - struct omap_dss_device *src = ddata->src; - u8 errors = 0; - int r; - - mutex_lock(&ddata->lock); - - if (ddata->enabled) { - src->ops->dsi.bus_lock(src); - - r = dsicm_wake_up(ddata); - if (!r) - r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS, - &errors); - - src->ops->dsi.bus_unlock(src); - } else { - r = -ENODEV; - } - - mutex_unlock(&ddata->lock); - - if (r) - return r; - - return snprintf(buf, PAGE_SIZE, "%d\n", errors); -} - -static ssize_t dsicm_hw_revision_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct panel_drv_data *ddata = dev_get_drvdata(dev); - struct omap_dss_device *src = ddata->src; - u8 id1, id2, id3; - int r; - - mutex_lock(&ddata->lock); - - if (ddata->enabled) { - src->ops->dsi.bus_lock(src); - - r = dsicm_wake_up(ddata); - if (!r) - r = dsicm_get_id(ddata, &id1, &id2, &id3); - - src->ops->dsi.bus_unlock(src); - } else { - r = -ENODEV; - } - - mutex_unlock(&ddata->lock); - - if (r) - return r; - - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", 
id1, id2, id3); -} - -static ssize_t dsicm_store_ulps(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct panel_drv_data *ddata = dev_get_drvdata(dev); - struct omap_dss_device *src = ddata->src; - unsigned long t; - int r; - - r = kstrtoul(buf, 0, &t); - if (r) - return r; - - mutex_lock(&ddata->lock); - - if (ddata->enabled) { - src->ops->dsi.bus_lock(src); - - if (t) - r = dsicm_enter_ulps(ddata); - else - r = dsicm_wake_up(ddata); - - src->ops->dsi.bus_unlock(src); - } - - mutex_unlock(&ddata->lock); - - if (r) - return r; - - return count; -} - -static ssize_t dsicm_show_ulps(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct panel_drv_data *ddata = dev_get_drvdata(dev); - unsigned int t; - - mutex_lock(&ddata->lock); - t = ddata->ulps_enabled; - mutex_unlock(&ddata->lock); - - return snprintf(buf, PAGE_SIZE, "%u\n", t); -} - -static ssize_t dsicm_store_ulps_timeout(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct panel_drv_data *ddata = dev_get_drvdata(dev); - struct omap_dss_device *src = ddata->src; - unsigned long t; - int r; - - r = kstrtoul(buf, 0, &t); - if (r) - return r; - - mutex_lock(&ddata->lock); - ddata->ulps_timeout = t; - - if (ddata->enabled) { - /* dsicm_wake_up will restart the timer */ - src->ops->dsi.bus_lock(src); - r = dsicm_wake_up(ddata); - src->ops->dsi.bus_unlock(src); - } - - mutex_unlock(&ddata->lock); - - if (r) - return r; - - return count; -} - -static ssize_t dsicm_show_ulps_timeout(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct panel_drv_data *ddata = dev_get_drvdata(dev); - unsigned int t; - - mutex_lock(&ddata->lock); - t = ddata->ulps_timeout; - mutex_unlock(&ddata->lock); - - return snprintf(buf, PAGE_SIZE, "%u\n", t); -} - -static DEVICE_ATTR(num_dsi_errors, S_IRUGO, dsicm_num_errors_show, NULL); -static DEVICE_ATTR(hw_revision, S_IRUGO, dsicm_hw_revision_show, NULL); -static DEVICE_ATTR(ulps, S_IRUGO | S_IWUSR, - dsicm_show_ulps, dsicm_store_ulps); -static DEVICE_ATTR(ulps_timeout, S_IRUGO | S_IWUSR, - dsicm_show_ulps_timeout, dsicm_store_ulps_timeout); - -static struct attribute *dsicm_attrs[] = { - &dev_attr_num_dsi_errors.attr, - &dev_attr_hw_revision.attr, - &dev_attr_ulps.attr, - &dev_attr_ulps_timeout.attr, - NULL, -}; - -static const struct attribute_group dsicm_attr_group = { - .attrs = dsicm_attrs, -}; - -static void dsicm_hw_reset(struct panel_drv_data *ddata) -{ - gpiod_set_value(ddata->reset_gpio, 1); - udelay(10); - /* reset the panel */ - gpiod_set_value(ddata->reset_gpio, 0); - /* assert reset */ - udelay(10); - gpiod_set_value(ddata->reset_gpio, 1); - /* wait after releasing reset */ - usleep_range(5000, 10000); -} - -static int dsicm_power_on(struct panel_drv_data *ddata) -{ - struct omap_dss_device *src = ddata->src; - u8 id1, id2, id3; - int r; - struct omap_dss_dsi_config dsi_config = { - .mode = OMAP_DSS_DSI_CMD_MODE, - .pixel_format = OMAP_DSS_DSI_FMT_RGB888, - .vm = &ddata->vm, - .hs_clk_min = 150000000, - .hs_clk_max = 300000000, - .lp_clk_min = 7000000, - .lp_clk_max = 10000000, - }; - - if (ddata->vpnl) { - r = regulator_enable(ddata->vpnl); - if (r) { - dev_err(&ddata->pdev->dev, - "failed to enable VPNL: %d\n", r); - return r; - } - } - - if (ddata->vddi) { - r = regulator_enable(ddata->vddi); - if (r) { - dev_err(&ddata->pdev->dev, - "failed to enable VDDI: %d\n", r); - goto err_vpnl; - } - } - - if (ddata->pin_config.num_pins > 0) { - r = 
src->ops->dsi.configure_pins(src, &ddata->pin_config); - if (r) { - dev_err(&ddata->pdev->dev, - "failed to configure DSI pins\n"); - goto err_vddi; - } - } - - r = src->ops->dsi.set_config(src, &dsi_config); - if (r) { - dev_err(&ddata->pdev->dev, "failed to configure DSI\n"); - goto err_vddi; - } - - src->ops->enable(src); - - dsicm_hw_reset(ddata); - - src->ops->dsi.enable_hs(src, ddata->channel, false); - - r = dsicm_sleep_out(ddata); - if (r) - goto err; - - r = dsicm_get_id(ddata, &id1, &id2, &id3); - if (r) - goto err; - - r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, 0xff); - if (r) - goto err; - - r = dsicm_dcs_write_1(ddata, DCS_CTRL_DISPLAY, - (1<<2) | (1<<5)); /* BL | BCTRL */ - if (r) - goto err; - - r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_PIXEL_FORMAT, - MIPI_DCS_PIXEL_FMT_24BIT); - if (r) - goto err; - - r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_ON); - if (r) - goto err; - - r = _dsicm_enable_te(ddata, ddata->te_enabled); - if (r) - goto err; - - r = src->ops->dsi.enable_video_output(src, ddata->channel); - if (r) - goto err; - - ddata->enabled = true; - - if (!ddata->intro_printed) { - dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n", - id1, id2, id3); - ddata->intro_printed = true; - } - - src->ops->dsi.enable_hs(src, ddata->channel, true); - - return 0; -err: - dev_err(&ddata->pdev->dev, "error while enabling panel, issuing HW reset\n"); - - dsicm_hw_reset(ddata); - - src->ops->dsi.disable(src, true, false); -err_vddi: - if (ddata->vddi) - regulator_disable(ddata->vddi); -err_vpnl: - if (ddata->vpnl) - regulator_disable(ddata->vpnl); - - return r; -} - -static void dsicm_power_off(struct panel_drv_data *ddata) -{ - struct omap_dss_device *src = ddata->src; - int r; - - src->ops->dsi.disable_video_output(src, ddata->channel); - - r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF); - if (!r) - r = dsicm_sleep_in(ddata); - - if (r) { - dev_err(&ddata->pdev->dev, - "error disabling panel, issuing HW reset\n"); - dsicm_hw_reset(ddata); - } - - src->ops->dsi.disable(src, true, false); - - if (ddata->vddi) - regulator_disable(ddata->vddi); - if (ddata->vpnl) - regulator_disable(ddata->vpnl); - - ddata->enabled = false; -} - -static int dsicm_panel_reset(struct panel_drv_data *ddata) -{ - dev_err(&ddata->pdev->dev, "performing LCD reset\n"); - - dsicm_power_off(ddata); - dsicm_hw_reset(ddata); - return dsicm_power_on(ddata); -} - -static int dsicm_connect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ - struct panel_drv_data *ddata = to_panel_data(dst); - struct device *dev = &ddata->pdev->dev; - int r; - - r = src->ops->dsi.request_vc(src, &ddata->channel); - if (r) { - dev_err(dev, "failed to get virtual channel\n"); - return r; - } - - r = src->ops->dsi.set_vc_id(src, ddata->channel, TCH); - if (r) { - dev_err(dev, "failed to set VC_ID\n"); - src->ops->dsi.release_vc(src, ddata->channel); - return r; - } - - ddata->src = src; - return 0; -} - -static void dsicm_disconnect(struct omap_dss_device *src, - struct omap_dss_device *dst) -{ - struct panel_drv_data *ddata = to_panel_data(dst); - - src->ops->dsi.release_vc(src, ddata->channel); - ddata->src = NULL; -} - -static void dsicm_enable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = ddata->src; - int r; - - mutex_lock(&ddata->lock); - - src->ops->dsi.bus_lock(src); - - r = dsicm_power_on(ddata); - - src->ops->dsi.bus_unlock(src); - - if (r) - goto err; - - mutex_unlock(&ddata->lock); - - dsicm_bl_power(ddata, 
true); - - return; -err: - dev_dbg(&ddata->pdev->dev, "enable failed (%d)\n", r); - mutex_unlock(&ddata->lock); -} - -static void dsicm_disable(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = ddata->src; - int r; - - dsicm_bl_power(ddata, false); - - mutex_lock(&ddata->lock); - - dsicm_cancel_ulps_work(ddata); - - src->ops->dsi.bus_lock(src); - - r = dsicm_wake_up(ddata); - if (!r) - dsicm_power_off(ddata); - - src->ops->dsi.bus_unlock(src); - - mutex_unlock(&ddata->lock); -} - -static void dsicm_framedone_cb(int err, void *data) -{ - struct panel_drv_data *ddata = data; - struct omap_dss_device *src = ddata->src; - - dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err); - src->ops->dsi.bus_unlock(src); -} - -static irqreturn_t dsicm_te_isr(int irq, void *data) -{ - struct panel_drv_data *ddata = data; - struct omap_dss_device *src = ddata->src; - int old; - int r; - - old = atomic_cmpxchg(&ddata->do_update, 1, 0); - - if (old) { - cancel_delayed_work(&ddata->te_timeout_work); - - r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb, - ddata); - if (r) - goto err; - } - - return IRQ_HANDLED; -err: - dev_err(&ddata->pdev->dev, "start update failed\n"); - src->ops->dsi.bus_unlock(src); - return IRQ_HANDLED; -} - -static void dsicm_te_timeout_work_callback(struct work_struct *work) -{ - struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, - te_timeout_work.work); - struct omap_dss_device *src = ddata->src; - - dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n"); - - atomic_set(&ddata->do_update, 0); - src->ops->dsi.bus_unlock(src); -} - -static int dsicm_update(struct omap_dss_device *dssdev, - u16 x, u16 y, u16 w, u16 h) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = ddata->src; - int r; - - dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h); - - mutex_lock(&ddata->lock); - src->ops->dsi.bus_lock(src); - - r = dsicm_wake_up(ddata); - if (r) - goto err; - - if (!ddata->enabled) { - r = 0; - goto err; - } - - /* XXX no need to send this every frame, but dsi break if not done */ - r = dsicm_set_update_window(ddata, 0, 0, ddata->vm.hactive, - ddata->vm.vactive); - if (r) - goto err; - - if (ddata->te_enabled && ddata->ext_te_gpio) { - schedule_delayed_work(&ddata->te_timeout_work, - msecs_to_jiffies(250)); - atomic_set(&ddata->do_update, 1); - } else { - r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb, - ddata); - if (r) - goto err; - } - - /* note: no bus_unlock here. 
unlock is src framedone_cb */ - mutex_unlock(&ddata->lock); - return 0; -err: - src->ops->dsi.bus_unlock(src); - mutex_unlock(&ddata->lock); - return r; -} - -static int dsicm_sync(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = ddata->src; - - dev_dbg(&ddata->pdev->dev, "sync\n"); - - mutex_lock(&ddata->lock); - src->ops->dsi.bus_lock(src); - src->ops->dsi.bus_unlock(src); - mutex_unlock(&ddata->lock); - - dev_dbg(&ddata->pdev->dev, "sync done\n"); - - return 0; -} - -static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable) -{ - struct omap_dss_device *src = ddata->src; - int r; - - if (enable) - r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_TEAR_ON, 0); - else - r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF); - - if (!ddata->ext_te_gpio) - src->ops->dsi.enable_te(src, enable); - - /* possible panel bug */ - msleep(100); - - return r; -} - -static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = ddata->src; - int r; - - mutex_lock(&ddata->lock); - - if (ddata->te_enabled == enable) - goto end; - - src->ops->dsi.bus_lock(src); - - if (ddata->enabled) { - r = dsicm_wake_up(ddata); - if (r) - goto err; - - r = _dsicm_enable_te(ddata, enable); - if (r) - goto err; - } - - ddata->te_enabled = enable; - - src->ops->dsi.bus_unlock(src); -end: - mutex_unlock(&ddata->lock); - - return 0; -err: - src->ops->dsi.bus_unlock(src); - mutex_unlock(&ddata->lock); - - return r; -} - -static int dsicm_get_te(struct omap_dss_device *dssdev) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - int r; - - mutex_lock(&ddata->lock); - r = ddata->te_enabled; - mutex_unlock(&ddata->lock); - - return r; -} - -static int dsicm_memory_read(struct omap_dss_device *dssdev, - void *buf, size_t size, - u16 x, u16 y, u16 w, u16 h) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - struct omap_dss_device *src = ddata->src; - int r; - int first = 1; - int plen; - unsigned int buf_used = 0; - - if (size < w * h * 3) - return -ENOMEM; - - mutex_lock(&ddata->lock); - - if (!ddata->enabled) { - r = -ENODEV; - goto err1; - } - - size = min((u32)w * h * 3, - ddata->vm.hactive * ddata->vm.vactive * 3); - - src->ops->dsi.bus_lock(src); - - r = dsicm_wake_up(ddata); - if (r) - goto err2; - - /* plen 1 or 2 goes into short packet. until checksum error is fixed, - * use short packets. plen 32 works, but bigger packets seem to cause - * an error. */ - if (size % 2) - plen = 1; - else - plen = 2; - - dsicm_set_update_window(ddata, x, y, w, h); - - r = src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, plen); - if (r) - goto err2; - - while (buf_used < size) { - u8 dcs_cmd = first ? 
0x2e : 0x3e; - first = 0; - - r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, - buf + buf_used, size - buf_used); - - if (r < 0) { - dev_err(dssdev->dev, "read error\n"); - goto err3; - } - - buf_used += r; - - if (r < plen) { - dev_err(&ddata->pdev->dev, "short read\n"); - break; - } - - if (signal_pending(current)) { - dev_err(&ddata->pdev->dev, "signal pending, " - "aborting memory read\n"); - r = -ERESTARTSYS; - goto err3; - } - } - - r = buf_used; - -err3: - src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, 1); -err2: - src->ops->dsi.bus_unlock(src); -err1: - mutex_unlock(&ddata->lock); - return r; -} - -static void dsicm_ulps_work(struct work_struct *work) -{ - struct panel_drv_data *ddata = container_of(work, struct panel_drv_data, - ulps_work.work); - struct omap_dss_device *dssdev = &ddata->dssdev; - struct omap_dss_device *src = ddata->src; - - mutex_lock(&ddata->lock); - - if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE || !ddata->enabled) { - mutex_unlock(&ddata->lock); - return; - } - - src->ops->dsi.bus_lock(src); - - dsicm_enter_ulps(ddata); - - src->ops->dsi.bus_unlock(src); - mutex_unlock(&ddata->lock); -} - -static int dsicm_get_modes(struct omap_dss_device *dssdev, - struct drm_connector *connector) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - - connector->display_info.width_mm = ddata->width_mm; - connector->display_info.height_mm = ddata->height_mm; - - return omapdss_display_get_modes(connector, &ddata->vm); -} - -static int dsicm_check_timings(struct omap_dss_device *dssdev, - struct drm_display_mode *mode) -{ - struct panel_drv_data *ddata = to_panel_data(dssdev); - int ret = 0; - - if (mode->hdisplay != ddata->vm.hactive) - ret = -EINVAL; - - if (mode->vdisplay != ddata->vm.vactive) - ret = -EINVAL; - - if (ret) { - dev_warn(dssdev->dev, "wrong resolution: %d x %d", - mode->hdisplay, mode->vdisplay); - dev_warn(dssdev->dev, "panel resolution: %d x %d", - ddata->vm.hactive, ddata->vm.vactive); - } - - return ret; -} - -static const struct omap_dss_device_ops dsicm_ops = { - .connect = dsicm_connect, - .disconnect = dsicm_disconnect, - - .enable = dsicm_enable, - .disable = dsicm_disable, - - .get_modes = dsicm_get_modes, - .check_timings = dsicm_check_timings, -}; - -static const struct omap_dss_driver dsicm_dss_driver = { - .update = dsicm_update, - .sync = dsicm_sync, - - .enable_te = dsicm_enable_te, - .get_te = dsicm_get_te, - - .memory_read = dsicm_memory_read, -}; - -static int dsicm_probe_of(struct platform_device *pdev) -{ - struct device_node *node = pdev->dev.of_node; - struct backlight_device *backlight; - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct display_timing timing; - int err; - - ddata->reset_gpio = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(ddata->reset_gpio)) { - err = PTR_ERR(ddata->reset_gpio); - dev_err(&pdev->dev, "reset gpio request failed: %d", err); - return err; - } - - ddata->ext_te_gpio = devm_gpiod_get_optional(&pdev->dev, "te", - GPIOD_IN); - if (IS_ERR(ddata->ext_te_gpio)) { - err = PTR_ERR(ddata->ext_te_gpio); - dev_err(&pdev->dev, "TE gpio request failed: %d", err); - return err; - } - - err = of_get_display_timing(node, "panel-timing", &timing); - if (!err) { - videomode_from_timing(&timing, &ddata->vm); - if (!ddata->vm.pixelclock) - ddata->vm.pixelclock = - ddata->vm.hactive * ddata->vm.vactive * 60; - } else { - dev_warn(&pdev->dev, - "failed to get video timing, using defaults\n"); - } - - ddata->width_mm = 0; - of_property_read_u32(node, "width-mm", 
&ddata->width_mm); - - ddata->height_mm = 0; - of_property_read_u32(node, "height-mm", &ddata->height_mm); - - ddata->vpnl = devm_regulator_get_optional(&pdev->dev, "vpnl"); - if (IS_ERR(ddata->vpnl)) { - err = PTR_ERR(ddata->vpnl); - if (err == -EPROBE_DEFER) - return err; - ddata->vpnl = NULL; - } - - ddata->vddi = devm_regulator_get_optional(&pdev->dev, "vddi"); - if (IS_ERR(ddata->vddi)) { - err = PTR_ERR(ddata->vddi); - if (err == -EPROBE_DEFER) - return err; - ddata->vddi = NULL; - } - - backlight = devm_of_find_backlight(&pdev->dev); - if (IS_ERR(backlight)) - return PTR_ERR(backlight); - - /* If no backlight device is found assume native backlight support */ - if (backlight) - ddata->extbldev = backlight; - else - ddata->use_dsi_backlight = true; - - /* TODO: ulps */ - - return 0; -} - -static int dsicm_probe(struct platform_device *pdev) -{ - struct panel_drv_data *ddata; - struct backlight_device *bldev = NULL; - struct device *dev = &pdev->dev; - struct omap_dss_device *dssdev; - int r; - - dev_dbg(dev, "probe\n"); - - ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); - if (!ddata) - return -ENOMEM; - - platform_set_drvdata(pdev, ddata); - ddata->pdev = pdev; - - ddata->vm.hactive = 864; - ddata->vm.vactive = 480; - ddata->vm.pixelclock = 864 * 480 * 60; - - r = dsicm_probe_of(pdev); - if (r) - return r; - - dssdev = &ddata->dssdev; - dssdev->dev = dev; - dssdev->ops = &dsicm_ops; - dssdev->driver = &dsicm_dss_driver; - dssdev->type = OMAP_DISPLAY_TYPE_DSI; - dssdev->display = true; - dssdev->owner = THIS_MODULE; - dssdev->of_port = 0; - dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES; - - dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE | - OMAP_DSS_DISPLAY_CAP_TEAR_ELIM; - - omapdss_display_init(dssdev); - omapdss_device_register(dssdev); - - mutex_init(&ddata->lock); - - atomic_set(&ddata->do_update, 0); - - if (ddata->ext_te_gpio) { - r = devm_request_irq(dev, gpiod_to_irq(ddata->ext_te_gpio), - dsicm_te_isr, - IRQF_TRIGGER_RISING, - "taal vsync", ddata); - - if (r) { - dev_err(dev, "IRQ request failed\n"); - goto err_reg; - } - - INIT_DEFERRABLE_WORK(&ddata->te_timeout_work, - dsicm_te_timeout_work_callback); - - dev_dbg(dev, "Using GPIO TE\n"); - } - - ddata->workqueue = create_singlethread_workqueue("dsicm_wq"); - if (!ddata->workqueue) { - r = -ENOMEM; - goto err_reg; - } - INIT_DELAYED_WORK(&ddata->ulps_work, dsicm_ulps_work); - - dsicm_hw_reset(ddata); - - if (ddata->use_dsi_backlight) { - struct backlight_properties props = { 0 }; - props.max_brightness = 255; - props.type = BACKLIGHT_RAW; - - bldev = devm_backlight_device_register(dev, dev_name(dev), - dev, ddata, &dsicm_bl_ops, &props); - if (IS_ERR(bldev)) { - r = PTR_ERR(bldev); - goto err_bl; - } - - ddata->bldev = bldev; - } - - r = sysfs_create_group(&dev->kobj, &dsicm_attr_group); - if (r) { - dev_err(dev, "failed to create sysfs files\n"); - goto err_bl; - } - - return 0; - -err_bl: - destroy_workqueue(ddata->workqueue); -err_reg: - if (ddata->extbldev) - put_device(&ddata->extbldev->dev); - - return r; -} - -static int __exit dsicm_remove(struct platform_device *pdev) -{ - struct panel_drv_data *ddata = platform_get_drvdata(pdev); - struct omap_dss_device *dssdev = &ddata->dssdev; - - dev_dbg(&pdev->dev, "remove\n"); - - omapdss_device_unregister(dssdev); - - if (omapdss_device_is_enabled(dssdev)) - dsicm_disable(dssdev); - omapdss_device_disconnect(ddata->src, dssdev); - - sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group); - - if (ddata->extbldev) - put_device(&ddata->extbldev->dev); - - 
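/*
 * Illustrative sketch (not part of the patch): the deleted dsicm_probe() above
 * unwinds with the classic goto ladder (err_bl, err_reg), and dsicm_remove()
 * below releases in roughly the reverse order of acquisition. A minimal model
 * of that pairing; acquire(), release() and teardown() are hypothetical
 * stand-ins for the driver's irq/workqueue/sysfs setup.
 */
#include <stdio.h>

static int acquire(const char *what) { printf("acquire %s\n", what); return 0; }
static void release(const char *what) { printf("release %s\n", what); }

static int probe(void)
{
	if (acquire("irq"))
		return -1;
	if (acquire("workqueue"))
		goto err_irq;
	if (acquire("sysfs"))
		goto err_wq;
	return 0;

err_wq:				/* unwind only what already succeeded */
	release("workqueue");
err_irq:
	release("irq");
	return -1;
}

static void teardown(void)
{
	release("sysfs");	/* strictly the reverse of probe() order */
	release("workqueue");
	release("irq");
}

int main(void)
{
	if (!probe())
		teardown();
	return 0;
}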
dsicm_cancel_ulps_work(ddata); - destroy_workqueue(ddata->workqueue); - - /* reset, to be sure that the panel is in a valid state */ - dsicm_hw_reset(ddata); - - return 0; -} - -static const struct of_device_id dsicm_of_match[] = { - { .compatible = "omapdss,panel-dsi-cm", }, - {}, -}; - -MODULE_DEVICE_TABLE(of, dsicm_of_match); - -static struct platform_driver dsicm_driver = { - .probe = dsicm_probe, - .remove = __exit_p(dsicm_remove), - .driver = { - .name = "panel-dsi-cm", - .of_match_table = dsicm_of_match, - .suppress_bind_attrs = true, - }, -}; - -module_platform_driver(dsicm_driver); - -MODULE_AUTHOR("Tomi Valkeinen <[email protected]>"); -MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/omapdrm/dss/Kconfig b/drivers/gpu/drm/omapdrm/dss/Kconfig deleted file mode 100644 index e11b258a2294..000000000000 --- a/drivers/gpu/drm/omapdrm/dss/Kconfig +++ /dev/null @@ -1,135 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -config OMAP2_DSS_INIT - bool - -config OMAP_DSS_BASE - tristate - -menuconfig OMAP2_DSS - tristate "OMAP2+ Display Subsystem support" - select OMAP_DSS_BASE - select VIDEOMODE_HELPERS - select OMAP2_DSS_INIT - select HDMI - help - OMAP2+ Display Subsystem support. - -if OMAP2_DSS - -config OMAP2_DSS_DEBUG - bool "Debug support" - default n - help - This enables printing of debug messages. Alternatively, debug messages - can also be enabled by setting CONFIG_DYNAMIC_DEBUG and then setting - appropriate flags in <debugfs>/dynamic_debug/control. - -config OMAP2_DSS_DEBUGFS - bool "Debugfs filesystem support" - depends on DEBUG_FS - default n - help - This enables debugfs for OMAPDSS at <debugfs>/omapdss. This enables - querying about clock configuration and register configuration of dss, - dispc, dsi, hdmi and rfbi. - -config OMAP2_DSS_COLLECT_IRQ_STATS - bool "Collect DSS IRQ statistics" - depends on OMAP2_DSS_DEBUGFS - default n - help - Collect DSS IRQ statistics, printable via debugfs. - - The statistics can be found from - <debugfs>/omapdss/dispc_irq for DISPC interrupts, and - <debugfs>/omapdss/dsi_irq for DSI interrupts. - -config OMAP2_DSS_DPI - bool "DPI support" - default y - help - DPI Interface. This is the Parallel Display Interface. - -config OMAP2_DSS_VENC - bool "VENC support" - default y - help - OMAP Video Encoder support for S-Video and composite TV-out. - -config OMAP2_DSS_HDMI_COMMON - bool - -config OMAP4_DSS_HDMI - bool "HDMI support for OMAP4" - default y - select OMAP2_DSS_HDMI_COMMON - help - HDMI support for OMAP4 based SoCs. - -config OMAP4_DSS_HDMI_CEC - bool "Enable HDMI CEC support for OMAP4" - depends on OMAP4_DSS_HDMI - select CEC_CORE - default y - help - When selected the HDMI transmitter will support the CEC feature. - -config OMAP5_DSS_HDMI - bool "HDMI support for OMAP5" - default n - select OMAP2_DSS_HDMI_COMMON - help - HDMI Interface for OMAP5 and similar cores. This adds the High - Definition Multimedia Interface. See https://www.hdmi.org/ for HDMI - specification. - -config OMAP2_DSS_SDI - bool "SDI support" - default n - help - SDI (Serial Display Interface) support. - - SDI is a high speed one-way display serial bus between the host - processor and a display. - -config OMAP2_DSS_DSI - bool "DSI support" - default n - help - MIPI DSI (Display Serial Interface) support. - - DSI is a high speed half-duplex serial interface between the host - processor and a peripheral, such as a display or a framebuffer chip. - - See https://www.mipi.org/ for DSI specifications. 
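/*
 * Illustrative sketch (not part of the patch): the DSI help text above
 * describes the command-mode bus that the deleted panel-dsi-cm driver drove
 * via one- and two-byte DCS writes (see its dsicm_dcs_write_0/_1 helpers
 * earlier in this diff). A self-contained model of that packet shape;
 * dsi_write() is a hypothetical stand-in for the real
 * src->ops->dsi.dcs_write() transport.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static int dsi_write(int channel, const uint8_t *buf, size_t len)
{
	printf("vc%d:", channel);	/* stub: log instead of transmitting */
	for (size_t i = 0; i < len; i++)
		printf(" %02x", buf[i]);
	printf("\n");
	return 0;
}

static int dcs_write_0(int channel, uint8_t cmd)
{
	return dsi_write(channel, &cmd, 1);	/* parameterless DCS command */
}

static int dcs_write_1(int channel, uint8_t cmd, uint8_t param)
{
	uint8_t buf[2] = { cmd, param };	/* command byte, one parameter */

	return dsi_write(channel, buf, 2);
}

int main(void)
{
	dcs_write_0(0, 0x11);		/* MIPI_DCS_EXIT_SLEEP_MODE */
	dcs_write_1(0, 0x51, 0xff);	/* DCS_BRIGHTNESS, full level */
	return 0;
}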
- -config OMAP2_DSS_MIN_FCK_PER_PCK - int "Minimum FCK/PCK ratio (for scaling)" - range 0 32 - default 0 - help - This can be used to adjust the minimum FCK/PCK ratio. - - With this you can make sure that DISPC FCK is at least - n x PCK. Video plane scaling requires higher FCK than - normally. - - If this is set to 0, there's no extra constraint on the - DISPC FCK. However, the FCK will at minimum be - 2xPCK (if active matrix) or 3xPCK (if passive matrix). - - Max FCK is 173MHz, so this doesn't work if your PCK - is very high. - -config OMAP2_DSS_SLEEP_AFTER_VENC_RESET - bool "Sleep 20ms after VENC reset" - default y - help - There is a 20ms sleep after VENC reset which seemed to fix the - reset. The reason for the bug is unclear, and it's also unclear - on what platforms this happens. - - This option enables the sleep, and is enabled by default. You can - disable the sleep if it doesn't cause problems on your platform. - -endif diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile deleted file mode 100644 index f967e6948f2e..000000000000 --- a/drivers/gpu/drm/omapdrm/dss/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o - -obj-$(CONFIG_OMAP_DSS_BASE) += omapdss-base.o -omapdss-base-y := base.o display.o output.o - -obj-$(CONFIG_OMAP2_DSS) += omapdss.o -# Core DSS files -omapdss-y := dss.o dispc.o dispc_coefs.o \ - pll.o video-pll.o -omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o -omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o -omapdss-$(CONFIG_OMAP2_DSS_SDI) += sdi.o -omapdss-$(CONFIG_OMAP2_DSS_DSI) += dsi.o -omapdss-$(CONFIG_OMAP2_DSS_HDMI_COMMON) += hdmi_common.o hdmi_wp.o hdmi_pll.o \ - hdmi_phy.o -omapdss-$(CONFIG_OMAP4_DSS_HDMI) += hdmi4.o hdmi4_core.o -omapdss-$(CONFIG_OMAP4_DSS_HDMI_CEC) += hdmi4_cec.o -omapdss-$(CONFIG_OMAP5_DSS_HDMI) += hdmi5.o hdmi5_core.o -ccflags-$(CONFIG_OMAP2_DSS_DEBUG) += -DDEBUG diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c index cf50430e6363..050ca7eafac5 100644 --- a/drivers/gpu/drm/omapdrm/dss/base.c +++ b/drivers/gpu/drm/omapdrm/dss/base.c @@ -16,32 +16,10 @@ #include "dss.h" #include "omapdss.h" -static struct dss_device *dss_device; - -struct dss_device *omapdss_get_dss(void) -{ - return dss_device; -} -EXPORT_SYMBOL(omapdss_get_dss); - -void omapdss_set_dss(struct dss_device *dss) -{ - dss_device = dss; -} -EXPORT_SYMBOL(omapdss_set_dss); - struct dispc_device *dispc_get_dispc(struct dss_device *dss) { return dss->dispc; } -EXPORT_SYMBOL(dispc_get_dispc); - -const struct dispc_ops *dispc_get_ops(struct dss_device *dss) -{ - return dss->dispc_ops; -} -EXPORT_SYMBOL(dispc_get_ops); - /* ----------------------------------------------------------------------------- * OMAP DSS Devices Handling @@ -56,7 +34,6 @@ void omapdss_device_register(struct omap_dss_device *dssdev) list_add_tail(&dssdev->list, &omapdss_devices_list); mutex_unlock(&omapdss_devices_lock); } -EXPORT_SYMBOL_GPL(omapdss_device_register); void omapdss_device_unregister(struct omap_dss_device *dssdev) { @@ -64,7 +41,6 @@ void omapdss_device_unregister(struct omap_dss_device *dssdev) list_del(&dssdev->list); mutex_unlock(&omapdss_devices_lock); } -EXPORT_SYMBOL_GPL(omapdss_device_unregister); static bool omapdss_device_is_registered(struct device_node *node) { @@ -86,24 +62,16 @@ static bool omapdss_device_is_registered(struct device_node *node) struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev) { - if 
(!try_module_get(dssdev->owner)) - return NULL; - - if (get_device(dssdev->dev) == NULL) { - module_put(dssdev->owner); + if (get_device(dssdev->dev) == NULL) return NULL; - } return dssdev; } -EXPORT_SYMBOL(omapdss_device_get); void omapdss_device_put(struct omap_dss_device *dssdev) { put_device(dssdev->dev); - module_put(dssdev->owner); } -EXPORT_SYMBOL(omapdss_device_put); struct omap_dss_device *omapdss_find_device_by_node(struct device_node *node) { @@ -149,7 +117,7 @@ struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from) goto done; } - if (dssdev->id && (dssdev->next || dssdev->bridge)) + if (dssdev->id && dssdev->bridge) goto done; } @@ -164,7 +132,6 @@ done: mutex_unlock(&omapdss_devices_lock); return dssdev; } -EXPORT_SYMBOL(omapdss_device_next_output); static bool omapdss_device_is_connected(struct omap_dss_device *dssdev) { @@ -175,8 +142,6 @@ int omapdss_device_connect(struct dss_device *dss, struct omap_dss_device *src, struct omap_dss_device *dst) { - int ret; - dev_dbg(&dss->pdev->dev, "connect(%s, %s)\n", src ? dev_name(src->dev) : "NULL", dst ? dev_name(dst->dev) : "NULL"); @@ -195,17 +160,8 @@ int omapdss_device_connect(struct dss_device *dss, dst->dss = dss; - if (dst->ops && dst->ops->connect) { - ret = dst->ops->connect(src, dst); - if (ret < 0) { - dst->dss = NULL; - return ret; - } - } - return 0; } -EXPORT_SYMBOL_GPL(omapdss_device_connect); void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst) @@ -222,43 +178,12 @@ void omapdss_device_disconnect(struct omap_dss_device *src, } if (!dst->id && !omapdss_device_is_connected(dst)) { - WARN_ON(!dst->display); + WARN_ON(1); return; } - WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED); - - if (dst->ops && dst->ops->disconnect) - dst->ops->disconnect(src, dst); dst->dss = NULL; } -EXPORT_SYMBOL_GPL(omapdss_device_disconnect); - -void omapdss_device_enable(struct omap_dss_device *dssdev) -{ - if (!dssdev) - return; - - if (dssdev->ops && dssdev->ops->enable) - dssdev->ops->enable(dssdev); - - omapdss_device_enable(dssdev->next); - - dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; -} -EXPORT_SYMBOL_GPL(omapdss_device_enable); - -void omapdss_device_disable(struct omap_dss_device *dssdev) -{ - if (!dssdev) - return; - - omapdss_device_disable(dssdev->next); - - if (dssdev->ops && dssdev->ops->disable) - dssdev->ops->disable(dssdev); -} -EXPORT_SYMBOL_GPL(omapdss_device_disable); /* ----------------------------------------------------------------------------- * Components Handling @@ -344,7 +269,6 @@ void omapdss_gather_components(struct device *dev) for_each_available_child_of_node(dev->of_node, child) omapdss_walk_device(dev, child, true); } -EXPORT_SYMBOL(omapdss_gather_components); static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp) { @@ -369,8 +293,3 @@ bool omapdss_stack_is_ready(void) return true; } -EXPORT_SYMBOL(omapdss_stack_is_ready); - -MODULE_AUTHOR("Tomi Valkeinen <[email protected]>"); -MODULE_DESCRIPTION("OMAP Display Subsystem Base"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index 599183879caf..f4cbef8ccace 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -351,8 +351,6 @@ static unsigned long dispc_plane_pclk_rate(struct dispc_device *dispc, static unsigned long dispc_plane_lclk_rate(struct dispc_device *dispc, enum omap_plane_id plane); -static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask); - static 
inline void dispc_write_reg(struct dispc_device *dispc, u16 idx, u32 val) { __raw_writel(val, dispc->base + idx); @@ -379,12 +377,12 @@ static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel, REG_FLD_MOD(dispc, rfld->reg, val, rfld->high, rfld->low); } -static int dispc_get_num_ovls(struct dispc_device *dispc) +int dispc_get_num_ovls(struct dispc_device *dispc) { return dispc->feat->num_ovls; } -static int dispc_get_num_mgrs(struct dispc_device *dispc) +int dispc_get_num_mgrs(struct dispc_device *dispc) { return dispc->feat->num_mgrs; } @@ -670,13 +668,13 @@ void dispc_runtime_put(struct dispc_device *dispc) WARN_ON(r < 0 && r != -ENOSYS); } -static u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc, +u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc, enum omap_channel channel) { return mgr_desc[channel].vsync_irq; } -static u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc, +u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc, enum omap_channel channel) { if (channel == OMAP_DSS_CHANNEL_DIGIT && dispc->feat->no_framedone_tv) @@ -685,18 +683,18 @@ static u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc, return mgr_desc[channel].framedone_irq; } -static u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc, +u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc, enum omap_channel channel) { return mgr_desc[channel].sync_lost_irq; } -static u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc) +u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc) { return DISPC_IRQ_FRAMEDONEWB; } -static void dispc_mgr_enable(struct dispc_device *dispc, +void dispc_mgr_enable(struct dispc_device *dispc, enum omap_channel channel, bool enable) { mgr_fld_write(dispc, channel, DISPC_MGR_FLD_ENABLE, enable); @@ -710,13 +708,13 @@ static bool dispc_mgr_is_enabled(struct dispc_device *dispc, return !!mgr_fld_read(dispc, channel, DISPC_MGR_FLD_ENABLE); } -static bool dispc_mgr_go_busy(struct dispc_device *dispc, +bool dispc_mgr_go_busy(struct dispc_device *dispc, enum omap_channel channel) { return mgr_fld_read(dispc, channel, DISPC_MGR_FLD_GO) == 1; } -static void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel) +void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel) { WARN_ON(!dispc_mgr_is_enabled(dispc, channel)); WARN_ON(dispc_mgr_go_busy(dispc, channel)); @@ -726,12 +724,12 @@ static void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel) mgr_fld_write(dispc, channel, DISPC_MGR_FLD_GO, 1); } -static bool dispc_wb_go_busy(struct dispc_device *dispc) +bool dispc_wb_go_busy(struct dispc_device *dispc) { return REG_GET(dispc, DISPC_CONTROL2, 6, 6) == 1; } -static void dispc_wb_go(struct dispc_device *dispc) +void dispc_wb_go(struct dispc_device *dispc) { enum omap_plane_id plane = OMAP_DSS_WB; bool enable, go; @@ -877,50 +875,62 @@ static void dispc_ovl_write_color_conv_coef(struct dispc_device *dispc, #undef CVAL } -static void dispc_wb_write_color_conv_coef(struct dispc_device *dispc, - const struct csc_coef_rgb2yuv *ct) -{ - const enum omap_plane_id plane = OMAP_DSS_WB; - -#define CVAL(x, y) (FLD_VAL(x, 26, 16) | FLD_VAL(y, 10, 0)) +/* YUV -> RGB, ITU-R BT.601, full range */ +static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_full = { + 256, 0, 358, /* ry, rcb, rcr |1.000 0.000 1.402|*/ + 256, -88, -182, /* gy, gcb, gcr |1.000 -0.344 -0.714|*/ + 256, 452, 0, /* by, bcb, bcr |1.000 1.772 0.000|*/ + true, /* full range */ +}; - dispc_write_reg(dispc, 
DISPC_OVL_CONV_COEF(plane, 0), CVAL(ct->yg, ct->yr)); - dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 1), CVAL(ct->crr, ct->yb)); - dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 2), CVAL(ct->crb, ct->crg)); - dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 3), CVAL(ct->cbg, ct->cbr)); - dispc_write_reg(dispc, DISPC_OVL_CONV_COEF(plane, 4), CVAL(0, ct->cbb)); +/* YUV -> RGB, ITU-R BT.601, limited range */ +static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = { + 298, 0, 409, /* ry, rcb, rcr |1.164 0.000 1.596|*/ + 298, -100, -208, /* gy, gcb, gcr |1.164 -0.392 -0.813|*/ + 298, 516, 0, /* by, bcb, bcr |1.164 2.017 0.000|*/ + false, /* limited range */ +}; - REG_FLD_MOD(dispc, DISPC_OVL_ATTRIBUTES(plane), ct->full_range, 11, 11); +/* YUV -> RGB, ITU-R BT.709, full range */ +static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_full = { + 256, 0, 402, /* ry, rcb, rcr |1.000 0.000 1.570|*/ + 256, -48, -120, /* gy, gcb, gcr |1.000 -0.187 -0.467|*/ + 256, 475, 0, /* by, bcb, bcr |1.000 1.856 0.000|*/ + true, /* full range */ +}; -#undef CVAL -} +/* YUV -> RGB, ITU-R BT.709, limited range */ +static const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt709_lim = { + 298, 0, 459, /* ry, rcb, rcr |1.164 0.000 1.793|*/ + 298, -55, -136, /* gy, gcb, gcr |1.164 -0.213 -0.533|*/ + 298, 541, 0, /* by, bcb, bcr |1.164 2.112 0.000|*/ + false, /* limited range */ +}; -static void dispc_setup_color_conv_coef(struct dispc_device *dispc) +static void dispc_ovl_set_csc(struct dispc_device *dispc, + enum omap_plane_id plane, + enum drm_color_encoding color_encoding, + enum drm_color_range color_range) { - int i; - int num_ovl = dispc_get_num_ovls(dispc); - - /* YUV -> RGB, ITU-R BT.601, limited range */ - const struct csc_coef_yuv2rgb coefs_yuv2rgb_bt601_lim = { - 298, 0, 409, /* ry, rcb, rcr */ - 298, -100, -208, /* gy, gcb, gcr */ - 298, 516, 0, /* by, bcb, bcr */ - false, /* limited range */ - }; + const struct csc_coef_yuv2rgb *csc; - /* RGB -> YUV, ITU-R BT.601, limited range */ - const struct csc_coef_rgb2yuv coefs_rgb2yuv_bt601_lim = { - 66, 129, 25, /* yr, yg, yb */ - -38, -74, 112, /* cbr, cbg, cbb */ - 112, -94, -18, /* crr, crg, crb */ - false, /* limited range */ - }; - - for (i = 1; i < num_ovl; i++) - dispc_ovl_write_color_conv_coef(dispc, i, &coefs_yuv2rgb_bt601_lim); + switch (color_encoding) { + default: + case DRM_COLOR_YCBCR_BT601: + if (color_range == DRM_COLOR_YCBCR_FULL_RANGE) + csc = &coefs_yuv2rgb_bt601_full; + else + csc = &coefs_yuv2rgb_bt601_lim; + break; + case DRM_COLOR_YCBCR_BT709: + if (color_range == DRM_COLOR_YCBCR_FULL_RANGE) + csc = &coefs_yuv2rgb_bt709_full; + else + csc = &coefs_yuv2rgb_bt709_lim; + break; + } - if (dispc->feat->has_writeback) - dispc_wb_write_color_conv_coef(dispc, &coefs_rgb2yuv_bt601_lim); + dispc_ovl_write_color_conv_coef(dispc, plane, csc); } static void dispc_ovl_set_ba0(struct dispc_device *dispc, @@ -1285,7 +1295,7 @@ static bool dispc_ovl_color_mode_supported(struct dispc_device *dispc, return false; } -static const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc, +const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc, enum omap_plane_id plane) { return dispc->feat->supported_color_modes[plane]; @@ -2601,7 +2611,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc, u8 pre_mult_alpha, u8 global_alpha, enum omap_dss_rotation_type rotation_type, bool replication, const struct videomode *vm, - bool mem_to_mem) + bool mem_to_mem, + enum drm_color_encoding color_encoding, + enum drm_color_range color_range) { 
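The four tables above are the standard BT.601/BT.709 conversion matrices scaled by 256, i.e. Q8.8 fixed point: 1.164 x 256 is about 298, 1.596 x 256 is about 409, and 256 stands for 1.000 in the full-range variants. A self-contained sketch of the limited-range BT.601 arithmetic using exactly those integers; note that the -16/-128 bias is applied explicitly here, whereas on DISPC the range handling is tied to the full-range attribute bit:

#include <stdint.h>
#include <stdio.h>

static uint8_t clamp8(int v)
{
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/*
 * BT.601 limited-range YCbCr -> RGB with the Q8.8 coefficients from
 * coefs_yuv2rgb_bt601_lim above (298/409, 298/-100/-208, 298/516).
 */
static void ycbcr_to_rgb_bt601_lim(uint8_t y, uint8_t cb, uint8_t cr,
                                   uint8_t *r, uint8_t *g, uint8_t *b)
{
        int y_ = y - 16, cb_ = cb - 128, cr_ = cr - 128;

        *r = clamp8((298 * y_ + 409 * cr_) >> 8);
        *g = clamp8((298 * y_ - 100 * cb_ - 208 * cr_) >> 8);
        *b = clamp8((298 * y_ + 516 * cb_) >> 8);
}

int main(void)
{
        uint8_t r, g, b;

        ycbcr_to_rgb_bt601_lim(235, 128, 128, &r, &g, &b); /* reference white */
        printf("%u %u %u\n", r, g, b); /* 254 254 254: Q8.8 truncation of ~255 */
        return 0;
}

As the setup_common() hunk just below shows, these per-plane coefficients are programmed only for regular overlay pipelines; the writeback plane is skipped.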
bool five_taps = true; bool fieldmode = false; @@ -2750,6 +2762,9 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc, fieldmode, fourcc, rotation); dispc_ovl_set_output_size(dispc, plane, out_width, out_height); dispc_ovl_set_vid_color_conv(dispc, plane, cconv); + + if (plane != OMAP_DSS_WB) + dispc_ovl_set_csc(dispc, plane, color_encoding, color_range); } dispc_ovl_set_rotation_attrs(dispc, plane, rotation, rotation_type, @@ -2764,7 +2779,7 @@ static int dispc_ovl_setup_common(struct dispc_device *dispc, return 0; } -static int dispc_ovl_setup(struct dispc_device *dispc, +int dispc_ovl_setup(struct dispc_device *dispc, enum omap_plane_id plane, const struct omap_overlay_info *oi, const struct videomode *vm, bool mem_to_mem, @@ -2786,12 +2801,13 @@ static int dispc_ovl_setup(struct dispc_device *dispc, oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height, oi->out_width, oi->out_height, oi->fourcc, oi->rotation, oi->zorder, oi->pre_mult_alpha, oi->global_alpha, - oi->rotation_type, replication, vm, mem_to_mem); + oi->rotation_type, replication, vm, mem_to_mem, + oi->color_encoding, oi->color_range); return r; } -static int dispc_wb_setup(struct dispc_device *dispc, +int dispc_wb_setup(struct dispc_device *dispc, const struct omap_dss_writeback_info *wi, bool mem_to_mem, const struct videomode *vm, enum dss_writeback_channel channel_in) @@ -2819,7 +2835,8 @@ static int dispc_wb_setup(struct dispc_device *dispc, wi->buf_width, pos_x, pos_y, in_width, in_height, wi->width, wi->height, wi->fourcc, wi->rotation, zorder, wi->pre_mult_alpha, global_alpha, wi->rotation_type, - replication, vm, mem_to_mem); + replication, vm, mem_to_mem, DRM_COLOR_YCBCR_BT601, + DRM_COLOR_YCBCR_LIMITED_RANGE); if (r) return r; @@ -2874,12 +2891,12 @@ static int dispc_wb_setup(struct dispc_device *dispc, return 0; } -static bool dispc_has_writeback(struct dispc_device *dispc) +bool dispc_has_writeback(struct dispc_device *dispc) { return dispc->feat->has_writeback; } -static int dispc_ovl_enable(struct dispc_device *dispc, +int dispc_ovl_enable(struct dispc_device *dispc, enum omap_plane_id plane, bool enable) { DSSDBG("dispc_enable_plane %d, %d\n", plane, enable); @@ -2970,7 +2987,7 @@ static void dispc_mgr_enable_alpha_fixed_zorder(struct dispc_device *dispc, REG_FLD_MOD(dispc, DISPC_CONFIG, enable, 19, 19); } -static void dispc_mgr_setup(struct dispc_device *dispc, +void dispc_mgr_setup(struct dispc_device *dispc, enum omap_channel channel, const struct omap_overlay_manager_info *info) { @@ -3049,7 +3066,7 @@ static void dispc_mgr_enable_stallmode(struct dispc_device *dispc, mgr_fld_write(dispc, channel, DISPC_MGR_FLD_STALLMODE, enable); } -static void dispc_mgr_set_lcd_config(struct dispc_device *dispc, +void dispc_mgr_set_lcd_config(struct dispc_device *dispc, enum omap_channel channel, const struct dss_lcd_mgr_config *config) { @@ -3098,7 +3115,7 @@ static bool _dispc_mgr_pclk_ok(struct dispc_device *dispc, return pclk <= dispc->feat->max_tv_pclk; } -static int dispc_mgr_check_timings(struct dispc_device *dispc, +int dispc_mgr_check_timings(struct dispc_device *dispc, enum omap_channel channel, const struct videomode *vm) { @@ -3191,7 +3208,7 @@ static int vm_flag_to_int(enum display_flags flags, enum display_flags high, } /* change name to mode? 
*/ -static void dispc_mgr_set_timings(struct dispc_device *dispc, +void dispc_mgr_set_timings(struct dispc_device *dispc, enum omap_channel channel, const struct videomode *vm) { @@ -3735,17 +3752,17 @@ int dispc_mgr_get_clock_div(struct dispc_device *dispc, return 0; } -static u32 dispc_read_irqstatus(struct dispc_device *dispc) +u32 dispc_read_irqstatus(struct dispc_device *dispc) { return dispc_read_reg(dispc, DISPC_IRQSTATUS); } -static void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask) +void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask) { dispc_write_reg(dispc, DISPC_IRQSTATUS, mask); } -static void dispc_write_irqenable(struct dispc_device *dispc, u32 mask) +void dispc_write_irqenable(struct dispc_device *dispc, u32 mask) { u32 old_mask = dispc_read_reg(dispc, DISPC_IRQENABLE); @@ -3769,7 +3786,7 @@ void dispc_disable_sidle(struct dispc_device *dispc) REG_FLD_MOD(dispc, DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */ } -static u32 dispc_mgr_gamma_size(struct dispc_device *dispc, +u32 dispc_mgr_gamma_size(struct dispc_device *dispc, enum omap_channel channel) { const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma; @@ -3824,7 +3841,7 @@ static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = { { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, }, }; -static void dispc_mgr_set_gamma(struct dispc_device *dispc, +void dispc_mgr_set_gamma(struct dispc_device *dispc, enum omap_channel channel, const struct drm_color_lut *lut, unsigned int length) @@ -3930,8 +3947,6 @@ static void _omap_dispc_initial_config(struct dispc_device *dispc) dispc->feat->has_gamma_table) REG_FLD_MOD(dispc, DISPC_CONFIG, 1, 9, 9); - dispc_setup_color_conv_coef(dispc); - dispc_set_loadmode(dispc, OMAP_DSS_LOAD_FRAME_ONLY); dispc_init_fifos(dispc); @@ -4482,7 +4497,7 @@ static irqreturn_t dispc_irq_handler(int irq, void *arg) return dispc->user_handler(irq, dispc->user_data); } -static int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler, +int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler, void *dev_id) { int r; @@ -4506,7 +4521,7 @@ static int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler, return r; } -static void dispc_free_irq(struct dispc_device *dispc, void *dev_id) +void dispc_free_irq(struct dispc_device *dispc, void *dev_id) { devm_free_irq(&dispc->pdev->dev, dispc->irq, dispc); @@ -4514,7 +4529,7 @@ static void dispc_free_irq(struct dispc_device *dispc, void *dev_id) dispc->user_data = NULL; } -static u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc) +u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc) { u32 limit = 0; @@ -4684,47 +4699,6 @@ static void dispc_errata_i734_wa(struct dispc_device *dispc) REG_FLD_MOD(dispc, DISPC_CONFIG, gatestate, 8, 4); } -static const struct dispc_ops dispc_ops = { - .read_irqstatus = dispc_read_irqstatus, - .clear_irqstatus = dispc_clear_irqstatus, - .write_irqenable = dispc_write_irqenable, - - .request_irq = dispc_request_irq, - .free_irq = dispc_free_irq, - - .runtime_get = dispc_runtime_get, - .runtime_put = dispc_runtime_put, - - .get_num_ovls = dispc_get_num_ovls, - .get_num_mgrs = dispc_get_num_mgrs, - - .get_memory_bandwidth_limit = dispc_get_memory_bandwidth_limit, - - .mgr_enable = dispc_mgr_enable, - .mgr_is_enabled = dispc_mgr_is_enabled, - .mgr_get_vsync_irq = dispc_mgr_get_vsync_irq, - .mgr_get_framedone_irq = dispc_mgr_get_framedone_irq, - .mgr_get_sync_lost_irq = dispc_mgr_get_sync_lost_irq, - .mgr_go_busy = 
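dispc_mgr_set_gamma(), made non-static above, consumes the DRM uapi LUT whose red/green/blue components are 16-bit values (U16_MAX in the default table). Hardware gamma entries are narrower, so each component has to be rescaled with rounding and clamping; a sketch of that reduction, assuming an 8-bit-per-component table for illustration (the kernel's drm_color_lut_extract() helper performs the equivalent):

#include <stdint.h>
#include <stdio.h>

/*
 * Reduce a 16-bit drm_color_lut component to a narrower hardware
 * gamma entry, round-to-nearest with clamping; hw_bits < 16 assumed.
 */
static uint32_t lut_component_to_hw(uint16_t user, unsigned int hw_bits)
{
        uint32_t max = 0xffffu >> (16 - hw_bits);
        uint32_t v = ((uint32_t)user + (1u << (16 - hw_bits - 1)))
                        >> (16 - hw_bits);

        return v > max ? max : v;
}

int main(void)
{
        printf("%u %u %u\n",
               lut_component_to_hw(0x0000, 8),          /* 0 */
               lut_component_to_hw(0x8000, 8),          /* 128 */
               lut_component_to_hw(0xffff, 8));         /* clamped to 255 */
        return 0;
}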
dispc_mgr_go_busy, - .mgr_go = dispc_mgr_go, - .mgr_set_lcd_config = dispc_mgr_set_lcd_config, - .mgr_check_timings = dispc_mgr_check_timings, - .mgr_set_timings = dispc_mgr_set_timings, - .mgr_setup = dispc_mgr_setup, - .mgr_gamma_size = dispc_mgr_gamma_size, - .mgr_set_gamma = dispc_mgr_set_gamma, - - .ovl_enable = dispc_ovl_enable, - .ovl_setup = dispc_ovl_setup, - .ovl_get_color_modes = dispc_ovl_get_color_modes, - - .wb_get_framedone_irq = dispc_wb_get_framedone_irq, - .wb_setup = dispc_wb_setup, - .has_writeback = dispc_has_writeback, - .wb_go_busy = dispc_wb_go_busy, - .wb_go = dispc_wb_go, -}; - /* DISPC HW IP initialisation */ static const struct of_device_id dispc_of_match[] = { { .compatible = "ti,omap2-dispc", .data = &omap24xx_dispc_feats }, @@ -4826,7 +4800,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) dispc_runtime_put(dispc); dss->dispc = dispc; - dss->dispc_ops = &dispc_ops; dispc->debugfs = dss_debugfs_create_file(dss, "dispc", dispc_dump_regs, dispc); @@ -4848,7 +4821,6 @@ static void dispc_unbind(struct device *dev, struct device *master, void *data) dss_debugfs_remove_file(dispc->debugfs); dss->dispc = NULL; - dss->dispc_ops = NULL; pm_runtime_disable(dev); diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c deleted file mode 100644 index 3b82158b1bfd..000000000000 --- a/drivers/gpu/drm/omapdrm/dss/display.c +++ /dev/null @@ -1,60 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2009 Nokia Corporation - * Author: Tomi Valkeinen <[email protected]> - * - * Some code and ideas taken from drivers/video/omap/ driver - * by Imre Deak. - */ - -#define DSS_SUBSYS_NAME "DISPLAY" - -#include <linux/kernel.h> -#include <linux/of.h> - -#include <drm/drm_connector.h> -#include <drm/drm_modes.h> - -#include "omapdss.h" - -static int disp_num_counter; - -void omapdss_display_init(struct omap_dss_device *dssdev) -{ - int id; - - /* - * Note: this presumes that all displays either have an DT alias, or - * none has. 
- */ - id = of_alias_get_id(dssdev->dev->of_node, "display"); - if (id < 0) - id = disp_num_counter++; - - /* Use 'label' property for name, if it exists */ - of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name); - - if (dssdev->name == NULL) - dssdev->name = devm_kasprintf(dssdev->dev, GFP_KERNEL, - "display%u", id); -} -EXPORT_SYMBOL_GPL(omapdss_display_init); - -int omapdss_display_get_modes(struct drm_connector *connector, - const struct videomode *vm) -{ - struct drm_display_mode *mode; - - mode = drm_mode_create(connector->dev); - if (!mode) - return 0; - - drm_display_mode_from_videomode(vm, mode); - - mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; - drm_mode_set_name(mode); - drm_mode_probed_add(connector, mode); - - return 1; -} -EXPORT_SYMBOL_GPL(omapdss_display_get_modes); diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c index 1d2992daef40..030f997eccd0 100644 --- a/drivers/gpu/drm/omapdrm/dss/dpi.c +++ b/drivers/gpu/drm/omapdrm/dss/dpi.c @@ -641,7 +641,6 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port) out->type = OMAP_DISPLAY_TYPE_DPI; out->dispc_channel = dpi_get_channel(dpi); out->of_port = port_num; - out->owner = THIS_MODULE; r = omapdss_device_init_output(out, &dpi->bridge); if (r < 0) { diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 735a4e9027d0..8e11612f5fe1 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -14,7 +14,9 @@ #include <linux/device.h> #include <linux/err.h> #include <linux/interrupt.h> +#include <linux/irq.h> #include <linux/delay.h> +#include <linux/gpio/consumer.h> #include <linux/mutex.h> #include <linux/module.h> #include <linux/semaphore.h> @@ -33,6 +35,9 @@ #include <linux/component.h> #include <linux/sys_soc.h> +#include <drm/drm_bridge.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_panel.h> #include <video/mipi_display.h> #include "omapdss.h" @@ -40,73 +45,7 @@ #define DSI_CATCH_MISSING_TE -struct dsi_reg { u16 module; u16 idx; }; - -#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx }) - -/* DSI Protocol Engine */ - -#define DSI_PROTO 0 -#define DSI_PROTO_SZ 0x200 - -#define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000) -#define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010) -#define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014) -#define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018) -#define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C) -#define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040) -#define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044) -#define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048) -#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C) -#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050) -#define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054) -#define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058) -#define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C) -#define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060) -#define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064) -#define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068) -#define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C) -#define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070) -#define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074) -#define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078) -#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C) -#define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080) -#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084) -#define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088) -#define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C) -#define 
DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090) -#define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094) -#define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20)) -#define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20)) -#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20)) -#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20)) -#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20)) -#define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20)) -#define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20)) - -/* DSIPHY_SCP */ - -#define DSI_PHY 1 -#define DSI_PHY_OFFSET 0x200 -#define DSI_PHY_SZ 0x40 - -#define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000) -#define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004) -#define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008) -#define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014) -#define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028) - -/* DSI_PLL_CTRL_SCP */ - -#define DSI_PLL 2 -#define DSI_PLL_OFFSET 0x300 -#define DSI_PLL_SZ 0x20 - -#define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000) -#define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004) -#define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008) -#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C) -#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010) +#include "dsi.h" #define REG_GET(dsi, idx, start, end) \ FLD_GET(dsi_read_reg(dsi, idx), start, end) @@ -114,324 +53,36 @@ struct dsi_reg { u16 module; u16 idx; }; #define REG_FLD_MOD(dsi, idx, val, start, end) \ dsi_write_reg(dsi, idx, FLD_MOD(dsi_read_reg(dsi, idx), val, start, end)) -/* Global interrupts */ -#define DSI_IRQ_VC0 (1 << 0) -#define DSI_IRQ_VC1 (1 << 1) -#define DSI_IRQ_VC2 (1 << 2) -#define DSI_IRQ_VC3 (1 << 3) -#define DSI_IRQ_WAKEUP (1 << 4) -#define DSI_IRQ_RESYNC (1 << 5) -#define DSI_IRQ_PLL_LOCK (1 << 7) -#define DSI_IRQ_PLL_UNLOCK (1 << 8) -#define DSI_IRQ_PLL_RECALL (1 << 9) -#define DSI_IRQ_COMPLEXIO_ERR (1 << 10) -#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14) -#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15) -#define DSI_IRQ_TE_TRIGGER (1 << 16) -#define DSI_IRQ_ACK_TRIGGER (1 << 17) -#define DSI_IRQ_SYNC_LOST (1 << 18) -#define DSI_IRQ_LDO_POWER_GOOD (1 << 19) -#define DSI_IRQ_TA_TIMEOUT (1 << 20) -#define DSI_IRQ_ERROR_MASK \ - (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \ - DSI_IRQ_TA_TIMEOUT) -#define DSI_IRQ_CHANNEL_MASK 0xf - -/* Virtual channel interrupts */ -#define DSI_VC_IRQ_CS (1 << 0) -#define DSI_VC_IRQ_ECC_CORR (1 << 1) -#define DSI_VC_IRQ_PACKET_SENT (1 << 2) -#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3) -#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4) -#define DSI_VC_IRQ_BTA (1 << 5) -#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6) -#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7) -#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8) -#define DSI_VC_IRQ_ERROR_MASK \ - (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \ - DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \ - DSI_VC_IRQ_FIFO_TX_UDF) - -/* ComplexIO interrupts */ -#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0) -#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1) -#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2) -#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3) -#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4) -#define DSI_CIO_IRQ_ERRESC1 (1 << 5) -#define DSI_CIO_IRQ_ERRESC2 (1 << 6) -#define DSI_CIO_IRQ_ERRESC3 (1 << 7) -#define DSI_CIO_IRQ_ERRESC4 (1 << 8) -#define DSI_CIO_IRQ_ERRESC5 (1 << 9) -#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10) -#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11) -#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12) -#define DSI_CIO_IRQ_ERRCONTROL4 
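The REG_GET()/REG_FLD_MOD() wrappers kept above (with the register map itself moved to dsi.h) are thin layers over omapdss's FLD_* helpers, which name a register field by its high and low bit positions. Restated here for illustration, exercised with two field writes that appear verbatim in this patch's CIO-init code:

#include <stdint.h>
#include <stdio.h>

/*
 * Field helpers as defined in the omapdss headers: a field runs from
 * bit 'start' (high) down to bit 'end' (low), inclusive.
 */
#define FLD_MASK(start, end)   (((1u << ((start) - (end) + 1)) - 1) << (end))
#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
#define FLD_GET(val, start, end) (((val) & FLD_MASK(start, end)) >> (end))
#define FLD_MOD(orig, val, start, end) \
        (((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))

int main(void)
{
        uint32_t timing1 = 0;

        timing1 = FLD_MOD(timing1, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */
        timing1 = FLD_MOD(timing1, 1, 15, 15);     /* FORCE_TX_STOP_MODE_IO */

        printf("DSI_TIMING1 = 0x%x, counter = 0x%x\n",
               timing1, FLD_GET(timing1, 12, 0));  /* 0x9fff, 0x1fff */
        return 0;
}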
(1 << 13) -#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14) -#define DSI_CIO_IRQ_STATEULPS1 (1 << 15) -#define DSI_CIO_IRQ_STATEULPS2 (1 << 16) -#define DSI_CIO_IRQ_STATEULPS3 (1 << 17) -#define DSI_CIO_IRQ_STATEULPS4 (1 << 18) -#define DSI_CIO_IRQ_STATEULPS5 (1 << 19) -#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20) -#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21) -#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22) -#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23) -#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24) -#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25) -#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26) -#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27) -#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28) -#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29) -#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30) -#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31) -#define DSI_CIO_IRQ_ERROR_MASK \ - (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \ - DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \ - DSI_CIO_IRQ_ERRSYNCESC5 | \ - DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \ - DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \ - DSI_CIO_IRQ_ERRESC5 | \ - DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \ - DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \ - DSI_CIO_IRQ_ERRCONTROL5 | \ - DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \ - DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \ - DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \ - DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \ - DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5) - -typedef void (*omap_dsi_isr_t) (void *arg, u32 mask); -struct dsi_data; - -static int dsi_display_init_dispc(struct dsi_data *dsi); -static void dsi_display_uninit_dispc(struct dsi_data *dsi); - -static int dsi_vc_send_null(struct dsi_data *dsi, int channel); - -/* DSI PLL HSDIV indices */ -#define HSDIV_DISPC 0 -#define HSDIV_DSI 1 - -#define DSI_MAX_NR_ISRS 2 -#define DSI_MAX_NR_LANES 5 - -enum dsi_model { - DSI_MODEL_OMAP3, - DSI_MODEL_OMAP4, - DSI_MODEL_OMAP5, -}; - -enum dsi_lane_function { - DSI_LANE_UNUSED = 0, - DSI_LANE_CLK, - DSI_LANE_DATA1, - DSI_LANE_DATA2, - DSI_LANE_DATA3, - DSI_LANE_DATA4, -}; - -struct dsi_lane_config { - enum dsi_lane_function function; - u8 polarity; -}; - -struct dsi_isr_data { - omap_dsi_isr_t isr; - void *arg; - u32 mask; -}; - -enum fifo_size { - DSI_FIFO_SIZE_0 = 0, - DSI_FIFO_SIZE_32 = 1, - DSI_FIFO_SIZE_64 = 2, - DSI_FIFO_SIZE_96 = 3, - DSI_FIFO_SIZE_128 = 4, -}; - -enum dsi_vc_source { - DSI_VC_SOURCE_L4 = 0, - DSI_VC_SOURCE_VP, -}; - -struct dsi_irq_stats { - unsigned long last_reset; - unsigned int irq_count; - unsigned int dsi_irqs[32]; - unsigned int vc_irqs[4][32]; - unsigned int cio_irqs[32]; -}; - -struct dsi_isr_tables { - struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS]; - struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS]; - struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS]; -}; - -struct dsi_clk_calc_ctx { - struct dsi_data *dsi; - struct dss_pll *pll; - - /* inputs */ - - const struct omap_dss_dsi_config *config; - - unsigned long req_pck_min, req_pck_nom, req_pck_max; - - /* outputs */ - - struct dss_pll_clock_info dsi_cinfo; - struct dispc_clock_info dispc_cinfo; - - struct videomode vm; - struct omap_dss_dsi_videomode_timings dsi_vm; -}; - -struct dsi_lp_clock_info { - unsigned long lp_clk; - u16 lp_clk_div; -}; - -struct dsi_module_id_data { - u32 address; - int id; -}; - -enum dsi_quirks { - 
DSI_QUIRK_PLL_PWR_BUG = (1 << 0), /* DSI-PLL power command 0x3 is not working */ - DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1), - DSI_QUIRK_VC_OCP_WIDTH = (1 << 2), - DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3), - DSI_QUIRK_GNQ = (1 << 4), - DSI_QUIRK_PHY_DCC = (1 << 5), -}; - -struct dsi_of_data { - enum dsi_model model; - const struct dss_pll_hw *pll_hw; - const struct dsi_module_id_data *modules; - unsigned int max_fck_freq; - unsigned int max_pll_lpdiv; - enum dsi_quirks quirks; -}; - -struct dsi_data { - struct device *dev; - void __iomem *proto_base; - void __iomem *phy_base; - void __iomem *pll_base; - - const struct dsi_of_data *data; - int module_id; - - int irq; - - bool is_enabled; - - struct clk *dss_clk; - struct regmap *syscon; - struct dss_device *dss; - - struct dispc_clock_info user_dispc_cinfo; - struct dss_pll_clock_info user_dsi_cinfo; - - struct dsi_lp_clock_info user_lp_cinfo; - struct dsi_lp_clock_info current_lp_cinfo; - - struct dss_pll pll; - - bool vdds_dsi_enabled; - struct regulator *vdds_dsi_reg; - - struct { - enum dsi_vc_source source; - struct omap_dss_device *dssdev; - enum fifo_size tx_fifo_size; - enum fifo_size rx_fifo_size; - int vc_id; - } vc[4]; +static int dsi_init_dispc(struct dsi_data *dsi); +static void dsi_uninit_dispc(struct dsi_data *dsi); - struct mutex lock; - struct semaphore bus_lock; +static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel); - spinlock_t irq_lock; - struct dsi_isr_tables isr_tables; - /* space for a copy used by the interrupt handler */ - struct dsi_isr_tables isr_tables_copy; - - int update_channel; -#ifdef DSI_PERF_MEASURE - unsigned int update_bytes; -#endif - - bool te_enabled; - bool ulps_enabled; - - void (*framedone_callback)(int, void *); - void *framedone_data; - - struct delayed_work framedone_timeout_work; - -#ifdef DSI_CATCH_MISSING_TE - struct timer_list te_timer; -#endif - - unsigned long cache_req_pck; - unsigned long cache_clk_freq; - struct dss_pll_clock_info cache_cinfo; - - u32 errors; - spinlock_t errors_lock; -#ifdef DSI_PERF_MEASURE - ktime_t perf_setup_time; - ktime_t perf_start_time; -#endif - int debug_read; - int debug_write; - struct { - struct dss_debugfs_entry *irqs; - struct dss_debugfs_entry *regs; - struct dss_debugfs_entry *clks; - } debugfs; - -#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS - spinlock_t irq_stats_lock; - struct dsi_irq_stats irq_stats; -#endif - - unsigned int num_lanes_supported; - unsigned int line_buffer_size; - - struct dsi_lane_config lanes[DSI_MAX_NR_LANES]; - unsigned int num_lanes_used; - - unsigned int scp_clk_refcount; - - struct dss_lcd_mgr_config mgr_config; - struct videomode vm; - enum omap_dss_dsi_pixel_format pix_fmt; - enum omap_dss_dsi_mode mode; - struct omap_dss_dsi_videomode_timings vm_timings; - - struct omap_dss_device output; -}; - -struct dsi_packet_sent_handler_data { - struct dsi_data *dsi; - struct completion *completion; -}; +static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc, + const struct mipi_dsi_msg *msg); #ifdef DSI_PERF_MEASURE static bool dsi_perf; module_param(dsi_perf, bool, 0644); #endif +/* Note: for some reason video mode seems to work only if VC_VIDEO is 0 */ +#define VC_VIDEO 0 +#define VC_CMD 1 + +#define drm_bridge_to_dsi(bridge) \ + container_of(bridge, struct dsi_data, bridge) + static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev) { return dev_get_drvdata(dssdev->dev); } +static inline struct dsi_data *host_to_omap(struct mipi_dsi_host *host) +{ + return container_of(host, struct 
dsi_data, host); +} + static inline void dsi_write_reg(struct dsi_data *dsi, const struct dsi_reg idx, u32 val) { @@ -461,17 +112,13 @@ static inline u32 dsi_read_reg(struct dsi_data *dsi, const struct dsi_reg idx) return __raw_readl(base + idx.idx); } -static void dsi_bus_lock(struct omap_dss_device *dssdev) +static void dsi_bus_lock(struct dsi_data *dsi) { - struct dsi_data *dsi = to_dsi_data(dssdev); - down(&dsi->bus_lock); } -static void dsi_bus_unlock(struct omap_dss_device *dssdev) +static void dsi_bus_unlock(struct dsi_data *dsi) { - struct dsi_data *dsi = to_dsi_data(dssdev); - up(&dsi->bus_lock); } @@ -514,22 +161,6 @@ static inline bool wait_for_bit_change(struct dsi_data *dsi, return false; } -static u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt) -{ - switch (fmt) { - case OMAP_DSS_DSI_FMT_RGB888: - case OMAP_DSS_DSI_FMT_RGB666: - return 24; - case OMAP_DSS_DSI_FMT_RGB666_PACKED: - return 18; - case OMAP_DSS_DSI_FMT_RGB565: - return 16; - default: - BUG(); - return 0; - } -} - #ifdef DSI_PERF_MEASURE static void dsi_perf_mark_setup(struct dsi_data *dsi) { @@ -623,7 +254,7 @@ static void print_irq_status(u32 status) #undef PIS } -static void print_irq_status_vc(int channel, u32 status) +static void print_irq_status_vc(int vc, u32 status) { if (status == 0) return; @@ -634,7 +265,7 @@ static void print_irq_status_vc(int channel, u32 status) #define PIS(x) (status & DSI_VC_IRQ_##x) ? (#x " ") : "" pr_debug("DSI VC(%d) IRQ 0x%x: %s%s%s%s%s%s%s%s%s\n", - channel, + vc, status, PIS(CS), PIS(ECC_CORR), @@ -1015,7 +646,7 @@ static int dsi_unregister_isr(struct dsi_data *dsi, omap_dsi_isr_t isr, return r; } -static int dsi_register_isr_vc(struct dsi_data *dsi, int channel, +static int dsi_register_isr_vc(struct dsi_data *dsi, int vc, omap_dsi_isr_t isr, void *arg, u32 mask) { unsigned long flags; @@ -1024,18 +655,18 @@ static int dsi_register_isr_vc(struct dsi_data *dsi, int channel, spin_lock_irqsave(&dsi->irq_lock, flags); r = _dsi_register_isr(isr, arg, mask, - dsi->isr_tables.isr_table_vc[channel], - ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel])); + dsi->isr_tables.isr_table_vc[vc], + ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc])); if (r == 0) - _omap_dsi_set_irqs_vc(dsi, channel); + _omap_dsi_set_irqs_vc(dsi, vc); spin_unlock_irqrestore(&dsi->irq_lock, flags); return r; } -static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel, +static int dsi_unregister_isr_vc(struct dsi_data *dsi, int vc, omap_dsi_isr_t isr, void *arg, u32 mask) { unsigned long flags; @@ -1044,49 +675,11 @@ static int dsi_unregister_isr_vc(struct dsi_data *dsi, int channel, spin_lock_irqsave(&dsi->irq_lock, flags); r = _dsi_unregister_isr(isr, arg, mask, - dsi->isr_tables.isr_table_vc[channel], - ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel])); + dsi->isr_tables.isr_table_vc[vc], + ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc])); if (r == 0) - _omap_dsi_set_irqs_vc(dsi, channel); - - spin_unlock_irqrestore(&dsi->irq_lock, flags); - - return r; -} - -static int dsi_register_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr, - void *arg, u32 mask) -{ - unsigned long flags; - int r; - - spin_lock_irqsave(&dsi->irq_lock, flags); - - r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio, - ARRAY_SIZE(dsi->isr_tables.isr_table_cio)); - - if (r == 0) - _omap_dsi_set_irqs_cio(dsi); - - spin_unlock_irqrestore(&dsi->irq_lock, flags); - - return r; -} - -static int dsi_unregister_isr_cio(struct dsi_data *dsi, omap_dsi_isr_t isr, - void *arg, u32 mask) -{ - unsigned long flags; - int r; 
- - spin_lock_irqsave(&dsi->irq_lock, flags); - - r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio, - ARRAY_SIZE(dsi->isr_tables.isr_table_cio)); - - if (r == 0) - _omap_dsi_set_irqs_cio(dsi); + _omap_dsi_set_irqs_vc(dsi, vc); spin_unlock_irqrestore(&dsi->irq_lock, flags); @@ -1819,56 +1412,6 @@ static void dsi_cio_timings(struct dsi_data *dsi) dsi_write_reg(dsi, DSI_DSIPHY_CFG2, r); } -/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */ -static void dsi_cio_enable_lane_override(struct dsi_data *dsi, - unsigned int mask_p, - unsigned int mask_n) -{ - int i; - u32 l; - u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26; - - l = 0; - - for (i = 0; i < dsi->num_lanes_supported; ++i) { - unsigned int p = dsi->lanes[i].polarity; - - if (mask_p & (1 << i)) - l |= 1 << (i * 2 + (p ? 0 : 1)); - - if (mask_n & (1 << i)) - l |= 1 << (i * 2 + (p ? 1 : 0)); - } - - /* - * Bits in REGLPTXSCPDAT4TO0DXDY: - * 17: DY0 18: DX0 - * 19: DY1 20: DX1 - * 21: DY2 22: DX2 - * 23: DY3 24: DX3 - * 25: DY4 26: DX4 - */ - - /* Set the lane override configuration */ - - /* REGLPTXSCPDAT4TO0DXDY */ - REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, l, lptxscp_start, 17); - - /* Enable lane override */ - - /* ENLPTXSCPDAT */ - REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 1, 27, 27); -} - -static void dsi_cio_disable_lane_override(struct dsi_data *dsi) -{ - /* Disable lane override */ - REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */ - /* Reset the lane override configuration */ - /* REGLPTXSCPDAT4TO0DXDY */ - REG_FLD_MOD(dsi, DSI_DSIPHY_CFG10, 0, 22, 17); -} - static int dsi_cio_wait_tx_clk_esc_reset(struct dsi_data *dsi) { int t, i; @@ -2043,32 +1586,6 @@ static int dsi_cio_init(struct dsi_data *dsi) l = FLD_MOD(l, 0x1fff, 12, 0); /* STOP_STATE_COUNTER_IO */ dsi_write_reg(dsi, DSI_TIMING1, l); - if (dsi->ulps_enabled) { - unsigned int mask_p; - int i; - - DSSDBG("manual ulps exit\n"); - - /* ULPS is exited by Mark-1 state for 1ms, followed by - * stop state. DSS HW cannot do this via the normal - * ULPS exit sequence, as after reset the DSS HW thinks - * that we are not in ULPS mode, and refuses to send the - * sequence. So we need to send the ULPS exit sequence - * manually by setting positive lines high and negative lines - * low for 1ms. - */ - - mask_p = 0; - - for (i = 0; i < dsi->num_lanes_supported; ++i) { - if (dsi->lanes[i].function == DSI_LANE_UNUSED) - continue; - mask_p |= 1 << i; - } - - dsi_cio_enable_lane_override(dsi, mask_p, 0); - } - r = dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ON); if (r) goto err_cio_pwr; @@ -2087,29 +1604,15 @@ static int dsi_cio_init(struct dsi_data *dsi) if (r) goto err_tx_clk_esc_rst; - if (dsi->ulps_enabled) { - /* Keep Mark-1 state for 1ms (as per DSI spec) */ - ktime_t wait = ns_to_ktime(1000 * 1000); - set_current_state(TASK_UNINTERRUPTIBLE); - schedule_hrtimeout(&wait, HRTIMER_MODE_REL); - - /* Disable the override. 
The lanes should be set to Mark-11 - * state by the HW */ - dsi_cio_disable_lane_override(dsi); - } - /* FORCE_TX_STOP_MODE_IO */ REG_FLD_MOD(dsi, DSI_TIMING1, 0, 15, 15); dsi_cio_timings(dsi); - if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { - /* DDR_CLK_ALWAYS_ON */ - REG_FLD_MOD(dsi, DSI_CLK_CTRL, - dsi->vm_timings.ddr_clk_always_on, 13, 13); - } - - dsi->ulps_enabled = false; + /* DDR_CLK_ALWAYS_ON */ + REG_FLD_MOD(dsi, DSI_CLK_CTRL, + !(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS), + 13, 13); DSSDBG("CIO init done\n"); @@ -2120,8 +1623,6 @@ err_tx_clk_esc_rst: err_cio_pwr_dom: dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_OFF); err_cio_pwr: - if (dsi->ulps_enabled) - dsi_cio_disable_lane_override(dsi); err_scp_clk_dom: dsi_disable_scp_clk(dsi); dsi_disable_pads(dsi); @@ -2218,9 +1719,9 @@ static int dsi_force_tx_stop_mode_io(struct dsi_data *dsi) return 0; } -static bool dsi_vc_is_enabled(struct dsi_data *dsi, int channel) +static bool dsi_vc_is_enabled(struct dsi_data *dsi, int vc) { - return REG_GET(dsi, DSI_VC_CTRL(channel), 0, 0); + return REG_GET(dsi, DSI_VC_CTRL(vc), 0, 0); } static void dsi_packet_sent_handler_vp(void *data, u32 mask) @@ -2228,14 +1729,14 @@ static void dsi_packet_sent_handler_vp(void *data, u32 mask) struct dsi_packet_sent_handler_data *vp_data = (struct dsi_packet_sent_handler_data *) data; struct dsi_data *dsi = vp_data->dsi; - const int channel = dsi->update_channel; + const int vc = dsi->update_vc; u8 bit = dsi->te_enabled ? 30 : 31; - if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit) == 0) + if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit) == 0) complete(vp_data->completion); } -static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel) +static int dsi_sync_vc_vp(struct dsi_data *dsi, int vc) { DECLARE_COMPLETION_ONSTACK(completion); struct dsi_packet_sent_handler_data vp_data = { @@ -2247,13 +1748,13 @@ static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel) bit = dsi->te_enabled ? 
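With the manual ULPS exit and the per-driver ddr_clk_always_on field gone, the hunk above derives DDR_CLK_ALWAYS_ON directly from the attached peripheral's mode_flags: the bit is set unless the device declared MIPI_DSI_CLOCK_NON_CONTINUOUS. The decision in isolation, using the flag's value from drm_mipi_dsi.h:

#include <stdbool.h>
#include <stdio.h>

#define MIPI_DSI_CLOCK_NON_CONTINUOUS (1UL << 10) /* BIT(10), drm_mipi_dsi.h */

/* DDR_CLK_ALWAYS_ON is simply the inverse of the peripheral's
 * non-continuous-clock request. */
static bool ddr_clk_always_on(unsigned long mode_flags)
{
        return !(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS);
}

int main(void)
{
        printf("%d\n", ddr_clk_always_on(0));                             /* 1 */
        printf("%d\n", ddr_clk_always_on(MIPI_DSI_CLOCK_NON_CONTINUOUS)); /* 0 */
        return 0;
}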
30 : 31; - r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_vp, + r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_vp, &vp_data, DSI_VC_IRQ_PACKET_SENT); if (r) goto err0; /* Wait for completion only if TE_EN/TE_START is still set */ - if (REG_GET(dsi, DSI_VC_TE(channel), bit, bit)) { + if (REG_GET(dsi, DSI_VC_TE(vc), bit, bit)) { if (wait_for_completion_timeout(&completion, msecs_to_jiffies(10)) == 0) { DSSERR("Failed to complete previous frame transfer\n"); @@ -2262,12 +1763,12 @@ static int dsi_sync_vc_vp(struct dsi_data *dsi, int channel) } } - dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp, + dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp, &vp_data, DSI_VC_IRQ_PACKET_SENT); return 0; err1: - dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_vp, + dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_vp, &vp_data, DSI_VC_IRQ_PACKET_SENT); err0: return r; @@ -2278,13 +1779,13 @@ static void dsi_packet_sent_handler_l4(void *data, u32 mask) struct dsi_packet_sent_handler_data *l4_data = (struct dsi_packet_sent_handler_data *) data; struct dsi_data *dsi = l4_data->dsi; - const int channel = dsi->update_channel; + const int vc = dsi->update_vc; - if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5) == 0) + if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5) == 0) complete(l4_data->completion); } -static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel) +static int dsi_sync_vc_l4(struct dsi_data *dsi, int vc) { DECLARE_COMPLETION_ONSTACK(completion); struct dsi_packet_sent_handler_data l4_data = { @@ -2293,13 +1794,13 @@ static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel) }; int r = 0; - r = dsi_register_isr_vc(dsi, channel, dsi_packet_sent_handler_l4, + r = dsi_register_isr_vc(dsi, vc, dsi_packet_sent_handler_l4, &l4_data, DSI_VC_IRQ_PACKET_SENT); if (r) goto err0; /* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */ - if (REG_GET(dsi, DSI_VC_CTRL(channel), 5, 5)) { + if (REG_GET(dsi, DSI_VC_CTRL(vc), 5, 5)) { if (wait_for_completion_timeout(&completion, msecs_to_jiffies(10)) == 0) { DSSERR("Failed to complete previous l4 transfer\n"); @@ -2308,47 +1809,47 @@ static int dsi_sync_vc_l4(struct dsi_data *dsi, int channel) } } - dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4, + dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4, &l4_data, DSI_VC_IRQ_PACKET_SENT); return 0; err1: - dsi_unregister_isr_vc(dsi, channel, dsi_packet_sent_handler_l4, + dsi_unregister_isr_vc(dsi, vc, dsi_packet_sent_handler_l4, &l4_data, DSI_VC_IRQ_PACKET_SENT); err0: return r; } -static int dsi_sync_vc(struct dsi_data *dsi, int channel) +static int dsi_sync_vc(struct dsi_data *dsi, int vc) { WARN_ON(!dsi_bus_is_locked(dsi)); WARN_ON(in_interrupt()); - if (!dsi_vc_is_enabled(dsi, channel)) + if (!dsi_vc_is_enabled(dsi, vc)) return 0; - switch (dsi->vc[channel].source) { + switch (dsi->vc[vc].source) { case DSI_VC_SOURCE_VP: - return dsi_sync_vc_vp(dsi, channel); + return dsi_sync_vc_vp(dsi, vc); case DSI_VC_SOURCE_L4: - return dsi_sync_vc_l4(dsi, channel); + return dsi_sync_vc_l4(dsi, vc); default: BUG(); return -EINVAL; } } -static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable) +static int dsi_vc_enable(struct dsi_data *dsi, int vc, bool enable) { - DSSDBG("dsi_vc_enable channel %d, enable %d\n", - channel, enable); + DSSDBG("dsi_vc_enable vc %d, enable %d\n", + vc, enable); enable = enable ? 
1 : 0; - REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 0, 0); + REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 0, 0); - if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 0, enable)) { + if (!wait_for_bit_change(dsi, DSI_VC_CTRL(vc), 0, enable)) { DSSERR("Failed to set dsi_vc_enable to %d\n", enable); return -EIO; } @@ -2356,17 +1857,17 @@ static int dsi_vc_enable(struct dsi_data *dsi, int channel, bool enable) return 0; } -static void dsi_vc_initial_config(struct dsi_data *dsi, int channel) +static void dsi_vc_initial_config(struct dsi_data *dsi, int vc) { u32 r; - DSSDBG("Initial config of virtual channel %d", channel); + DSSDBG("Initial config of VC %d", vc); - r = dsi_read_reg(dsi, DSI_VC_CTRL(channel)); + r = dsi_read_reg(dsi, DSI_VC_CTRL(vc)); if (FLD_GET(r, 15, 15)) /* VC_BUSY */ DSSERR("VC(%d) busy when trying to configure it!\n", - channel); + vc); r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */ r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */ @@ -2381,74 +1882,39 @@ static void dsi_vc_initial_config(struct dsi_data *dsi, int channel) r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */ r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */ - dsi_write_reg(dsi, DSI_VC_CTRL(channel), r); - - dsi->vc[channel].source = DSI_VC_SOURCE_L4; -} - -static int dsi_vc_config_source(struct dsi_data *dsi, int channel, - enum dsi_vc_source source) -{ - if (dsi->vc[channel].source == source) - return 0; - - DSSDBG("Source config of virtual channel %d", channel); - - dsi_sync_vc(dsi, channel); - - dsi_vc_enable(dsi, channel, 0); - - /* VC_BUSY */ - if (!wait_for_bit_change(dsi, DSI_VC_CTRL(channel), 15, 0)) { - DSSERR("vc(%d) busy when trying to config for VP\n", channel); - return -EIO; - } - - /* SOURCE, 0 = L4, 1 = video port */ - REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), source, 1, 1); + dsi_write_reg(dsi, DSI_VC_CTRL(vc), r); - /* DCS_CMD_ENABLE */ - if (dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) { - bool enable = source == DSI_VC_SOURCE_VP; - REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 30, 30); - } - - dsi_vc_enable(dsi, channel, 1); - - dsi->vc[channel].source = source; - - return 0; + dsi->vc[vc].source = DSI_VC_SOURCE_L4; } -static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel, +static void dsi_vc_enable_hs(struct omap_dss_device *dssdev, int vc, bool enable) { struct dsi_data *dsi = to_dsi_data(dssdev); - DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable); + DSSDBG("dsi_vc_enable_hs(%d, %d)\n", vc, enable); + + if (REG_GET(dsi, DSI_VC_CTRL(vc), 9, 9) == enable) + return; WARN_ON(!dsi_bus_is_locked(dsi)); - dsi_vc_enable(dsi, channel, 0); + dsi_vc_enable(dsi, vc, 0); dsi_if_enable(dsi, 0); - REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), enable, 9, 9); + REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), enable, 9, 9); - dsi_vc_enable(dsi, channel, 1); + dsi_vc_enable(dsi, vc, 1); dsi_if_enable(dsi, 1); dsi_force_tx_stop_mode_io(dsi); - - /* start the DDR clock by sending a NULL packet */ - if (dsi->vm_timings.ddr_clk_always_on && enable) - dsi_vc_send_null(dsi, channel); } -static void dsi_vc_flush_long_data(struct dsi_data *dsi, int channel) +static void dsi_vc_flush_long_data(struct dsi_data *dsi, int vc) { - while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) { + while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) { u32 val; - val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel)); + val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc)); DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n", (val >> 0) & 0xff, (val >> 8) & 0xff, @@ -2494,13 +1960,13 @@ static void 
dsi_show_rx_ack_with_err(u16 err) DSSERR("\t\tDSI Protocol Violation\n"); } -static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel) +static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int vc) { /* RX_FIFO_NOT_EMPTY */ - while (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) { + while (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) { u32 val; u8 dt; - val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel)); + val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc)); DSSERR("\trawval %#08x\n", val); dt = FLD_GET(val, 5, 0); if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) { @@ -2515,7 +1981,7 @@ static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel) } else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) { DSSERR("\tDCS long response, len %d\n", FLD_GET(val, 23, 8)); - dsi_vc_flush_long_data(dsi, channel); + dsi_vc_flush_long_data(dsi, vc); } else { DSSERR("\tunknown datatype 0x%02x\n", dt); } @@ -2523,35 +1989,35 @@ static u16 dsi_vc_flush_receive_data(struct dsi_data *dsi, int channel) return 0; } -static int dsi_vc_send_bta(struct dsi_data *dsi, int channel) +static int dsi_vc_send_bta(struct dsi_data *dsi, int vc) { if (dsi->debug_write || dsi->debug_read) - DSSDBG("dsi_vc_send_bta %d\n", channel); + DSSDBG("dsi_vc_send_bta %d\n", vc); WARN_ON(!dsi_bus_is_locked(dsi)); /* RX_FIFO_NOT_EMPTY */ - if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) { + if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) { DSSERR("rx fifo not empty when sending BTA, dumping data:\n"); - dsi_vc_flush_receive_data(dsi, channel); + dsi_vc_flush_receive_data(dsi, vc); } - REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */ + REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 6, 6); /* BTA_EN */ /* flush posted write */ - dsi_read_reg(dsi, DSI_VC_CTRL(channel)); + dsi_read_reg(dsi, DSI_VC_CTRL(vc)); return 0; } -static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel) +static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int vc) { struct dsi_data *dsi = to_dsi_data(dssdev); DECLARE_COMPLETION_ONSTACK(completion); int r = 0; u32 err; - r = dsi_register_isr_vc(dsi, channel, dsi_completion_handler, + r = dsi_register_isr_vc(dsi, vc, dsi_completion_handler, &completion, DSI_VC_IRQ_BTA); if (r) goto err0; @@ -2561,7 +2027,7 @@ static int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel) if (r) goto err1; - r = dsi_vc_send_bta(dsi, channel); + r = dsi_vc_send_bta(dsi, vc); if (r) goto err2; @@ -2582,29 +2048,30 @@ err2: dsi_unregister_isr(dsi, dsi_completion_handler, &completion, DSI_IRQ_ERROR_MASK); err1: - dsi_unregister_isr_vc(dsi, channel, dsi_completion_handler, + dsi_unregister_isr_vc(dsi, vc, dsi_completion_handler, &completion, DSI_VC_IRQ_BTA); err0: return r; } -static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int channel, - u8 data_type, u16 len, u8 ecc) +static inline void dsi_vc_write_long_header(struct dsi_data *dsi, int vc, + int channel, u8 data_type, u16 len, + u8 ecc) { u32 val; u8 data_id; WARN_ON(!dsi_bus_is_locked(dsi)); - data_id = data_type | dsi->vc[channel].vc_id << 6; + data_id = data_type | channel << 6; val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) | FLD_VAL(ecc, 31, 24); - dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(channel), val); + dsi_write_reg(dsi, DSI_VC_LONG_PACKET_HEADER(vc), val); } -static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel, +static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int vc, u8 b1, u8 b2, u8 b3, u8 b4) { u32 val; @@ -2614,33 +2081,31 @@ 
static inline void dsi_vc_write_long_payload(struct dsi_data *dsi, int channel, /* DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n", b1, b2, b3, b4, val); */ - dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(channel), val); + dsi_write_reg(dsi, DSI_VC_LONG_PACKET_PAYLOAD(vc), val); } -static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type, - u8 *data, u16 len, u8 ecc) +static int dsi_vc_send_long(struct dsi_data *dsi, int vc, + const struct mipi_dsi_msg *msg) { /*u32 val; */ int i; - u8 *p; + const u8 *p; int r = 0; u8 b1, b2, b3, b4; if (dsi->debug_write) - DSSDBG("dsi_vc_send_long, %d bytes\n", len); + DSSDBG("dsi_vc_send_long, %d bytes\n", msg->tx_len); /* len + header */ - if (dsi->vc[channel].tx_fifo_size * 32 * 4 < len + 4) { + if (dsi->vc[vc].tx_fifo_size * 32 * 4 < msg->tx_len + 4) { DSSERR("unable to send long packet: packet too long.\n"); return -EINVAL; } - dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4); + dsi_vc_write_long_header(dsi, vc, msg->channel, msg->type, msg->tx_len, 0); - dsi_vc_write_long_header(dsi, channel, data_type, len, ecc); - - p = data; - for (i = 0; i < len >> 2; i++) { + p = msg->tx_buf; + for (i = 0; i < msg->tx_len >> 2; i++) { if (dsi->debug_write) DSSDBG("\tsending full packet %d\n", i); @@ -2649,10 +2114,10 @@ static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type, b3 = *p++; b4 = *p++; - dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, b4); + dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, b4); } - i = len % 4; + i = msg->tx_len % 4; if (i) { b1 = 0; b2 = 0; b3 = 0; @@ -2674,194 +2139,89 @@ static int dsi_vc_send_long(struct dsi_data *dsi, int channel, u8 data_type, break; } - dsi_vc_write_long_payload(dsi, channel, b1, b2, b3, 0); + dsi_vc_write_long_payload(dsi, vc, b1, b2, b3, 0); } return r; } -static int dsi_vc_send_short(struct dsi_data *dsi, int channel, u8 data_type, - u16 data, u8 ecc) +static int dsi_vc_send_short(struct dsi_data *dsi, int vc, + const struct mipi_dsi_msg *msg) { + struct mipi_dsi_packet pkt; u32 r; - u8 data_id; + + r = mipi_dsi_create_packet(&pkt, msg); + if (r < 0) + return r; WARN_ON(!dsi_bus_is_locked(dsi)); if (dsi->debug_write) - DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n", - channel, - data_type, data & 0xff, (data >> 8) & 0xff); + DSSDBG("dsi_vc_send_short(vc%d, dt %#x, b1 %#x, b2 %#x)\n", + vc, msg->type, pkt.header[1], pkt.header[2]); - dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_L4); - - if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(channel)), 16, 16)) { + if (FLD_GET(dsi_read_reg(dsi, DSI_VC_CTRL(vc)), 16, 16)) { DSSERR("ERROR FIFO FULL, aborting transfer\n"); return -EINVAL; } - data_id = data_type | dsi->vc[channel].vc_id << 6; - - r = (data_id << 0) | (data << 8) | (ecc << 24); + r = pkt.header[3] << 24 | pkt.header[2] << 16 | pkt.header[1] << 8 | + pkt.header[0]; - dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel), r); + dsi_write_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc), r); return 0; } -static int dsi_vc_send_null(struct dsi_data *dsi, int channel) +static int dsi_vc_send_null(struct dsi_data *dsi, int vc, int channel) { - return dsi_vc_send_long(dsi, channel, MIPI_DSI_NULL_PACKET, NULL, 0, 0); -} - -static int dsi_vc_write_nosync_common(struct dsi_data *dsi, int channel, - u8 *data, int len, - enum dss_dsi_content_type type) -{ - int r; - - if (len == 0) { - BUG_ON(type == DSS_DSI_CONTENT_DCS); - r = dsi_vc_send_short(dsi, channel, - MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0); - } else if (len == 1) { - r = dsi_vc_send_short(dsi, 
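Both transmit paths above now meet at the same 4-byte MIPI DSI packet header: byte 0 is the data ID (data type in bits 5:0, virtual channel in bits 7:6), bytes 1-2 carry the word count for long packets or the payload for short packets, and byte 3 is the ECC. dsi_vc_write_long_header() builds the word itself, while the short path reassembles it from mipi_dsi_create_packet()'s header[]. A standalone sketch of both words:

#include <stdint.h>
#include <stdio.h>

#define MIPI_DSI_DCS_LONG_WRITE  0x39   /* from video/mipi_display.h */
#define MIPI_DSI_DCS_SHORT_WRITE 0x05

/* Long packet header, as dsi_vc_write_long_header() composes it:
 * data ID in bits 7:0, 16-bit word count in 23:8, ECC in 31:24. */
static uint32_t long_header(uint8_t type, uint8_t channel, uint16_t len,
                            uint8_t ecc)
{
        uint8_t data_id = type | (channel << 6);

        return (uint32_t)data_id | ((uint32_t)len << 8) |
               ((uint32_t)ecc << 24);
}

/* Short packet header: same layout, but bytes 1..2 carry the payload
 * (what the driver reassembles from pkt.header[]). */
static uint32_t short_header(uint8_t type, uint8_t channel, uint8_t b1,
                             uint8_t b2, uint8_t ecc)
{
        uint8_t data_id = type | (channel << 6);

        return (uint32_t)data_id | ((uint32_t)b1 << 8) |
               ((uint32_t)b2 << 16) | ((uint32_t)ecc << 24);
}

int main(void)
{
        /* 4-byte DCS long write on VC 0, no ECC -> 0x00000439 */
        printf("long:  0x%08x\n",
               (unsigned int)long_header(MIPI_DSI_DCS_LONG_WRITE, 0, 4, 0));
        /* DCS "display on" (0x29) short write on VC 0 -> 0x00002905 */
        printf("short: 0x%08x\n",
               (unsigned int)short_header(MIPI_DSI_DCS_SHORT_WRITE, 0,
                                          0x29, 0, 0));
        return 0;
}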
channel, - type == DSS_DSI_CONTENT_GENERIC ? - MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM : - MIPI_DSI_DCS_SHORT_WRITE, data[0], 0); - } else if (len == 2) { - r = dsi_vc_send_short(dsi, channel, - type == DSS_DSI_CONTENT_GENERIC ? - MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM : - MIPI_DSI_DCS_SHORT_WRITE_PARAM, - data[0] | (data[1] << 8), 0); - } else { - r = dsi_vc_send_long(dsi, channel, - type == DSS_DSI_CONTENT_GENERIC ? - MIPI_DSI_GENERIC_LONG_WRITE : - MIPI_DSI_DCS_LONG_WRITE, data, len, 0); - } - - return r; -} - -static int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel, - u8 *data, int len) -{ - struct dsi_data *dsi = to_dsi_data(dssdev); - - return dsi_vc_write_nosync_common(dsi, channel, data, len, - DSS_DSI_CONTENT_DCS); -} - -static int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel, - u8 *data, int len) -{ - struct dsi_data *dsi = to_dsi_data(dssdev); + const struct mipi_dsi_msg msg = { + .channel = channel, + .type = MIPI_DSI_NULL_PACKET, + }; - return dsi_vc_write_nosync_common(dsi, channel, data, len, - DSS_DSI_CONTENT_GENERIC); + return dsi_vc_send_long(dsi, vc, &msg); } -static int dsi_vc_write_common(struct omap_dss_device *dssdev, - int channel, u8 *data, int len, - enum dss_dsi_content_type type) +static int dsi_vc_write_common(struct omap_dss_device *dssdev, int vc, + const struct mipi_dsi_msg *msg) { struct dsi_data *dsi = to_dsi_data(dssdev); int r; - r = dsi_vc_write_nosync_common(dsi, channel, data, len, type); - if (r) - goto err; - - r = dsi_vc_send_bta_sync(dssdev, channel); - if (r) - goto err; - - /* RX_FIFO_NOT_EMPTY */ - if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20)) { - DSSERR("rx fifo not empty after write, dumping data:\n"); - dsi_vc_flush_receive_data(dsi, channel); - r = -EIO; - goto err; - } - - return 0; -err: - DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n", - channel, data[0], len); - return r; -} - -static int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data, - int len) -{ - return dsi_vc_write_common(dssdev, channel, data, len, - DSS_DSI_CONTENT_DCS); -} - -static int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data, - int len) -{ - return dsi_vc_write_common(dssdev, channel, data, len, - DSS_DSI_CONTENT_GENERIC); -} + if (mipi_dsi_packet_format_is_short(msg->type)) + r = dsi_vc_send_short(dsi, vc, msg); + else + r = dsi_vc_send_long(dsi, vc, msg); -static int dsi_vc_dcs_send_read_request(struct dsi_data *dsi, int channel, - u8 dcs_cmd) -{ - int r; + if (r < 0) + return r; - if (dsi->debug_read) - DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n", - channel, dcs_cmd); + /* + * TODO: we do not always have to do the BTA sync, for example + * we can improve performance by setting the update window + * information without sending BTA sync between the commands. + * In that case we can return early. 
+ */ - r = dsi_vc_send_short(dsi, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0); + r = dsi_vc_send_bta_sync(dssdev, vc); if (r) { - DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)" - " failed\n", channel, dcs_cmd); + DSSERR("bta sync failed\n"); return r; } - return 0; -} - -static int dsi_vc_generic_send_read_request(struct dsi_data *dsi, int channel, - u8 *reqdata, int reqlen) -{ - u16 data; - u8 data_type; - int r; - - if (dsi->debug_read) - DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n", - channel, reqlen); - - if (reqlen == 0) { - data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM; - data = 0; - } else if (reqlen == 1) { - data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM; - data = reqdata[0]; - } else if (reqlen == 2) { - data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM; - data = reqdata[0] | (reqdata[1] << 8); - } else { - BUG(); - return -EINVAL; - } - - r = dsi_vc_send_short(dsi, channel, data_type, data, 0); - if (r) { - DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)" - " failed\n", channel, reqlen); - return r; + /* RX_FIFO_NOT_EMPTY */ + if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20)) { + DSSERR("rx fifo not empty after write, dumping data:\n"); + dsi_vc_flush_receive_data(dsi, vc); + return -EIO; } return 0; } -static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf, +static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int vc, u8 *buf, int buflen, enum dss_dsi_content_type type) { u32 val; @@ -2869,13 +2229,13 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf, int r; /* RX_FIFO_NOT_EMPTY */ - if (REG_GET(dsi, DSI_VC_CTRL(channel), 20, 20) == 0) { + if (REG_GET(dsi, DSI_VC_CTRL(vc), 20, 20) == 0) { DSSERR("RX fifo empty when trying to read.\n"); r = -EIO; goto err; } - val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(channel)); + val = dsi_read_reg(dsi, DSI_VC_SHORT_PACKET_HEADER(vc)); if (dsi->debug_read) DSSDBG("\theader: %08x\n", val); dt = FLD_GET(val, 5, 0); @@ -2939,7 +2299,7 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf, for (w = 0; w < len + 2;) { int b; val = dsi_read_reg(dsi, - DSI_VC_SHORT_PACKET_HEADER(channel)); + DSI_VC_SHORT_PACKET_HEADER(vc)); if (dsi->debug_read) DSSDBG("\t\t%02x %02x %02x %02x\n", (val >> 0) & 0xff, @@ -2963,168 +2323,73 @@ static int dsi_vc_read_rx_fifo(struct dsi_data *dsi, int channel, u8 *buf, } err: - DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel, + DSSERR("dsi_vc_read_rx_fifo(vc %d type %s) failed\n", vc, type == DSS_DSI_CONTENT_GENERIC ? 
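Both read helpers funnel into dsi_vc_read_rx_fifo(), which keys off bits 5:0 of the first word popped from DSI_VC_SHORT_PACKET_HEADER (the dt = FLD_GET(val, 5, 0) above) to decide whether a short response, a long response, or an error report follows. That dispatch in isolation, with the RX data-type codes from video/mipi_display.h:

#include <stdio.h>

/* RX data types, from video/mipi_display.h */
#define MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT  0x02
#define MIPI_DSI_RX_DCS_LONG_READ_RESPONSE        0x1c
#define MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE 0x21
#define MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE 0x22

/* Dispatch on the first header word of a read response, the way
 * dsi_vc_read_rx_fifo() does: data type in bits 5:0, the rest of the
 * word is payload or length depending on that type. */
static void classify(unsigned int val)
{
        switch (val & 0x3f) {
        case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
                printf("error report, flags %#06x\n", (val >> 8) & 0xffff);
                break;
        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
                printf("short DCS response, byte %#04x\n", (val >> 8) & 0xff);
                break;
        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
                printf("short DCS response, bytes %#04x %#04x\n",
                       (val >> 8) & 0xff, (val >> 16) & 0xff);
                break;
        case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
                printf("long DCS response, len %u\n", (val >> 8) & 0xffff);
                break;
        default:
                printf("unknown data type %#04x\n", val & 0x3f);
        }
}

int main(void)
{
        classify(0x00003f21);   /* 1-byte DCS read that returned 0x3f */
        return 0;
}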
"GENERIC" : "DCS"); return r; } -static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, - u8 *buf, int buflen) +static int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int vc, + const struct mipi_dsi_msg *msg) { struct dsi_data *dsi = to_dsi_data(dssdev); + u8 cmd = ((u8 *)msg->tx_buf)[0]; int r; - r = dsi_vc_dcs_send_read_request(dsi, channel, dcs_cmd); + if (dsi->debug_read) + DSSDBG("%s(vc %d, cmd %x)\n", __func__, vc, cmd); + + r = dsi_vc_send_short(dsi, vc, msg); if (r) goto err; - r = dsi_vc_send_bta_sync(dssdev, channel); + r = dsi_vc_send_bta_sync(dssdev, vc); if (r) goto err; - r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen, + r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len, DSS_DSI_CONTENT_DCS); if (r < 0) goto err; - if (r != buflen) { + if (r != msg->rx_len) { r = -EIO; goto err; } return 0; err: - DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd); + DSSERR("%s(vc %d, cmd 0x%02x) failed\n", __func__, vc, cmd); return r; } -static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int channel, - u8 *reqdata, int reqlen, u8 *buf, int buflen) +static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int vc, + const struct mipi_dsi_msg *msg) { struct dsi_data *dsi = to_dsi_data(dssdev); int r; - r = dsi_vc_generic_send_read_request(dsi, channel, reqdata, reqlen); + r = dsi_vc_send_short(dsi, vc, msg); if (r) - return r; + goto err; - r = dsi_vc_send_bta_sync(dssdev, channel); + r = dsi_vc_send_bta_sync(dssdev, vc); if (r) - return r; + goto err; - r = dsi_vc_read_rx_fifo(dsi, channel, buf, buflen, + r = dsi_vc_read_rx_fifo(dsi, vc, msg->rx_buf, msg->rx_len, DSS_DSI_CONTENT_GENERIC); if (r < 0) - return r; - - if (r != buflen) { - r = -EIO; - return r; - } - - return 0; -} - -static int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel, - u16 len) -{ - struct dsi_data *dsi = to_dsi_data(dssdev); - - return dsi_vc_send_short(dsi, channel, - MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0); -} - -static int dsi_enter_ulps(struct dsi_data *dsi) -{ - DECLARE_COMPLETION_ONSTACK(completion); - int r, i; - unsigned int mask; - - DSSDBG("Entering ULPS"); - - WARN_ON(!dsi_bus_is_locked(dsi)); - - WARN_ON(dsi->ulps_enabled); - - if (dsi->ulps_enabled) - return 0; - - /* DDR_CLK_ALWAYS_ON */ - if (REG_GET(dsi, DSI_CLK_CTRL, 13, 13)) { - dsi_if_enable(dsi, 0); - REG_FLD_MOD(dsi, DSI_CLK_CTRL, 0, 13, 13); - dsi_if_enable(dsi, 1); - } - - dsi_sync_vc(dsi, 0); - dsi_sync_vc(dsi, 1); - dsi_sync_vc(dsi, 2); - dsi_sync_vc(dsi, 3); - - dsi_force_tx_stop_mode_io(dsi); - - dsi_vc_enable(dsi, 0, false); - dsi_vc_enable(dsi, 1, false); - dsi_vc_enable(dsi, 2, false); - dsi_vc_enable(dsi, 3, false); - - if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 16, 16)) { /* HS_BUSY */ - DSSERR("HS busy when enabling ULPS\n"); - return -EIO; - } - - if (REG_GET(dsi, DSI_COMPLEXIO_CFG2, 17, 17)) { /* LP_BUSY */ - DSSERR("LP busy when enabling ULPS\n"); - return -EIO; - } - - r = dsi_register_isr_cio(dsi, dsi_completion_handler, &completion, - DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); - if (r) - return r; - - mask = 0; - - for (i = 0; i < dsi->num_lanes_supported; ++i) { - if (dsi->lanes[i].function == DSI_LANE_UNUSED) - continue; - mask |= 1 << i; - } - /* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */ - /* LANEx_ULPS_SIG2 */ - REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, mask, 9, 5); - - /* flush posted write and wait for SCP interface to finish the write */ - dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2); + goto err; - if 
(wait_for_completion_timeout(&completion, - msecs_to_jiffies(1000)) == 0) { - DSSERR("ULPS enable timeout\n"); + if (r != msg->rx_len) { r = -EIO; goto err; } - dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion, - DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); - - /* Reset LANEx_ULPS_SIG2 */ - REG_FLD_MOD(dsi, DSI_COMPLEXIO_CFG2, 0, 9, 5); - - /* flush posted write and wait for SCP interface to finish the write */ - dsi_read_reg(dsi, DSI_COMPLEXIO_CFG2); - - dsi_cio_power(dsi, DSI_COMPLEXIO_POWER_ULPS); - - dsi_if_enable(dsi, false); - - dsi->ulps_enabled = true; - return 0; - err: - dsi_unregister_isr_cio(dsi, dsi_completion_handler, &completion, - DSI_CIO_IRQ_ULPSACTIVENOT_ALL0); + DSSERR("%s(vc %d, reqlen %d) failed\n", __func__, vc, msg->tx_len); return r; } @@ -3241,7 +2506,7 @@ static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi) int num_line_buffers; if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { - int bpp = dsi_get_pixel_size(dsi->pix_fmt); + int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt); const struct videomode *vm = &dsi->vm; /* * Don't use line buffers if width is greater than the video @@ -3372,7 +2637,7 @@ static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi) int tclk_trail, ths_exit, exiths_clk; bool ddr_alwon; const struct videomode *vm = &dsi->vm; - int bpp = dsi_get_pixel_size(dsi->pix_fmt); + int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt); int ndl = dsi->num_lanes_used - 1; int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1; int hsa_interleave_hs = 0, hsa_interleave_lp = 0; @@ -3500,7 +2765,7 @@ static int dsi_proto_config(struct dsi_data *dsi) dsi_set_lp_rx_timeout(dsi, 0x1fff, true, true); dsi_set_hs_tx_timeout(dsi, 0x1fff, true, true); - switch (dsi_get_pixel_size(dsi->pix_fmt)) { + switch (mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt)) { case 16: buswidth = 0; break; @@ -3621,7 +2886,7 @@ static void dsi_proto_timings(struct dsi_data *dsi) int window_sync = dsi->vm_timings.window_sync; bool hsync_end; const struct videomode *vm = &dsi->vm; - int bpp = dsi_get_pixel_size(dsi->pix_fmt); + int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt); int tl, t_he, width_bytes; hsync_end = dsi->vm_timings.trans_mode == OMAP_DSS_DSI_PULSE_MODE; @@ -3659,12 +2924,9 @@ static void dsi_proto_timings(struct dsi_data *dsi) } } -static int dsi_configure_pins(struct omap_dss_device *dssdev, - const struct omap_dsi_pin_config *pin_cfg) +static int dsi_configure_pins(struct dsi_data *dsi, + int num_pins, const u32 *pins) { - struct dsi_data *dsi = to_dsi_data(dssdev); - int num_pins; - const int *pins; struct dsi_lane_config lanes[DSI_MAX_NR_LANES]; int num_lanes; int i; @@ -3677,9 +2939,6 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev, DSI_LANE_DATA4, }; - num_pins = pin_cfg->num_pins; - pins = pin_cfg->pins; - if (num_pins < 4 || num_pins > dsi->num_lanes_supported * 2 || num_pins % 2 != 0) return -EINVAL; @@ -3691,15 +2950,15 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev, for (i = 0; i < num_pins; i += 2) { u8 lane, pol; - int dx, dy; + u32 dx, dy; dx = pins[i]; dy = pins[i + 1]; - if (dx < 0 || dx >= dsi->num_lanes_supported * 2) + if (dx >= dsi->num_lanes_supported * 2) return -EINVAL; - if (dy < 0 || dy >= dsi->num_lanes_supported * 2) + if (dy >= dsi->num_lanes_supported * 2) return -EINVAL; if (dx & 1) { @@ -3725,86 +2984,102 @@ static int dsi_configure_pins(struct omap_dss_device *dssdev, return 0; } -static int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel) +static int 
dsi_enable_video_mode(struct dsi_data *dsi, int vc) { - struct dsi_data *dsi = to_dsi_data(dssdev); - int bpp = dsi_get_pixel_size(dsi->pix_fmt); + int bpp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt); u8 data_type; u16 word_count; - int r; - r = dsi_display_init_dispc(dsi); - if (r) - return r; + switch (dsi->pix_fmt) { + case MIPI_DSI_FMT_RGB888: + data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24; + break; + case MIPI_DSI_FMT_RGB666: + data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18; + break; + case MIPI_DSI_FMT_RGB666_PACKED: + data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18; + break; + case MIPI_DSI_FMT_RGB565: + data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16; + break; + default: + return -EINVAL; + } - if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { - switch (dsi->pix_fmt) { - case OMAP_DSS_DSI_FMT_RGB888: - data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24; - break; - case OMAP_DSS_DSI_FMT_RGB666: - data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18; - break; - case OMAP_DSS_DSI_FMT_RGB666_PACKED: - data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18; - break; - case OMAP_DSS_DSI_FMT_RGB565: - data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16; - break; - default: - r = -EINVAL; - goto err_pix_fmt; - } + dsi_if_enable(dsi, false); + dsi_vc_enable(dsi, vc, false); - dsi_if_enable(dsi, false); - dsi_vc_enable(dsi, channel, false); + /* MODE, 1 = video mode */ + REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 1, 4, 4); + + word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8); - /* MODE, 1 = video mode */ - REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 1, 4, 4); + dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, data_type, + word_count, 0); - word_count = DIV_ROUND_UP(dsi->vm.hactive * bpp, 8); + dsi_vc_enable(dsi, vc, true); + dsi_if_enable(dsi, true); - dsi_vc_write_long_header(dsi, channel, data_type, - word_count, 0); + return 0; +} + +static void dsi_disable_video_mode(struct dsi_data *dsi, int vc) +{ + dsi_if_enable(dsi, false); + dsi_vc_enable(dsi, vc, false); + + /* MODE, 0 = command mode */ + REG_FLD_MOD(dsi, DSI_VC_CTRL(vc), 0, 4, 4); + + dsi_vc_enable(dsi, vc, true); + dsi_if_enable(dsi, true); +} + +static void dsi_enable_video_output(struct omap_dss_device *dssdev, int vc) +{ + struct dsi_data *dsi = to_dsi_data(dssdev); + int r; + + r = dsi_init_dispc(dsi); + if (r) { + dev_err(dsi->dev, "failed to init dispc!\n"); + return; + } - dsi_vc_enable(dsi, channel, true); - dsi_if_enable(dsi, true); + if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { + r = dsi_enable_video_mode(dsi, vc); + if (r) + goto err_video_mode; } r = dss_mgr_enable(&dsi->output); if (r) goto err_mgr_enable; - return 0; + return; err_mgr_enable: if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { dsi_if_enable(dsi, false); - dsi_vc_enable(dsi, channel, false); + dsi_vc_enable(dsi, vc, false); } -err_pix_fmt: - dsi_display_uninit_dispc(dsi); - return r; +err_video_mode: + dsi_uninit_dispc(dsi); + dev_err(dsi->dev, "failed to enable DSI encoder!\n"); + return; } -static void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel) +static void dsi_disable_video_output(struct omap_dss_device *dssdev, int vc) { struct dsi_data *dsi = to_dsi_data(dssdev); - if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) { - dsi_if_enable(dsi, false); - dsi_vc_enable(dsi, channel, false); - - /* MODE, 0 = command mode */ - REG_FLD_MOD(dsi, DSI_VC_CTRL(channel), 0, 4, 4); - - dsi_vc_enable(dsi, channel, true); - dsi_if_enable(dsi, true); - } + if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) + dsi_disable_video_mode(dsi, vc); dss_mgr_disable(&dsi->output); - dsi_display_uninit_dispc(dsi); + 
dsi_uninit_dispc(dsi); } static void dsi_update_screen_dispc(struct dsi_data *dsi) @@ -3817,16 +3092,14 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi) unsigned int packet_len; u32 l; int r; - const unsigned channel = dsi->update_channel; + const unsigned vc = dsi->update_vc; const unsigned int line_buf_size = dsi->line_buffer_size; u16 w = dsi->vm.hactive; u16 h = dsi->vm.vactive; DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h); - dsi_vc_config_source(dsi, channel, DSI_VC_SOURCE_VP); - - bytespp = dsi_get_pixel_size(dsi->pix_fmt) / 8; + bytespp = mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8; bytespl = w * bytespp; bytespf = bytespl * h; @@ -3845,16 +3118,16 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi) total_len += (bytespf % packet_payload) + 1; l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */ - dsi_write_reg(dsi, DSI_VC_TE(channel), l); + dsi_write_reg(dsi, DSI_VC_TE(vc), l); - dsi_vc_write_long_header(dsi, channel, MIPI_DSI_DCS_LONG_WRITE, + dsi_vc_write_long_header(dsi, vc, dsi->dsidev->channel, MIPI_DSI_DCS_LONG_WRITE, packet_len, 0); if (dsi->te_enabled) l = FLD_MOD(l, 1, 30, 30); /* TE_EN */ else l = FLD_MOD(l, 1, 31, 31); /* TE_START */ - dsi_write_reg(dsi, DSI_VC_TE(channel), l); + dsi_write_reg(dsi, DSI_VC_TE(vc), l); /* We put SIDLEMODE to no-idle for the duration of the transfer, * because DSS interrupts are not capable of waking up the CPU and the @@ -3877,7 +3150,7 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi) * for TE is longer than the timer allows */ REG_FLD_MOD(dsi, DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */ - dsi_vc_send_bta(dsi, channel); + dsi_vc_send_bta(dsi, vc); #ifdef DSI_CATCH_MISSING_TE mod_timer(&dsi->te_timer, jiffies + msecs_to_jiffies(250)); @@ -3902,7 +3175,7 @@ static void dsi_handle_framedone(struct dsi_data *dsi, int error) REG_FLD_MOD(dsi, DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */ } - dsi->framedone_callback(error, dsi->framedone_data); + dsi_bus_unlock(dsi); if (!error) dsi_perf_show(dsi, "DISPC"); @@ -3935,30 +3208,91 @@ static void dsi_framedone_irq_callback(void *data) cancel_delayed_work(&dsi->framedone_timeout_work); + DSSDBG("Framedone received!\n"); + dsi_handle_framedone(dsi, 0); } -static int dsi_update(struct omap_dss_device *dssdev, int channel, - void (*callback)(int, void *), void *data) +static int _dsi_update(struct dsi_data *dsi) { - struct dsi_data *dsi = to_dsi_data(dssdev); - dsi_perf_mark_setup(dsi); - dsi->update_channel = channel; - - dsi->framedone_callback = callback; - dsi->framedone_data = data; - #ifdef DSI_PERF_MEASURE dsi->update_bytes = dsi->vm.hactive * dsi->vm.vactive * - dsi_get_pixel_size(dsi->pix_fmt) / 8; + mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt) / 8; #endif dsi_update_screen_dispc(dsi); return 0; } +static int _dsi_send_nop(struct dsi_data *dsi, int vc, int channel) +{ + const u8 payload[] = { MIPI_DCS_NOP }; + const struct mipi_dsi_msg msg = { + .channel = channel, + .type = MIPI_DSI_DCS_SHORT_WRITE, + .tx_len = 1, + .tx_buf = payload, + }; + + WARN_ON(!dsi_bus_is_locked(dsi)); + + return _omap_dsi_host_transfer(dsi, vc, &msg); +} + +static int dsi_update_channel(struct omap_dss_device *dssdev, int vc) +{ + struct dsi_data *dsi = to_dsi_data(dssdev); + int r; + + dsi_bus_lock(dsi); + + if (!dsi->video_enabled) { + r = -EIO; + goto err; + } + + if (dsi->vm.hactive == 0 || dsi->vm.vactive == 0) { + r = -EINVAL; + goto err; + } + + DSSDBG("dsi_update_channel: %d", vc); + + /* + * Send NOP between the frames. If we don't send something here, the + * updates stop working. 
This is probably related to DSI spec stating + * that the DSI host should transition to LP at least once per frame. + */ + r = _dsi_send_nop(dsi, VC_CMD, dsi->dsidev->channel); + if (r < 0) { + DSSWARN("failed to send nop between frames: %d\n", r); + goto err; + } + + dsi->update_vc = vc; + + if (dsi->te_enabled && dsi->te_gpio) { + schedule_delayed_work(&dsi->te_timeout_work, + msecs_to_jiffies(250)); + atomic_set(&dsi->do_ext_te_update, 1); + } else { + _dsi_update(dsi); + } + + return 0; + +err: + dsi_bus_unlock(dsi); + return r; +} + +static int dsi_update_all(struct omap_dss_device *dssdev) +{ + return dsi_update_channel(dssdev, VC_VIDEO); +} + /* Display funcs */ static int dsi_configure_dispc_clocks(struct dsi_data *dsi) @@ -3983,12 +3317,12 @@ static int dsi_configure_dispc_clocks(struct dsi_data *dsi) return 0; } -static int dsi_display_init_dispc(struct dsi_data *dsi) +static int dsi_init_dispc(struct dsi_data *dsi) { - enum omap_channel channel = dsi->output.dispc_channel; + enum omap_channel dispc_channel = dsi->output.dispc_channel; int r; - dss_select_lcd_clk_source(dsi->dss, channel, dsi->module_id == 0 ? + dss_select_lcd_clk_source(dsi->dss, dispc_channel, dsi->module_id == 0 ? DSS_CLK_SRC_PLL1_1 : DSS_CLK_SRC_PLL2_1); @@ -4013,7 +3347,7 @@ static int dsi_display_init_dispc(struct dsi_data *dsi) dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; dsi->mgr_config.video_port_width = - dsi_get_pixel_size(dsi->pix_fmt); + mipi_dsi_pixel_format_to_bpp(dsi->pix_fmt); dsi->mgr_config.lcden_sig_polarity = 0; dss_mgr_set_lcd_config(&dsi->output, &dsi->mgr_config); @@ -4024,19 +3358,19 @@ err1: dss_mgr_unregister_framedone_handler(&dsi->output, dsi_framedone_irq_callback, dsi); err: - dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK); + dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK); return r; } -static void dsi_display_uninit_dispc(struct dsi_data *dsi) +static void dsi_uninit_dispc(struct dsi_data *dsi) { - enum omap_channel channel = dsi->output.dispc_channel; + enum omap_channel dispc_channel = dsi->output.dispc_channel; if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) dss_mgr_unregister_framedone_handler(&dsi->output, dsi_framedone_irq_callback, dsi); - dss_select_lcd_clk_source(dsi->dss, channel, DSS_CLK_SRC_FCK); + dss_select_lcd_clk_source(dsi->dss, dispc_channel, DSS_CLK_SRC_FCK); } static int dsi_configure_dsi_clocks(struct dsi_data *dsi) @@ -4055,7 +3389,37 @@ static int dsi_configure_dsi_clocks(struct dsi_data *dsi) return 0; } -static int dsi_display_init_dsi(struct dsi_data *dsi) +static void dsi_setup_dsi_vcs(struct dsi_data *dsi) +{ + /* Setup VC_CMD for LP and cpu transfers */ + REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 9, 9); /* LP */ + + REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_CMD), 0, 1, 1); /* SOURCE_L4 */ + dsi->vc[VC_CMD].source = DSI_VC_SOURCE_L4; + + /* Setup VC_VIDEO for HS and dispc transfers */ + REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 9, 9); /* HS */ + + REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 1, 1); /* SOURCE_VP */ + dsi->vc[VC_VIDEO].source = DSI_VC_SOURCE_VP; + + if ((dsi->data->quirks & DSI_QUIRK_DCS_CMD_CONFIG_VC) && + !(dsi->dsidev->mode_flags & MIPI_DSI_MODE_VIDEO)) + REG_FLD_MOD(dsi, DSI_VC_CTRL(VC_VIDEO), 1, 30, 30); /* DCS_CMD_ENABLE */ + + dsi_vc_enable(dsi, VC_CMD, 1); + dsi_vc_enable(dsi, VC_VIDEO, 1); + + dsi_if_enable(dsi, 1); + + dsi_force_tx_stop_mode_io(dsi); + + /* start the DDR clock by sending a NULL packet */ + if (!(dsi->dsidev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)) + dsi_vc_send_null(dsi, VC_CMD, 
dsi->dsidev->channel); +} + +static int dsi_init_dsi(struct dsi_data *dsi) { int r; @@ -4097,13 +3461,7 @@ static int dsi_display_init_dsi(struct dsi_data *dsi) if (r) goto err3; - /* enable interface */ - dsi_vc_enable(dsi, 0, 1); - dsi_vc_enable(dsi, 1, 1); - dsi_vc_enable(dsi, 2, 1); - dsi_vc_enable(dsi, 3, 1); - dsi_if_enable(dsi, 1); - dsi_force_tx_stop_mode_io(dsi); + dsi_setup_dsi_vcs(dsi); return 0; err3: @@ -4119,12 +3477,8 @@ err0: return r; } -static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes, - bool enter_ulps) +static void dsi_uninit_dsi(struct dsi_data *dsi) { - if (enter_ulps && !dsi->ulps_enabled) - dsi_enter_ulps(dsi); - /* disable interface */ dsi_if_enable(dsi, 0); dsi_vc_enable(dsi, 0, 0); @@ -4136,21 +3490,19 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes, dsi_cio_uninit(dsi); dss_pll_disable(&dsi->pll); - if (disconnect_lanes) { - regulator_disable(dsi->vdds_dsi_reg); - dsi->vdds_dsi_enabled = false; - } + regulator_disable(dsi->vdds_dsi_reg); + dsi->vdds_dsi_enabled = false; } -static void dsi_display_enable(struct omap_dss_device *dssdev) +static void dsi_enable(struct dsi_data *dsi) { - struct dsi_data *dsi = to_dsi_data(dssdev); int r; - DSSDBG("dsi_display_enable\n"); - WARN_ON(!dsi_bus_is_locked(dsi)); + if (WARN_ON(dsi->iface_enabled)) + return; + mutex_lock(&dsi->lock); r = dsi_runtime_get(dsi); @@ -4159,10 +3511,12 @@ static void dsi_display_enable(struct omap_dss_device *dssdev) _dsi_initialize_irq(dsi); - r = dsi_display_init_dsi(dsi); + r = dsi_init_dsi(dsi); if (r) goto err_init_dsi; + dsi->iface_enabled = true; + mutex_unlock(&dsi->lock); return; @@ -4171,18 +3525,16 @@ err_init_dsi: dsi_runtime_put(dsi); err_get_dsi: mutex_unlock(&dsi->lock); - DSSDBG("dsi_display_enable FAILED\n"); + DSSDBG("dsi_enable FAILED\n"); } -static void dsi_display_disable(struct omap_dss_device *dssdev, - bool disconnect_lanes, bool enter_ulps) +static void dsi_disable(struct dsi_data *dsi) { - struct dsi_data *dsi = to_dsi_data(dssdev); - - DSSDBG("dsi_display_disable\n"); - WARN_ON(!dsi_bus_is_locked(dsi)); + if (WARN_ON(!dsi->iface_enabled)) + return; + mutex_lock(&dsi->lock); dsi_sync_vc(dsi, 0); @@ -4190,18 +3542,26 @@ static void dsi_display_disable(struct omap_dss_device *dssdev, dsi_sync_vc(dsi, 2); dsi_sync_vc(dsi, 3); - dsi_display_uninit_dsi(dsi, disconnect_lanes, enter_ulps); + dsi_uninit_dsi(dsi); dsi_runtime_put(dsi); + dsi->iface_enabled = false; + mutex_unlock(&dsi->lock); } -static int dsi_enable_te(struct omap_dss_device *dssdev, bool enable) +static int dsi_enable_te(struct dsi_data *dsi, bool enable) { - struct dsi_data *dsi = to_dsi_data(dssdev); - dsi->te_enabled = enable; + + if (dsi->te_gpio) { + if (enable) + enable_irq(dsi->te_irq); + else + disable_irq(dsi->te_irq); + } + return 0; } @@ -4351,7 +3711,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi, unsigned long pck, txbyteclk; clkin = clk_get_rate(dsi->pll.clkin); - bitspp = dsi_get_pixel_size(cfg->pixel_format); + bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format); ndl = dsi->num_lanes_used - 1; /* @@ -4384,7 +3744,7 @@ static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx) { struct dsi_data *dsi = ctx->dsi; const struct omap_dss_dsi_config *cfg = ctx->config; - int bitspp = dsi_get_pixel_size(cfg->pixel_format); + int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format); int ndl = dsi->num_lanes_used - 1; unsigned long hsclk = ctx->dsi_cinfo.clkdco / 4; unsigned long byteclk = hsclk / 4; @@ -4531,7 +3891,6 @@ 
static bool dsi_vm_calc_blanking(struct dsi_clk_calc_ctx *ctx) dsi_vm->hfp_blanking_mode = 1; dsi_vm->hbp_blanking_mode = 1; - dsi_vm->ddr_clk_always_on = cfg->ddr_clk_always_on; dsi_vm->window_sync = 4; /* setup DISPC videomode */ @@ -4651,7 +4010,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi, unsigned long pll_min; unsigned long pll_max; int ndl = dsi->num_lanes_used - 1; - int bitspp = dsi_get_pixel_size(cfg->pixel_format); + int bitspp = mipi_dsi_pixel_format_to_bpp(cfg->pixel_format); unsigned long byteclk_min; clkin = clk_get_rate(dsi->pll.clkin); @@ -4684,39 +4043,62 @@ static bool dsi_vm_calc(struct dsi_data *dsi, dsi_vm_calc_pll_cb, ctx); } -static int dsi_set_config(struct omap_dss_device *dssdev, - const struct omap_dss_dsi_config *config) +static bool dsi_is_video_mode(struct omap_dss_device *dssdev) { struct dsi_data *dsi = to_dsi_data(dssdev); - struct dsi_clk_calc_ctx ctx; + + return dsi->mode == OMAP_DSS_DSI_VIDEO_MODE; +} + +static int __dsi_calc_config(struct dsi_data *dsi, + const struct drm_display_mode *mode, + struct dsi_clk_calc_ctx *ctx) +{ + struct omap_dss_dsi_config cfg = dsi->config; + struct videomode vm; bool ok; int r; - mutex_lock(&dsi->lock); + drm_display_mode_to_videomode(mode, &vm); - dsi->pix_fmt = config->pixel_format; - dsi->mode = config->mode; + cfg.vm = &vm; + cfg.mode = dsi->mode; + cfg.pixel_format = dsi->pix_fmt; - if (config->mode == OMAP_DSS_DSI_VIDEO_MODE) - ok = dsi_vm_calc(dsi, config, &ctx); + if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) + ok = dsi_vm_calc(dsi, &cfg, ctx); else - ok = dsi_cm_calc(dsi, config, &ctx); + ok = dsi_cm_calc(dsi, &cfg, ctx); - if (!ok) { - DSSERR("failed to find suitable DSI clock settings\n"); - r = -EINVAL; - goto err; - } + if (!ok) + return -EINVAL; + + dsi_pll_calc_dsi_fck(dsi, &ctx->dsi_cinfo); + + r = dsi_lp_clock_calc(ctx->dsi_cinfo.clkout[HSDIV_DSI], + cfg.lp_clk_min, cfg.lp_clk_max, &ctx->lp_cinfo); + if (r) + return r; - dsi_pll_calc_dsi_fck(dsi, &ctx.dsi_cinfo); + return 0; +} + +static int dsi_set_config(struct omap_dss_device *dssdev, + const struct drm_display_mode *mode) +{ + struct dsi_data *dsi = to_dsi_data(dssdev); + struct dsi_clk_calc_ctx ctx; + int r; - r = dsi_lp_clock_calc(ctx.dsi_cinfo.clkout[HSDIV_DSI], - config->lp_clk_min, config->lp_clk_max, &dsi->user_lp_cinfo); + mutex_lock(&dsi->lock); + + r = __dsi_calc_config(dsi, mode, &ctx); if (r) { - DSSERR("failed to find suitable DSI LP clock settings\n"); + DSSERR("failed to find suitable DSI clock settings\n"); goto err; } + dsi->user_lp_cinfo = ctx.lp_cinfo; dsi->user_dsi_cinfo = ctx.dsi_cinfo; dsi->user_dispc_cinfo = ctx.dispc_cinfo; @@ -4757,12 +4139,12 @@ err: } /* - * Return a hardcoded channel for the DSI output. This should work for + * Return a hardcoded dispc channel for the DSI output. This should work for * current use cases, but this can be later expanded to either resolve * the channel in some more dynamic manner, or get the channel as a user * parameter. 
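 * (Editor's note, not part of the patch: the "dispc channel" here is the
 * DISPC overlay manager, such as OMAP_DSS_CHANNEL_LCD or
 * OMAP_DSS_CHANNEL_LCD2, that feeds this DSI output.)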
*/ -static enum omap_channel dsi_get_channel(struct dsi_data *dsi) +static enum omap_channel dsi_get_dispc_channel(struct dsi_data *dsi) { switch (dsi->data->model) { case DSI_MODEL_OMAP3: @@ -4796,59 +4178,75 @@ static enum omap_channel dsi_get_channel(struct dsi_data *dsi) } } -static int dsi_request_vc(struct omap_dss_device *dssdev, int *channel) +static ssize_t _omap_dsi_host_transfer(struct dsi_data *dsi, int vc, + const struct mipi_dsi_msg *msg) { - struct dsi_data *dsi = to_dsi_data(dssdev); - int i; - - for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) { - if (!dsi->vc[i].dssdev) { - dsi->vc[i].dssdev = dssdev; - *channel = i; - return 0; - } - } + struct omap_dss_device *dssdev = &dsi->output; + int r; - DSSERR("cannot get VC for display %s", dssdev->name); - return -ENOSPC; -} + dsi_vc_enable_hs(dssdev, vc, !(msg->flags & MIPI_DSI_MSG_USE_LPM)); -static int dsi_set_vc_id(struct omap_dss_device *dssdev, int channel, int vc_id) -{ - struct dsi_data *dsi = to_dsi_data(dssdev); - - if (vc_id < 0 || vc_id > 3) { - DSSERR("VC ID out of range\n"); - return -EINVAL; + switch (msg->type) { + case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM: + case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM: + case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM: + case MIPI_DSI_GENERIC_LONG_WRITE: + case MIPI_DSI_DCS_SHORT_WRITE: + case MIPI_DSI_DCS_SHORT_WRITE_PARAM: + case MIPI_DSI_DCS_LONG_WRITE: + case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE: + case MIPI_DSI_NULL_PACKET: + r = dsi_vc_write_common(dssdev, vc, msg); + break; + case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM: + case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM: + case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM: + r = dsi_vc_generic_read(dssdev, vc, msg); + break; + case MIPI_DSI_DCS_READ: + r = dsi_vc_dcs_read(dssdev, vc, msg); + break; + default: + r = -EINVAL; + break; } - if (channel < 0 || channel > 3) { - DSSERR("Virtual Channel out of range\n"); - return -EINVAL; - } + if (r < 0) + return r; - if (dsi->vc[channel].dssdev != dssdev) { - DSSERR("Virtual Channel not allocated to display %s\n", - dssdev->name); - return -EINVAL; - } + if (msg->type == MIPI_DSI_DCS_SHORT_WRITE || + msg->type == MIPI_DSI_DCS_SHORT_WRITE_PARAM) { + u8 cmd = ((u8 *)msg->tx_buf)[0]; - dsi->vc[channel].vc_id = vc_id; + if (cmd == MIPI_DCS_SET_TEAR_OFF) + dsi_enable_te(dsi, false); + else if (cmd == MIPI_DCS_SET_TEAR_ON) + dsi_enable_te(dsi, true); + } return 0; } -static void dsi_release_vc(struct omap_dss_device *dssdev, int channel) +static ssize_t omap_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) { - struct dsi_data *dsi = to_dsi_data(dssdev); + struct dsi_data *dsi = host_to_omap(host); + int r; + int vc = VC_CMD; + + dsi_bus_lock(dsi); - if ((channel >= 0 && channel <= 3) && - dsi->vc[channel].dssdev == dssdev) { - dsi->vc[channel].dssdev = NULL; - dsi->vc[channel].vc_id = 0; + if (!dsi->iface_enabled) { + dsi_enable(dsi); + schedule_delayed_work(&dsi->dsi_disable_work, msecs_to_jiffies(2000)); } -} + r = _omap_dsi_host_transfer(dsi, vc, msg); + + dsi_bus_unlock(dsi); + + return r; +} static int dsi_get_clocks(struct dsi_data *dsi) { @@ -4865,57 +4263,167 @@ static int dsi_get_clocks(struct dsi_data *dsi) return 0; } -static int dsi_connect(struct omap_dss_device *src, - struct omap_dss_device *dst) +static const struct omapdss_dsi_ops dsi_ops = { + .update = dsi_update_all, + .is_video_mode = dsi_is_video_mode, +}; + +static irqreturn_t omap_dsi_te_irq_handler(int irq, void *dev_id) { - return omapdss_device_connect(dst->dss, dst, dst->next); + struct dsi_data *dsi 
= (struct dsi_data *)dev_id; + int old; + + old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0); + if (old) { + cancel_delayed_work(&dsi->te_timeout_work); + _dsi_update(dsi); + } + + return IRQ_HANDLED; } -static void dsi_disconnect(struct omap_dss_device *src, - struct omap_dss_device *dst) +static void omap_dsi_te_timeout_work_callback(struct work_struct *work) { - omapdss_device_disconnect(dst, dst->next); + struct dsi_data *dsi = + container_of(work, struct dsi_data, te_timeout_work.work); + int old; + + old = atomic_cmpxchg(&dsi->do_ext_te_update, 1, 0); + if (old) { + dev_err(dsi->dev, "TE not received for 250ms!\n"); + _dsi_update(dsi); + } } -static const struct omap_dss_device_ops dsi_ops = { - .connect = dsi_connect, - .disconnect = dsi_disconnect, - .enable = dsi_display_enable, +static int omap_dsi_register_te_irq(struct dsi_data *dsi, + struct mipi_dsi_device *client) +{ + int err; + int te_irq; - .dsi = { - .bus_lock = dsi_bus_lock, - .bus_unlock = dsi_bus_unlock, + dsi->te_gpio = gpiod_get(&client->dev, "te-gpios", GPIOD_IN); + if (IS_ERR(dsi->te_gpio)) { + err = PTR_ERR(dsi->te_gpio); - .disable = dsi_display_disable, + if (err == -ENOENT) { + dsi->te_gpio = NULL; + return 0; + } - .enable_hs = dsi_vc_enable_hs, + dev_err(dsi->dev, "Could not get TE gpio: %d\n", err); + return err; + } - .configure_pins = dsi_configure_pins, - .set_config = dsi_set_config, + te_irq = gpiod_to_irq(dsi->te_gpio); + if (te_irq < 0) { + gpiod_put(dsi->te_gpio); + dsi->te_gpio = NULL; + return -EINVAL; + } - .enable_video_output = dsi_enable_video_output, - .disable_video_output = dsi_disable_video_output, + dsi->te_irq = te_irq; - .update = dsi_update, + irq_set_status_flags(te_irq, IRQ_NOAUTOEN); - .enable_te = dsi_enable_te, + err = request_threaded_irq(te_irq, NULL, omap_dsi_te_irq_handler, + IRQF_TRIGGER_RISING, "TE", dsi); + if (err) { + dev_err(dsi->dev, "request irq failed with %d\n", err); + gpiod_put(dsi->te_gpio); + dsi->te_gpio = NULL; + return err; + } - .request_vc = dsi_request_vc, - .set_vc_id = dsi_set_vc_id, - .release_vc = dsi_release_vc, + INIT_DEFERRABLE_WORK(&dsi->te_timeout_work, + omap_dsi_te_timeout_work_callback); - .dcs_write = dsi_vc_dcs_write, - .dcs_write_nosync = dsi_vc_dcs_write_nosync, - .dcs_read = dsi_vc_dcs_read, + dev_dbg(dsi->dev, "Using GPIO TE\n"); - .gen_write = dsi_vc_generic_write, - .gen_write_nosync = dsi_vc_generic_write_nosync, - .gen_read = dsi_vc_generic_read, + return 0; +} - .bta_sync = dsi_vc_send_bta_sync, +static void omap_dsi_unregister_te_irq(struct dsi_data *dsi) +{ + if (dsi->te_gpio) { + free_irq(dsi->te_irq, dsi); + cancel_delayed_work(&dsi->te_timeout_work); + gpiod_put(dsi->te_gpio); + dsi->te_gpio = NULL; + } +} - .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size, - }, +static int omap_dsi_host_attach(struct mipi_dsi_host *host, + struct mipi_dsi_device *client) +{ + struct dsi_data *dsi = host_to_omap(host); + int r; + + if (dsi->dsidev) { + DSSERR("dsi client already attached\n"); + return -EBUSY; + } + + if (mipi_dsi_pixel_format_to_bpp(client->format) < 0) { + DSSERR("invalid pixel format\n"); + return -EINVAL; + } + + atomic_set(&dsi->do_ext_te_update, 0); + + if (client->mode_flags & MIPI_DSI_MODE_VIDEO) { + dsi->mode = OMAP_DSS_DSI_VIDEO_MODE; + } else { + r = omap_dsi_register_te_irq(dsi, client); + if (r) + return r; + + dsi->mode = OMAP_DSS_DSI_CMD_MODE; + } + + dsi->dsidev = client; + dsi->pix_fmt = client->format; + + dsi->config.hs_clk_min = 150000000; // TODO: get from client? 
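/*
 * Editor's sketch (an assumption, not part of this patch): the client side
 * of omap_dsi_host_attach(). A panel driver fills in the mipi_dsi_device
 * fields that the attach hook reads (format, mode_flags, hs_rate, lp_rate),
 * and its command traffic is then routed through omap_dsi_host_transfer().
 * The function names and clock numbers below are hypothetical.
 */
#include <drm/drm_mipi_dsi.h>
#include <video/mipi_display.h>

static int example_panel_probe(struct mipi_dsi_device *dsi)
{
	dsi->lanes = 2;
	dsi->format = MIPI_DSI_FMT_RGB888;
	/* video mode: attach selects OMAP_DSS_DSI_VIDEO_MODE, no TE gpio */
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
	dsi->hs_rate = 300000000;	/* becomes dsi->config.hs_clk_max */
	dsi->lp_rate = 10000000;	/* becomes dsi->config.lp_clk_max */

	return mipi_dsi_attach(dsi);	/* ends up in omap_dsi_host_attach() */
}

static int example_panel_read_id(struct mipi_dsi_device *dsi)
{
	u8 id = 0;
	ssize_t r;

	/* routed through omap_dsi_host_transfer() and dsi_vc_dcs_read() */
	r = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_DISPLAY_ID, &id, 1);

	return r < 0 ? r : 0;
}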
+ dsi->config.hs_clk_max = client->hs_rate; + dsi->config.lp_clk_min = 7000000; // TODO: get from client? + dsi->config.lp_clk_max = client->lp_rate; + + if (client->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) + dsi->config.trans_mode = OMAP_DSS_DSI_BURST_MODE; + else if (client->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) + dsi->config.trans_mode = OMAP_DSS_DSI_PULSE_MODE; + else + dsi->config.trans_mode = OMAP_DSS_DSI_EVENT_MODE; + + return 0; +} + +static int omap_dsi_host_detach(struct mipi_dsi_host *host, + struct mipi_dsi_device *client) +{ + struct dsi_data *dsi = host_to_omap(host); + + if (WARN_ON(dsi->dsidev != client)) + return -EINVAL; + + cancel_delayed_work_sync(&dsi->dsi_disable_work); + + dsi_bus_lock(dsi); + + if (dsi->iface_enabled) + dsi_disable(dsi); + + dsi_bus_unlock(dsi); + + omap_dsi_unregister_te_irq(dsi); + dsi->dsidev = NULL; + return 0; +} + +static const struct mipi_dsi_host_ops omap_dsi_host_ops = { + .attach = omap_dsi_host_attach, + .detach = omap_dsi_host_detach, + .transfer = omap_dsi_host_transfer, }; /* ----------------------------------------------------------------------------- @@ -5097,6 +4605,106 @@ static const struct component_ops dsi_component_ops = { }; /* ----------------------------------------------------------------------------- + * DRM Bridge Operations + */ + +static int dsi_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct dsi_data *dsi = drm_bridge_to_dsi(bridge); + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) + return -EINVAL; + + return drm_bridge_attach(bridge->encoder, dsi->output.next_bridge, + bridge, flags); +} + +static enum drm_mode_status +dsi_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct dsi_data *dsi = drm_bridge_to_dsi(bridge); + struct dsi_clk_calc_ctx ctx; + int r; + + mutex_lock(&dsi->lock); + r = __dsi_calc_config(dsi, mode, &ctx); + mutex_unlock(&dsi->lock); + + return r ? 
MODE_CLOCK_RANGE : MODE_OK; +} + +static void dsi_bridge_mode_set(struct drm_bridge *bridge, + const struct drm_display_mode *mode, + const struct drm_display_mode *adjusted_mode) +{ + struct dsi_data *dsi = drm_bridge_to_dsi(bridge); + + dsi_set_config(&dsi->output, adjusted_mode); +} + +static void dsi_bridge_enable(struct drm_bridge *bridge) +{ + struct dsi_data *dsi = drm_bridge_to_dsi(bridge); + struct omap_dss_device *dssdev = &dsi->output; + + cancel_delayed_work_sync(&dsi->dsi_disable_work); + + dsi_bus_lock(dsi); + + if (!dsi->iface_enabled) + dsi_enable(dsi); + + dsi_enable_video_output(dssdev, VC_VIDEO); + + dsi->video_enabled = true; + + dsi_bus_unlock(dsi); +} + +static void dsi_bridge_disable(struct drm_bridge *bridge) +{ + struct dsi_data *dsi = drm_bridge_to_dsi(bridge); + struct omap_dss_device *dssdev = &dsi->output; + + cancel_delayed_work_sync(&dsi->dsi_disable_work); + + dsi_bus_lock(dsi); + + dsi->video_enabled = false; + + dsi_disable_video_output(dssdev, VC_VIDEO); + + dsi_disable(dsi); + + dsi_bus_unlock(dsi); +} + +static const struct drm_bridge_funcs dsi_bridge_funcs = { + .attach = dsi_bridge_attach, + .mode_valid = dsi_bridge_mode_valid, + .mode_set = dsi_bridge_mode_set, + .enable = dsi_bridge_enable, + .disable = dsi_bridge_disable, +}; + +static void dsi_bridge_init(struct dsi_data *dsi) +{ + dsi->bridge.funcs = &dsi_bridge_funcs; + dsi->bridge.of_node = dsi->host.dev->of_node; + dsi->bridge.type = DRM_MODE_CONNECTOR_DSI; + + drm_bridge_add(&dsi->bridge); +} + +static void dsi_bridge_cleanup(struct dsi_data *dsi) +{ + drm_bridge_remove(&dsi->bridge); +} + +/* ----------------------------------------------------------------------------- * Probe & Remove, Suspend & Resume */ @@ -5105,23 +4713,26 @@ static int dsi_init_output(struct dsi_data *dsi) struct omap_dss_device *out = &dsi->output; int r; + dsi_bridge_init(dsi); + out->dev = dsi->dev; out->id = dsi->module_id == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2; out->type = OMAP_DISPLAY_TYPE_DSI; out->name = dsi->module_id == 0 ? 
"dsi.0" : "dsi.1"; - out->dispc_channel = dsi_get_channel(dsi); - out->ops = &dsi_ops; - out->owner = THIS_MODULE; + out->dispc_channel = dsi_get_dispc_channel(dsi); + out->dsi_ops = &dsi_ops; out->of_port = 0; out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE | DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE; - r = omapdss_device_init_output(out, NULL); - if (r < 0) + r = omapdss_device_init_output(out, &dsi->bridge); + if (r < 0) { + dsi_bridge_cleanup(dsi); return r; + } omapdss_device_register(out); @@ -5134,6 +4745,7 @@ static void dsi_uninit_output(struct dsi_data *dsi) omapdss_device_unregister(out); omapdss_device_cleanup_output(out); + dsi_bridge_cleanup(dsi); } static int dsi_probe_of(struct dsi_data *dsi) @@ -5142,9 +4754,8 @@ static int dsi_probe_of(struct dsi_data *dsi) struct property *prop; u32 lane_arr[10]; int len, num_pins; - int r, i; + int r; struct device_node *ep; - struct omap_dsi_pin_config pin_cfg; ep = of_graph_get_endpoint_by_regs(node, 0, 0); if (!ep) @@ -5172,11 +4783,7 @@ static int dsi_probe_of(struct dsi_data *dsi) goto err; } - pin_cfg.num_pins = num_pins; - for (i = 0; i < num_pins; ++i) - pin_cfg.pins[i] = (int)lane_arr[i]; - - r = dsi_configure_pins(&dsi->output, &pin_cfg); + r = dsi_configure_pins(dsi, num_pins, lane_arr); if (r) { dev_err(dsi->dev, "failed to configure pins"); goto err; @@ -5256,6 +4863,18 @@ static const struct soc_device_attribute dsi_soc_devices[] = { { /* sentinel */ } }; +static void omap_dsi_disable_work_callback(struct work_struct *work) +{ + struct dsi_data *dsi = container_of(work, struct dsi_data, dsi_disable_work.work); + + dsi_bus_lock(dsi); + + if (dsi->iface_enabled && !dsi->video_enabled) + dsi_disable(dsi); + + dsi_bus_unlock(dsi); +} + static int dsi_probe(struct platform_device *pdev) { const struct soc_device_attribute *soc; @@ -5289,6 +4908,8 @@ static int dsi_probe(struct platform_device *pdev) INIT_DEFERRABLE_WORK(&dsi->framedone_timeout_work, dsi_framedone_timeout_work_callback); + INIT_DEFERRABLE_WORK(&dsi->dsi_disable_work, omap_dsi_disable_work_callback); + #ifdef DSI_CATCH_MISSING_TE timer_setup(&dsi->te_timer, dsi_te_timeout, 0); #endif @@ -5364,11 +4985,8 @@ static int dsi_probe(struct platform_device *pdev) } /* DSI VCs initialization */ - for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) { + for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) dsi->vc[i].source = DSI_VC_SOURCE_L4; - dsi->vc[i].dssdev = NULL; - dsi->vc[i].vc_id = 0; - } r = dsi_get_clocks(dsi); if (r) @@ -5387,21 +5005,24 @@ static int dsi_probe(struct platform_device *pdev) dsi->num_lanes_supported = 3; } - r = of_platform_populate(dev->of_node, NULL, NULL, dev); + dsi->host.ops = &omap_dsi_host_ops; + dsi->host.dev = &pdev->dev; + + r = dsi_probe_of(dsi); if (r) { - DSSERR("Failed to populate DSI child devices: %d\n", r); + DSSERR("Invalid DSI DT data\n"); + goto err_pm_disable; + } + + r = mipi_dsi_host_register(&dsi->host); + if (r < 0) { + dev_err(&pdev->dev, "failed to register DSI host: %d\n", r); goto err_pm_disable; } r = dsi_init_output(dsi); if (r) - goto err_of_depopulate; - - r = dsi_probe_of(dsi); - if (r) { - DSSERR("Invalid DSI DT data\n"); - goto err_uninit_output; - } + goto err_dsi_host_unregister; r = component_add(&pdev->dev, &dsi_component_ops); if (r) @@ -5411,8 +5032,8 @@ static int dsi_probe(struct platform_device *pdev) err_uninit_output: dsi_uninit_output(dsi); -err_of_depopulate: - of_platform_depopulate(dev); +err_dsi_host_unregister: + mipi_dsi_host_unregister(&dsi->host); err_pm_disable: pm_runtime_disable(dev); return 
r; @@ -5426,7 +5047,7 @@ static int dsi_remove(struct platform_device *pdev) dsi_uninit_output(dsi); - of_platform_depopulate(&pdev->dev); + mipi_dsi_host_unregister(&dsi->host); pm_runtime_disable(&pdev->dev); diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.h b/drivers/gpu/drm/omapdrm/dss/dsi.h new file mode 100644 index 000000000000..601707c0ecc4 --- /dev/null +++ b/drivers/gpu/drm/omapdrm/dss/dsi.h @@ -0,0 +1,456 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 Texas Instruments Incorporated - http://www.ti.com/ + * Author: Tomi Valkeinen <[email protected]> + */ + +#ifndef __OMAP_DRM_DSS_DSI_H +#define __OMAP_DRM_DSS_DSI_H + +#include <drm/drm_mipi_dsi.h> + +struct dsi_reg { + u16 module; + u16 idx; +}; + +#define DSI_REG(mod, idx) ((const struct dsi_reg) { mod, idx }) + +/* DSI Protocol Engine */ + +#define DSI_PROTO 0 +#define DSI_PROTO_SZ 0x200 + +#define DSI_REVISION DSI_REG(DSI_PROTO, 0x0000) +#define DSI_SYSCONFIG DSI_REG(DSI_PROTO, 0x0010) +#define DSI_SYSSTATUS DSI_REG(DSI_PROTO, 0x0014) +#define DSI_IRQSTATUS DSI_REG(DSI_PROTO, 0x0018) +#define DSI_IRQENABLE DSI_REG(DSI_PROTO, 0x001C) +#define DSI_CTRL DSI_REG(DSI_PROTO, 0x0040) +#define DSI_GNQ DSI_REG(DSI_PROTO, 0x0044) +#define DSI_COMPLEXIO_CFG1 DSI_REG(DSI_PROTO, 0x0048) +#define DSI_COMPLEXIO_IRQ_STATUS DSI_REG(DSI_PROTO, 0x004C) +#define DSI_COMPLEXIO_IRQ_ENABLE DSI_REG(DSI_PROTO, 0x0050) +#define DSI_CLK_CTRL DSI_REG(DSI_PROTO, 0x0054) +#define DSI_TIMING1 DSI_REG(DSI_PROTO, 0x0058) +#define DSI_TIMING2 DSI_REG(DSI_PROTO, 0x005C) +#define DSI_VM_TIMING1 DSI_REG(DSI_PROTO, 0x0060) +#define DSI_VM_TIMING2 DSI_REG(DSI_PROTO, 0x0064) +#define DSI_VM_TIMING3 DSI_REG(DSI_PROTO, 0x0068) +#define DSI_CLK_TIMING DSI_REG(DSI_PROTO, 0x006C) +#define DSI_TX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0070) +#define DSI_RX_FIFO_VC_SIZE DSI_REG(DSI_PROTO, 0x0074) +#define DSI_COMPLEXIO_CFG2 DSI_REG(DSI_PROTO, 0x0078) +#define DSI_RX_FIFO_VC_FULLNESS DSI_REG(DSI_PROTO, 0x007C) +#define DSI_VM_TIMING4 DSI_REG(DSI_PROTO, 0x0080) +#define DSI_TX_FIFO_VC_EMPTINESS DSI_REG(DSI_PROTO, 0x0084) +#define DSI_VM_TIMING5 DSI_REG(DSI_PROTO, 0x0088) +#define DSI_VM_TIMING6 DSI_REG(DSI_PROTO, 0x008C) +#define DSI_VM_TIMING7 DSI_REG(DSI_PROTO, 0x0090) +#define DSI_STOPCLK_TIMING DSI_REG(DSI_PROTO, 0x0094) +#define DSI_VC_CTRL(n) DSI_REG(DSI_PROTO, 0x0100 + (n * 0x20)) +#define DSI_VC_TE(n) DSI_REG(DSI_PROTO, 0x0104 + (n * 0x20)) +#define DSI_VC_LONG_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0108 + (n * 0x20)) +#define DSI_VC_LONG_PACKET_PAYLOAD(n) DSI_REG(DSI_PROTO, 0x010C + (n * 0x20)) +#define DSI_VC_SHORT_PACKET_HEADER(n) DSI_REG(DSI_PROTO, 0x0110 + (n * 0x20)) +#define DSI_VC_IRQSTATUS(n) DSI_REG(DSI_PROTO, 0x0118 + (n * 0x20)) +#define DSI_VC_IRQENABLE(n) DSI_REG(DSI_PROTO, 0x011C + (n * 0x20)) + +/* DSIPHY_SCP */ + +#define DSI_PHY 1 +#define DSI_PHY_OFFSET 0x200 +#define DSI_PHY_SZ 0x40 + +#define DSI_DSIPHY_CFG0 DSI_REG(DSI_PHY, 0x0000) +#define DSI_DSIPHY_CFG1 DSI_REG(DSI_PHY, 0x0004) +#define DSI_DSIPHY_CFG2 DSI_REG(DSI_PHY, 0x0008) +#define DSI_DSIPHY_CFG5 DSI_REG(DSI_PHY, 0x0014) +#define DSI_DSIPHY_CFG10 DSI_REG(DSI_PHY, 0x0028) + +/* DSI_PLL_CTRL_SCP */ + +#define DSI_PLL 2 +#define DSI_PLL_OFFSET 0x300 +#define DSI_PLL_SZ 0x20 + +#define DSI_PLL_CONTROL DSI_REG(DSI_PLL, 0x0000) +#define DSI_PLL_STATUS DSI_REG(DSI_PLL, 0x0004) +#define DSI_PLL_GO DSI_REG(DSI_PLL, 0x0008) +#define DSI_PLL_CONFIGURATION1 DSI_REG(DSI_PLL, 0x000C) +#define DSI_PLL_CONFIGURATION2 DSI_REG(DSI_PLL, 0x0010) + +/* Global interrupts */ +#define 
DSI_IRQ_VC0 (1 << 0) +#define DSI_IRQ_VC1 (1 << 1) +#define DSI_IRQ_VC2 (1 << 2) +#define DSI_IRQ_VC3 (1 << 3) +#define DSI_IRQ_WAKEUP (1 << 4) +#define DSI_IRQ_RESYNC (1 << 5) +#define DSI_IRQ_PLL_LOCK (1 << 7) +#define DSI_IRQ_PLL_UNLOCK (1 << 8) +#define DSI_IRQ_PLL_RECALL (1 << 9) +#define DSI_IRQ_COMPLEXIO_ERR (1 << 10) +#define DSI_IRQ_HS_TX_TIMEOUT (1 << 14) +#define DSI_IRQ_LP_RX_TIMEOUT (1 << 15) +#define DSI_IRQ_TE_TRIGGER (1 << 16) +#define DSI_IRQ_ACK_TRIGGER (1 << 17) +#define DSI_IRQ_SYNC_LOST (1 << 18) +#define DSI_IRQ_LDO_POWER_GOOD (1 << 19) +#define DSI_IRQ_TA_TIMEOUT (1 << 20) +#define DSI_IRQ_ERROR_MASK \ + (DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \ + DSI_IRQ_TA_TIMEOUT) +#define DSI_IRQ_CHANNEL_MASK 0xf + +/* Virtual channel interrupts */ +#define DSI_VC_IRQ_CS (1 << 0) +#define DSI_VC_IRQ_ECC_CORR (1 << 1) +#define DSI_VC_IRQ_PACKET_SENT (1 << 2) +#define DSI_VC_IRQ_FIFO_TX_OVF (1 << 3) +#define DSI_VC_IRQ_FIFO_RX_OVF (1 << 4) +#define DSI_VC_IRQ_BTA (1 << 5) +#define DSI_VC_IRQ_ECC_NO_CORR (1 << 6) +#define DSI_VC_IRQ_FIFO_TX_UDF (1 << 7) +#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8) +#define DSI_VC_IRQ_ERROR_MASK \ + (DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \ + DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \ + DSI_VC_IRQ_FIFO_TX_UDF) + +/* ComplexIO interrupts */ +#define DSI_CIO_IRQ_ERRSYNCESC1 (1 << 0) +#define DSI_CIO_IRQ_ERRSYNCESC2 (1 << 1) +#define DSI_CIO_IRQ_ERRSYNCESC3 (1 << 2) +#define DSI_CIO_IRQ_ERRSYNCESC4 (1 << 3) +#define DSI_CIO_IRQ_ERRSYNCESC5 (1 << 4) +#define DSI_CIO_IRQ_ERRESC1 (1 << 5) +#define DSI_CIO_IRQ_ERRESC2 (1 << 6) +#define DSI_CIO_IRQ_ERRESC3 (1 << 7) +#define DSI_CIO_IRQ_ERRESC4 (1 << 8) +#define DSI_CIO_IRQ_ERRESC5 (1 << 9) +#define DSI_CIO_IRQ_ERRCONTROL1 (1 << 10) +#define DSI_CIO_IRQ_ERRCONTROL2 (1 << 11) +#define DSI_CIO_IRQ_ERRCONTROL3 (1 << 12) +#define DSI_CIO_IRQ_ERRCONTROL4 (1 << 13) +#define DSI_CIO_IRQ_ERRCONTROL5 (1 << 14) +#define DSI_CIO_IRQ_STATEULPS1 (1 << 15) +#define DSI_CIO_IRQ_STATEULPS2 (1 << 16) +#define DSI_CIO_IRQ_STATEULPS3 (1 << 17) +#define DSI_CIO_IRQ_STATEULPS4 (1 << 18) +#define DSI_CIO_IRQ_STATEULPS5 (1 << 19) +#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1 (1 << 20) +#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1 (1 << 21) +#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2 (1 << 22) +#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2 (1 << 23) +#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3 (1 << 24) +#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3 (1 << 25) +#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4 (1 << 26) +#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4 (1 << 27) +#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5 (1 << 28) +#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5 (1 << 29) +#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 (1 << 30) +#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1 (1 << 31) +#define DSI_CIO_IRQ_ERROR_MASK \ + (DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \ + DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \ + DSI_CIO_IRQ_ERRSYNCESC5 | \ + DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \ + DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \ + DSI_CIO_IRQ_ERRESC5 | \ + DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \ + DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \ + DSI_CIO_IRQ_ERRCONTROL5 | \ + DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \ + DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \ + DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \ + DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \ + DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | 
DSI_CIO_IRQ_ERRCONTENTIONLP1_5) + +enum omap_dss_dsi_mode { + OMAP_DSS_DSI_CMD_MODE = 0, + OMAP_DSS_DSI_VIDEO_MODE, +}; + +enum omap_dss_dsi_trans_mode { + /* Sync Pulses: both sync start and end packets sent */ + OMAP_DSS_DSI_PULSE_MODE, + /* Sync Events: only sync start packets sent */ + OMAP_DSS_DSI_EVENT_MODE, + /* Burst: only sync start packets sent, pixels are time compressed */ + OMAP_DSS_DSI_BURST_MODE, +}; + +struct omap_dss_dsi_videomode_timings { + unsigned long hsclk; + + unsigned int ndl; + unsigned int bitspp; + + /* pixels */ + u16 hact; + /* lines */ + u16 vact; + + /* DSI video mode blanking data */ + /* Unit: byte clock cycles */ + u16 hss; + u16 hsa; + u16 hse; + u16 hfp; + u16 hbp; + /* Unit: line clocks */ + u16 vsa; + u16 vfp; + u16 vbp; + + /* DSI blanking modes */ + int blanking_mode; + int hsa_blanking_mode; + int hbp_blanking_mode; + int hfp_blanking_mode; + + enum omap_dss_dsi_trans_mode trans_mode; + + int window_sync; +}; + +struct omap_dss_dsi_config { + enum omap_dss_dsi_mode mode; + enum mipi_dsi_pixel_format pixel_format; + const struct videomode *vm; + + unsigned long hs_clk_min, hs_clk_max; + unsigned long lp_clk_min, lp_clk_max; + + enum omap_dss_dsi_trans_mode trans_mode; +}; + +/* DSI PLL HSDIV indices */ +#define HSDIV_DISPC 0 +#define HSDIV_DSI 1 + +#define DSI_MAX_NR_ISRS 2 +#define DSI_MAX_NR_LANES 5 + +enum dsi_model { + DSI_MODEL_OMAP3, + DSI_MODEL_OMAP4, + DSI_MODEL_OMAP5, +}; + +enum dsi_lane_function { + DSI_LANE_UNUSED = 0, + DSI_LANE_CLK, + DSI_LANE_DATA1, + DSI_LANE_DATA2, + DSI_LANE_DATA3, + DSI_LANE_DATA4, +}; + +struct dsi_lane_config { + enum dsi_lane_function function; + u8 polarity; +}; + +typedef void (*omap_dsi_isr_t) (void *arg, u32 mask); + +struct dsi_isr_data { + omap_dsi_isr_t isr; + void *arg; + u32 mask; +}; + +enum fifo_size { + DSI_FIFO_SIZE_0 = 0, + DSI_FIFO_SIZE_32 = 1, + DSI_FIFO_SIZE_64 = 2, + DSI_FIFO_SIZE_96 = 3, + DSI_FIFO_SIZE_128 = 4, +}; + +enum dsi_vc_source { + DSI_VC_SOURCE_L4 = 0, + DSI_VC_SOURCE_VP, +}; + +struct dsi_irq_stats { + unsigned long last_reset; + unsigned int irq_count; + unsigned int dsi_irqs[32]; + unsigned int vc_irqs[4][32]; + unsigned int cio_irqs[32]; +}; + +struct dsi_isr_tables { + struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS]; + struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS]; + struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS]; +}; + +struct dsi_lp_clock_info { + unsigned long lp_clk; + u16 lp_clk_div; +}; + +struct dsi_clk_calc_ctx { + struct dsi_data *dsi; + struct dss_pll *pll; + + /* inputs */ + + const struct omap_dss_dsi_config *config; + + unsigned long req_pck_min, req_pck_nom, req_pck_max; + + /* outputs */ + + struct dss_pll_clock_info dsi_cinfo; + struct dispc_clock_info dispc_cinfo; + struct dsi_lp_clock_info lp_cinfo; + + struct videomode vm; + struct omap_dss_dsi_videomode_timings dsi_vm; +}; + +struct dsi_module_id_data { + u32 address; + int id; +}; + +enum dsi_quirks { + DSI_QUIRK_PLL_PWR_BUG = (1 << 0), /* DSI-PLL power command 0x3 is not working */ + DSI_QUIRK_DCS_CMD_CONFIG_VC = (1 << 1), + DSI_QUIRK_VC_OCP_WIDTH = (1 << 2), + DSI_QUIRK_REVERSE_TXCLKESC = (1 << 3), + DSI_QUIRK_GNQ = (1 << 4), + DSI_QUIRK_PHY_DCC = (1 << 5), +}; + +struct dsi_of_data { + enum dsi_model model; + const struct dss_pll_hw *pll_hw; + const struct dsi_module_id_data *modules; + unsigned int max_fck_freq; + unsigned int max_pll_lpdiv; + enum dsi_quirks quirks; +}; + +struct dsi_data { + struct device *dev; + void __iomem *proto_base; + void __iomem *phy_base; + void __iomem *pll_base; 
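	/*
	 * Editor's note (annotation, not in the patch): proto_base, phy_base
	 * and pll_base above are the three register windows backing the
	 * DSI_PROTO, DSI_PHY and DSI_PLL register groups defined earlier in
	 * this header; the driver's dsi_read_reg()/dsi_write_reg() accessors
	 * pick the window from dsi_reg.module.
	 */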
+ + const struct dsi_of_data *data; + int module_id; + + int irq; + + bool is_enabled; + + struct clk *dss_clk; + struct regmap *syscon; + struct dss_device *dss; + + struct mipi_dsi_host host; + + struct dispc_clock_info user_dispc_cinfo; + struct dss_pll_clock_info user_dsi_cinfo; + + struct dsi_lp_clock_info user_lp_cinfo; + struct dsi_lp_clock_info current_lp_cinfo; + + struct dss_pll pll; + + bool vdds_dsi_enabled; + struct regulator *vdds_dsi_reg; + + struct mipi_dsi_device *dsidev; + + struct { + enum dsi_vc_source source; + enum fifo_size tx_fifo_size; + enum fifo_size rx_fifo_size; + } vc[4]; + + struct mutex lock; + struct semaphore bus_lock; + + spinlock_t irq_lock; + struct dsi_isr_tables isr_tables; + /* space for a copy used by the interrupt handler */ + struct dsi_isr_tables isr_tables_copy; + + int update_vc; +#ifdef DSI_PERF_MEASURE + unsigned int update_bytes; +#endif + + /* external TE GPIO */ + struct gpio_desc *te_gpio; + int te_irq; + struct delayed_work te_timeout_work; + atomic_t do_ext_te_update; + + bool te_enabled; + bool iface_enabled; + bool video_enabled; + + struct delayed_work framedone_timeout_work; + +#ifdef DSI_CATCH_MISSING_TE + struct timer_list te_timer; +#endif + + unsigned long cache_req_pck; + unsigned long cache_clk_freq; + struct dss_pll_clock_info cache_cinfo; + + u32 errors; + spinlock_t errors_lock; +#ifdef DSI_PERF_MEASURE + ktime_t perf_setup_time; + ktime_t perf_start_time; +#endif + int debug_read; + int debug_write; + struct { + struct dss_debugfs_entry *irqs; + struct dss_debugfs_entry *regs; + struct dss_debugfs_entry *clks; + } debugfs; + +#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS + spinlock_t irq_stats_lock; + struct dsi_irq_stats irq_stats; +#endif + + unsigned int num_lanes_supported; + unsigned int line_buffer_size; + + struct dsi_lane_config lanes[DSI_MAX_NR_LANES]; + unsigned int num_lanes_used; + + unsigned int scp_clk_refcount; + + struct omap_dss_dsi_config config; + + struct dss_lcd_mgr_config mgr_config; + struct videomode vm; + enum mipi_dsi_pixel_format pix_fmt; + enum omap_dss_dsi_mode mode; + struct omap_dss_dsi_videomode_timings vm_timings; + + struct omap_dss_device output; + struct drm_bridge bridge; + + struct delayed_work dsi_disable_work; +}; + +struct dsi_packet_sent_handler_data { + struct dsi_data *dsi; + struct completion *completion; +}; + +#endif /* __OMAP_DRM_DSS_DSI_H */ diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index d7b2f5bcac16..d6a5862b4dbf 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1308,6 +1308,7 @@ static int dss_bind(struct device *dev) { struct dss_device *dss = dev_get_drvdata(dev); struct platform_device *drm_pdev; + struct dss_pdata pdata; int r; r = component_bind_all(dev, NULL); @@ -1316,9 +1317,9 @@ static int dss_bind(struct device *dev) pm_set_vt_switch(0); - omapdss_set_dss(dss); - - drm_pdev = platform_device_register_simple("omapdrm", 0, NULL, 0); + pdata.dss = dss; + drm_pdev = platform_device_register_data(NULL, "omapdrm", 0, + &pdata, sizeof(pdata)); if (IS_ERR(drm_pdev)) { component_unbind_all(dev, NULL); return PTR_ERR(drm_pdev); @@ -1335,8 +1336,6 @@ static void dss_unbind(struct device *dev) platform_device_unregister(dss->drm_pdev); - omapdss_set_dss(NULL); - component_unbind_all(dev, NULL); } @@ -1569,15 +1568,7 @@ static int dss_remove(struct platform_device *pdev) static void dss_shutdown(struct platform_device *pdev) { - struct omap_dss_device *dssdev = NULL; - DSSDBG("shutdown\n"); - - 
for_each_dss_output(dssdev) { - if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE && - dssdev->ops && dssdev->ops->disable) - dssdev->ops->disable(dssdev); - } } static int dss_runtime_suspend(struct device *dev) @@ -1650,21 +1641,14 @@ static struct platform_driver * const omap_dss_drivers[] = { #endif }; -static int __init omap_dss_init(void) +int __init omap_dss_init(void) { return platform_register_drivers(omap_dss_drivers, ARRAY_SIZE(omap_dss_drivers)); } -static void __exit omap_dss_exit(void) +void omap_dss_exit(void) { platform_unregister_drivers(omap_dss_drivers, ARRAY_SIZE(omap_dss_drivers)); } - -module_init(omap_dss_init); -module_exit(omap_dss_exit); - -MODULE_AUTHOR("Tomi Valkeinen <[email protected]>"); -MODULE_DESCRIPTION("OMAP2/3/4/5 Display Subsystem"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h index 2b404bcb41dd..a547527bb2f3 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.h +++ b/drivers/gpu/drm/omapdrm/dss/dss.h @@ -257,8 +257,6 @@ struct dss_device { struct dss_pll *video2_pll; struct dispc_device *dispc; - const struct dispc_ops *dispc_ops; - const struct dss_mgr_ops *mgr_ops; struct omap_drm_private *mgr_ops_priv; }; @@ -393,6 +391,76 @@ void dispc_dump_clocks(struct dispc_device *dispc, struct seq_file *s); int dispc_runtime_get(struct dispc_device *dispc); void dispc_runtime_put(struct dispc_device *dispc); +int dispc_get_num_ovls(struct dispc_device *dispc); +int dispc_get_num_mgrs(struct dispc_device *dispc); + +const u32 *dispc_ovl_get_color_modes(struct dispc_device *dispc, + enum omap_plane_id plane); + +u32 dispc_read_irqstatus(struct dispc_device *dispc); +void dispc_clear_irqstatus(struct dispc_device *dispc, u32 mask); +void dispc_write_irqenable(struct dispc_device *dispc, u32 mask); + +int dispc_request_irq(struct dispc_device *dispc, irq_handler_t handler, + void *dev_id); +void dispc_free_irq(struct dispc_device *dispc, void *dev_id); + +u32 dispc_mgr_get_vsync_irq(struct dispc_device *dispc, + enum omap_channel channel); +u32 dispc_mgr_get_framedone_irq(struct dispc_device *dispc, + enum omap_channel channel); +u32 dispc_mgr_get_sync_lost_irq(struct dispc_device *dispc, + enum omap_channel channel); +u32 dispc_wb_get_framedone_irq(struct dispc_device *dispc); + +u32 dispc_get_memory_bandwidth_limit(struct dispc_device *dispc); + +void dispc_mgr_enable(struct dispc_device *dispc, + enum omap_channel channel, bool enable); + +bool dispc_mgr_go_busy(struct dispc_device *dispc, + enum omap_channel channel); + +void dispc_mgr_go(struct dispc_device *dispc, enum omap_channel channel); + +void dispc_mgr_set_lcd_config(struct dispc_device *dispc, + enum omap_channel channel, + const struct dss_lcd_mgr_config *config); +void dispc_mgr_set_timings(struct dispc_device *dispc, + enum omap_channel channel, + const struct videomode *vm); +void dispc_mgr_setup(struct dispc_device *dispc, + enum omap_channel channel, + const struct omap_overlay_manager_info *info); + +int dispc_mgr_check_timings(struct dispc_device *dispc, + enum omap_channel channel, + const struct videomode *vm); + +u32 dispc_mgr_gamma_size(struct dispc_device *dispc, + enum omap_channel channel); +void dispc_mgr_set_gamma(struct dispc_device *dispc, + enum omap_channel channel, + const struct drm_color_lut *lut, + unsigned int length); + +int dispc_ovl_setup(struct dispc_device *dispc, + enum omap_plane_id plane, + const struct omap_overlay_info *oi, + const struct videomode *vm, bool mem_to_mem, + enum omap_channel channel); + +int 
dispc_ovl_enable(struct dispc_device *dispc, + enum omap_plane_id plane, bool enable); + +bool dispc_has_writeback(struct dispc_device *dispc); +int dispc_wb_setup(struct dispc_device *dispc, + const struct omap_dss_writeback_info *wi, + bool mem_to_mem, const struct videomode *vm, + enum dss_writeback_channel channel_in); +bool dispc_wb_go_busy(struct dispc_device *dispc); +void dispc_wb_go(struct dispc_device *dispc); + void dispc_enable_sidle(struct dispc_device *dispc); void dispc_disable_sidle(struct dispc_device *dispc); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c index 8de41e74e8f8..35b750cebaeb 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c @@ -707,7 +707,6 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi) out->type = OMAP_DISPLAY_TYPE_HDMI; out->name = "hdmi.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; - out->owner = THIS_MODULE; out->of_port = 0; r = omapdss_device_init_output(out, &hdmi->bridge); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 54e5cb5aa52d..65085d886da5 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -681,7 +681,6 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi) out->type = OMAP_DISPLAY_TYPE_HDMI; out->name = "hdmi.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; - out->owner = THIS_MODULE; out->of_port = 0; r = omapdss_device_init_output(out, &hdmi->bridge); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c deleted file mode 100644 index f21b5df31213..000000000000 --- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c +++ /dev/null @@ -1,229 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2014 Texas Instruments Incorporated - https://www.ti.com/ - * Author: Tomi Valkeinen <[email protected]> - */ - -/* - * As omapdss panel drivers are omapdss specific, but we want to define the - * DT-data in generic manner, we convert the compatible strings of the panel and - * encoder nodes from "panel-foo" to "omapdss,panel-foo". This way we can have - * both correct DT data and omapdss specific drivers. - * - * When we get generic panel drivers to the kernel, this file will be removed. 
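 * (Editor's note, not part of the deleted file: this patch series is that
 * removal. With the DSI output converted to the generic mipi_dsi_host and
 * drm_bridge framework, the "omapdss," compatible-string fixup is no longer
 * needed, so the whole file is deleted here.)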
- */ - -#include <linux/kernel.h> -#include <linux/of.h> -#include <linux/of_graph.h> -#include <linux/slab.h> -#include <linux/list.h> - -static struct list_head dss_conv_list __initdata; - -static const char prefix[] __initconst = "omapdss,"; - -struct dss_conv_node { - struct list_head list; - struct device_node *node; - bool root; -}; - -static int __init omapdss_count_strings(const struct property *prop) -{ - const char *p = prop->value; - int l = 0, total = 0; - int i; - - for (i = 0; total < prop->length; total += l, p += l, i++) - l = strlen(p) + 1; - - return i; -} - -static void __init omapdss_update_prop(struct device_node *node, char *compat, - int len) -{ - struct property *prop; - - prop = kzalloc(sizeof(*prop), GFP_KERNEL); - if (!prop) - return; - - prop->name = "compatible"; - prop->value = compat; - prop->length = len; - - of_update_property(node, prop); -} - -static void __init omapdss_prefix_strcpy(char *dst, int dst_len, - const char *src, int src_len) -{ - size_t total = 0; - - while (total < src_len) { - size_t l = strlen(src) + 1; - - strcpy(dst, prefix); - dst += strlen(prefix); - - strcpy(dst, src); - dst += l; - - src += l; - total += l; - } -} - -/* prepend compatible property strings with "omapdss," */ -static void __init omapdss_omapify_node(struct device_node *node) -{ - struct property *prop; - char *new_compat; - int num_strs; - int new_len; - - prop = of_find_property(node, "compatible", NULL); - - if (!prop || !prop->value) - return; - - if (strnlen(prop->value, prop->length) >= prop->length) - return; - - /* is it already prefixed? */ - if (strncmp(prefix, prop->value, strlen(prefix)) == 0) - return; - - num_strs = omapdss_count_strings(prop); - - new_len = prop->length + strlen(prefix) * num_strs; - new_compat = kmalloc(new_len, GFP_KERNEL); - - omapdss_prefix_strcpy(new_compat, new_len, prop->value, prop->length); - - omapdss_update_prop(node, new_compat, new_len); -} - -static void __init omapdss_add_to_list(struct device_node *node, bool root) -{ - struct dss_conv_node *n = kmalloc(sizeof(*n), GFP_KERNEL); - if (n) { - n->node = node; - n->root = root; - list_add(&n->list, &dss_conv_list); - } -} - -static bool __init omapdss_list_contains(const struct device_node *node) -{ - struct dss_conv_node *n; - - list_for_each_entry(n, &dss_conv_list, list) { - if (n->node == node) - return true; - } - - return false; -} - -static void __init omapdss_walk_device(struct device_node *node, bool root) -{ - struct device_node *n; - - omapdss_add_to_list(node, root); - - /* - * of_graph_get_remote_port_parent() prints an error if there is no - * port/ports node. To avoid that, check first that there's the node. 
- */ - n = of_get_child_by_name(node, "ports"); - if (!n) - n = of_get_child_by_name(node, "port"); - if (!n) - return; - - of_node_put(n); - - n = NULL; - while ((n = of_graph_get_next_endpoint(node, n)) != NULL) { - struct device_node *pn; - - pn = of_graph_get_remote_port_parent(n); - - if (!pn) - continue; - - if (!of_device_is_available(pn) || omapdss_list_contains(pn)) { - of_node_put(pn); - continue; - } - - omapdss_walk_device(pn, false); - } -} - -static const struct of_device_id omapdss_of_match[] __initconst = { - { .compatible = "ti,omap2-dss", }, - { .compatible = "ti,omap3-dss", }, - { .compatible = "ti,omap4-dss", }, - { .compatible = "ti,omap5-dss", }, - { .compatible = "ti,dra7-dss", }, - {}, -}; - -static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = { - { .compatible = "panel-dsi-cm" }, - {}, -}; - -static void __init omapdss_find_children(struct device_node *np) -{ - struct device_node *child; - - for_each_available_child_of_node(np, child) { - if (!of_find_property(child, "compatible", NULL)) - continue; - - omapdss_walk_device(child, true); - - if (of_device_is_compatible(child, "ti,sysc")) - omapdss_find_children(child); - } -} - -static int __init omapdss_boot_init(void) -{ - struct device_node *dss; - - INIT_LIST_HEAD(&dss_conv_list); - - dss = of_find_matching_node(NULL, omapdss_of_match); - - if (dss == NULL || !of_device_is_available(dss)) - goto put_node; - - omapdss_walk_device(dss, true); - omapdss_find_children(dss); - - while (!list_empty(&dss_conv_list)) { - struct dss_conv_node *n; - - n = list_first_entry(&dss_conv_list, struct dss_conv_node, - list); - - if (of_match_node(omapdss_of_fixups_whitelist, n->node)) - omapdss_omapify_node(n->node); - - list_del(&n->list); - of_node_put(n->node); - kfree(n); - } - -put_node: - of_node_put(dss); - return 0; -} - -subsys_initcall(omapdss_boot_init); diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h index a48a9a254e33..a40abeafd2e9 100644 --- a/drivers/gpu/drm/omapdrm/dss/omapdss.h +++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h @@ -7,13 +7,14 @@ #ifndef __OMAP_DRM_DSS_H #define __OMAP_DRM_DSS_H -#include <linux/list.h> +#include <drm/drm_color_mgmt.h> +#include <drm/drm_crtc.h> +#include <drm/drm_mode.h> #include <linux/device.h> #include <linux/interrupt.h> -#include <video/videomode.h> +#include <linux/list.h> #include <linux/platform_data/omapdss.h> -#include <uapi/drm/drm_mode.h> -#include <drm/drm_crtc.h> +#include <video/videomode.h> #define DISPC_IRQ_FRAMEDONE (1 << 0) #define DISPC_IRQ_VSYNC (1 << 1) @@ -116,28 +117,6 @@ enum omap_dss_venc_type { OMAP_DSS_VENC_TYPE_SVIDEO, }; -enum omap_dss_dsi_pixel_format { - OMAP_DSS_DSI_FMT_RGB888, - OMAP_DSS_DSI_FMT_RGB666, - OMAP_DSS_DSI_FMT_RGB666_PACKED, - OMAP_DSS_DSI_FMT_RGB565, -}; - -enum omap_dss_dsi_mode { - OMAP_DSS_DSI_CMD_MODE = 0, - OMAP_DSS_DSI_VIDEO_MODE, -}; - -enum omap_display_caps { - OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0, - OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1, -}; - -enum omap_dss_display_state { - OMAP_DSS_DISPLAY_DISABLED = 0, - OMAP_DSS_DISPLAY_ACTIVE, -}; - enum omap_dss_rotation_type { OMAP_DSS_ROT_NONE = 0, OMAP_DSS_ROT_TILER = 1 << 0, @@ -162,64 +141,6 @@ enum omap_dss_output_id { OMAP_DSS_OUTPUT_HDMI = 1 << 6, }; -/* DSI */ - -enum omap_dss_dsi_trans_mode { - /* Sync Pulses: both sync start and end packets sent */ - OMAP_DSS_DSI_PULSE_MODE, - /* Sync Events: only sync start packets sent */ - OMAP_DSS_DSI_EVENT_MODE, - /* Burst: only sync start packets sent, pixels are 
time compressed */ - OMAP_DSS_DSI_BURST_MODE, -}; - -struct omap_dss_dsi_videomode_timings { - unsigned long hsclk; - - unsigned int ndl; - unsigned int bitspp; - - /* pixels */ - u16 hact; - /* lines */ - u16 vact; - - /* DSI video mode blanking data */ - /* Unit: byte clock cycles */ - u16 hss; - u16 hsa; - u16 hse; - u16 hfp; - u16 hbp; - /* Unit: line clocks */ - u16 vsa; - u16 vfp; - u16 vbp; - - /* DSI blanking modes */ - int blanking_mode; - int hsa_blanking_mode; - int hbp_blanking_mode; - int hfp_blanking_mode; - - enum omap_dss_dsi_trans_mode trans_mode; - - bool ddr_clk_always_on; - int window_sync; -}; - -struct omap_dss_dsi_config { - enum omap_dss_dsi_mode mode; - enum omap_dss_dsi_pixel_format pixel_format; - const struct videomode *vm; - - unsigned long hs_clk_min, hs_clk_max; - unsigned long lp_clk_min, lp_clk_max; - - bool ddr_clk_always_on; - enum omap_dss_dsi_trans_mode trans_mode; -}; - struct omap_dss_cpr_coefs { s16 rr, rg, rb; s16 gr, gg, gb; @@ -243,6 +164,9 @@ struct omap_overlay_info { u8 global_alpha; u8 pre_mult_alpha; u8 zorder; + + enum drm_color_encoding color_encoding; + enum drm_color_range color_range; }; struct omap_overlay_manager_info { @@ -258,21 +182,6 @@ struct omap_overlay_manager_info { struct omap_dss_cpr_coefs cpr_coefs; }; -/* 22 pins means 1 clk lane and 10 data lanes */ -#define OMAP_DSS_MAX_DSI_PINS 22 - -struct omap_dsi_pin_config { - int num_pins; - /* - * pin numbers in the following order: - * clk+, clk- - * data1+, data1- - * data2+, data2- - * ... - */ - int pins[OMAP_DSS_MAX_DSI_PINS]; -}; - struct omap_dss_writeback_info { u32 paddr; u32 p_uv_addr; @@ -286,89 +195,14 @@ struct omap_dss_writeback_info { }; struct omapdss_dsi_ops { - void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes, - bool enter_ulps); - - /* bus configuration */ - int (*set_config)(struct omap_dss_device *dssdev, - const struct omap_dss_dsi_config *cfg); - int (*configure_pins)(struct omap_dss_device *dssdev, - const struct omap_dsi_pin_config *pin_cfg); - - void (*enable_hs)(struct omap_dss_device *dssdev, int channel, - bool enable); - int (*enable_te)(struct omap_dss_device *dssdev, bool enable); - - int (*update)(struct omap_dss_device *dssdev, int channel, - void (*callback)(int, void *), void *data); - - void (*bus_lock)(struct omap_dss_device *dssdev); - void (*bus_unlock)(struct omap_dss_device *dssdev); - - int (*enable_video_output)(struct omap_dss_device *dssdev, int channel); - void (*disable_video_output)(struct omap_dss_device *dssdev, - int channel); - - int (*request_vc)(struct omap_dss_device *dssdev, int *channel); - int (*set_vc_id)(struct omap_dss_device *dssdev, int channel, - int vc_id); - void (*release_vc)(struct omap_dss_device *dssdev, int channel); - - /* data transfer */ - int (*dcs_write)(struct omap_dss_device *dssdev, int channel, - u8 *data, int len); - int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel, - u8 *data, int len); - int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd, - u8 *data, int len); - - int (*gen_write)(struct omap_dss_device *dssdev, int channel, - u8 *data, int len); - int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel, - u8 *data, int len); - int (*gen_read)(struct omap_dss_device *dssdev, int channel, - u8 *reqdata, int reqlen, - u8 *data, int len); - - int (*bta_sync)(struct omap_dss_device *dssdev, int channel); - - int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev, - int channel, u16 plen); -}; - -struct omap_dss_device_ops { 
- int (*connect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - void (*disconnect)(struct omap_dss_device *dssdev, - struct omap_dss_device *dst); - - void (*enable)(struct omap_dss_device *dssdev); - void (*disable)(struct omap_dss_device *dssdev); - - int (*check_timings)(struct omap_dss_device *dssdev, - struct drm_display_mode *mode); - - int (*get_modes)(struct omap_dss_device *dssdev, - struct drm_connector *connector); - - const struct omapdss_dsi_ops dsi; -}; - -/** - * enum omap_dss_device_ops_flag - Indicates which device ops are supported - * @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes - */ -enum omap_dss_device_ops_flag { - OMAP_DSS_DEVICE_OP_MODES = BIT(3), + int (*update)(struct omap_dss_device *dssdev); + bool (*is_video_mode)(struct omap_dss_device *dssdev); }; struct omap_dss_device { struct device *dev; - struct module *owner; - struct dss_device *dss; - struct omap_dss_device *next; struct drm_bridge *bridge; struct drm_bridge *next_bridge; struct drm_panel *panel; @@ -382,23 +216,11 @@ struct omap_dss_device { */ enum omap_display_type type; - /* - * True if the device is a display (panel or connector) at the end of - * the pipeline, false otherwise. - */ - bool display; - const char *name; - const struct omap_dss_driver *driver; - const struct omap_dss_device_ops *ops; - unsigned long ops_flags; + const struct omapdss_dsi_ops *dsi_ops; u32 bus_flags; - enum omap_display_caps caps; - - enum omap_dss_display_state state; - /* OMAP DSS output specific fields */ /* DISPC channel for this output */ @@ -411,30 +233,10 @@ struct omap_dss_device { unsigned int of_port; }; -struct omap_dss_driver { - int (*update)(struct omap_dss_device *dssdev, - u16 x, u16 y, u16 w, u16 h); - int (*sync)(struct omap_dss_device *dssdev); - - int (*enable_te)(struct omap_dss_device *dssdev, bool enable); - int (*get_te)(struct omap_dss_device *dssdev); - - int (*memory_read)(struct omap_dss_device *dssdev, - void *buf, size_t size, - u16 x, u16 y, u16 w, u16 h); +struct dss_pdata { + struct dss_device *dss; }; -struct dss_device *omapdss_get_dss(void); -void omapdss_set_dss(struct dss_device *dss); -static inline bool omapdss_is_initialized(void) -{ - return !!omapdss_get_dss(); -} - -void omapdss_display_init(struct omap_dss_device *dssdev); -int omapdss_display_get_modes(struct drm_connector *connector, - const struct videomode *vm); - void omapdss_device_register(struct omap_dss_device *dssdev); void omapdss_device_unregister(struct omap_dss_device *dssdev); struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev); @@ -445,8 +247,6 @@ int omapdss_device_connect(struct dss_device *dss, struct omap_dss_device *dst); void omapdss_device_disconnect(struct omap_dss_device *src, struct omap_dss_device *dst); -void omapdss_device_enable(struct omap_dss_device *dssdev); -void omapdss_device_disable(struct omap_dss_device *dssdev); int omap_dss_get_num_overlay_managers(void); @@ -466,11 +266,6 @@ int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask); int omapdss_compat_init(void); void omapdss_compat_uninit(void); -static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev) -{ - return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE; -} - enum dss_writeback_channel { DSS_WB_LCD1_MGR = 0, DSS_WB_LCD2_MGR = 1, @@ -482,31 +277,23 @@ enum dss_writeback_channel { DSS_WB_LCD3_MGR = 7, }; -struct dss_mgr_ops { - void (*start_update)(struct omap_drm_private *priv, - enum omap_channel channel); - int (*enable)(struct 
omap_drm_private *priv, - enum omap_channel channel); - void (*disable)(struct omap_drm_private *priv, - enum omap_channel channel); - void (*set_timings)(struct omap_drm_private *priv, - enum omap_channel channel, - const struct videomode *vm); - void (*set_lcd_config)(struct omap_drm_private *priv, - enum omap_channel channel, - const struct dss_lcd_mgr_config *config); - int (*register_framedone_handler)(struct omap_drm_private *priv, - enum omap_channel channel, - void (*handler)(void *), void *data); - void (*unregister_framedone_handler)(struct omap_drm_private *priv, - enum omap_channel channel, - void (*handler)(void *), void *data); -}; - -int dss_install_mgr_ops(struct dss_device *dss, - const struct dss_mgr_ops *mgr_ops, - struct omap_drm_private *priv); -void dss_uninstall_mgr_ops(struct dss_device *dss); +void omap_crtc_dss_start_update(struct omap_drm_private *priv, + enum omap_channel channel); +void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable); +int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel); +void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel); +void omap_crtc_dss_set_timings(struct omap_drm_private *priv, + enum omap_channel channel, + const struct videomode *vm); +void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv, + enum omap_channel channel, + const struct dss_lcd_mgr_config *config); +int omap_crtc_dss_register_framedone( + struct omap_drm_private *priv, enum omap_channel channel, + void (*handler)(void *), void *data); +void omap_crtc_dss_unregister_framedone( + struct omap_drm_private *priv, enum omap_channel channel, + void (*handler)(void *), void *data); void dss_mgr_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm); @@ -520,80 +307,12 @@ int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev, void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev, void (*handler)(void *), void *data); -/* dispc ops */ - -struct dispc_ops { - u32 (*read_irqstatus)(struct dispc_device *dispc); - void (*clear_irqstatus)(struct dispc_device *dispc, u32 mask); - void (*write_irqenable)(struct dispc_device *dispc, u32 mask); - - int (*request_irq)(struct dispc_device *dispc, irq_handler_t handler, - void *dev_id); - void (*free_irq)(struct dispc_device *dispc, void *dev_id); - - int (*runtime_get)(struct dispc_device *dispc); - void (*runtime_put)(struct dispc_device *dispc); - - int (*get_num_ovls)(struct dispc_device *dispc); - int (*get_num_mgrs)(struct dispc_device *dispc); - - u32 (*get_memory_bandwidth_limit)(struct dispc_device *dispc); - - void (*mgr_enable)(struct dispc_device *dispc, - enum omap_channel channel, bool enable); - bool (*mgr_is_enabled)(struct dispc_device *dispc, - enum omap_channel channel); - u32 (*mgr_get_vsync_irq)(struct dispc_device *dispc, - enum omap_channel channel); - u32 (*mgr_get_framedone_irq)(struct dispc_device *dispc, - enum omap_channel channel); - u32 (*mgr_get_sync_lost_irq)(struct dispc_device *dispc, - enum omap_channel channel); - bool (*mgr_go_busy)(struct dispc_device *dispc, - enum omap_channel channel); - void (*mgr_go)(struct dispc_device *dispc, enum omap_channel channel); - void (*mgr_set_lcd_config)(struct dispc_device *dispc, - enum omap_channel channel, - const struct dss_lcd_mgr_config *config); - int (*mgr_check_timings)(struct dispc_device *dispc, - enum omap_channel channel, - const struct videomode *vm); - void (*mgr_set_timings)(struct dispc_device *dispc, - enum omap_channel 
channel, - const struct videomode *vm); - void (*mgr_setup)(struct dispc_device *dispc, enum omap_channel channel, - const struct omap_overlay_manager_info *info); - u32 (*mgr_gamma_size)(struct dispc_device *dispc, - enum omap_channel channel); - void (*mgr_set_gamma)(struct dispc_device *dispc, - enum omap_channel channel, - const struct drm_color_lut *lut, - unsigned int length); - - int (*ovl_enable)(struct dispc_device *dispc, enum omap_plane_id plane, - bool enable); - int (*ovl_setup)(struct dispc_device *dispc, enum omap_plane_id plane, - const struct omap_overlay_info *oi, - const struct videomode *vm, bool mem_to_mem, - enum omap_channel channel); - - const u32 *(*ovl_get_color_modes)(struct dispc_device *dispc, - enum omap_plane_id plane); - - u32 (*wb_get_framedone_irq)(struct dispc_device *dispc); - int (*wb_setup)(struct dispc_device *dispc, - const struct omap_dss_writeback_info *wi, - bool mem_to_mem, const struct videomode *vm, - enum dss_writeback_channel channel_in); - bool (*has_writeback)(struct dispc_device *dispc); - bool (*wb_go_busy)(struct dispc_device *dispc); - void (*wb_go)(struct dispc_device *dispc); -}; - struct dispc_device *dispc_get_dispc(struct dss_device *dss); -const struct dispc_ops *dispc_get_ops(struct dss_device *dss); bool omapdss_stack_is_ready(void); void omapdss_gather_components(struct device *dev); +int omap_dss_init(void); +void omap_dss_exit(void); + #endif /* __OMAP_DRM_DSS_H */ diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 5affdf078134..7378e855c278 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -30,7 +30,6 @@ int omapdss_device_init_output(struct omap_dss_device *out, return 0; } - out->next = omapdss_find_device_by_node(remote_node); out->bridge = of_drm_find_bridge(remote_node); out->panel = of_drm_find_panel(remote_node); if (IS_ERR(out->panel)) @@ -38,12 +37,6 @@ int omapdss_device_init_output(struct omap_dss_device *out, of_node_put(remote_node); - if (out->next && out->type != out->next->type) { - dev_err(out->dev, "output type and display type don't match\n"); - ret = -EINVAL; - goto error; - } - if (out->panel) { struct drm_bridge *bridge; @@ -69,7 +62,7 @@ int omapdss_device_init_output(struct omap_dss_device *out, out->bridge = local_bridge; } - if (!out->next && !out->bridge) { + if (!out->bridge) { ret = -EPROBE_DEFER; goto error; } @@ -78,98 +71,64 @@ int omapdss_device_init_output(struct omap_dss_device *out, error: omapdss_device_cleanup_output(out); - out->next = NULL; return ret; } -EXPORT_SYMBOL(omapdss_device_init_output); void omapdss_device_cleanup_output(struct omap_dss_device *out) { if (out->bridge && out->panel) drm_panel_bridge_remove(out->next_bridge ? 
out->next_bridge : out->bridge); - - if (out->next) - omapdss_device_put(out->next); -} -EXPORT_SYMBOL(omapdss_device_cleanup_output); - -int dss_install_mgr_ops(struct dss_device *dss, - const struct dss_mgr_ops *mgr_ops, - struct omap_drm_private *priv) -{ - if (dss->mgr_ops) - return -EBUSY; - - dss->mgr_ops = mgr_ops; - dss->mgr_ops_priv = priv; - - return 0; -} -EXPORT_SYMBOL(dss_install_mgr_ops); - -void dss_uninstall_mgr_ops(struct dss_device *dss) -{ - dss->mgr_ops = NULL; - dss->mgr_ops_priv = NULL; } -EXPORT_SYMBOL(dss_uninstall_mgr_ops); void dss_mgr_set_timings(struct omap_dss_device *dssdev, const struct videomode *vm) { - dssdev->dss->mgr_ops->set_timings(dssdev->dss->mgr_ops_priv, + omap_crtc_dss_set_timings(dssdev->dss->mgr_ops_priv, dssdev->dispc_channel, vm); } -EXPORT_SYMBOL(dss_mgr_set_timings); void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev, const struct dss_lcd_mgr_config *config) { - dssdev->dss->mgr_ops->set_lcd_config(dssdev->dss->mgr_ops_priv, + omap_crtc_dss_set_lcd_config(dssdev->dss->mgr_ops_priv, dssdev->dispc_channel, config); } -EXPORT_SYMBOL(dss_mgr_set_lcd_config); int dss_mgr_enable(struct omap_dss_device *dssdev) { - return dssdev->dss->mgr_ops->enable(dssdev->dss->mgr_ops_priv, + return omap_crtc_dss_enable(dssdev->dss->mgr_ops_priv, dssdev->dispc_channel); } -EXPORT_SYMBOL(dss_mgr_enable); void dss_mgr_disable(struct omap_dss_device *dssdev) { - dssdev->dss->mgr_ops->disable(dssdev->dss->mgr_ops_priv, + omap_crtc_dss_disable(dssdev->dss->mgr_ops_priv, dssdev->dispc_channel); } -EXPORT_SYMBOL(dss_mgr_disable); void dss_mgr_start_update(struct omap_dss_device *dssdev) { - dssdev->dss->mgr_ops->start_update(dssdev->dss->mgr_ops_priv, + omap_crtc_dss_start_update(dssdev->dss->mgr_ops_priv, dssdev->dispc_channel); } -EXPORT_SYMBOL(dss_mgr_start_update); int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev, void (*handler)(void *), void *data) { struct dss_device *dss = dssdev->dss; - return dss->mgr_ops->register_framedone_handler(dss->mgr_ops_priv, + return omap_crtc_dss_register_framedone(dss->mgr_ops_priv, dssdev->dispc_channel, handler, data); } -EXPORT_SYMBOL(dss_mgr_register_framedone_handler); void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev, void (*handler)(void *), void *data) { struct dss_device *dss = dssdev->dss; - dss->mgr_ops->unregister_framedone_handler(dss->mgr_ops_priv, + omap_crtc_dss_unregister_framedone(dss->mgr_ops_priv, dssdev->dispc_channel, handler, data); } -EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler); diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c index 241a338ace29..4c8246a3ded9 100644 --- a/drivers/gpu/drm/omapdrm/dss/pll.c +++ b/drivers/gpu/drm/omapdrm/dss/pll.c @@ -222,6 +222,9 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin, n_stop = min((unsigned)(clkin / fint_hw_min), hw->n_max); n_inc = 1; + if (n_start > n_stop) + return false; + if (hw->errata_i886) { swap(n_start, n_stop); n_inc = -1; @@ -239,6 +242,9 @@ bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin, hw->m_max); m_inc = 1; + if (m_start > m_stop) + continue; + if (hw->errata_i886) { swap(m_start, m_stop); m_inc = -1; diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c index 033fd30074b0..91eaae3b9481 100644 --- a/drivers/gpu/drm/omapdrm/dss/sdi.c +++ b/drivers/gpu/drm/omapdrm/dss/sdi.c @@ -195,8 +195,7 @@ static void sdi_bridge_mode_set(struct drm_bridge *bridge, sdi->pixelclock = adjusted_mode->clock 
* 1000; } -static void sdi_bridge_enable(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state) +static void sdi_bridge_enable(struct drm_bridge *bridge) { struct sdi_device *sdi = drm_bridge_to_sdi(bridge); struct dispc_clock_info dispc_cinfo; @@ -259,8 +258,7 @@ err_get_dispc: regulator_disable(sdi->vdds_sdi_reg); } -static void sdi_bridge_disable(struct drm_bridge *bridge, - struct drm_bridge_state *bridge_state) +static void sdi_bridge_disable(struct drm_bridge *bridge) { struct sdi_device *sdi = drm_bridge_to_sdi(bridge); @@ -278,8 +276,8 @@ static const struct drm_bridge_funcs sdi_bridge_funcs = { .mode_valid = sdi_bridge_mode_valid, .mode_fixup = sdi_bridge_mode_fixup, .mode_set = sdi_bridge_mode_set, - .atomic_enable = sdi_bridge_enable, - .atomic_disable = sdi_bridge_disable, + .enable = sdi_bridge_enable, + .disable = sdi_bridge_disable, }; static void sdi_bridge_init(struct sdi_device *sdi) @@ -314,7 +312,6 @@ static int sdi_init_output(struct sdi_device *sdi) out->dispc_channel = OMAP_DSS_CHANNEL_LCD; /* We have SDI only on OMAP3, where it's on port 1 */ out->of_port = 1; - out->owner = THIS_MODULE; out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */ | DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE; diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c index 94cf50d837b0..e522c17955d0 100644 --- a/drivers/gpu/drm/omapdrm/dss/venc.c +++ b/drivers/gpu/drm/omapdrm/dss/venc.c @@ -733,9 +733,7 @@ static int venc_init_output(struct venc_device *venc) out->type = OMAP_DISPLAY_TYPE_VENC; out->name = "venc.0"; out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT; - out->owner = THIS_MODULE; out->of_port = 0; - out->ops_flags = OMAP_DSS_DEVICE_OP_MODES; r = omapdss_device_init_output(out, &venc->bridge); if (r < 0) { diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c deleted file mode 100644 index 47719b92e22b..000000000000 --- a/drivers/gpu/drm/omapdrm/omap_connector.c +++ /dev/null @@ -1,157 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/ - * Author: Rob Clark <[email protected]> - */ - -#include <drm/drm_atomic_helper.h> -#include <drm/drm_crtc.h> -#include <drm/drm_probe_helper.h> - -#include "omap_drv.h" - -/* - * connector funcs - */ - -#define to_omap_connector(x) container_of(x, struct omap_connector, base) - -struct omap_connector { - struct drm_connector base; - struct omap_dss_device *output; -}; - -static enum drm_connector_status omap_connector_detect( - struct drm_connector *connector, bool force) -{ - return connector_status_connected; -} - -static void omap_connector_destroy(struct drm_connector *connector) -{ - struct omap_connector *omap_connector = to_omap_connector(connector); - - DBG("%s", connector->name); - - drm_connector_unregister(connector); - drm_connector_cleanup(connector); - - omapdss_device_put(omap_connector->output); - - kfree(omap_connector); -} - -static int omap_connector_get_modes(struct drm_connector *connector) -{ - struct omap_connector *omap_connector = to_omap_connector(connector); - struct omap_dss_device *dssdev = NULL; - struct omap_dss_device *d; - - DBG("%s", connector->name); - - /* - * If the display pipeline reports modes (e.g. with a fixed resolution - * panel or an analog TV output), query it. 
- */ - for (d = omap_connector->output; d; d = d->next) { - if (d->ops_flags & OMAP_DSS_DEVICE_OP_MODES) - dssdev = d; - } - - if (dssdev) - return dssdev->ops->get_modes(dssdev, connector); - - /* We can't retrieve modes. The KMS core will add the default modes. */ - return 0; -} - -enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode) -{ - int ret; - - drm_mode_copy(adjusted_mode, mode); - - for (; dssdev; dssdev = dssdev->next) { - if (!dssdev->ops || !dssdev->ops->check_timings) - continue; - - ret = dssdev->ops->check_timings(dssdev, adjusted_mode); - if (ret) - return MODE_BAD; - } - - return MODE_OK; -} - -static enum drm_mode_status omap_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) -{ - struct omap_connector *omap_connector = to_omap_connector(connector); - struct drm_display_mode new_mode = {}; - enum drm_mode_status status; - - status = omap_connector_mode_fixup(omap_connector->output, mode, - &new_mode); - if (status != MODE_OK) - goto done; - - /* Check if vrefresh is still valid. */ - if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(&new_mode)) - status = MODE_NOCLOCK; - -done: - DBG("connector: mode %s: " DRM_MODE_FMT, - (status == MODE_OK) ? "valid" : "invalid", - DRM_MODE_ARG(mode)); - - return status; -} - -static const struct drm_connector_funcs omap_connector_funcs = { - .reset = drm_atomic_helper_connector_reset, - .detect = omap_connector_detect, - .fill_modes = drm_helper_probe_single_connector_modes, - .destroy = omap_connector_destroy, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, - .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, -}; - -static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { - .get_modes = omap_connector_get_modes, - .mode_valid = omap_connector_mode_valid, -}; - -/* initialize connector */ -struct drm_connector *omap_connector_init(struct drm_device *dev, - struct omap_dss_device *output, - struct drm_encoder *encoder) -{ - struct drm_connector *connector = NULL; - struct omap_connector *omap_connector; - - DBG("%s", output->name); - - omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL); - if (!omap_connector) - goto fail; - - omap_connector->output = omapdss_device_get(output); - - connector = &omap_connector->base; - connector->interlace_allowed = 1; - connector->doublescan_allowed = 0; - - drm_connector_init(dev, connector, &omap_connector_funcs, - DRM_MODE_CONNECTOR_DSI); - drm_connector_helper_add(connector, &omap_connector_helper_funcs); - - return connector; - -fail: - if (connector) - omap_connector_destroy(connector); - - return NULL; -} diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h deleted file mode 100644 index 0ecd4f1655b7..000000000000 --- a/drivers/gpu/drm/omapdrm/omap_connector.h +++ /dev/null @@ -1,28 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * omap_connector.h -- OMAP DRM Connector - * - * Copyright (C) 2011 Texas Instruments - * Author: Rob Clark <[email protected]> - */ - -#ifndef __OMAPDRM_CONNECTOR_H__ -#define __OMAPDRM_CONNECTOR_H__ - -#include <linux/types.h> - -enum drm_mode_status; - -struct drm_connector; -struct drm_device; -struct drm_encoder; -struct omap_dss_device; - -struct drm_connector *omap_connector_init(struct drm_device *dev, - struct omap_dss_device *output, - struct drm_encoder *encoder); -enum drm_mode_status 
omap_connector_mode_fixup(struct omap_dss_device *dssdev, - const struct drm_display_mode *mode, - struct drm_display_mode *adjusted_mode); - -#endif /* __OMAPDRM_CONNECTOR_H__ */ diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c index 7d66269ad998..06a719c104f4 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.c +++ b/drivers/gpu/drm/omapdrm/omap_crtc.c @@ -100,14 +100,14 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc) * the upstream part of the video pipe. */ -static void omap_crtc_dss_start_update(struct omap_drm_private *priv, +void omap_crtc_dss_start_update(struct omap_drm_private *priv, enum omap_channel channel) { - priv->dispc_ops->mgr_enable(priv->dispc, channel, true); + dispc_mgr_enable(priv->dispc, channel, true); } /* Called only from the encoder enable/disable and suspend/resume handlers. */ -static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) +void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) { struct omap_crtc_state *omap_state = to_omap_crtc_state(crtc->state); struct drm_device *dev = crtc->dev; @@ -128,7 +128,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) } if (omap_crtc->pipe->output->type == OMAP_DISPLAY_TYPE_HDMI) { - priv->dispc_ops->mgr_enable(priv->dispc, channel, enable); + dispc_mgr_enable(priv->dispc, channel, enable); omap_crtc->enabled = enable; return; } @@ -141,9 +141,9 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) omap_crtc->ignore_digit_sync_lost = true; } - framedone_irq = priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, + framedone_irq = dispc_mgr_get_framedone_irq(priv->dispc, channel); - vsync_irq = priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel); + vsync_irq = dispc_mgr_get_vsync_irq(priv->dispc, channel); if (enable) { wait = omap_irq_wait_init(dev, vsync_irq, 1); @@ -163,7 +163,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) wait = omap_irq_wait_init(dev, vsync_irq, 2); } - priv->dispc_ops->mgr_enable(priv->dispc, channel, enable); + dispc_mgr_enable(priv->dispc, channel, enable); omap_crtc->enabled = enable; ret = omap_irq_wait(dev, wait, msecs_to_jiffies(100)); @@ -180,21 +180,19 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable) } -static int omap_crtc_dss_enable(struct omap_drm_private *priv, - enum omap_channel channel) +int omap_crtc_dss_enable(struct omap_drm_private *priv, enum omap_channel channel) { struct drm_crtc *crtc = priv->channels[channel]->crtc; struct omap_crtc *omap_crtc = to_omap_crtc(crtc); - priv->dispc_ops->mgr_set_timings(priv->dispc, omap_crtc->channel, + dispc_mgr_set_timings(priv->dispc, omap_crtc->channel, &omap_crtc->vm); omap_crtc_set_enabled(&omap_crtc->base, true); return 0; } -static void omap_crtc_dss_disable(struct omap_drm_private *priv, - enum omap_channel channel) +void omap_crtc_dss_disable(struct omap_drm_private *priv, enum omap_channel channel) { struct drm_crtc *crtc = priv->channels[channel]->crtc; struct omap_crtc *omap_crtc = to_omap_crtc(crtc); @@ -202,7 +200,7 @@ static void omap_crtc_dss_disable(struct omap_drm_private *priv, omap_crtc_set_enabled(&omap_crtc->base, false); } -static void omap_crtc_dss_set_timings(struct omap_drm_private *priv, +void omap_crtc_dss_set_timings(struct omap_drm_private *priv, enum omap_channel channel, const struct videomode *vm) { @@ -213,7 +211,7 @@ static void omap_crtc_dss_set_timings(struct omap_drm_private *priv, omap_crtc->vm = *vm; } -static void 
omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv, +void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv, enum omap_channel channel, const struct dss_lcd_mgr_config *config) { @@ -221,11 +219,11 @@ static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv, struct omap_crtc *omap_crtc = to_omap_crtc(crtc); DBG("%s", omap_crtc->name); - priv->dispc_ops->mgr_set_lcd_config(priv->dispc, omap_crtc->channel, + dispc_mgr_set_lcd_config(priv->dispc, omap_crtc->channel, config); } -static int omap_crtc_dss_register_framedone( +int omap_crtc_dss_register_framedone( struct omap_drm_private *priv, enum omap_channel channel, void (*handler)(void *), void *data) { @@ -244,7 +242,7 @@ static int omap_crtc_dss_register_framedone( return 0; } -static void omap_crtc_dss_unregister_framedone( +void omap_crtc_dss_unregister_framedone( struct omap_drm_private *priv, enum omap_channel channel, void (*handler)(void *), void *data) { @@ -261,16 +259,6 @@ static void omap_crtc_dss_unregister_framedone( omap_crtc->framedone_handler_data = NULL; } -static const struct dss_mgr_ops mgr_ops = { - .start_update = omap_crtc_dss_start_update, - .enable = omap_crtc_dss_enable, - .disable = omap_crtc_dss_disable, - .set_timings = omap_crtc_dss_set_timings, - .set_lcd_config = omap_crtc_dss_set_lcd_config, - .register_framedone_handler = omap_crtc_dss_register_framedone, - .unregister_framedone_handler = omap_crtc_dss_unregister_framedone, -}; - /* ----------------------------------------------------------------------------- * Setup, Flush and Page Flip */ @@ -300,7 +288,7 @@ void omap_crtc_vblank_irq(struct drm_crtc *crtc) * If the dispc is busy we're racing the flush operation. Try again on * the next vblank interrupt. */ - if (priv->dispc_ops->mgr_go_busy(priv->dispc, omap_crtc->channel)) { + if (dispc_mgr_go_busy(priv->dispc, omap_crtc->channel)) { spin_unlock(&crtc->dev->event_lock); return; } @@ -362,27 +350,14 @@ static void omap_crtc_manual_display_update(struct work_struct *data) { struct omap_crtc *omap_crtc = container_of(data, struct omap_crtc, update_work.work); - struct drm_display_mode *mode = &omap_crtc->pipe->crtc->mode; - struct omap_dss_device *dssdev = omap_crtc->pipe->output->next; + struct omap_dss_device *dssdev = omap_crtc->pipe->output; struct drm_device *dev = omap_crtc->base.dev; - const struct omap_dss_driver *dssdrv; int ret; - if (!dssdev) { - dev_err_once(dev->dev, "missing display dssdev!"); + if (!dssdev || !dssdev->dsi_ops || !dssdev->dsi_ops->update) return; - } - - dssdrv = dssdev->driver; - if (!dssdrv || !dssdrv->update) { - dev_err_once(dev->dev, "missing or incorrect dssdrv!"); - return; - } - - if (dssdrv->sync) - dssdrv->sync(dssdev); - ret = dssdrv->update(dssdev, 0, 0, mode->hdisplay, mode->vdisplay); + ret = dssdev->dsi_ops->update(dssdev); if (ret < 0) { spin_lock_irq(&dev->event_lock); omap_crtc->pending = false; @@ -391,6 +366,33 @@ static void omap_crtc_manual_display_update(struct work_struct *data) } } +static s16 omap_crtc_s31_32_to_s2_8(s64 coef) +{ + u64 sign_bit = 1ULL << 63; + u64 cbits = (u64)coef; + + s16 ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1ff); + + if (cbits & sign_bit) + ret = -ret; + + return ret; +} + +static void omap_crtc_cpr_coefs_from_ctm(const struct drm_color_ctm *ctm, + struct omap_dss_cpr_coefs *cpr) +{ + cpr->rr = omap_crtc_s31_32_to_s2_8(ctm->matrix[0]); + cpr->rg = omap_crtc_s31_32_to_s2_8(ctm->matrix[1]); + cpr->rb = omap_crtc_s31_32_to_s2_8(ctm->matrix[2]); + cpr->gr = 
omap_crtc_s31_32_to_s2_8(ctm->matrix[3]); + cpr->gg = omap_crtc_s31_32_to_s2_8(ctm->matrix[4]); + cpr->gb = omap_crtc_s31_32_to_s2_8(ctm->matrix[5]); + cpr->br = omap_crtc_s31_32_to_s2_8(ctm->matrix[6]); + cpr->bg = omap_crtc_s31_32_to_s2_8(ctm->matrix[7]); + cpr->bb = omap_crtc_s31_32_to_s2_8(ctm->matrix[8]); +} + static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc) { struct omap_drm_private *priv = crtc->dev->dev_private; @@ -402,9 +404,17 @@ static void omap_crtc_write_crtc_properties(struct drm_crtc *crtc) info.default_color = 0x000000; info.trans_enabled = false; info.partial_alpha_enabled = false; - info.cpr_enable = false; - priv->dispc_ops->mgr_setup(priv->dispc, omap_crtc->channel, &info); + if (crtc->state->ctm) { + struct drm_color_ctm *ctm = crtc->state->ctm->data; + + info.cpr_enable = true; + omap_crtc_cpr_coefs_from_ctm(ctm, &info.cpr_coefs); + } else { + info.cpr_enable = false; + } + + dispc_mgr_setup(priv->dispc, omap_crtc->channel, &info); } /* ----------------------------------------------------------------------------- @@ -445,7 +455,7 @@ static void omap_crtc_atomic_enable(struct drm_crtc *crtc, DBG("%s", omap_crtc->name); - priv->dispc_ops->runtime_get(priv->dispc); + dispc_runtime_get(priv->dispc); /* manual updated display will not trigger vsync irq */ if (omap_state->manually_updated) @@ -484,7 +494,7 @@ static void omap_crtc_atomic_disable(struct drm_crtc *crtc, drm_crtc_vblank_off(crtc); - priv->dispc_ops->runtime_put(priv->dispc); + dispc_runtime_put(priv->dispc); } static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc, @@ -502,9 +512,8 @@ static enum drm_mode_status omap_crtc_mode_valid(struct drm_crtc *crtc, * valid DISPC mode. DSI will calculate and configure the * proper DISPC mode later. 
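A quick standalone check of the S31.32 sign-magnitude to S2.8 shift-and-clamp performed by omap_crtc_s31_32_to_s2_8() above. This is an illustrative userspace mirror of the helper, not part of the patch; DRM CTM coefficients are defined as S31.32 sign-magnitude, and the DISPC CPR matrix takes signed 2.8 fixed point:

	#include <stdint.h>
	#include <stdio.h>

	/* Userspace mirror of the kernel helper above. */
	static int16_t s31_32_to_s2_8(int64_t coef)
	{
		uint64_t sign_bit = 1ULL << 63;
		uint64_t cbits = (uint64_t)coef;
		/* drop 24 of the 32 fraction bits, clamp magnitude to 9 bits */
		uint64_t mag = (cbits & ~sign_bit) >> 24;
		int16_t ret = mag > 0x1ff ? 0x1ff : (int16_t)mag;

		if (cbits & sign_bit)
			ret = -ret;
		return ret;
	}

	int main(void)
	{
		/* 1.0 -> 256 (0x100, i.e. 1.0 in S2.8); -1.0 -> -256; 4.0 clamps to 511 */
		printf("%d %d %d\n",
		       s31_32_to_s2_8(1LL << 32),
		       s31_32_to_s2_8((int64_t)((1ULL << 63) | (1ULL << 32))),
		       s31_32_to_s2_8(4LL << 32));
		return 0;
	}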
*/ - if (omap_crtc->pipe->output->next == NULL || - omap_crtc->pipe->output->next->type != OMAP_DISPLAY_TYPE_DSI) { - r = priv->dispc_ops->mgr_check_timings(priv->dispc, + if (omap_crtc->pipe->output->type != OMAP_DISPLAY_TYPE_DSI) { + r = dispc_mgr_check_timings(priv->dispc, omap_crtc->channel, &vm); if (r) @@ -555,17 +564,16 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc) static bool omap_crtc_is_manually_updated(struct drm_crtc *crtc) { struct omap_crtc *omap_crtc = to_omap_crtc(crtc); - struct omap_dss_device *display = omap_crtc->pipe->output->next; + struct omap_dss_device *dssdev = omap_crtc->pipe->output; - if (!display) + if (!dssdev || !dssdev->dsi_ops || !dssdev->dsi_ops->is_video_mode) return false; - if (display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { - DBG("detected manually updated display!"); - return true; - } + if (dssdev->dsi_ops->is_video_mode(dssdev)) + return false; - return false; + DBG("detected manually updated display!"); + return true; } static int omap_crtc_atomic_check(struct drm_crtc *crtc, @@ -575,8 +583,8 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc, crtc); struct drm_plane_state *pri_state; - if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) { - unsigned int length = crtc_state->gamma_lut->length / + if (crtc_state->color_mgmt_changed && crtc_state->degamma_lut) { + unsigned int length = crtc_state->degamma_lut->length / sizeof(struct drm_color_lut); if (length < 2) @@ -617,13 +625,13 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_color_lut *lut = NULL; unsigned int length = 0; - if (crtc->state->gamma_lut) { + if (crtc->state->degamma_lut) { lut = (struct drm_color_lut *) - crtc->state->gamma_lut->data; - length = crtc->state->gamma_lut->length / + crtc->state->degamma_lut->data; + length = crtc->state->degamma_lut->length / sizeof(*lut); } - priv->dispc_ops->mgr_set_gamma(priv->dispc, omap_crtc->channel, + dispc_mgr_set_gamma(priv->dispc, omap_crtc->channel, lut, length); } @@ -648,7 +656,7 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc, WARN_ON(ret != 0); spin_lock_irq(&crtc->dev->event_lock); - priv->dispc_ops->mgr_go(priv->dispc, omap_crtc->channel); + dispc_mgr_go(priv->dispc, omap_crtc->channel); omap_crtc_arm_event(crtc); spin_unlock_irq(&crtc->dev->event_lock); } @@ -741,7 +749,6 @@ static const struct drm_crtc_funcs omap_crtc_funcs = { .set_config = drm_atomic_helper_set_config, .destroy = omap_crtc_destroy, .page_flip = drm_atomic_helper_page_flip, - .gamma_set = drm_atomic_helper_legacy_gamma_set, .atomic_duplicate_state = omap_crtc_duplicate_state, .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, .atomic_set_property = omap_crtc_atomic_set_property, @@ -771,16 +778,6 @@ static const char *channel_names[] = { [OMAP_DSS_CHANNEL_LCD3] = "lcd3", }; -void omap_crtc_pre_init(struct omap_drm_private *priv) -{ - dss_install_mgr_ops(priv->dss, &mgr_ops, priv); -} - -void omap_crtc_pre_uninit(struct omap_drm_private *priv) -{ - dss_uninstall_mgr_ops(priv->dss); -} - /* initialize crtc */ struct drm_crtc *omap_crtc_init(struct drm_device *dev, struct omap_drm_pipeline *pipe, @@ -839,10 +836,10 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev, * extracted with dispc_mgr_gamma_size(). If it returns 0 * gamma table is not supported. 
*/ - if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) { + if (dispc_mgr_gamma_size(priv->dispc, channel)) { unsigned int gamma_lut_size = 256; - drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size); + drm_crtc_enable_color_mgmt(crtc, gamma_lut_size, true, 0); drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size); } diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h index 2fd57751ae2b..a8b9cbee86e0 100644 --- a/drivers/gpu/drm/omapdrm/omap_crtc.h +++ b/drivers/gpu/drm/omapdrm/omap_crtc.h @@ -22,8 +22,6 @@ struct videomode; struct videomode *omap_crtc_timings(struct drm_crtc *crtc); enum omap_channel omap_crtc_channel(struct drm_crtc *crtc); -void omap_crtc_pre_init(struct omap_drm_private *priv); -void omap_crtc_pre_uninit(struct omap_drm_private *priv); struct drm_crtc *omap_crtc_init(struct drm_device *dev, struct omap_drm_pipeline *pipe, struct drm_plane *plane); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 42c2ed752095..28bbad1353ee 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -69,7 +69,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) struct drm_device *dev = old_state->dev; struct omap_drm_private *priv = dev->dev_private; - priv->dispc_ops->runtime_get(priv->dispc); + dispc_runtime_get(priv->dispc); /* Apply the atomic update. */ drm_atomic_helper_commit_modeset_disables(dev, old_state); @@ -113,7 +113,7 @@ static void omap_atomic_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_cleanup_planes(dev, old_state); - priv->dispc_ops->runtime_put(priv->dispc); + dispc_runtime_put(priv->dispc); } static const struct drm_mode_config_helper_funcs omap_mode_config_helper_funcs = { @@ -192,7 +192,7 @@ static int omap_compare_pipelines(const void *a, const void *b) static int omap_modeset_init_properties(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; - unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc); + unsigned int num_planes = dispc_get_num_ovls(priv->dispc); priv->zorder_prop = drm_property_create_range(dev, 0, "zorder", 0, num_planes - 1); @@ -206,14 +206,7 @@ static int omap_display_id(struct omap_dss_device *output) { struct device_node *node = NULL; - if (output->next) { - struct omap_dss_device *display = output; - - while (display->next) - display = display->next; - - node = display->dev->of_node; - } else if (output->bridge) { + if (output->bridge) { struct drm_bridge *bridge = output->bridge; while (drm_bridge_get_next_bridge(bridge)) @@ -228,8 +221,8 @@ static int omap_display_id(struct omap_dss_device *output) static int omap_modeset_init(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; - int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc); - int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc); + int num_ovls = dispc_get_num_ovls(priv->dispc); + int num_mgrs = dispc_get_num_mgrs(priv->dispc); unsigned int i; int ret; u32 plane_crtc_mask; @@ -332,19 +325,12 @@ static int omap_modeset_init(struct drm_device *dev) struct drm_encoder *encoder = pipe->encoder; struct drm_crtc *crtc; - if (pipe->output->next) { - pipe->connector = omap_connector_init(dev, pipe->output, - encoder); - if (!pipe->connector) - return -ENOMEM; - } else { - pipe->connector = drm_bridge_connector_init(dev, encoder); - if (IS_ERR(pipe->connector)) { - dev_err(priv->dev, - "unable to create bridge connector for %s\n", - pipe->output->name); - 
return PTR_ERR(pipe->connector); - } + pipe->connector = drm_bridge_connector_init(dev, encoder); + if (IS_ERR(pipe->connector)) { + dev_err(priv->dev, + "unable to create bridge connector for %s\n", + pipe->output->name); + return PTR_ERR(pipe->connector); } drm_connector_attach_encoder(pipe->connector, encoder); @@ -568,6 +554,7 @@ static const struct soc_device_attribute omapdrm_soc_devices[] = { static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) { const struct soc_device_attribute *soc; + struct dss_pdata *pdata = dev->platform_data; struct drm_device *ddev; int ret; @@ -582,11 +569,10 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) ddev->dev_private = priv; priv->dev = dev; - priv->dss = omapdss_get_dss(); + priv->dss = pdata->dss; priv->dispc = dispc_get_dispc(priv->dss); - priv->dispc_ops = dispc_get_ops(priv->dss); - omap_crtc_pre_init(priv); + priv->dss->mgr_ops_priv = priv; soc = soc_device_match(omapdrm_soc_devices); priv->omaprev = soc ? (unsigned int)soc->data : 0; @@ -596,9 +582,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) INIT_LIST_HEAD(&priv->obj_list); /* Get memory bandwidth limits */ - if (priv->dispc_ops->get_memory_bandwidth_limit) - priv->max_bandwidth = - priv->dispc_ops->get_memory_bandwidth_limit(priv->dispc); + priv->max_bandwidth = dispc_get_memory_bandwidth_limit(priv->dispc); omap_gem_init(ddev); @@ -641,7 +625,6 @@ err_gem_deinit: omap_gem_deinit(ddev); destroy_workqueue(priv->wq); omap_disconnect_pipelines(ddev); - omap_crtc_pre_uninit(priv); drm_dev_put(ddev); return ret; } @@ -667,7 +650,6 @@ static void omapdrm_cleanup(struct omap_drm_private *priv) destroy_workqueue(priv->wq); omap_disconnect_pipelines(ddev); - omap_crtc_pre_uninit(priv); drm_dev_put(ddev); } @@ -677,9 +659,6 @@ static int pdev_probe(struct platform_device *pdev) struct omap_drm_private *priv; int ret; - if (omapdss_is_initialized() == false) - return -EPROBE_DEFER; - ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(&pdev->dev, "Failed to set the DMA mask\n"); @@ -748,9 +727,21 @@ static struct platform_driver * const drivers[] = { static int __init omap_drm_init(void) { + int r; + DBG("init"); - return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); + r = omap_dss_init(); + if (r) + return r; + + r = platform_register_drivers(drivers, ARRAY_SIZE(drivers)); + if (r) { + omap_dss_exit(); + return r; + } + + return 0; } static void __exit omap_drm_fini(void) @@ -758,13 +749,15 @@ static void __exit omap_drm_fini(void) DBG("fini"); platform_unregister_drivers(drivers, ARRAY_SIZE(drivers)); + + omap_dss_exit(); } -/* need late_initcall() so we load after dss_driver's are loaded */ -late_initcall(omap_drm_init); +module_init(omap_drm_init); module_exit(omap_drm_fini); MODULE_AUTHOR("Rob Clark <[email protected]>"); +MODULE_AUTHOR("Tomi Valkeinen <[email protected]>"); MODULE_DESCRIPTION("OMAP DRM Display Driver"); MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h index ae57e7ada876..d6f136984da9 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.h +++ b/drivers/gpu/drm/omapdrm/omap_drv.h @@ -12,11 +12,11 @@ #include <linux/workqueue.h> #include "dss/omapdss.h" +#include "dss/dss.h" #include <drm/drm_gem.h> #include <drm/omap_drm.h> -#include "omap_connector.h" #include "omap_crtc.h" #include "omap_encoder.h" #include "omap_fb.h" @@ -47,7 +47,6 @@ struct 
omap_drm_private { struct dss_device *dss; struct dispc_device *dispc; - const struct dispc_ops *dispc_ops; unsigned int num_pipes; struct omap_drm_pipeline pipes[8]; diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 57e92a4d5937..4dd05bc732da 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -75,7 +75,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, { struct omap_encoder *omap_encoder = to_omap_encoder(encoder); struct omap_dss_device *output = omap_encoder->output; - struct omap_dss_device *dssdev; struct drm_device *dev = encoder->dev; struct drm_connector *connector; struct drm_bridge *bridge; @@ -98,9 +97,6 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, * * A better solution is to use DRM's bus-flags through the whole driver. */ - for (dssdev = output; dssdev; dssdev = dssdev->next) - omap_encoder_update_videomode_flags(&vm, dssdev->bus_flags); - for (bridge = output->bridge; bridge; bridge = drm_bridge_get_next_bridge(bridge)) { if (!bridge->timings) @@ -113,65 +109,12 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder, bus_flags = connector->display_info.bus_flags; omap_encoder_update_videomode_flags(&vm, bus_flags); - /* Set timings for the dss manager. */ + /* Set timings for all devices in the display pipeline. */ dss_mgr_set_timings(output, &vm); } -static void omap_encoder_disable(struct drm_encoder *encoder) -{ - struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->output; - struct drm_device *dev = encoder->dev; - - dev_dbg(dev->dev, "disable(%s)\n", dssdev->name); - - /* - * Disable the chain of external devices, starting at the one at the - * internal encoder's output. This is used for DSI outputs only, as - * dssdev->next is NULL for all other outputs. - */ - omapdss_device_disable(dssdev->next); -} - -static void omap_encoder_enable(struct drm_encoder *encoder) -{ - struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - struct omap_dss_device *dssdev = omap_encoder->output; - struct drm_device *dev = encoder->dev; - - dev_dbg(dev->dev, "enable(%s)\n", dssdev->name); - - /* - * Enable the chain of external devices, starting at the one at the - * internal encoder's output. This is used for DSI outputs only, as - * dssdev->next is NULL for all other outputs. 
- */ - omapdss_device_enable(dssdev->next); -} - -static int omap_encoder_atomic_check(struct drm_encoder *encoder, - struct drm_crtc_state *crtc_state, - struct drm_connector_state *conn_state) -{ - struct omap_encoder *omap_encoder = to_omap_encoder(encoder); - enum drm_mode_status status; - - status = omap_connector_mode_fixup(omap_encoder->output, - &crtc_state->mode, - &crtc_state->adjusted_mode); - if (status != MODE_OK) { - dev_err(encoder->dev->dev, "invalid timings: %d\n", status); - return -EINVAL; - } - - return 0; -} - static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = { .mode_set = omap_encoder_mode_set, - .disable = omap_encoder_disable, - .enable = omap_encoder_enable, - .atomic_check = omap_encoder_atomic_check, }; /* initialize encoder */ diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 68c271f4250b..38af6195d959 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -564,9 +564,8 @@ int omap_gem_mmap_obj(struct drm_gem_object *obj, * address_space (so unmap_mapping_range does what we want, * in particular in the case of mmap'd dmabufs) */ - fput(vma->vm_file); vma->vm_pgoff = 0; - vma->vm_file = get_file(obj->filp); + vma_set_file(vma, obj->filp); vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); } @@ -1325,8 +1324,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size, } omap_obj->pages = pages; - ret = drm_prime_sg_to_page_addr_arrays(sgt, pages, NULL, - npages); + ret = drm_prime_sg_to_page_array(sgt, pages, npages); if (ret) { omap_gem_free_object(obj); obj = ERR_PTR(-ENOMEM); diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c index 97c83b959f7e..15148d4b35b5 100644 --- a/drivers/gpu/drm/omapdrm/omap_irq.c +++ b/drivers/gpu/drm/omapdrm/omap_irq.c @@ -29,7 +29,7 @@ static void omap_irq_update(struct drm_device *dev) DBG("irqmask=%08x", irqmask); - priv->dispc_ops->write_irqenable(priv->dispc, irqmask); + dispc_write_irqenable(priv->dispc, irqmask); } static void omap_irq_wait_handler(struct omap_irq_wait *wait) @@ -83,7 +83,7 @@ int omap_irq_enable_framedone(struct drm_crtc *crtc, bool enable) unsigned long flags; enum omap_channel channel = omap_crtc_channel(crtc); int framedone_irq = - priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel); + dispc_mgr_get_framedone_irq(priv->dispc, channel); DBG("dev=%p, crtc=%u, enable=%d", dev, channel, enable); @@ -120,7 +120,7 @@ int omap_irq_enable_vblank(struct drm_crtc *crtc) DBG("dev=%p, crtc=%u", dev, channel); spin_lock_irqsave(&priv->wait_lock, flags); - priv->irq_mask |= priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, + priv->irq_mask |= dispc_mgr_get_vsync_irq(priv->dispc, channel); omap_irq_update(dev); spin_unlock_irqrestore(&priv->wait_lock, flags); @@ -146,7 +146,7 @@ void omap_irq_disable_vblank(struct drm_crtc *crtc) DBG("dev=%p, crtc=%u", dev, channel); spin_lock_irqsave(&priv->wait_lock, flags); - priv->irq_mask &= ~priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, + priv->irq_mask &= ~dispc_mgr_get_vsync_irq(priv->dispc, channel); omap_irq_update(dev); spin_unlock_irqrestore(&priv->wait_lock, flags); @@ -211,9 +211,9 @@ static irqreturn_t omap_irq_handler(int irq, void *arg) unsigned int id; u32 irqstatus; - irqstatus = priv->dispc_ops->read_irqstatus(priv->dispc); - priv->dispc_ops->clear_irqstatus(priv->dispc, irqstatus); - priv->dispc_ops->read_irqstatus(priv->dispc); /* flush posted write */ + irqstatus = dispc_read_irqstatus(priv->dispc); 
+ dispc_clear_irqstatus(priv->dispc, irqstatus); + dispc_read_irqstatus(priv->dispc); /* flush posted write */ VERB("irqs: %08x", irqstatus); @@ -221,15 +221,15 @@ static irqreturn_t omap_irq_handler(int irq, void *arg) struct drm_crtc *crtc = priv->pipes[id].crtc; enum omap_channel channel = omap_crtc_channel(crtc); - if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) { + if (irqstatus & dispc_mgr_get_vsync_irq(priv->dispc, channel)) { drm_handle_vblank(dev, id); omap_crtc_vblank_irq(crtc); } - if (irqstatus & priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, channel)) + if (irqstatus & dispc_mgr_get_sync_lost_irq(priv->dispc, channel)) omap_crtc_error_irq(crtc, irqstatus); - if (irqstatus & priv->dispc_ops->mgr_get_framedone_irq(priv->dispc, channel)) + if (irqstatus & dispc_mgr_get_framedone_irq(priv->dispc, channel)) omap_crtc_framedone_irq(crtc, irqstatus); } @@ -263,7 +263,7 @@ static const u32 omap_underflow_irqs[] = { int omap_drm_irq_install(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; - unsigned int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc); + unsigned int num_mgrs = dispc_get_num_mgrs(priv->dispc); unsigned int max_planes; unsigned int i; int ret; @@ -281,13 +281,13 @@ int omap_drm_irq_install(struct drm_device *dev) } for (i = 0; i < num_mgrs; ++i) - priv->irq_mask |= priv->dispc_ops->mgr_get_sync_lost_irq(priv->dispc, i); + priv->irq_mask |= dispc_mgr_get_sync_lost_irq(priv->dispc, i); - priv->dispc_ops->runtime_get(priv->dispc); - priv->dispc_ops->clear_irqstatus(priv->dispc, 0xffffffff); - priv->dispc_ops->runtime_put(priv->dispc); + dispc_runtime_get(priv->dispc); + dispc_clear_irqstatus(priv->dispc, 0xffffffff); + dispc_runtime_put(priv->dispc); - ret = priv->dispc_ops->request_irq(priv->dispc, omap_irq_handler, dev); + ret = dispc_request_irq(priv->dispc, omap_irq_handler, dev); if (ret < 0) return ret; @@ -305,5 +305,5 @@ void omap_drm_irq_uninstall(struct drm_device *dev) dev->irq_enabled = false; - priv->dispc_ops->free_irq(priv->dispc, dev); + dispc_free_irq(priv->dispc, dev); } diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c index 21e0b9785599..51dc24acea73 100644 --- a/drivers/gpu/drm/omapdrm/omap_plane.c +++ b/drivers/gpu/drm/omapdrm/omap_plane.c @@ -59,6 +59,8 @@ static void omap_plane_atomic_update(struct drm_plane *plane, info.pre_mult_alpha = 1; else info.pre_mult_alpha = 0; + info.color_encoding = state->color_encoding; + info.color_range = state->color_range; /* update scanout: */ omap_framebuffer_update_scanout(state->fb, state, &info); @@ -70,17 +72,17 @@ static void omap_plane_atomic_update(struct drm_plane *plane, &info.paddr, &info.p_uv_addr); /* and finally, update omapdss: */ - ret = priv->dispc_ops->ovl_setup(priv->dispc, omap_plane->id, &info, + ret = dispc_ovl_setup(priv->dispc, omap_plane->id, &info, omap_crtc_timings(state->crtc), false, omap_crtc_channel(state->crtc)); if (ret) { dev_err(plane->dev->dev, "Failed to setup plane %s\n", omap_plane->name); - priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false); + dispc_ovl_enable(priv->dispc, omap_plane->id, false); return; } - priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, true); + dispc_ovl_enable(priv->dispc, omap_plane->id, true); } static void omap_plane_atomic_disable(struct drm_plane *plane, @@ -93,7 +95,7 @@ static void omap_plane_atomic_disable(struct drm_plane *plane, plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 
0 : omap_plane->id; - priv->dispc_ops->ovl_enable(priv->dispc, omap_plane->id, false); + dispc_ovl_enable(priv->dispc, omap_plane->id, false); } static int omap_plane_atomic_check(struct drm_plane *plane, @@ -189,6 +191,8 @@ static void omap_plane_reset(struct drm_plane *plane) */ plane->state->zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : omap_plane->id; + plane->state->color_encoding = DRM_COLOR_YCBCR_BT601; + plane->state->color_range = DRM_COLOR_YCBCR_FULL_RANGE; } static int omap_plane_atomic_set_property(struct drm_plane *plane, @@ -232,6 +236,22 @@ static const struct drm_plane_funcs omap_plane_funcs = { .atomic_get_property = omap_plane_atomic_get_property, }; +static bool omap_plane_supports_yuv(struct drm_plane *plane) +{ + struct omap_drm_private *priv = plane->dev->dev_private; + struct omap_plane *omap_plane = to_omap_plane(plane); + const u32 *formats = dispc_ovl_get_color_modes(priv->dispc, omap_plane->id); + u32 i; + + for (i = 0; formats[i]; i++) + if (formats[i] == DRM_FORMAT_YUYV || + formats[i] == DRM_FORMAT_UYVY || + formats[i] == DRM_FORMAT_NV12) + return true; + + return false; +} + static const char *plane_id_to_name[] = { [OMAP_DSS_GFX] = "gfx", [OMAP_DSS_VIDEO1] = "vid1", @@ -252,7 +272,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, u32 possible_crtcs) { struct omap_drm_private *priv = dev->dev_private; - unsigned int num_planes = priv->dispc_ops->get_num_ovls(priv->dispc); + unsigned int num_planes = dispc_get_num_ovls(priv->dispc); struct drm_plane *plane; struct omap_plane *omap_plane; enum omap_plane_id id; @@ -271,7 +291,7 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, if (!omap_plane) return ERR_PTR(-ENOMEM); - formats = priv->dispc_ops->ovl_get_color_modes(priv->dispc, id); + formats = dispc_ovl_get_color_modes(priv->dispc, id); for (nformats = 0; formats[nformats]; ++nformats) ; omap_plane->id = id; @@ -293,6 +313,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev, drm_plane_create_blend_mode_property(plane, BIT(DRM_MODE_BLEND_PREMULTI) | BIT(DRM_MODE_BLEND_COVERAGE)); + if (omap_plane_supports_yuv(plane)) + drm_plane_create_color_properties(plane, + BIT(DRM_COLOR_YCBCR_BT601) | + BIT(DRM_COLOR_YCBCR_BT709), + BIT(DRM_COLOR_YCBCR_FULL_RANGE) | + BIT(DRM_COLOR_YCBCR_LIMITED_RANGE), + DRM_COLOR_YCBCR_BT601, + DRM_COLOR_YCBCR_FULL_RANGE); + return plane; error: diff --git a/drivers/gpu/drm/omapdrm/tcm-sita.c b/drivers/gpu/drm/omapdrm/tcm-sita.c index 9e1acbd2c7aa..8338dc665301 100644 --- a/drivers/gpu/drm/omapdrm/tcm-sita.c +++ b/drivers/gpu/drm/omapdrm/tcm-sita.c @@ -254,6 +254,5 @@ struct tcm *sita_init(u16 width, u16 height) return tcm; error: - kfree(tcm); return NULL; } diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig index b4e021ea30f9..4894913936e9 100644 --- a/drivers/gpu/drm/panel/Kconfig +++ b/drivers/gpu/drm/panel/Kconfig @@ -57,6 +57,15 @@ config DRM_PANEL_BOE_TV101WUM_NL6 Say Y here if you want to support for BOE TV101WUM and AUO KD101N80 45NA WUXGA PANEL DSI Video Mode panel +config DRM_PANEL_DSI_CM + tristate "Generic DSI command mode panels" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + DRM panel driver for DSI command mode panels with support for + embedded and external backlights. + config DRM_PANEL_LVDS tristate "Generic LVDS panel driver" depends on OF @@ -145,6 +154,17 @@ config DRM_PANEL_JDI_LT070ME05000 The panel has a 1200(RGB)×1920 (WUXGA) resolution and uses 24 bit per pixel. 
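Stepping back to the omap_plane changes above, which advertise COLOR_ENCODING and COLOR_RANGE on YUV-capable planes: as a hedged illustration of how userspace consumes such enum properties (stock libdrm calls with a runtime name lookup; none of this is from the patch), a helper might look like:

	#include <string.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	/* Set a plane property by name, e.g. "COLOR_ENCODING".
	 * Returns 0 on success, -1 on error or if the property is absent. */
	static int plane_set_prop(int fd, uint32_t plane_id, const char *name,
				  uint64_t value)
	{
		drmModeObjectProperties *props;
		int ret = -1;
		uint32_t i;

		props = drmModeObjectGetProperties(fd, plane_id,
						   DRM_MODE_OBJECT_PLANE);
		if (!props)
			return -1;

		for (i = 0; i < props->count_props; i++) {
			drmModePropertyPtr prop =
				drmModeGetProperty(fd, props->props[i]);

			if (prop && strcmp(prop->name, name) == 0)
				ret = drmModeObjectSetProperty(fd, plane_id,
							       DRM_MODE_OBJECT_PLANE,
							       prop->prop_id,
							       value);
			drmModeFreeProperty(prop);
		}

		drmModeFreeObjectProperties(props);
		return ret;
	}

On the enum created by drm_plane_create_color_properties(), value 0 selects ITU-R BT.601 and value 1 ITU-R BT.709, so a caller would use e.g. plane_set_prop(fd, plane_id, "COLOR_ENCODING", 1).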
+config DRM_PANEL_KHADAS_TS050 + tristate "Khadas TS050 panel" + depends on OF + depends on DRM_MIPI_DSI + depends on BACKLIGHT_CLASS_DEVICE + help + Say Y here if you want to enable support for Khadas TS050 TFT-LCD + panel module. The panel has a 1080x1920 resolution and uses + 24 bit RGB per pixel. It provides a MIPI DSI interface to + the host, a built-in LED backlight and touch controller. + config DRM_PANEL_KINGDISPLAY_KD097D04 tristate "Kingdisplay kd097d04 panel" depends on OF diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile index ebbf488c7eac..cae4d976c069 100644 --- a/drivers/gpu/drm/panel/Makefile +++ b/drivers/gpu/drm/panel/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o obj-$(CONFIG_DRM_PANEL_ASUS_Z00T_TM5P5_NT35596) += panel-asus-z00t-tm5p5-n35596.o obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o +obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o @@ -13,6 +14,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o +obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o obj-$(CONFIG_DRM_PANEL_KINGDISPLAY_KD097D04) += panel-kingdisplay-kd097d04.o obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK050H3146W) += panel-leadtek-ltk050h3146w.o obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o diff --git a/drivers/gpu/drm/panel/panel-dsi-cm.c b/drivers/gpu/drm/panel/panel-dsi-cm.c new file mode 100644 index 000000000000..af381d756ac1 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-dsi-cm.c @@ -0,0 +1,665 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic DSI Command Mode panel driver + * + * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ + * Author: Tomi Valkeinen <[email protected]> + */ + +#include <linux/backlight.h> +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/jiffies.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/regulator/consumer.h> + +#include <drm/drm_connector.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +#include <video/mipi_display.h> + +#define DCS_GET_ID1 0xda +#define DCS_GET_ID2 0xdb +#define DCS_GET_ID3 0xdc + +#define DCS_REGULATOR_SUPPLY_NUM 2 + +static const struct of_device_id dsicm_of_match[]; + +struct dsic_panel_data { + u32 xres; + u32 yres; + u32 refresh; + u32 width_mm; + u32 height_mm; + u32 max_hs_rate; + u32 max_lp_rate; +}; + +struct panel_drv_data { + struct mipi_dsi_device *dsi; + struct drm_panel panel; + struct drm_display_mode mode; + + struct mutex lock; + + struct backlight_device *bldev; + struct backlight_device *extbldev; + + unsigned long hw_guard_end; /* next value of jiffies when we can + * issue the next sleep in/out command + */ + unsigned long hw_guard_wait; /* max guard time in jiffies */ + + const struct dsic_panel_data *panel_data; + + struct gpio_desc *reset_gpio; + + struct regulator_bulk_data supplies[DCS_REGULATOR_SUPPLY_NUM]; + + bool use_dsi_backlight; + + /* runtime variables */ + bool enabled; + + bool intro_printed; +}; + +static inline struct 
panel_drv_data *panel_to_ddata(struct drm_panel *panel) +{ + return container_of(panel, struct panel_drv_data, panel); +} + +static void dsicm_bl_power(struct panel_drv_data *ddata, bool enable) +{ + struct backlight_device *backlight; + + if (ddata->bldev) + backlight = ddata->bldev; + else if (ddata->extbldev) + backlight = ddata->extbldev; + else + return; + + if (enable) { + backlight->props.fb_blank = FB_BLANK_UNBLANK; + backlight->props.state = ~(BL_CORE_FBBLANK | BL_CORE_SUSPENDED); + backlight->props.power = FB_BLANK_UNBLANK; + } else { + backlight->props.fb_blank = FB_BLANK_NORMAL; + backlight->props.power = FB_BLANK_POWERDOWN; + backlight->props.state |= BL_CORE_FBBLANK | BL_CORE_SUSPENDED; + } + + backlight_update_status(backlight); +} + +static void hw_guard_start(struct panel_drv_data *ddata, int guard_msec) +{ + ddata->hw_guard_wait = msecs_to_jiffies(guard_msec); + ddata->hw_guard_end = jiffies + ddata->hw_guard_wait; +} + +static void hw_guard_wait(struct panel_drv_data *ddata) +{ + unsigned long wait = ddata->hw_guard_end - jiffies; + + if ((long)wait > 0 && wait <= ddata->hw_guard_wait) { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(wait); + } +} + +static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data) +{ + return mipi_dsi_dcs_read(ddata->dsi, dcs_cmd, data, 1); +} + +static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param) +{ + return mipi_dsi_dcs_write(ddata->dsi, dcs_cmd, &param, 1); +} + +static int dsicm_sleep_in(struct panel_drv_data *ddata) + +{ + int r; + + hw_guard_wait(ddata); + + r = mipi_dsi_dcs_enter_sleep_mode(ddata->dsi); + if (r) + return r; + + hw_guard_start(ddata, 120); + + usleep_range(5000, 10000); + + return 0; +} + +static int dsicm_sleep_out(struct panel_drv_data *ddata) +{ + int r; + + hw_guard_wait(ddata); + + r = mipi_dsi_dcs_exit_sleep_mode(ddata->dsi); + if (r) + return r; + + hw_guard_start(ddata, 120); + + usleep_range(5000, 10000); + + return 0; +} + +static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3) +{ + int r; + + r = dsicm_dcs_read_1(ddata, DCS_GET_ID1, id1); + if (r) + return r; + r = dsicm_dcs_read_1(ddata, DCS_GET_ID2, id2); + if (r) + return r; + r = dsicm_dcs_read_1(ddata, DCS_GET_ID3, id3); + if (r) + return r; + + return 0; +} + +static int dsicm_set_update_window(struct panel_drv_data *ddata) +{ + struct mipi_dsi_device *dsi = ddata->dsi; + int r; + + r = mipi_dsi_dcs_set_column_address(dsi, 0, ddata->mode.hdisplay - 1); + if (r < 0) + return r; + + r = mipi_dsi_dcs_set_page_address(dsi, 0, ddata->mode.vdisplay - 1); + if (r < 0) + return r; + + return 0; +} + +static int dsicm_bl_update_status(struct backlight_device *dev) +{ + struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev); + int r = 0; + int level; + + if (dev->props.fb_blank == FB_BLANK_UNBLANK && + dev->props.power == FB_BLANK_UNBLANK) + level = dev->props.brightness; + else + level = 0; + + dev_dbg(&ddata->dsi->dev, "update brightness to %d\n", level); + + mutex_lock(&ddata->lock); + + if (ddata->enabled) + r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, + level); + + mutex_unlock(&ddata->lock); + + return r; +} + +static int dsicm_bl_get_intensity(struct backlight_device *dev) +{ + if (dev->props.fb_blank == FB_BLANK_UNBLANK && + dev->props.power == FB_BLANK_UNBLANK) + return dev->props.brightness; + + return 0; +} + +static const struct backlight_ops dsicm_bl_ops = { + .get_brightness = dsicm_bl_get_intensity, + .update_status = dsicm_bl_update_status,
+}; + +static ssize_t num_dsi_errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + u8 errors = 0; + int r = -ENODEV; + + mutex_lock(&ddata->lock); + + if (ddata->enabled) + r = dsicm_dcs_read_1(ddata, MIPI_DCS_GET_ERROR_COUNT_ON_DSI, &errors); + + mutex_unlock(&ddata->lock); + + if (r) + return r; + + return snprintf(buf, PAGE_SIZE, "%d\n", errors); +} + +static ssize_t hw_revision_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct panel_drv_data *ddata = dev_get_drvdata(dev); + u8 id1, id2, id3; + int r = -ENODEV; + + mutex_lock(&ddata->lock); + + if (ddata->enabled) + r = dsicm_get_id(ddata, &id1, &id2, &id3); + + mutex_unlock(&ddata->lock); + + if (r) + return r; + + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x\n", id1, id2, id3); +} + +static DEVICE_ATTR_RO(num_dsi_errors); +static DEVICE_ATTR_RO(hw_revision); + +static struct attribute *dsicm_attrs[] = { + &dev_attr_num_dsi_errors.attr, + &dev_attr_hw_revision.attr, + NULL, +}; + +static const struct attribute_group dsicm_attr_group = { + .attrs = dsicm_attrs, +}; + +static void dsicm_hw_reset(struct panel_drv_data *ddata) +{ + gpiod_set_value(ddata->reset_gpio, 1); + udelay(10); + /* reset the panel */ + gpiod_set_value(ddata->reset_gpio, 0); + /* assert reset */ + udelay(10); + gpiod_set_value(ddata->reset_gpio, 1); + /* wait after releasing reset */ + usleep_range(5000, 10000); +} + +static int dsicm_power_on(struct panel_drv_data *ddata) +{ + u8 id1, id2, id3; + int r; + + dsicm_hw_reset(ddata); + + ddata->dsi->mode_flags |= MIPI_DSI_MODE_LPM; + + r = dsicm_sleep_out(ddata); + if (r) + goto err; + + r = dsicm_get_id(ddata, &id1, &id2, &id3); + if (r) + goto err; + + r = dsicm_dcs_write_1(ddata, MIPI_DCS_SET_DISPLAY_BRIGHTNESS, 0xff); + if (r) + goto err; + + r = dsicm_dcs_write_1(ddata, MIPI_DCS_WRITE_CONTROL_DISPLAY, + (1<<2) | (1<<5)); /* BL | BCTRL */ + if (r) + goto err; + + r = mipi_dsi_dcs_set_pixel_format(ddata->dsi, MIPI_DCS_PIXEL_FMT_24BIT); + if (r) + goto err; + + r = dsicm_set_update_window(ddata); + if (r) + goto err; + + r = mipi_dsi_dcs_set_display_on(ddata->dsi); + if (r) + goto err; + + r = mipi_dsi_dcs_set_tear_on(ddata->dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK); + if (r) + goto err; + + /* possible panel bug */ + msleep(100); + + ddata->enabled = true; + + if (!ddata->intro_printed) { + dev_info(&ddata->dsi->dev, "panel revision %02x.%02x.%02x\n", + id1, id2, id3); + ddata->intro_printed = true; + } + + ddata->dsi->mode_flags &= ~MIPI_DSI_MODE_LPM; + + return 0; +err: + dev_err(&ddata->dsi->dev, "error while enabling panel, issuing HW reset\n"); + + dsicm_hw_reset(ddata); + + return r; +} + +static int dsicm_power_off(struct panel_drv_data *ddata) +{ + int r; + + ddata->enabled = false; + + r = mipi_dsi_dcs_set_display_off(ddata->dsi); + if (!r) + r = dsicm_sleep_in(ddata); + + if (r) { + dev_err(&ddata->dsi->dev, + "error disabling panel, issuing HW reset\n"); + dsicm_hw_reset(ddata); + } + + return r; +} + +static int dsicm_prepare(struct drm_panel *panel) +{ + struct panel_drv_data *ddata = panel_to_ddata(panel); + int r; + + r = regulator_bulk_enable(ARRAY_SIZE(ddata->supplies), ddata->supplies); + if (r) + dev_err(&ddata->dsi->dev, "failed to enable supplies: %d\n", r); + + return r; +} + +static int dsicm_enable(struct drm_panel *panel) +{ + struct panel_drv_data *ddata = panel_to_ddata(panel); + int r; + + mutex_lock(&ddata->lock); + + r = dsicm_power_on(ddata); + if (r) + goto err; + + 
mutex_unlock(&ddata->lock); + + dsicm_bl_power(ddata, true); + + return 0; +err: + dev_err(&ddata->dsi->dev, "enable failed (%d)\n", r); + mutex_unlock(&ddata->lock); + return r; +} + +static int dsicm_unprepare(struct drm_panel *panel) +{ + struct panel_drv_data *ddata = panel_to_ddata(panel); + int r; + + r = regulator_bulk_disable(ARRAY_SIZE(ddata->supplies), ddata->supplies); + if (r) + dev_err(&ddata->dsi->dev, "failed to disable supplies: %d\n", r); + + return r; +} + +static int dsicm_disable(struct drm_panel *panel) +{ + struct panel_drv_data *ddata = panel_to_ddata(panel); + int r; + + dsicm_bl_power(ddata, false); + + mutex_lock(&ddata->lock); + + r = dsicm_power_off(ddata); + + mutex_unlock(&ddata->lock); + + return r; +} + +static int dsicm_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct panel_drv_data *ddata = panel_to_ddata(panel); + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &ddata->mode); + if (!mode) { + dev_err(&ddata->dsi->dev, "failed to add mode %ux%ux@%u kHz\n", + ddata->mode.hdisplay, ddata->mode.vdisplay, + ddata->mode.clock); + return -ENOMEM; + } + + connector->display_info.width_mm = ddata->panel_data->width_mm; + connector->display_info.height_mm = ddata->panel_data->height_mm; + + drm_mode_probed_add(connector, mode); + + return 1; +} + +static const struct drm_panel_funcs dsicm_panel_funcs = { + .unprepare = dsicm_unprepare, + .disable = dsicm_disable, + .prepare = dsicm_prepare, + .enable = dsicm_enable, + .get_modes = dsicm_get_modes, +}; + +static int dsicm_probe_of(struct mipi_dsi_device *dsi) +{ + struct backlight_device *backlight; + struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi); + int err; + struct drm_display_mode *mode = &ddata->mode; + + ddata->reset_gpio = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW); + if (IS_ERR(ddata->reset_gpio)) { + err = PTR_ERR(ddata->reset_gpio); + dev_err(&dsi->dev, "reset gpio request failed: %d", err); + return err; + } + + mode->hdisplay = mode->hsync_start = mode->hsync_end = mode->htotal = + ddata->panel_data->xres; + mode->vdisplay = mode->vsync_start = mode->vsync_end = mode->vtotal = + ddata->panel_data->yres; + mode->clock = ddata->panel_data->xres * ddata->panel_data->yres * + ddata->panel_data->refresh / 1000; + mode->width_mm = ddata->panel_data->width_mm; + mode->height_mm = ddata->panel_data->height_mm; + mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; + drm_mode_set_name(mode); + + ddata->supplies[0].supply = "vpnl"; + ddata->supplies[1].supply = "vddi"; + err = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ddata->supplies), + ddata->supplies); + if (err) + return err; + + backlight = devm_of_find_backlight(&dsi->dev); + if (IS_ERR(backlight)) + return PTR_ERR(backlight); + + /* If no backlight device is found assume native backlight support */ + if (backlight) + ddata->extbldev = backlight; + else + ddata->use_dsi_backlight = true; + + return 0; +} + +static int dsicm_probe(struct mipi_dsi_device *dsi) +{ + struct panel_drv_data *ddata; + struct backlight_device *bldev = NULL; + struct device *dev = &dsi->dev; + int r; + + dev_dbg(dev, "probe\n"); + + ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; + + mipi_dsi_set_drvdata(dsi, ddata); + ddata->dsi = dsi; + + ddata->panel_data = of_device_get_match_data(dev); + if (!ddata->panel_data) + return -ENODEV; + + r = dsicm_probe_of(dsi); + if (r) + return r; + + mutex_init(&ddata->lock); + + dsicm_hw_reset(ddata); + + 
drm_panel_init(&ddata->panel, dev, &dsicm_panel_funcs, + DRM_MODE_CONNECTOR_DSI); + + if (ddata->use_dsi_backlight) { + struct backlight_properties props = { 0 }; + props.max_brightness = 255; + props.type = BACKLIGHT_RAW; + + bldev = devm_backlight_device_register(dev, dev_name(dev), + dev, ddata, &dsicm_bl_ops, &props); + if (IS_ERR(bldev)) { + r = PTR_ERR(bldev); + goto err_bl; + } + + ddata->bldev = bldev; + } + + r = sysfs_create_group(&dev->kobj, &dsicm_attr_group); + if (r) { + dev_err(dev, "failed to create sysfs files\n"); + goto err_bl; + } + + dsi->lanes = 2; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS | + MIPI_DSI_MODE_EOT_PACKET; + dsi->hs_rate = ddata->panel_data->max_hs_rate; + dsi->lp_rate = ddata->panel_data->max_lp_rate; + + drm_panel_add(&ddata->panel); + + r = mipi_dsi_attach(dsi); + if (r < 0) + goto err_dsi_attach; + + return 0; + +err_dsi_attach: + drm_panel_remove(&ddata->panel); + sysfs_remove_group(&dsi->dev.kobj, &dsicm_attr_group); +err_bl: + if (ddata->extbldev) + put_device(&ddata->extbldev->dev); + + return r; +} + +static int dsicm_remove(struct mipi_dsi_device *dsi) +{ + struct panel_drv_data *ddata = mipi_dsi_get_drvdata(dsi); + + dev_dbg(&dsi->dev, "remove\n"); + + mipi_dsi_detach(dsi); + + drm_panel_remove(&ddata->panel); + + sysfs_remove_group(&dsi->dev.kobj, &dsicm_attr_group); + + if (ddata->extbldev) + put_device(&ddata->extbldev->dev); + + return 0; +} + +static const struct dsic_panel_data taal_data = { + .xres = 864, + .yres = 480, + .refresh = 60, + .width_mm = 0, + .height_mm = 0, + .max_hs_rate = 300000000, + .max_lp_rate = 10000000, +}; + +static const struct dsic_panel_data himalaya_data = { + .xres = 480, + .yres = 864, + .refresh = 60, + .width_mm = 49, + .height_mm = 88, + .max_hs_rate = 300000000, + .max_lp_rate = 10000000, +}; + +static const struct dsic_panel_data droid4_data = { + .xres = 540, + .yres = 960, + .refresh = 60, + .width_mm = 50, + .height_mm = 89, + .max_hs_rate = 300000000, + .max_lp_rate = 10000000, +}; + +static const struct of_device_id dsicm_of_match[] = { + { .compatible = "tpo,taal", .data = &taal_data }, + { .compatible = "nokia,himalaya", &himalaya_data }, + { .compatible = "motorola,droid4-panel", &droid4_data }, + {}, +}; + +MODULE_DEVICE_TABLE(of, dsicm_of_match); + +static struct mipi_dsi_driver dsicm_driver = { + .probe = dsicm_probe, + .remove = dsicm_remove, + .driver = { + .name = "panel-dsi-cm", + .of_match_table = dsicm_of_match, + }, +}; +module_mipi_dsi_driver(dsicm_driver); + +MODULE_AUTHOR("Tomi Valkeinen <[email protected]>"); +MODULE_DESCRIPTION("Generic DSI Command Mode Panel Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/panel/panel-khadas-ts050.c b/drivers/gpu/drm/panel/panel-khadas-ts050.c new file mode 100644 index 000000000000..8f6ac1a40c31 --- /dev/null +++ b/drivers/gpu/drm/panel/panel-khadas-ts050.c @@ -0,0 +1,870 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 BayLibre, SAS + * Author: Neil Armstrong <[email protected]> + */ + +#include <linux/delay.h> +#include <linux/gpio/consumer.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/regulator/consumer.h> + +#include <video/mipi_display.h> + +#include <drm/drm_crtc.h> +#include <drm/drm_device.h> +#include <drm/drm_mipi_dsi.h> +#include <drm/drm_modes.h> +#include <drm/drm_panel.h> + +struct khadas_ts050_panel { + struct drm_panel base; + struct mipi_dsi_device *link; + + struct regulator *supply; + struct gpio_desc *reset_gpio; + struct 
gpio_desc *enable_gpio; + + bool prepared; + bool enabled; +}; + +struct khadas_ts050_panel_cmd { + u8 cmd; + u8 data; +}; + +/* Only the CMD1 User Command set is documented */ +static const struct khadas_ts050_panel_cmd init_code[] = { + /* Select Unknown CMD Page (Undocumented) */ + {0xff, 0xee}, + /* Reload CMD1: Don't reload default value to register */ + {0xfb, 0x01}, + {0x1f, 0x45}, + {0x24, 0x4f}, + {0x38, 0xc8}, + {0x39, 0x27}, + {0x1e, 0x77}, + {0x1d, 0x0f}, + {0x7e, 0x71}, + {0x7c, 0x03}, + {0xff, 0x00}, + {0xfb, 0x01}, + {0x35, 0x01}, + /* Select CMD2 Page0 (Undocumented) */ + {0xff, 0x01}, + /* Reload CMD1: Don't reload default value to register */ + {0xfb, 0x01}, + {0x00, 0x01}, + {0x01, 0x55}, + {0x02, 0x40}, + {0x05, 0x40}, + {0x06, 0x4a}, + {0x07, 0x24}, + {0x08, 0x0c}, + {0x0b, 0x7d}, + {0x0c, 0x7d}, + {0x0e, 0xb0}, + {0x0f, 0xae}, + {0x11, 0x10}, + {0x12, 0x10}, + {0x13, 0x03}, + {0x14, 0x4a}, + {0x15, 0x12}, + {0x16, 0x12}, + {0x18, 0x00}, + {0x19, 0x77}, + {0x1a, 0x55}, + {0x1b, 0x13}, + {0x1c, 0x00}, + {0x1d, 0x00}, + {0x1e, 0x13}, + {0x1f, 0x00}, + {0x23, 0x00}, + {0x24, 0x00}, + {0x25, 0x00}, + {0x26, 0x00}, + {0x27, 0x00}, + {0x28, 0x00}, + {0x35, 0x00}, + {0x66, 0x00}, + {0x58, 0x82}, + {0x59, 0x02}, + {0x5a, 0x02}, + {0x5b, 0x02}, + {0x5c, 0x82}, + {0x5d, 0x82}, + {0x5e, 0x02}, + {0x5f, 0x02}, + {0x72, 0x31}, + /* Select CMD2 Page4 (Undocumented) */ + {0xff, 0x05}, + /* Reload CMD1: Don't reload default value to register */ + {0xfb, 0x01}, + {0x00, 0x01}, + {0x01, 0x0b}, + {0x02, 0x0c}, + {0x03, 0x09}, + {0x04, 0x0a}, + {0x05, 0x00}, + {0x06, 0x0f}, + {0x07, 0x10}, + {0x08, 0x00}, + {0x09, 0x00}, + {0x0a, 0x00}, + {0x0b, 0x00}, + {0x0c, 0x00}, + {0x0d, 0x13}, + {0x0e, 0x15}, + {0x0f, 0x17}, + {0x10, 0x01}, + {0x11, 0x0b}, + {0x12, 0x0c}, + {0x13, 0x09}, + {0x14, 0x0a}, + {0x15, 0x00}, + {0x16, 0x0f}, + {0x17, 0x10}, + {0x18, 0x00}, + {0x19, 0x00}, + {0x1a, 0x00}, + {0x1b, 0x00}, + {0x1c, 0x00}, + {0x1d, 0x13}, + {0x1e, 0x15}, + {0x1f, 0x17}, + {0x20, 0x00}, + {0x21, 0x03}, + {0x22, 0x01}, + {0x23, 0x40}, + {0x24, 0x40}, + {0x25, 0xed}, + {0x29, 0x58}, + {0x2a, 0x12}, + {0x2b, 0x01}, + {0x4b, 0x06}, + {0x4c, 0x11}, + {0x4d, 0x20}, + {0x4e, 0x02}, + {0x4f, 0x02}, + {0x50, 0x20}, + {0x51, 0x61}, + {0x52, 0x01}, + {0x53, 0x63}, + {0x54, 0x77}, + {0x55, 0xed}, + {0x5b, 0x00}, + {0x5c, 0x00}, + {0x5d, 0x00}, + {0x5e, 0x00}, + {0x5f, 0x15}, + {0x60, 0x75}, + {0x61, 0x00}, + {0x62, 0x00}, + {0x63, 0x00}, + {0x64, 0x00}, + {0x65, 0x00}, + {0x66, 0x00}, + {0x67, 0x00}, + {0x68, 0x04}, + {0x69, 0x00}, + {0x6a, 0x00}, + {0x6c, 0x40}, + {0x75, 0x01}, + {0x76, 0x01}, + {0x7a, 0x80}, + {0x7b, 0xa3}, + {0x7c, 0xd8}, + {0x7d, 0x60}, + {0x7f, 0x15}, + {0x80, 0x81}, + {0x83, 0x05}, + {0x93, 0x08}, + {0x94, 0x10}, + {0x8a, 0x00}, + {0x9b, 0x0f}, + {0xea, 0xff}, + {0xec, 0x00}, + /* Select CMD2 Page0 (Undocumented) */ + {0xff, 0x01}, + /* Reload CMD1: Don't reload default value to register */ + {0xfb, 0x01}, + {0x75, 0x00}, + {0x76, 0xdf}, + {0x77, 0x00}, + {0x78, 0xe4}, + {0x79, 0x00}, + {0x7a, 0xed}, + {0x7b, 0x00}, + {0x7c, 0xf6}, + {0x7d, 0x00}, + {0x7e, 0xff}, + {0x7f, 0x01}, + {0x80, 0x07}, + {0x81, 0x01}, + {0x82, 0x10}, + {0x83, 0x01}, + {0x84, 0x18}, + {0x85, 0x01}, + {0x86, 0x20}, + {0x87, 0x01}, + {0x88, 0x3d}, + {0x89, 0x01}, + {0x8a, 0x56}, + {0x8b, 0x01}, + {0x8c, 0x84}, + {0x8d, 0x01}, + {0x8e, 0xab}, + {0x8f, 0x01}, + {0x90, 0xec}, + {0x91, 0x02}, + {0x92, 0x22}, + {0x93, 0x02}, + {0x94, 0x23}, + {0x95, 0x02}, + {0x96, 0x55}, + {0x97, 0x02}, + {0x98, 0x8b}, + {0x99, 
0x02}, + {0x9a, 0xaf}, + {0x9b, 0x02}, + {0x9c, 0xdf}, + {0x9d, 0x03}, + {0x9e, 0x01}, + {0x9f, 0x03}, + {0xa0, 0x2c}, + {0xa2, 0x03}, + {0xa3, 0x39}, + {0xa4, 0x03}, + {0xa5, 0x47}, + {0xa6, 0x03}, + {0xa7, 0x56}, + {0xa9, 0x03}, + {0xaa, 0x66}, + {0xab, 0x03}, + {0xac, 0x76}, + {0xad, 0x03}, + {0xae, 0x85}, + {0xaf, 0x03}, + {0xb0, 0x90}, + {0xb1, 0x03}, + {0xb2, 0xcb}, + {0xb3, 0x00}, + {0xb4, 0xdf}, + {0xb5, 0x00}, + {0xb6, 0xe4}, + {0xb7, 0x00}, + {0xb8, 0xed}, + {0xb9, 0x00}, + {0xba, 0xf6}, + {0xbb, 0x00}, + {0xbc, 0xff}, + {0xbd, 0x01}, + {0xbe, 0x07}, + {0xbf, 0x01}, + {0xc0, 0x10}, + {0xc1, 0x01}, + {0xc2, 0x18}, + {0xc3, 0x01}, + {0xc4, 0x20}, + {0xc5, 0x01}, + {0xc6, 0x3d}, + {0xc7, 0x01}, + {0xc8, 0x56}, + {0xc9, 0x01}, + {0xca, 0x84}, + {0xcb, 0x01}, + {0xcc, 0xab}, + {0xcd, 0x01}, + {0xce, 0xec}, + {0xcf, 0x02}, + {0xd0, 0x22}, + {0xd1, 0x02}, + {0xd2, 0x23}, + {0xd3, 0x02}, + {0xd4, 0x55}, + {0xd5, 0x02}, + {0xd6, 0x8b}, + {0xd7, 0x02}, + {0xd8, 0xaf}, + {0xd9, 0x02}, + {0xda, 0xdf}, + {0xdb, 0x03}, + {0xdc, 0x01}, + {0xdd, 0x03}, + {0xde, 0x2c}, + {0xdf, 0x03}, + {0xe0, 0x39}, + {0xe1, 0x03}, + {0xe2, 0x47}, + {0xe3, 0x03}, + {0xe4, 0x56}, + {0xe5, 0x03}, + {0xe6, 0x66}, + {0xe7, 0x03}, + {0xe8, 0x76}, + {0xe9, 0x03}, + {0xea, 0x85}, + {0xeb, 0x03}, + {0xec, 0x90}, + {0xed, 0x03}, + {0xee, 0xcb}, + {0xef, 0x00}, + {0xf0, 0xbb}, + {0xf1, 0x00}, + {0xf2, 0xc0}, + {0xf3, 0x00}, + {0xf4, 0xcc}, + {0xf5, 0x00}, + {0xf6, 0xd6}, + {0xf7, 0x00}, + {0xf8, 0xe1}, + {0xf9, 0x00}, + {0xfa, 0xea}, + /* Select CMD2 Page2 (Undocumented) */ + {0xff, 0x02}, + /* Reload CMD1: Don't reload default value to register */ + {0xfb, 0x01}, + {0x00, 0x00}, + {0x01, 0xf4}, + {0x02, 0x00}, + {0x03, 0xef}, + {0x04, 0x01}, + {0x05, 0x07}, + {0x06, 0x01}, + {0x07, 0x28}, + {0x08, 0x01}, + {0x09, 0x44}, + {0x0a, 0x01}, + {0x0b, 0x76}, + {0x0c, 0x01}, + {0x0d, 0xa0}, + {0x0e, 0x01}, + {0x0f, 0xe7}, + {0x10, 0x02}, + {0x11, 0x1f}, + {0x12, 0x02}, + {0x13, 0x22}, + {0x14, 0x02}, + {0x15, 0x54}, + {0x16, 0x02}, + {0x17, 0x8b}, + {0x18, 0x02}, + {0x19, 0xaf}, + {0x1a, 0x02}, + {0x1b, 0xe0}, + {0x1c, 0x03}, + {0x1d, 0x01}, + {0x1e, 0x03}, + {0x1f, 0x2d}, + {0x20, 0x03}, + {0x21, 0x39}, + {0x22, 0x03}, + {0x23, 0x47}, + {0x24, 0x03}, + {0x25, 0x57}, + {0x26, 0x03}, + {0x27, 0x65}, + {0x28, 0x03}, + {0x29, 0x77}, + {0x2a, 0x03}, + {0x2b, 0x85}, + {0x2d, 0x03}, + {0x2f, 0x8f}, + {0x30, 0x03}, + {0x31, 0xcb}, + {0x32, 0x00}, + {0x33, 0xbb}, + {0x34, 0x00}, + {0x35, 0xc0}, + {0x36, 0x00}, + {0x37, 0xcc}, + {0x38, 0x00}, + {0x39, 0xd6}, + {0x3a, 0x00}, + {0x3b, 0xe1}, + {0x3d, 0x00}, + {0x3f, 0xea}, + {0x40, 0x00}, + {0x41, 0xf4}, + {0x42, 0x00}, + {0x43, 0xfe}, + {0x44, 0x01}, + {0x45, 0x07}, + {0x46, 0x01}, + {0x47, 0x28}, + {0x48, 0x01}, + {0x49, 0x44}, + {0x4a, 0x01}, + {0x4b, 0x76}, + {0x4c, 0x01}, + {0x4d, 0xa0}, + {0x4e, 0x01}, + {0x4f, 0xe7}, + {0x50, 0x02}, + {0x51, 0x1f}, + {0x52, 0x02}, + {0x53, 0x22}, + {0x54, 0x02}, + {0x55, 0x54}, + {0x56, 0x02}, + {0x58, 0x8b}, + {0x59, 0x02}, + {0x5a, 0xaf}, + {0x5b, 0x02}, + {0x5c, 0xe0}, + {0x5d, 0x03}, + {0x5e, 0x01}, + {0x5f, 0x03}, + {0x60, 0x2d}, + {0x61, 0x03}, + {0x62, 0x39}, + {0x63, 0x03}, + {0x64, 0x47}, + {0x65, 0x03}, + {0x66, 0x57}, + {0x67, 0x03}, + {0x68, 0x65}, + {0x69, 0x03}, + {0x6a, 0x77}, + {0x6b, 0x03}, + {0x6c, 0x85}, + {0x6d, 0x03}, + {0x6e, 0x8f}, + {0x6f, 0x03}, + {0x70, 0xcb}, + {0x71, 0x00}, + {0x72, 0x00}, + {0x73, 0x00}, + {0x74, 0x21}, + {0x75, 0x00}, + {0x76, 0x4c}, + {0x77, 0x00}, + {0x78, 0x6b}, + {0x79, 0x00}, + {0x7a, 0x85}, + 
{0x7b, 0x00}, + {0x7c, 0x9a}, + {0x7d, 0x00}, + {0x7e, 0xad}, + {0x7f, 0x00}, + {0x80, 0xbe}, + {0x81, 0x00}, + {0x82, 0xcd}, + {0x83, 0x01}, + {0x84, 0x01}, + {0x85, 0x01}, + {0x86, 0x29}, + {0x87, 0x01}, + {0x88, 0x68}, + {0x89, 0x01}, + {0x8a, 0x98}, + {0x8b, 0x01}, + {0x8c, 0xe5}, + {0x8d, 0x02}, + {0x8e, 0x1e}, + {0x8f, 0x02}, + {0x90, 0x30}, + {0x91, 0x02}, + {0x92, 0x52}, + {0x93, 0x02}, + {0x94, 0x88}, + {0x95, 0x02}, + {0x96, 0xaa}, + {0x97, 0x02}, + {0x98, 0xd7}, + {0x99, 0x02}, + {0x9a, 0xf7}, + {0x9b, 0x03}, + {0x9c, 0x21}, + {0x9d, 0x03}, + {0x9e, 0x2e}, + {0x9f, 0x03}, + {0xa0, 0x3d}, + {0xa2, 0x03}, + {0xa3, 0x4c}, + {0xa4, 0x03}, + {0xa5, 0x5e}, + {0xa6, 0x03}, + {0xa7, 0x71}, + {0xa9, 0x03}, + {0xaa, 0x86}, + {0xab, 0x03}, + {0xac, 0x94}, + {0xad, 0x03}, + {0xae, 0xfa}, + {0xaf, 0x00}, + {0xb0, 0x00}, + {0xb1, 0x00}, + {0xb2, 0x21}, + {0xb3, 0x00}, + {0xb4, 0x4c}, + {0xb5, 0x00}, + {0xb6, 0x6b}, + {0xb7, 0x00}, + {0xb8, 0x85}, + {0xb9, 0x00}, + {0xba, 0x9a}, + {0xbb, 0x00}, + {0xbc, 0xad}, + {0xbd, 0x00}, + {0xbe, 0xbe}, + {0xbf, 0x00}, + {0xc0, 0xcd}, + {0xc1, 0x01}, + {0xc2, 0x01}, + {0xc3, 0x01}, + {0xc4, 0x29}, + {0xc5, 0x01}, + {0xc6, 0x68}, + {0xc7, 0x01}, + {0xc8, 0x98}, + {0xc9, 0x01}, + {0xca, 0xe5}, + {0xcb, 0x02}, + {0xcc, 0x1e}, + {0xcd, 0x02}, + {0xce, 0x20}, + {0xcf, 0x02}, + {0xd0, 0x52}, + {0xd1, 0x02}, + {0xd2, 0x88}, + {0xd3, 0x02}, + {0xd4, 0xaa}, + {0xd5, 0x02}, + {0xd6, 0xd7}, + {0xd7, 0x02}, + {0xd8, 0xf7}, + {0xd9, 0x03}, + {0xda, 0x21}, + {0xdb, 0x03}, + {0xdc, 0x2e}, + {0xdd, 0x03}, + {0xde, 0x3d}, + {0xdf, 0x03}, + {0xe0, 0x4c}, + {0xe1, 0x03}, + {0xe2, 0x5e}, + {0xe3, 0x03}, + {0xe4, 0x71}, + {0xe5, 0x03}, + {0xe6, 0x86}, + {0xe7, 0x03}, + {0xe8, 0x94}, + {0xe9, 0x03}, + {0xea, 0xfa}, + /* Select CMD2 Page0 (Undocumented) */ + {0xff, 0x01}, + /* Reload CMD1: Don't reload default value to register */ + {0xfb, 0x01}, + /* Select CMD2 Page1 (Undocumented) */ + {0xff, 0x02}, + /* Reload CMD1: Don't reload default value to register */ + {0xfb, 0x01}, + /* Select CMD2 Page3 (Undocumented) */ + {0xff, 0x04}, + /* Reload CMD1: Don't reload default value to register */ + {0xfb, 0x01}, + /* Select CMD1 */ + {0xff, 0x00}, + {0xd3, 0x05}, /* RGBMIPICTRL: VSYNC back porch = 5 */ + {0xd4, 0x04}, /* RGBMIPICTRL: VSYNC front porch = 4 */ +}; + +static inline +struct khadas_ts050_panel *to_khadas_ts050_panel(struct drm_panel *panel) +{ + return container_of(panel, struct khadas_ts050_panel, base); +} + +static int khadas_ts050_panel_prepare(struct drm_panel *panel) +{ + struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel); + unsigned int i; + int err; + + if (khadas_ts050->prepared) + return 0; + + gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0); + + err = regulator_enable(khadas_ts050->supply); + if (err < 0) + return err; + + gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 1); + + msleep(60); + + gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1); + + usleep_range(10000, 11000); + + gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 0); + + /* Select CMD2 page 4 (Undocumented) */ + mipi_dsi_dcs_write(khadas_ts050->link, 0xff, (u8[]){ 0x05 }, 1); + + /* Reload CMD1: Don't reload default value to register */ + mipi_dsi_dcs_write(khadas_ts050->link, 0xfb, (u8[]){ 0x01 }, 1); + + mipi_dsi_dcs_write(khadas_ts050->link, 0xc5, (u8[]){ 0x01 }, 1); + + msleep(100); + + for (i = 0; i < ARRAY_SIZE(init_code); i++) { + err = mipi_dsi_dcs_write(khadas_ts050->link, + init_code[i].cmd, + &init_code[i].data, 1); + if (err < 0) { + 
dev_err(panel->dev, "failed write cmds: %d\n", err); + goto poweroff; + } + } + + err = mipi_dsi_dcs_exit_sleep_mode(khadas_ts050->link); + if (err < 0) { + dev_err(panel->dev, "failed to exit sleep mode: %d\n", err); + goto poweroff; + } + + msleep(120); + + /* Select CMD1 */ + mipi_dsi_dcs_write(khadas_ts050->link, 0xff, (u8[]){ 0x00 }, 1); + + err = mipi_dsi_dcs_set_tear_on(khadas_ts050->link, + MIPI_DSI_DCS_TEAR_MODE_VBLANK); + if (err < 0) { + dev_err(panel->dev, "failed to set tear on: %d\n", err); + goto poweroff; + } + + err = mipi_dsi_dcs_set_display_on(khadas_ts050->link); + if (err < 0) { + dev_err(panel->dev, "failed to set display on: %d\n", err); + goto poweroff; + } + + usleep_range(10000, 11000); + + khadas_ts050->prepared = true; + + return 0; + +poweroff: + gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0); + gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1); + + regulator_disable(khadas_ts050->supply); + + return err; +} + +static int khadas_ts050_panel_unprepare(struct drm_panel *panel) +{ + struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel); + int err; + + if (!khadas_ts050->prepared) + return 0; + + khadas_ts050->prepared = false; + + err = mipi_dsi_dcs_enter_sleep_mode(khadas_ts050->link); + if (err < 0) + dev_err(panel->dev, "failed to enter sleep mode: %d\n", err); + + msleep(150); + + gpiod_set_value_cansleep(khadas_ts050->enable_gpio, 0); + gpiod_set_value_cansleep(khadas_ts050->reset_gpio, 1); + + err = regulator_disable(khadas_ts050->supply); + if (err < 0) + return err; + + return 0; +} + +static int khadas_ts050_panel_enable(struct drm_panel *panel) +{ + struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel); + + khadas_ts050->enabled = true; + + return 0; +} + +static int khadas_ts050_panel_disable(struct drm_panel *panel) +{ + struct khadas_ts050_panel *khadas_ts050 = to_khadas_ts050_panel(panel); + int err; + + if (!khadas_ts050->enabled) + return 0; + + err = mipi_dsi_dcs_set_display_off(khadas_ts050->link); + if (err < 0) + dev_err(panel->dev, "failed to set display off: %d\n", err); + + usleep_range(10000, 11000); + + khadas_ts050->enabled = false; + + return 0; +} + +static const struct drm_display_mode default_mode = { + .clock = 120000, + .hdisplay = 1088, + .hsync_start = 1088 + 104, + .hsync_end = 1088 + 104 + 4, + .htotal = 1088 + 104 + 4 + 127, + .vdisplay = 1920, + .vsync_start = 1920 + 4, + .vsync_end = 1920 + 4 + 2, + .vtotal = 1920 + 4 + 2 + 3, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, +}; + +static int khadas_ts050_panel_get_modes(struct drm_panel *panel, + struct drm_connector *connector) +{ + struct drm_display_mode *mode; + + mode = drm_mode_duplicate(connector->dev, &default_mode); + if (!mode) { + dev_err(panel->dev, "failed to add mode %ux%u@%u\n", + default_mode.hdisplay, default_mode.vdisplay, + drm_mode_vrefresh(&default_mode)); + return -ENOMEM; + } + + drm_mode_set_name(mode); + + drm_mode_probed_add(connector, mode); + + connector->display_info.width_mm = 64; + connector->display_info.height_mm = 118; + connector->display_info.bpc = 8; + + return 1; +} + +static const struct drm_panel_funcs khadas_ts050_panel_funcs = { + .prepare = khadas_ts050_panel_prepare, + .unprepare = khadas_ts050_panel_unprepare, + .enable = khadas_ts050_panel_enable, + .disable = khadas_ts050_panel_disable, + .get_modes = khadas_ts050_panel_get_modes, +}; + +static const struct of_device_id khadas_ts050_of_match[] = { + { .compatible = "khadas,ts050", }, + { /* sentinel */ } +}; 
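The fixed default_mode above folds the porch and sync widths into its sums; the refresh rate follows from drm_mode_vrefresh(), which computes clock (in kHz) * 1000 / (htotal * vtotal) with round-to-nearest. A standalone sketch of that arithmetic (mode_vrefresh() here is a hypothetical re-implementation, not the kernel helper):

	#include <stdio.h>

	/* clock in kHz; rounds to nearest, like DIV_ROUND_CLOSEST in the kernel */
	static long mode_vrefresh(long clock_khz, long htotal, long vtotal)
	{
		long den = htotal * vtotal;

		return (clock_khz * 1000 + den / 2) / den;
	}

	int main(void)
	{
		long htotal = 1088 + 104 + 4 + 127;	/* 1323 */
		long vtotal = 1920 + 4 + 2 + 3;		/* 1929 */

		/* 120000 kHz over 1323 * 1929 pixels -> about 47 Hz */
		printf("%ld Hz\n", mode_vrefresh(120000, htotal, vtotal));
		return 0;
	}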
+MODULE_DEVICE_TABLE(of, khadas_ts050_of_match); + +static int khadas_ts050_panel_add(struct khadas_ts050_panel *khadas_ts050) +{ + struct device *dev = &khadas_ts050->link->dev; + int err; + + khadas_ts050->supply = devm_regulator_get(dev, "power"); + if (IS_ERR(khadas_ts050->supply)) + return dev_err_probe(dev, PTR_ERR(khadas_ts050->supply), + "failed to get power supply"); + + khadas_ts050->reset_gpio = devm_gpiod_get(dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(khadas_ts050->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(khadas_ts050->reset_gpio), + "failed to get reset gpio"); + + khadas_ts050->enable_gpio = devm_gpiod_get(dev, "enable", + GPIOD_OUT_HIGH); + if (IS_ERR(khadas_ts050->enable_gpio)) + return dev_err_probe(dev, PTR_ERR(khadas_ts050->enable_gpio), + "failed to get enable gpio"); + + drm_panel_init(&khadas_ts050->base, &khadas_ts050->link->dev, + &khadas_ts050_panel_funcs, DRM_MODE_CONNECTOR_DSI); + + err = drm_panel_of_backlight(&khadas_ts050->base); + if (err) + return err; + + drm_panel_add(&khadas_ts050->base); + + return 0; +} + +static int khadas_ts050_panel_probe(struct mipi_dsi_device *dsi) +{ + struct khadas_ts050_panel *khadas_ts050; + int err; + + dsi->lanes = 4; + dsi->format = MIPI_DSI_FMT_RGB888; + dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | + MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET; + + khadas_ts050 = devm_kzalloc(&dsi->dev, sizeof(*khadas_ts050), + GFP_KERNEL); + if (!khadas_ts050) + return -ENOMEM; + + mipi_dsi_set_drvdata(dsi, khadas_ts050); + khadas_ts050->link = dsi; + + err = khadas_ts050_panel_add(khadas_ts050); + if (err < 0) + return err; + + err = mipi_dsi_attach(dsi); + if (err) + drm_panel_remove(&khadas_ts050->base); + + return err; +} + +static int khadas_ts050_panel_remove(struct mipi_dsi_device *dsi) +{ + struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi); + int err; + + err = mipi_dsi_detach(dsi); + if (err < 0) + dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err); + + drm_panel_remove(&khadas_ts050->base); + drm_panel_disable(&khadas_ts050->base); + drm_panel_unprepare(&khadas_ts050->base); + + return 0; +} + +static void khadas_ts050_panel_shutdown(struct mipi_dsi_device *dsi) +{ + struct khadas_ts050_panel *khadas_ts050 = mipi_dsi_get_drvdata(dsi); + + drm_panel_disable(&khadas_ts050->base); + drm_panel_unprepare(&khadas_ts050->base); +} + +static struct mipi_dsi_driver khadas_ts050_panel_driver = { + .driver = { + .name = "panel-khadas-ts050", + .of_match_table = khadas_ts050_of_match, + }, + .probe = khadas_ts050_panel_probe, + .remove = khadas_ts050_panel_remove, + .shutdown = khadas_ts050_panel_shutdown, +}; +module_mipi_dsi_driver(khadas_ts050_panel_driver); + +MODULE_AUTHOR("Neil Armstrong <[email protected]>"); +MODULE_DESCRIPTION("Khadas TS050 panel driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c index 0c5f22e95c2d..30f28ad4df6b 100644 --- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c +++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c @@ -9,6 +9,7 @@ #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/regulator/consumer.h> #include <video/mipi_display.h> @@ -22,6 +23,7 @@ /* Manufacturer specific Commands send via DSI */ #define MANTIX_CMD_OTP_STOP_RELOAD_MIPI 0x41 #define MANTIX_CMD_INT_CANCEL 0x4C +#define MANTIX_CMD_SPI_FINISH 0x90 struct mantix { struct device *dev; @@ -33,6 +35,8 
@@ struct mantix { struct regulator *avdd; struct regulator *avee; struct regulator *vddi; + + const struct drm_display_mode *default_mode; }; static inline struct mantix *panel_to_mantix(struct drm_panel *panel) @@ -66,6 +70,10 @@ static int mantix_init_sequence(struct mantix *ctx) dsi_generic_write_seq(dsi, 0x80, 0x64, 0x00, 0x64, 0x00, 0x00); msleep(20); + dsi_generic_write_seq(dsi, MANTIX_CMD_SPI_FINISH, 0xA5); + dsi_generic_write_seq(dsi, MANTIX_CMD_OTP_STOP_RELOAD_MIPI, 0x00, 0x2F); + msleep(20); + dev_dbg(dev, "Panel init sequence done\n"); return 0; } @@ -182,7 +190,7 @@ static int mantix_prepare(struct drm_panel *panel) return 0; } -static const struct drm_display_mode default_mode = { +static const struct drm_display_mode default_mode_mantix = { .hdisplay = 720, .hsync_start = 720 + 45, .hsync_end = 720 + 45 + 14, @@ -197,17 +205,32 @@ static const struct drm_display_mode default_mode = { .height_mm = 130, }; +static const struct drm_display_mode default_mode_ys = { + .hdisplay = 720, + .hsync_start = 720 + 45, + .hsync_end = 720 + 45 + 14, + .htotal = 720 + 45 + 14 + 25, + .vdisplay = 1440, + .vsync_start = 1440 + 175, + .vsync_end = 1440 + 175 + 8, + .vtotal = 1440 + 175 + 8 + 50, + .clock = 85298, + .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC, + .width_mm = 65, + .height_mm = 130, +}; + static int mantix_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct mantix *ctx = panel_to_mantix(panel); struct drm_display_mode *mode; - mode = drm_mode_duplicate(connector->dev, &default_mode); + mode = drm_mode_duplicate(connector->dev, ctx->default_mode); if (!mode) { dev_err(ctx->dev, "Failed to add mode %ux%u@%u\n", - default_mode.hdisplay, default_mode.vdisplay, - drm_mode_vrefresh(&default_mode)); + ctx->default_mode->hdisplay, ctx->default_mode->vdisplay, + drm_mode_vrefresh(ctx->default_mode)); return -ENOMEM; } @@ -238,6 +261,7 @@ static int mantix_probe(struct mipi_dsi_device *dsi) ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; + ctx->default_mode = of_device_get_match_data(dev); ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(ctx->reset_gpio)) { @@ -288,8 +312,8 @@ static int mantix_probe(struct mipi_dsi_device *dsi) } dev_info(dev, "%ux%u@%u %ubpp dsi %udl - ready\n", - default_mode.hdisplay, default_mode.vdisplay, - drm_mode_vrefresh(&default_mode), + ctx->default_mode->hdisplay, ctx->default_mode->vdisplay, + drm_mode_vrefresh(ctx->default_mode), mipi_dsi_pixel_format_to_bpp(dsi->format), dsi->lanes); return 0; @@ -316,7 +340,8 @@ static int mantix_remove(struct mipi_dsi_device *dsi) } static const struct of_device_id mantix_of_match[] = { - { .compatible = "mantix,mlaf057we51-x" }, + { .compatible = "mantix,mlaf057we51-x", .data = &default_mode_mantix }, + { .compatible = "ys,ys57pss36bh5gq", .data = &default_mode_ys }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mantix_of_match); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c index 210e70da3a15..603c5dfe8768 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c @@ -23,76 +23,262 @@ #include "panel-samsung-s6e63m0.h" /* Manufacturer Command Set */ -#define MCS_ELVSS_ON 0xb1 -#define MCS_MIECTL1 0xc0 -#define MCS_BCMODE 0xc1 +#define MCS_ELVSS_ON 0xb1 +#define MCS_TEMP_SWIRE 0xb2 +#define MCS_PENTILE_1 0xb3 +#define MCS_PENTILE_2 0xb4 +#define MCS_GAMMA_DELTA_Y_RED 0xb5 +#define MCS_GAMMA_DELTA_X_RED 0xb6 +#define 
MCS_GAMMA_DELTA_Y_GREEN 0xb7 +#define MCS_GAMMA_DELTA_X_GREEN 0xb8 +#define MCS_GAMMA_DELTA_Y_BLUE 0xb9 +#define MCS_GAMMA_DELTA_X_BLUE 0xba +#define MCS_MIECTL1 0xc0 +#define MCS_BCMODE 0xc1 #define MCS_ERROR_CHECK 0xd5 #define MCS_READ_ID1 0xda #define MCS_READ_ID2 0xdb #define MCS_READ_ID3 0xdc #define MCS_LEVEL_2_KEY 0xf0 #define MCS_MTP_KEY 0xf1 -#define MCS_DISCTL 0xf2 -#define MCS_SRCCTL 0xf6 -#define MCS_IFCTL 0xf7 -#define MCS_PANELCTL 0xF8 -#define MCS_PGAMMACTL 0xfa +#define MCS_DISCTL 0xf2 +#define MCS_SRCCTL 0xf6 +#define MCS_IFCTL 0xf7 +#define MCS_PANELCTL 0xf8 +#define MCS_PGAMMACTL 0xfa #define S6E63M0_LCD_ID_VALUE_M2 0xA4 #define S6E63M0_LCD_ID_VALUE_SM2 0xB4 #define S6E63M0_LCD_ID_VALUE_SM2_1 0xB6 -#define NUM_GAMMA_LEVELS 11 -#define GAMMA_TABLE_COUNT 23 +#define NUM_GAMMA_LEVELS 28 +#define GAMMA_TABLE_COUNT 23 -#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1) +#define MAX_BRIGHTNESS (NUM_GAMMA_LEVELS - 1) /* array of gamma tables for gamma value 2.2 */ static u8 const s6e63m0_gamma_22[NUM_GAMMA_LEVELS][GAMMA_TABLE_COUNT] = { - { MCS_PGAMMACTL, 0x00, - 0x18, 0x08, 0x24, 0x78, 0xEC, 0x3D, 0xC8, - 0xC2, 0xB6, 0xC4, 0xC7, 0xB6, 0xD5, 0xD7, - 0xCC, 0x00, 0x39, 0x00, 0x36, 0x00, 0x51 }, - { MCS_PGAMMACTL, 0x00, - 0x18, 0x08, 0x24, 0x73, 0x4A, 0x3D, 0xC0, - 0xC2, 0xB1, 0xBB, 0xBE, 0xAC, 0xCE, 0xCF, - 0xC5, 0x00, 0x5D, 0x00, 0x5E, 0x00, 0x82 }, - { MCS_PGAMMACTL, 0x00, - 0x18, 0x08, 0x24, 0x70, 0x51, 0x3E, 0xBF, - 0xC1, 0xAF, 0xB9, 0xBC, 0xAB, 0xCC, 0xCC, - 0xC2, 0x00, 0x65, 0x00, 0x67, 0x00, 0x8D }, - { MCS_PGAMMACTL, 0x00, - 0x18, 0x08, 0x24, 0x6C, 0x54, 0x3A, 0xBC, - 0xBF, 0xAC, 0xB7, 0xBB, 0xA9, 0xC9, 0xC9, - 0xBE, 0x00, 0x71, 0x00, 0x73, 0x00, 0x9E }, - { MCS_PGAMMACTL, 0x00, - 0x18, 0x08, 0x24, 0x69, 0x54, 0x37, 0xBB, - 0xBE, 0xAC, 0xB4, 0xB7, 0xA6, 0xC7, 0xC8, - 0xBC, 0x00, 0x7B, 0x00, 0x7E, 0x00, 0xAB }, - { MCS_PGAMMACTL, 0x00, - 0x18, 0x08, 0x24, 0x66, 0x55, 0x34, 0xBA, - 0xBD, 0xAB, 0xB1, 0xB5, 0xA3, 0xC5, 0xC6, - 0xB9, 0x00, 0x85, 0x00, 0x88, 0x00, 0xBA }, - { MCS_PGAMMACTL, 0x00, - 0x18, 0x08, 0x24, 0x63, 0x53, 0x31, 0xB8, - 0xBC, 0xA9, 0xB0, 0xB5, 0xA2, 0xC4, 0xC4, - 0xB8, 0x00, 0x8B, 0x00, 0x8E, 0x00, 0xC2 }, - { MCS_PGAMMACTL, 0x00, - 0x18, 0x08, 0x24, 0x62, 0x54, 0x30, 0xB9, - 0xBB, 0xA9, 0xB0, 0xB3, 0xA1, 0xC1, 0xC3, - 0xB7, 0x00, 0x91, 0x00, 0x95, 0x00, 0xDA }, - { MCS_PGAMMACTL, 0x00, - 0x18, 0x08, 0x24, 0x66, 0x58, 0x34, 0xB6, - 0xBA, 0xA7, 0xAF, 0xB3, 0xA0, 0xC1, 0xC2, - 0xB7, 0x00, 0x97, 0x00, 0x9A, 0x00, 0xD1 }, - { MCS_PGAMMACTL, 0x00, - 0x18, 0x08, 0x24, 0x64, 0x56, 0x33, 0xB6, - 0xBA, 0xA8, 0xAC, 0xB1, 0x9D, 0xC1, 0xC1, - 0xB7, 0x00, 0x9C, 0x00, 0x9F, 0x00, 0xD6 }, - { MCS_PGAMMACTL, 0x00, - 0x18, 0x08, 0x24, 0x5f, 0x50, 0x2d, 0xB6, - 0xB9, 0xA7, 0xAd, 0xB1, 0x9f, 0xbe, 0xC0, - 0xB5, 0x00, 0xa0, 0x00, 0xa4, 0x00, 0xdb }, + /* 30 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0xA1, 0x51, 0x7B, 0xCE, + 0xCB, 0xC2, 0xC7, 0xCB, 0xBC, 0xDA, 0xDD, + 0xD3, 0x00, 0x53, 0x00, 0x52, 0x00, 0x6F, }, + /* 40 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x97, 0x58, 0x71, 0xCC, + 0xCB, 0xC0, 0xC5, 0xC9, 0xBA, 0xD9, 0xDC, + 0xD1, 0x00, 0x5B, 0x00, 0x5A, 0x00, 0x7A, }, + /* 50 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x96, 0x58, 0x72, 0xCB, + 0xCA, 0xBF, 0xC6, 0xC9, 0xBA, 0xD6, 0xD9, + 0xCD, 0x00, 0x61, 0x00, 0x61, 0x00, 0x83, }, + /* 60 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x91, 0x5E, 0x6E, 0xC9, + 0xC9, 0xBD, 0xC4, 0xC9, 0xB8, 0xD3, 0xD7, + 0xCA, 0x00, 0x69, 0x00, 0x67, 0x00, 0x8D, }, + /* 70 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 
0x08, 0x24, 0x8E, 0x62, 0x6B, 0xC7, + 0xC9, 0xBB, 0xC3, 0xC7, 0xB7, 0xD3, 0xD7, + 0xCA, 0x00, 0x6E, 0x00, 0x6C, 0x00, 0x94, }, + /* 80 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x89, 0x68, 0x65, 0xC9, + 0xC9, 0xBC, 0xC1, 0xC5, 0xB6, 0xD2, 0xD5, + 0xC9, 0x00, 0x73, 0x00, 0x72, 0x00, 0x9A, }, + /* 90 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x89, 0x69, 0x64, 0xC7, + 0xC8, 0xBB, 0xC0, 0xC5, 0xB4, 0xD2, 0xD5, + 0xC9, 0x00, 0x77, 0x00, 0x76, 0x00, 0xA0, }, + /* 100 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x86, 0x69, 0x60, 0xC6, + 0xC8, 0xBA, 0xBF, 0xC4, 0xB4, 0xD0, 0xD4, + 0xC6, 0x00, 0x7C, 0x00, 0x7A, 0x00, 0xA7, }, + /* 110 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x86, 0x6A, 0x60, 0xC5, + 0xC7, 0xBA, 0xBD, 0xC3, 0xB2, 0xD0, 0xD4, + 0xC5, 0x00, 0x80, 0x00, 0x7E, 0x00, 0xAD, }, + /* 120 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x82, 0x6B, 0x5E, 0xC4, + 0xC8, 0xB9, 0xBD, 0xC2, 0xB1, 0xCE, 0xD2, + 0xC4, 0x00, 0x85, 0x00, 0x82, 0x00, 0xB3, }, + /* 130 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x8C, 0x6C, 0x60, 0xC3, + 0xC7, 0xB9, 0xBC, 0xC1, 0xAF, 0xCE, 0xD2, + 0xC3, 0x00, 0x88, 0x00, 0x86, 0x00, 0xB8, }, + /* 140 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x80, 0x6C, 0x5F, 0xC1, + 0xC6, 0xB7, 0xBC, 0xC1, 0xAE, 0xCD, 0xD0, + 0xC2, 0x00, 0x8C, 0x00, 0x8A, 0x00, 0xBE, }, + /* 150 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x80, 0x6E, 0x5F, 0xC1, + 0xC6, 0xB6, 0xBC, 0xC0, 0xAE, 0xCC, 0xD0, + 0xC2, 0x00, 0x8F, 0x00, 0x8D, 0x00, 0xC2, }, + /* 160 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x7F, 0x6E, 0x5F, 0xC0, + 0xC6, 0xB5, 0xBA, 0xBF, 0xAD, 0xCB, 0xCF, + 0xC0, 0x00, 0x94, 0x00, 0x91, 0x00, 0xC8, }, + /* 170 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x7C, 0x6D, 0x5C, 0xC0, + 0xC6, 0xB4, 0xBB, 0xBE, 0xAD, 0xCA, 0xCF, + 0xC0, 0x00, 0x96, 0x00, 0x94, 0x00, 0xCC, }, + /* 180 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x7B, 0x6D, 0x5B, 0xC0, + 0xC5, 0xB3, 0xBA, 0xBE, 0xAD, 0xCA, 0xCE, + 0xBF, 0x00, 0x99, 0x00, 0x97, 0x00, 0xD0, }, + /* 190 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x7A, 0x6D, 0x59, 0xC1, + 0xC5, 0xB4, 0xB8, 0xBD, 0xAC, 0xC9, 0xCE, + 0xBE, 0x00, 0x9D, 0x00, 0x9A, 0x00, 0xD5, }, + /* 200 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x79, 0x6D, 0x58, 0xC1, + 0xC4, 0xB4, 0xB6, 0xBD, 0xAA, 0xCA, 0xCD, + 0xBE, 0x00, 0x9F, 0x00, 0x9D, 0x00, 0xD9, }, + /* 210 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x79, 0x6D, 0x57, 0xC0, + 0xC4, 0xB4, 0xB7, 0xBD, 0xAA, 0xC8, 0xCC, + 0xBD, 0x00, 0xA2, 0x00, 0xA0, 0x00, 0xDD, }, + /* 220 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x78, 0x6F, 0x58, 0xBF, + 0xC4, 0xB3, 0xB5, 0xBB, 0xA9, 0xC8, 0xCC, + 0xBC, 0x00, 0xA6, 0x00, 0xA3, 0x00, 0xE2, }, + /* 230 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x75, 0x6F, 0x56, 0xBF, + 0xC3, 0xB2, 0xB6, 0xBB, 0xA8, 0xC7, 0xCB, + 0xBC, 0x00, 0xA8, 0x00, 0xA6, 0x00, 0xE6, }, + /* 240 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x76, 0x6F, 0x56, 0xC0, + 0xC3, 0xB2, 0xB5, 0xBA, 0xA8, 0xC6, 0xCB, + 0xBB, 0x00, 0xAA, 0x00, 0xA8, 0x00, 0xE9, }, + /* 250 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x74, 0x6D, 0x54, 0xBF, + 0xC3, 0xB2, 0xB4, 0xBA, 0xA7, 0xC6, 0xCA, + 0xBA, 0x00, 0xAD, 0x00, 0xAB, 0x00, 0xED, }, + /* 260 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x74, 0x6E, 0x54, 0xBD, + 0xC2, 0xB0, 0xB5, 0xBA, 0xA7, 0xC5, 0xC9, + 0xBA, 0x00, 0xB0, 0x00, 0xAE, 0x00, 0xF1, }, + /* 270 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x71, 0x6C, 0x50, 0xBD, + 0xC3, 0xB0, 0xB4, 
0xB8, 0xA6, 0xC6, 0xC9, + 0xBB, 0x00, 0xB2, 0x00, 0xB1, 0x00, 0xF4, }, + /* 280 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x6E, 0x6C, 0x4D, 0xBE, + 0xC3, 0xB1, 0xB3, 0xB8, 0xA5, 0xC6, 0xC8, + 0xBB, 0x00, 0xB4, 0x00, 0xB3, 0x00, 0xF7, }, + /* 290 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x71, 0x70, 0x50, 0xBD, + 0xC1, 0xB0, 0xB2, 0xB8, 0xA4, 0xC6, 0xC7, + 0xBB, 0x00, 0xB6, 0x00, 0xB6, 0x00, 0xFA, }, + /* 300 cd */ + { MCS_PGAMMACTL, 0x02, + 0x18, 0x08, 0x24, 0x70, 0x6E, 0x4E, 0xBC, + 0xC0, 0xAF, 0xB3, 0xB8, 0xA5, 0xC5, 0xC7, + 0xBB, 0x00, 0xB9, 0x00, 0xB8, 0x00, 0xFC, }, +}; + +#define NUM_ACL_LEVELS 7 +#define ACL_TABLE_COUNT 28 + +static u8 const s6e63m0_acl[NUM_ACL_LEVELS][ACL_TABLE_COUNT] = { + /* NULL ACL */ + { MCS_BCMODE, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00 }, + /* 40P ACL */ + { MCS_BCMODE, + 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00, + 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x06, 0x0C, 0x11, 0x16, 0x1C, 0x21, 0x26, + 0x2B, 0x31, 0x36 }, + /* 43P ACL */ + { MCS_BCMODE, + 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00, + 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x07, 0x0C, 0x12, 0x18, 0x1E, 0x23, 0x29, + 0x2F, 0x34, 0x3A }, + /* 45P ACL */ + { MCS_BCMODE, + 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00, + 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x07, 0x0D, 0x13, 0x19, 0x1F, 0x25, 0x2B, + 0x31, 0x37, 0x3D }, + /* 47P ACL */ + { MCS_BCMODE, + 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00, + 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x07, 0x0E, 0x14, 0x1B, 0x21, 0x27, 0x2E, + 0x34, 0x3B, 0x41 }, + /* 48P ACL */ + { MCS_BCMODE, + 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00, + 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x08, 0x0E, 0x15, 0x1B, 0x22, 0x29, 0x2F, + 0x36, 0x3C, 0x43 }, + /* 50P ACL */ + { MCS_BCMODE, + 0x4D, 0x96, 0x1D, 0x00, 0x00, 0x01, 0xDF, 0x00, + 0x00, 0x03, 0x1F, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x01, 0x08, 0x0F, 0x16, 0x1D, 0x24, 0x2A, 0x31, + 0x38, 0x3F, 0x46 }, +}; + +/* This tells us which ACL level goes with which gamma */ +static u8 const s6e63m0_acl_per_gamma[NUM_GAMMA_LEVELS] = { + /* 30 - 60 cd: ACL off/NULL */ + 0, 0, 0, 0, + /* 70 - 250 cd: 40P ACL */ + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + /* 260 - 300 cd: 50P ACL */ + 6, 6, 6, 6, 6, +}; + +/* The ELVSS backlight regulator has 5 levels */ +#define S6E63M0_ELVSS_LEVELS 5 + +static u8 const s6e63m0_elvss_offsets[S6E63M0_ELVSS_LEVELS] = { + 0x00, /* not set */ + 0x0D, /* 30 cd - 100 cd */ + 0x09, /* 110 cd - 160 cd */ + 0x07, /* 170 cd - 200 cd */ + 0x00, /* 210 cd - 300 cd */ +}; + +/* This tells us which ELVSS level goes with which gamma */ +static u8 const s6e63m0_elvss_per_gamma[NUM_GAMMA_LEVELS] = { + /* 30 - 100 cd */ + 1, 1, 1, 1, 1, 1, 1, 1, + /* 110 - 160 cd */ + 2, 2, 2, 2, 2, 2, + /* 170 - 200 cd */ + 3, 3, 3, 3, + /* 210 - 300 cd */ + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, }; struct s6e63m0 { @@ -102,6 +288,8 @@ struct s6e63m0 { struct drm_panel panel; struct backlight_device *bl_dev; u8 lcd_type; + u8 elvss_pulse; + bool dsi_mode; struct regulator_bulk_data supplies[2]; struct gpio_desc *reset_gpio; @@ -187,17 +375,25 @@ static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx) dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3); - /* We attempt to detect what panel is mounted on the controller */ + /* + * We attempt to detect what panel is mounted 
on the controller. + * The third ID byte represents the desired ELVSS pulse for + * some displays. + */ switch (id2) { case S6E63M0_LCD_ID_VALUE_M2: dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI M2\n"); + ctx->elvss_pulse = id3; break; case S6E63M0_LCD_ID_VALUE_SM2: case S6E63M0_LCD_ID_VALUE_SM2_1: dev_info(ctx->dev, "detected LCD panel AMS397GE MIPI SM2\n"); + ctx->elvss_pulse = id3; break; default: dev_info(ctx->dev, "unknown LCD panel type %02x\n", id2); + /* Default ELVSS pulse level */ + ctx->elvss_pulse = 0x16; break; } @@ -208,9 +404,21 @@ static int s6e63m0_check_lcd_type(struct s6e63m0 *ctx) static void s6e63m0_init(struct s6e63m0 *ctx) { - s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL, - 0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f, - 0x63, 0x86, 0x1a, 0x33, 0x0d, 0x00, 0x00); + /* + * We do not know why there is a difference in the DSI mode. + * (No datasheet.) + * + * In the vendor driver this sequence is called + * "SEQ_PANEL_CONDITION_SET" or "DCS_CMD_SEQ_PANEL_COND_SET". + */ + if (ctx->dsi_mode) + s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL, + 0x01, 0x2c, 0x2c, 0x07, 0x07, 0x5f, 0xb3, + 0x6d, 0x97, 0x1d, 0x3a, 0x0f, 0x00, 0x00); + else + s6e63m0_dcs_write_seq_static(ctx, MCS_PANELCTL, + 0x01, 0x27, 0x27, 0x07, 0x07, 0x54, 0x9f, + 0x63, 0x8f, 0x1a, 0x33, 0x0d, 0x00, 0x00); s6e63m0_dcs_write_seq_static(ctx, MCS_DISCTL, 0x02, 0x03, 0x1c, 0x10, 0x10); @@ -226,39 +434,41 @@ static void s6e63m0_init(struct s6e63m0 *ctx) 0x01); s6e63m0_dcs_write_seq_static(ctx, MCS_SRCCTL, - 0x00, 0x8c, 0x07); - s6e63m0_dcs_write_seq_static(ctx, 0xb3, - 0xc); + 0x00, 0x8e, 0x07); + s6e63m0_dcs_write_seq_static(ctx, MCS_PENTILE_1, 0x6c); - s6e63m0_dcs_write_seq_static(ctx, 0xb5, + s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_RED, 0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17, 0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b, 0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a, 0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23, 0x21, 0x20, 0x1e, 0x1e); - s6e63m0_dcs_write_seq_static(ctx, 0xb6, + s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_RED, 0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44, 0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66); - s6e63m0_dcs_write_seq_static(ctx, 0xb7, + s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_GREEN, 0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17, 0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b, 0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a, 0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23, - 0x21, 0x20, 0x1e, 0x1e, 0x00, 0x00, 0x11, - 0x22, 0x33, 0x44, 0x44, 0x44, 0x55, 0x55, - 0x66, 0x66, 0x66, 0x66, 0x66, 0x66); + 0x21, 0x20, 0x1e, 0x1e); + + s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_GREEN, + 0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44, + 0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66, + 0x66, 0x66); - s6e63m0_dcs_write_seq_static(ctx, 0xb9, + s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_Y_BLUE, 0x2c, 0x12, 0x0c, 0x0a, 0x10, 0x0e, 0x17, 0x13, 0x1f, 0x1a, 0x2a, 0x24, 0x1f, 0x1b, 0x1a, 0x17, 0x2b, 0x26, 0x22, 0x20, 0x3a, 0x34, 0x30, 0x2c, 0x29, 0x26, 0x25, 0x23, 0x21, 0x20, 0x1e, 0x1e); - s6e63m0_dcs_write_seq_static(ctx, 0xba, + s6e63m0_dcs_write_seq_static(ctx, MCS_GAMMA_DELTA_X_BLUE, 0x00, 0x00, 0x11, 0x22, 0x33, 0x44, 0x44, 0x44, 0x55, 0x55, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66); @@ -269,7 +479,7 @@ static void s6e63m0_init(struct s6e63m0 *ctx) 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x06, 0x09, 0x0d, 0x0f, 0x12, 0x15, 0x18); - s6e63m0_dcs_write_seq_static(ctx, 0xb2, + s6e63m0_dcs_write_seq_static(ctx, MCS_TEMP_SWIRE, 0x10, 0x10, 0x0b, 0x05); s6e63m0_dcs_write_seq_static(ctx, MCS_MIECTL1, @@ 
-447,15 +657,33 @@ static const struct drm_panel_funcs s6e63m0_drm_funcs = { static int s6e63m0_set_brightness(struct backlight_device *bd) { struct s6e63m0 *ctx = bl_get_data(bd); int brightness = bd->props.brightness; + u8 elvss_val; + u8 elvss_cmd_set[5]; + int i; + + /* Adjust ELVSS to candela level */ + i = s6e63m0_elvss_per_gamma[brightness]; + elvss_val = ctx->elvss_pulse + s6e63m0_elvss_offsets[i]; + if (elvss_val > 0x1f) + elvss_val = 0x1f; + elvss_cmd_set[0] = MCS_TEMP_SWIRE; + elvss_cmd_set[1] = elvss_val; + elvss_cmd_set[2] = elvss_val; + elvss_cmd_set[3] = elvss_val; + elvss_cmd_set[4] = elvss_val; + s6e63m0_dcs_write(ctx, elvss_cmd_set, 5); - /* disable and set new gamma */ + /* Update the ACL per gamma value */ + i = s6e63m0_acl_per_gamma[brightness]; + s6e63m0_dcs_write(ctx, s6e63m0_acl[i], + ARRAY_SIZE(s6e63m0_acl[i])); + + /* Update gamma table */ s6e63m0_dcs_write(ctx, s6e63m0_gamma_22[brightness], ARRAY_SIZE(s6e63m0_gamma_22[brightness])); + s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL, 0x03); - /* update gamma table. */ - s6e63m0_dcs_write_seq_static(ctx, MCS_PGAMMACTL, 0x01); return s6e63m0_clear_error(ctx); } @@ -464,12 +692,12 @@ static const struct backlight_ops s6e63m0_backlight_ops = { .update_status = s6e63m0_set_brightness, }; -static int s6e63m0_backlight_register(struct s6e63m0 *ctx) +static int s6e63m0_backlight_register(struct s6e63m0 *ctx, u32 max_brightness) { struct backlight_properties props = { .type = BACKLIGHT_RAW, - .brightness = MAX_BRIGHTNESS, - .max_brightness = MAX_BRIGHTNESS + .brightness = max_brightness, + .max_brightness = max_brightness, }; struct device *dev = ctx->dev; int ret = 0; @@ -491,12 +719,14 @@ int s6e63m0_probe(struct device *dev, bool dsi_mode) { struct s6e63m0 *ctx; + u32 max_brightness; int ret; ctx = devm_kzalloc(dev, sizeof(struct s6e63m0), GFP_KERNEL); if (!ctx) return -ENOMEM; + ctx->dsi_mode = dsi_mode; ctx->dcs_read = dcs_read; ctx->dcs_write = dcs_write; dev_set_drvdata(dev, ctx); @@ -505,6 +735,14 @@ int s6e63m0_probe(struct device *dev, ctx->enabled = false; ctx->prepared = false; + ret = device_property_read_u32(dev, "max-brightness", &max_brightness); + if (ret) + max_brightness = MAX_BRIGHTNESS; + if (max_brightness > MAX_BRIGHTNESS) { + dev_err(dev, "illegal max brightness specified\n"); + max_brightness = MAX_BRIGHTNESS; + } + ctx->supplies[0].supply = "vdd3"; ctx->supplies[1].supply = "vci"; ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), @@ -524,7 +762,7 @@ int s6e63m0_probe(struct device *dev, dsi_mode ? DRM_MODE_CONNECTOR_DSI : DRM_MODE_CONNECTOR_DPI); - ret = s6e63m0_backlight_register(ctx); + ret = s6e63m0_backlight_register(ctx, max_brightness); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 597f676a6591..4e2dad314c79 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -39,72 +39,145 @@ #include <drm/drm_panel.h> /** - * struct panel_desc - * @modes: Pointer to array of fixed modes appropriate for this panel. If - * only one mode then this can just be the address of this the mode. - * NOTE: cannot be used with "timings" and also if this is specified - * then you cannot override the mode in the device tree. - * @num_modes: Number of elements in modes array. - * @timings: Pointer to array of display timings. NOTE: cannot be used with - * "modes" and also these will be used to validate a device tree - * override if one is present.
- * @num_timings: Number of elements in timings array. - * @bpc: Bits per color. - * @size: Structure containing the physical size of this panel. - * @delay: Structure containing various delay values for this panel. - * @bus_format: See MEDIA_BUS_FMT_... defines. - * @bus_flags: See DRM_BUS_FLAG_... defines. - * @connector_type: LVDS, eDP, DSI, DPI, etc. + * struct panel_desc - Describes a simple panel. */ struct panel_desc { + /** + * @modes: Pointer to array of fixed modes appropriate for this panel. + * + * If only one mode then this can just be the address of the mode. + * NOTE: cannot be used with "timings" and also if this is specified + * then you cannot override the mode in the device tree. + */ const struct drm_display_mode *modes; + + /** @num_modes: Number of elements in modes array. */ unsigned int num_modes; + + /** + * @timings: Pointer to array of display timings + * + * NOTE: cannot be used with "modes" and also these will be used to + * validate a device tree override if one is present. + */ const struct display_timing *timings; + + /** @num_timings: Number of elements in timings array. */ unsigned int num_timings; + /** @bpc: Bits per color. */ unsigned int bpc; - /** - * @width: width (in millimeters) of the panel's active display area - * @height: height (in millimeters) of the panel's active display area - */ + /** @size: Structure containing the physical size of this panel. */ struct { + /** + * @size.width: Width (in mm) of the active display area. + */ unsigned int width; + + /** + * @size.height: Height (in mm) of the active display area. + */ unsigned int height; } size; - /** - * @prepare: the time (in milliseconds) that it takes for the panel to - * become ready and start receiving video data - * @hpd_absent_delay: Add this to the prepare delay if we know Hot - * Plug Detect isn't used. - * @enable: the time (in milliseconds) that it takes for the panel to - * display the first valid frame after starting to receive - * video data - * @disable: the time (in milliseconds) that it takes for the panel to - * turn the display off (no content is visible) - * @unprepare: the time (in milliseconds) that it takes for the panel - * to power itself down completely - */ + /** @delay: Structure containing various delay values for this panel. */ struct { + /** + * @delay.prepare: Time for the panel to become ready. + * + * The time (in milliseconds) that it takes for the panel to + * become ready and start receiving video data + */ unsigned int prepare; + + /** + * @delay.hpd_absent_delay: Time to wait if HPD isn't hooked up. + * + * Add this to the prepare delay if we know Hot Plug Detect + * isn't used. + */ unsigned int hpd_absent_delay; + + /** + * @delay.prepare_to_enable: Time between prepare and enable. + * + * The minimum time, in milliseconds, that needs to have passed + * between when prepare finished and enable may begin. If at + * enable time less time has passed since prepare finished, + * the driver waits for the remaining time. + * + * If a fixed enable delay is also specified, we'll start + * counting before delaying for the fixed delay. + * + * If a fixed prepare delay is also specified, we won't start + * counting until after the fixed delay. We can't overlap this + * fixed delay with the min time because the fixed delay + * doesn't happen at the end of the function if a HPD GPIO was + * specified. + * + * In other words: + * prepare() + * ... 
+ * // do fixed prepare delay + * // wait for HPD GPIO if applicable + * // start counting for prepare_to_enable + * + * enable() + * // do fixed enable delay + * // enforce prepare_to_enable min time + */ + unsigned int prepare_to_enable; + + /** + * @delay.enable: Time for the panel to display a valid frame. + * + * The time (in milliseconds) that it takes for the panel to + * display the first valid frame after starting to receive + * video data. + */ unsigned int enable; + + /** + * @delay.disable: Time for the panel to turn the display off. + * + * The time (in milliseconds) that it takes for the panel to + * turn the display off (no content is visible). + */ unsigned int disable; + + /** + * @delay.unprepare: Time to power down completely. + * + * The time (in milliseconds) that it takes for the panel + * to power itself down completely. + * + * This time is used to prevent a future "prepare" from + * starting until at least this many milliseconds has passed. + * If at prepare time less time has passed since unprepare + * finished, the driver waits for the remaining time. + */ unsigned int unprepare; } delay; + /** @bus_format: See MEDIA_BUS_FMT_... defines. */ u32 bus_format; + + /** @bus_flags: See DRM_BUS_FLAG_... defines. */ u32 bus_flags; + + /** @connector_type: LVDS, eDP, DSI, DPI, etc. */ int connector_type; }; struct panel_simple { struct drm_panel base; - bool prepared; bool enabled; bool no_hpd; + ktime_t prepared_time; + ktime_t unprepared_time; + const struct panel_desc *desc; struct regulator *supply; @@ -232,6 +305,20 @@ static int panel_simple_get_non_edid_modes(struct panel_simple *panel, return num; } +static void panel_simple_wait(ktime_t start_ktime, unsigned int min_ms) +{ + ktime_t now_ktime, min_ktime; + + if (!min_ms) + return; + + min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms)); + now_ktime = ktime_get(); + + if (ktime_before(now_ktime, min_ktime)) + msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1); +} + static int panel_simple_disable(struct drm_panel *panel) { struct panel_simple *p = to_panel_simple(panel); @@ -251,17 +338,15 @@ static int panel_simple_unprepare(struct drm_panel *panel) { struct panel_simple *p = to_panel_simple(panel); - if (!p->prepared) + if (p->prepared_time == 0) return 0; gpiod_set_value_cansleep(p->enable_gpio, 0); regulator_disable(p->supply); - if (p->desc->delay.unprepare) - msleep(p->desc->delay.unprepare); - - p->prepared = false; + p->prepared_time = 0; + p->unprepared_time = ktime_get(); return 0; } @@ -298,9 +383,11 @@ static int panel_simple_prepare(struct drm_panel *panel) int err; int hpd_asserted; - if (p->prepared) + if (p->prepared_time != 0) return 0; + panel_simple_wait(p->unprepared_time, p->desc->delay.unprepare); + err = regulator_enable(p->supply); if (err < 0) { dev_err(panel->dev, "failed to enable supply: %d\n", err); @@ -335,7 +422,7 @@ static int panel_simple_prepare(struct drm_panel *panel) } } - p->prepared = true; + p->prepared_time = ktime_get(); return 0; } @@ -350,6 +437,8 @@ static int panel_simple_enable(struct drm_panel *panel) if (p->desc->delay.enable) msleep(p->desc->delay.enable); + panel_simple_wait(p->prepared_time, p->desc->delay.prepare_to_enable); + p->enabled = true; return 0; @@ -516,7 +605,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) return -ENOMEM; panel->enabled = false; - panel->prepared = false; + panel->prepared_time = 0; panel->desc = desc; panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd"); @@ -1318,6 +1407,51 @@ 
static const struct panel_desc boe_nv101wxmn51 = { }, }; +static const struct drm_display_mode boe_nv110wtm_n61_modes[] = { + { + .clock = 207800, + .hdisplay = 2160, + .hsync_start = 2160 + 48, + .hsync_end = 2160 + 48 + 32, + .htotal = 2160 + 48 + 32 + 100, + .vdisplay = 1440, + .vsync_start = 1440 + 3, + .vsync_end = 1440 + 3 + 6, + .vtotal = 1440 + 3 + 6 + 31, + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, + }, + { + .clock = 138500, + .hdisplay = 2160, + .hsync_start = 2160 + 48, + .hsync_end = 2160 + 48 + 32, + .htotal = 2160 + 48 + 32 + 100, + .vdisplay = 1440, + .vsync_start = 1440 + 3, + .vsync_end = 1440 + 3 + 6, + .vtotal = 1440 + 3 + 6 + 31, + .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC, + }, +}; + +static const struct panel_desc boe_nv110wtm_n61 = { + .modes = boe_nv110wtm_n61_modes, + .num_modes = ARRAY_SIZE(boe_nv110wtm_n61_modes), + .bpc = 8, + .size = { + .width = 233, + .height = 155, + }, + .delay = { + .hpd_absent_delay = 200, + .prepare_to_enable = 80, + .unprepare = 500, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB, + .connector_type = DRM_MODE_CONNECTOR_eDP, +}; + /* Also used for boe_nv133fhm_n62 */ static const struct drm_display_mode boe_nv133fhm_n61_modes = { .clock = 147840, @@ -2265,6 +2399,33 @@ static const struct panel_desc innolux_n116bge = { .width = 256, .height = 144, }, + .bus_format = MEDIA_BUS_FMT_RGB666_1X18, + .connector_type = DRM_MODE_CONNECTOR_eDP, +}; + +static const struct drm_display_mode innolux_n125hce_gn1_mode = { + .clock = 162000, + .hdisplay = 1920, + .hsync_start = 1920 + 40, + .hsync_end = 1920 + 40 + 40, + .htotal = 1920 + 40 + 40 + 80, + .vdisplay = 1080, + .vsync_start = 1080 + 4, + .vsync_end = 1080 + 4 + 4, + .vtotal = 1080 + 4 + 4 + 24, +}; + +static const struct panel_desc innolux_n125hce_gn1 = { + .modes = &innolux_n125hce_gn1_mode, + .num_modes = 1, + .bpc = 8, + .size = { + .width = 276, + .height = 155, + }, + .bus_format = MEDIA_BUS_FMT_RGB888_1X24, + .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB, + .connector_type = DRM_MODE_CONNECTOR_eDP, }; static const struct drm_display_mode innolux_n156bge_l21_mode = { @@ -4009,6 +4170,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "boe,nv101wxmn51", .data = &boe_nv101wxmn51, }, { + .compatible = "boe,nv110wtm-n61", + .data = &boe_nv110wtm_n61, + }, { .compatible = "boe,nv133fhm-n61", .data = &boe_nv133fhm_n61, }, { @@ -4123,6 +4287,9 @@ static const struct of_device_id platform_of_match[] = { .compatible = "innolux,n116bge", .data = &innolux_n116bge, }, { + .compatible = "innolux,n125hce-gn1", + .data = &innolux_n125hce_gn1, + }, { .compatible = "innolux,n156bge-l21", .data = &innolux_n156bge_l21, }, { diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7703.c b/drivers/gpu/drm/panel/panel-sitronix-st7703.c index b30510b1696a..a2c303e5732c 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7703.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7703.c @@ -530,10 +530,8 @@ static int st7703_probe(struct mipi_dsi_device *dsi) return -ENOMEM; ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW); - if (IS_ERR(ctx->reset_gpio)) { - dev_err(dev, "cannot get reset gpio\n"); - return PTR_ERR(ctx->reset_gpio); - } + if (IS_ERR(ctx->reset_gpio)) + return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio), "Failed to get reset gpio\n"); mipi_dsi_set_drvdata(dsi, ctx); @@ -545,19 +543,13 @@ static int st7703_probe(struct mipi_dsi_device *dsi) dsi->lanes = ctx->desc->lanes; ctx->vcc = devm_regulator_get(dev, 
"vcc"); - if (IS_ERR(ctx->vcc)) { - ret = PTR_ERR(ctx->vcc); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to request vcc regulator: %d\n", ret); - return ret; - } + if (IS_ERR(ctx->vcc)) + return dev_err_probe(dev, PTR_ERR(ctx->vcc), "Failed to request vcc regulator\n"); + ctx->iovcc = devm_regulator_get(dev, "iovcc"); - if (IS_ERR(ctx->iovcc)) { - ret = PTR_ERR(ctx->iovcc); - if (ret != -EPROBE_DEFER) - dev_err(dev, "Failed to request iovcc regulator: %d\n", ret); - return ret; - } + if (IS_ERR(ctx->iovcc)) + return dev_err_probe(dev, PTR_ERR(ctx->iovcc), + "Failed to request iovcc regulator\n"); drm_panel_init(&ctx->panel, dev, &st7703_drm_funcs, DRM_MODE_CONNECTOR_DSI); diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c index e95fdfb16b6c..ba0b3ead150f 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c @@ -629,7 +629,7 @@ static int acx565akm_probe(struct spi_device *spi) lcd->spi = spi; mutex_init(&lcd->mutex); - lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW); + lcd->reset_gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(lcd->reset_gpio)) { dev_err(&spi->dev, "failed to get reset GPIO\n"); return PTR_ERR(lcd->reset_gpio); diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 913eaa6d0bc6..56b3f5935703 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -76,6 +76,7 @@ static int panfrost_devfreq_get_dev_status(struct device *dev, } static struct devfreq_dev_profile panfrost_devfreq_profile = { + .timer = DEVFREQ_TIMER_DELAYED, .polling_ms = 50, /* ~3 frames */ .target = panfrost_devfreq_target, .get_dev_status = panfrost_devfreq_get_dev_status, @@ -138,7 +139,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) } pfdevfreq->devfreq = devfreq; - cooling = of_devfreq_cooling_register(dev->of_node, devfreq); + cooling = devfreq_cooling_em_register(devfreq, NULL); if (IS_ERR(cooling)) DRM_DEV_INFO(dev, "Failed to register cooling device\n"); else @@ -165,10 +166,8 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev) pfdevfreq->opp_of_table_added = false; } - if (pfdevfreq->regulators_opp_table) { - dev_pm_opp_put_regulators(pfdevfreq->regulators_opp_table); - pfdevfreq->regulators_opp_table = NULL; - } + dev_pm_opp_put_regulators(pfdevfreq->regulators_opp_table); + pfdevfreq->regulators_opp_table = NULL; } void panfrost_devfreq_resume(struct panfrost_device *pfdev) diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index 57a31dd0ffed..3e0723bc36bd 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -228,7 +228,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t INIT_LIST_HEAD(&obj->mappings.list); mutex_init(&obj->mappings.lock); obj->base.base.funcs = &panfrost_gem_funcs; - obj->base.map_cached = pfdev->coherent; + obj->base.map_wc = !pfdev->coherent; return &obj->base.base; } diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index be8d68fb0e11..7c1b3481b785 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -347,16 +347,9 @@ static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, mmu_tlb_sync_context(cookie); } -static void mmu_tlb_flush_leaf(unsigned long 
iova, size_t size, size_t granule, - void *cookie) -{ - mmu_tlb_sync_context(cookie); -} - static const struct iommu_flush_ops mmu_tlb_ops = { .tlb_flush_all = mmu_tlb_inv_context_s1, .tlb_flush_walk = mmu_tlb_flush_walk, - .tlb_flush_leaf = mmu_tlb_flush_leaf, }; int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv) diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 40e6708fbbe2..e4dcaef6c143 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -228,7 +228,7 @@ static const struct drm_driver pl111_drm_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = pl111_gem_import_sg_table, - .gem_prime_mmap = drm_gem_cma_prime_mmap, + .gem_prime_mmap = drm_gem_prime_mmap, #if defined(CONFIG_DEBUG_FS) .debugfs_init = pl111_debugfs_init, diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h index a7bc31f6d565..06caa61b5d66 100644 --- a/drivers/gpu/drm/qxl/qxl_dev.h +++ b/drivers/gpu/drm/qxl/qxl_dev.h @@ -271,7 +271,7 @@ struct qxl_mode { /* qxl-1 compat: fixed */ struct qxl_modes { uint32_t n_modes; - struct qxl_mode modes[0]; + struct qxl_mode modes[]; }; /* qxl-1 compat: append only */ @@ -382,12 +382,12 @@ struct qxl_data_chunk { uint32_t data_size; QXLPHYSICAL prev_chunk; QXLPHYSICAL next_chunk; - uint8_t data[0]; + uint8_t data[]; }; struct qxl_message { union qxl_release_info release_info; - uint8_t data[0]; + uint8_t data[]; }; struct qxl_compat_update_cmd { @@ -469,7 +469,7 @@ struct qxl_raster_glyph { struct qxl_point glyph_origin; uint16_t width; uint16_t height; - uint8_t data[0]; + uint8_t data[]; }; struct qxl_string { @@ -768,7 +768,7 @@ enum { struct qxl_path_seg { uint32_t flags; uint32_t count; - struct qxl_point_fix points[0]; + struct qxl_point_fix points[]; }; struct qxl_path { @@ -819,7 +819,7 @@ struct qxl_image_descriptor { struct qxl_palette { uint64_t unique; uint16_t num_ents; - uint32_t ents[0]; + uint32_t ents[]; }; struct qxl_bitmap { @@ -838,7 +838,7 @@ struct qxl_surface_id { struct qxl_encoder_data { uint32_t data_size; - uint8_t data[0]; + uint8_t data[]; }; struct qxl_image { @@ -868,7 +868,7 @@ struct qxl_monitors_config { uint16_t count; uint16_t max_allowed; /* If it is 0 no fixed limit is given by the driver */ - struct qxl_head heads[0]; + struct qxl_head heads[]; }; #pragma pack(pop) diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 6e7f16f4cec7..fb5f6a5e81d7 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -163,7 +163,7 @@ DEFINE_DRM_GEM_FOPS(qxl_fops); static int qxl_drm_freeze(struct drm_device *dev) { - struct pci_dev *pdev = dev->pdev; + struct pci_dev *pdev = to_pci_dev(dev->dev); struct qxl_device *qdev = to_qxl(dev); int ret; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 8bd0f916dfbc..83b54f0dad61 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -46,7 +46,6 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_execbuf_util.h> -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_placement.h> #include "qxl_dev.h" diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 16e1e589508e..b6075f452b9e 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -370,13 +370,14 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data, 
struct drm_file *file_priv) { struct qxl_device *qdev = to_qxl(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); struct drm_qxl_clientcap *param = data; int byte, idx; byte = param->index / 8; idx = param->index % 8; - if (dev->pdev->revision < 4) + if (pdev->revision < 4) return -ENOSYS; if (byte >= 58) diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c index 1ba5a702d763..ddf6588a2a38 100644 --- a/drivers/gpu/drm/qxl/qxl_irq.c +++ b/drivers/gpu/drm/qxl/qxl_irq.c @@ -81,6 +81,7 @@ static void qxl_client_monitors_config_work_func(struct work_struct *work) int qxl_irq_init(struct qxl_device *qdev) { + struct pci_dev *pdev = to_pci_dev(qdev->ddev.dev); int ret; init_waitqueue_head(&qdev->display_event); @@ -93,7 +94,7 @@ int qxl_irq_init(struct qxl_device *qdev) atomic_set(&qdev->irq_received_cursor, 0); atomic_set(&qdev->irq_received_io_cmd, 0); qdev->irq_received_error = 0; - ret = drm_irq_install(&qdev->ddev, qdev->ddev.pdev->irq); + ret = drm_irq_install(&qdev->ddev, pdev->irq); qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; if (unlikely(ret != 0)) { DRM_ERROR("Failed installing irq: %d\n", ret); diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index 228e2b9198f1..4a60a52ab62e 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -111,7 +111,6 @@ int qxl_device_init(struct qxl_device *qdev, { int r, sb; - qdev->ddev.pdev = pdev; pci_set_drvdata(pdev, &qdev->ddev); mutex_init(&qdev->gem.mutex); diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index ebf24c9d2bf2..e60a8f88e226 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -50,7 +50,7 @@ static inline void qxl_bo_unreserve(struct qxl_bo *bo) static inline unsigned long qxl_bo_size(struct qxl_bo *bo) { - return bo->tbo.num_pages << PAGE_SHIFT; + return bo->tbo.base.size; } static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo) diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index e75e364655b8..0fcfc952d5e9 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -456,7 +456,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) bo = entry->bo; dma_resv_add_shared_fence(bo->base.resv, &release->base); - ttm_bo_move_to_lru_tail(bo, NULL); + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); dma_resv_unlock(bo->base.resv); } spin_unlock(&ttm_bo_glob.lru_lock); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 128c38c8a837..33c09dc94f8b 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -31,7 +31,6 @@ #include <drm/qxl_drm.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_placement.h> #include "qxl_drv.h" @@ -115,7 +114,7 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo, ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL); if (ttm == NULL) return NULL; - if (ttm_dma_tt_init(ttm, bo, page_flags, ttm_cached)) { + if (ttm_tt_init(ttm, bo, page_flags, ttm_cached)) { kfree(ttm); return NULL; } diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c index 6ac71755c22d..cdeb1db87222 100644 --- a/drivers/gpu/drm/r128/r128_ioc32.c +++ b/drivers/gpu/drm/r128/r128_ioc32.c @@ -1,4 +1,4 @@ -/** +/* * \file r128_ioc32.c * * 32-bit ioctl compatibility routines for the R128 DRM. 
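[Note] On the qxl_dev.h hunks a few blocks up: replacing "array[0]" with "array[]" converts GCC-style zero-length arrays into C99 flexible array members, the form the kernel has standardized on because the compiler and fortified memory helpers can then flag out-of-bounds accesses instead of silently accepting them. A minimal sketch of the pattern with an illustrative struct, not the driver's own (message_alloc and its fields are hypothetical):

    #include <stdlib.h>
    #include <string.h>

    struct message {
            unsigned int data_size;
            unsigned char data[];   /* flexible array member, was data[0] */
    };

    /* Allocate the header and trailing payload in one block; sizeof(*msg)
     * covers only the fixed part, the payload bytes follow it. */
    static struct message *message_alloc(const void *payload, unsigned int len)
    {
            struct message *msg = malloc(sizeof(*msg) + len);

            if (!msg)
                    return NULL;
            msg->data_size = len;
            memcpy(msg->data, payload, len);
            return msg;
    }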
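[Note] Stepping back to the panel-simple changes earlier in this section: the new prepare_to_enable and unprepare handling replaces unconditional msleep()s with minimum-elapsed-time enforcement. The driver records timestamps when prepare and unprepare finish, and panel_simple_wait() sleeps only for whatever portion of the minimum has not already passed, so time naturally spent elsewhere (link training, firmware loading) counts toward the delay. A standalone sketch of that idea against a monotonic clock; the helper names are illustrative, not the driver's:

    #include <stdint.h>
    #include <time.h>

    static int64_t now_ms(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    /* Sleep only for the part of min_ms that has not already elapsed
     * since start_ms; mirrors what panel_simple_wait() does with ktime. */
    static void wait_min_elapsed(int64_t start_ms, unsigned int min_ms)
    {
            int64_t deadline = start_ms + min_ms;
            int64_t now = now_ms();

            if (now < deadline) {
                    struct timespec req = {
                            .tv_sec = (deadline - now) / 1000,
                            .tv_nsec = ((deadline - now) % 1000) * 1000000,
                    };
                    nanosleep(&req, NULL);
            }
    }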
@@ -170,13 +170,13 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = { }; /** - * Called whenever a 32-bit process running under a 64-bit kernel - * performs an ioctl on /dev/dri/card<n>. + * r128_compat_ioctl - Called whenever a 32-bit process running under + * a 64-bit kernel performs an ioctl on /dev/dri/card<n>. * - * \param filp file pointer. - * \param cmd command. - * \param arg user argument. - * \return zero on success or negative number on failure. + * @filp: file pointer. + * @cmd: command. + * @arg: user argument. + * return: zero on success or negative number on failure. */ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 683de198e18d..0fce73b9a646 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -2062,9 +2062,9 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder, struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); /* Funky macbooks */ - if ((dev->pdev->device == 0x71C5) && - (dev->pdev->subsystem_vendor == 0x106b) && - (dev->pdev->subsystem_device == 0x0080)) { + if ((rdev->pdev->device == 0x71C5) && + (rdev->pdev->subsystem_vendor == 0x106b) && + (rdev->pdev->subsystem_device == 0x0080)) { if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) { uint32_t lvtma_bit_depth_control = RREG32(AVIVO_LVTMA_BIT_DEPTH_CONTROL); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index aef4efc692b1..2955bb32d5ad 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -2612,7 +2612,6 @@ int r100_asic_reset(struct radeon_device *rdev, bool hard) void r100_set_common_regs(struct radeon_device *rdev) { - struct drm_device *dev = rdev->ddev; bool force_dac2 = false; u32 tmp; @@ -2630,7 +2629,7 @@ void r100_set_common_regs(struct radeon_device *rdev) * don't report it in the bios connector * table. */ - switch (dev->pdev->device) { + switch (rdev->pdev->device) { /* RN50 */ case 0x515e: case 0x5969: @@ -2640,17 +2639,17 @@ void r100_set_common_regs(struct radeon_device *rdev) case 0x5159: case 0x515a: /* DELL triple head servers */ - if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) && - ((dev->pdev->subsystem_device == 0x016c) || - (dev->pdev->subsystem_device == 0x016d) || - (dev->pdev->subsystem_device == 0x016e) || - (dev->pdev->subsystem_device == 0x016f) || - (dev->pdev->subsystem_device == 0x0170) || - (dev->pdev->subsystem_device == 0x017d) || - (dev->pdev->subsystem_device == 0x017e) || - (dev->pdev->subsystem_device == 0x0183) || - (dev->pdev->subsystem_device == 0x018a) || - (dev->pdev->subsystem_device == 0x019a))) + if ((rdev->pdev->subsystem_vendor == 0x1028 /* DELL */) && + ((rdev->pdev->subsystem_device == 0x016c) || + (rdev->pdev->subsystem_device == 0x016d) || + (rdev->pdev->subsystem_device == 0x016e) || + (rdev->pdev->subsystem_device == 0x016f) || + (rdev->pdev->subsystem_device == 0x0170) || + (rdev->pdev->subsystem_device == 0x017d) || + (rdev->pdev->subsystem_device == 0x017e) || + (rdev->pdev->subsystem_device == 0x0183) || + (rdev->pdev->subsystem_device == 0x018a) || + (rdev->pdev->subsystem_device == 0x019a))) force_dac2 = true; break; } @@ -2798,7 +2797,7 @@ void r100_vram_init_sizes(struct radeon_device *rdev) rdev->mc.real_vram_size = 8192 * 1024; WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size); } - /* Fix for RN50, M6, M7 with 8/16/32(??) 
MBs of VRAM - + /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM - * Novell bug 204882 + along with lots of ubuntu ones */ if (rdev->mc.aper_size > config_aper_size) diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 5f3adba43e47..f09989bdce98 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -75,7 +75,6 @@ #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_execbuf_util.h> #include <drm/drm_gem.h> @@ -2314,6 +2313,9 @@ struct radeon_device { struct device *dev; struct drm_device *ddev; struct pci_dev *pdev; +#ifdef __alpha__ + struct pci_controller *hose; +#endif struct rw_semaphore exclusive_lock; /* ASIC */ union radeon_asic_config config; @@ -2623,14 +2625,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); (rdev->family == CHIP_RV410) || \ (rdev->family == CHIP_RS400) || \ (rdev->family == CHIP_RS480)) -#define ASIC_IS_X2(rdev) ((rdev->ddev->pdev->device == 0x9441) || \ - (rdev->ddev->pdev->device == 0x9443) || \ - (rdev->ddev->pdev->device == 0x944B) || \ - (rdev->ddev->pdev->device == 0x9506) || \ - (rdev->ddev->pdev->device == 0x9509) || \ - (rdev->ddev->pdev->device == 0x950F) || \ - (rdev->ddev->pdev->device == 0x689C) || \ - (rdev->ddev->pdev->device == 0x689D)) +#define ASIC_IS_X2(rdev) ((rdev->pdev->device == 0x9441) || \ + (rdev->pdev->device == 0x9443) || \ + (rdev->pdev->device == 0x944B) || \ + (rdev->pdev->device == 0x9506) || \ + (rdev->pdev->device == 0x9509) || \ + (rdev->pdev->device == 0x950F) || \ + (rdev->pdev->device == 0x689C) || \ + (rdev->pdev->device == 0x689D)) #define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600)) #define ASIC_IS_DCE2(rdev) ((rdev->family == CHIP_RS600) || \ (rdev->family == CHIP_RS690) || \ @@ -2653,14 +2655,14 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE83(rdev) ((rdev->family == CHIP_KABINI) || \ (rdev->family == CHIP_MULLINS)) -#define ASIC_IS_LOMBOK(rdev) ((rdev->ddev->pdev->device == 0x6849) || \ - (rdev->ddev->pdev->device == 0x6850) || \ - (rdev->ddev->pdev->device == 0x6858) || \ - (rdev->ddev->pdev->device == 0x6859) || \ - (rdev->ddev->pdev->device == 0x6840) || \ - (rdev->ddev->pdev->device == 0x6841) || \ - (rdev->ddev->pdev->device == 0x6842) || \ - (rdev->ddev->pdev->device == 0x6843)) +#define ASIC_IS_LOMBOK(rdev) ((rdev->pdev->device == 0x6849) || \ + (rdev->pdev->device == 0x6850) || \ + (rdev->pdev->device == 0x6858) || \ + (rdev->pdev->device == 0x6859) || \ + (rdev->pdev->device == 0x6840) || \ + (rdev->pdev->device == 0x6841) || \ + (rdev->pdev->device == 0x6842) || \ + (rdev->pdev->device == 0x6843)) /* * BIOS helpers. 
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index be96d9b64e43..42301b4e56f5 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -284,46 +284,47 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, uint16_t *line_mux, struct radeon_hpd *hpd) { + struct pci_dev *pdev = to_pci_dev(dev->dev); /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ - if ((dev->pdev->device == 0x791e) && - (dev->pdev->subsystem_vendor == 0x1043) && - (dev->pdev->subsystem_device == 0x826d)) { + if ((pdev->device == 0x791e) && + (pdev->subsystem_vendor == 0x1043) && + (pdev->subsystem_device == 0x826d)) { if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) *connector_type = DRM_MODE_CONNECTOR_DVID; } /* Asrock RS600 board lists the DVI port as HDMI */ - if ((dev->pdev->device == 0x7941) && - (dev->pdev->subsystem_vendor == 0x1849) && - (dev->pdev->subsystem_device == 0x7941)) { + if ((pdev->device == 0x7941) && + (pdev->subsystem_vendor == 0x1849) && + (pdev->subsystem_device == 0x7941)) { if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) *connector_type = DRM_MODE_CONNECTOR_DVID; } /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ - if ((dev->pdev->device == 0x796e) && - (dev->pdev->subsystem_vendor == 0x1462) && - (dev->pdev->subsystem_device == 0x7302)) { + if ((pdev->device == 0x796e) && + (pdev->subsystem_vendor == 0x1462) && + (pdev->subsystem_device == 0x7302)) { if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) return false; } /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ - if ((dev->pdev->device == 0x7941) && - (dev->pdev->subsystem_vendor == 0x147b) && - (dev->pdev->subsystem_device == 0x2412)) { + if ((pdev->device == 0x7941) && + (pdev->subsystem_vendor == 0x147b) && + (pdev->subsystem_device == 0x2412)) { if (*connector_type == DRM_MODE_CONNECTOR_DVII) return false; } /* Falcon NW laptop lists vga ddc line for LVDS */ - if ((dev->pdev->device == 0x5653) && - (dev->pdev->subsystem_vendor == 0x1462) && - (dev->pdev->subsystem_device == 0x0291)) { + if ((pdev->device == 0x5653) && + (pdev->subsystem_vendor == 0x1462) && + (pdev->subsystem_device == 0x0291)) { if (*connector_type == DRM_MODE_CONNECTOR_LVDS) { i2c_bus->valid = false; *line_mux = 53; @@ -331,26 +332,26 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } /* HIS X1300 is DVI+VGA, not DVI+DVI */ - if ((dev->pdev->device == 0x7146) && - (dev->pdev->subsystem_vendor == 0x17af) && - (dev->pdev->subsystem_device == 0x2058)) { + if ((pdev->device == 0x7146) && + (pdev->subsystem_vendor == 0x17af) && + (pdev->subsystem_device == 0x2058)) { if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) return false; } /* Gigabyte X1300 is DVI+VGA, not DVI+DVI */ - if ((dev->pdev->device == 0x7142) && - (dev->pdev->subsystem_vendor == 0x1458) && - (dev->pdev->subsystem_device == 0x2134)) { + if ((pdev->device == 0x7142) && + (pdev->subsystem_vendor == 0x1458) && + (pdev->subsystem_device == 0x2134)) { if (supported_device == ATOM_DEVICE_DFP1_SUPPORT) return false; } /* Funky macbooks */ - if ((dev->pdev->device == 0x71C5) && - (dev->pdev->subsystem_vendor == 0x106b) && - (dev->pdev->subsystem_device == 0x0080)) { + if ((pdev->device == 0x71C5) && + (pdev->subsystem_vendor == 0x106b) && + (pdev->subsystem_device == 0x0080)) { if ((supported_device == 
ATOM_DEVICE_CRT1_SUPPORT) || (supported_device == ATOM_DEVICE_DFP2_SUPPORT)) return false; @@ -366,27 +367,27 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } /* ASUS HD 3600 XT board lists the DVI port as HDMI */ - if ((dev->pdev->device == 0x9598) && - (dev->pdev->subsystem_vendor == 0x1043) && - (dev->pdev->subsystem_device == 0x01da)) { + if ((pdev->device == 0x9598) && + (pdev->subsystem_vendor == 0x1043) && + (pdev->subsystem_device == 0x01da)) { if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { *connector_type = DRM_MODE_CONNECTOR_DVII; } } /* ASUS HD 3600 board lists the DVI port as HDMI */ - if ((dev->pdev->device == 0x9598) && - (dev->pdev->subsystem_vendor == 0x1043) && - (dev->pdev->subsystem_device == 0x01e4)) { + if ((pdev->device == 0x9598) && + (pdev->subsystem_vendor == 0x1043) && + (pdev->subsystem_device == 0x01e4)) { if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { *connector_type = DRM_MODE_CONNECTOR_DVII; } } /* ASUS HD 3450 board lists the DVI port as HDMI */ - if ((dev->pdev->device == 0x95C5) && - (dev->pdev->subsystem_vendor == 0x1043) && - (dev->pdev->subsystem_device == 0x01e2)) { + if ((pdev->device == 0x95C5) && + (pdev->subsystem_vendor == 0x1043) && + (pdev->subsystem_device == 0x01e2)) { if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) { *connector_type = DRM_MODE_CONNECTOR_DVII; } @@ -411,9 +412,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, * with different crtcs which isn't possible on the hardware * side and leaves no crtcs for LVDS or VGA. */ - if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) && - (dev->pdev->subsystem_vendor == 0x1025) && - (dev->pdev->subsystem_device == 0x013c)) { + if (((pdev->device == 0x95c4) || (pdev->device == 0x9591)) && + (pdev->subsystem_vendor == 0x1025) && + (pdev->subsystem_device == 0x013c)) { if ((*connector_type == DRM_MODE_CONNECTOR_DVII) && (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) { /* actually it's a DVI-D port not DVI-I */ @@ -425,9 +426,9 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, /* XFX Pine Group device rv730 reports no VGA DDC lines * even though they are wired up to record 0x93 */ - if ((dev->pdev->device == 0x9498) && - (dev->pdev->subsystem_vendor == 0x1682) && - (dev->pdev->subsystem_device == 0x2452) && + if ((pdev->device == 0x9498) && + (pdev->subsystem_vendor == 0x1682) && + (pdev->subsystem_device == 0x2452) && (i2c_bus->valid == false) && !(supported_device & (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))) { struct radeon_device *rdev = dev->dev_private; @@ -435,11 +436,11 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ - if (((dev->pdev->device == 0x9802) || - (dev->pdev->device == 0x9805) || - (dev->pdev->device == 0x9806)) && - (dev->pdev->subsystem_vendor == 0x1734) && - (dev->pdev->subsystem_device == 0x11bd)) { + if (((pdev->device == 0x9802) || + (pdev->device == 0x9805) || + (pdev->device == 0x9806)) && + (pdev->subsystem_vendor == 0x1734) && + (pdev->subsystem_device == 0x11bd)) { if (*connector_type == DRM_MODE_CONNECTOR_VGA) { *connector_type = DRM_MODE_CONNECTOR_DVII; *line_mux = 0x3103; diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c index bb29cf02974d..500796dc5d74 100644 --- a/drivers/gpu/drm/radeon/radeon_bios.c +++ b/drivers/gpu/drm/radeon/radeon_bios.c @@ -528,7 +528,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) crtc_ext_cntl = 
RREG32(RADEON_CRTC_EXT_CNTL); fp2_gen_cntl = 0; - if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) { + if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) { fp2_gen_cntl = RREG32(RADEON_FP2_GEN_CNTL); } @@ -565,7 +565,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) (RADEON_CRTC_SYNC_TRISTAT | RADEON_CRTC_DISPLAY_DIS))); - if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) { + if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) { WREG32(RADEON_FP2_GEN_CNTL, (fp2_gen_cntl & ~RADEON_FP2_ON)); } @@ -583,7 +583,7 @@ static bool legacy_read_disabled_bios(struct radeon_device *rdev) WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl); } WREG32(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl); - if (rdev->ddev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) { + if (rdev->pdev->device == PCI_DEVICE_ID_ATI_RADEON_QY) { WREG32(RADEON_FP2_GEN_CNTL, fp2_gen_cntl); } return r; diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index ff2135059c07..783a6b8802d5 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -894,13 +894,13 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct /* quirks */ /* Radeon 7000 (RV100) */ - if (((dev->pdev->device == 0x5159) && - (dev->pdev->subsystem_vendor == 0x174B) && - (dev->pdev->subsystem_device == 0x7c28)) || + if (((rdev->pdev->device == 0x5159) && + (rdev->pdev->subsystem_vendor == 0x174B) && + (rdev->pdev->subsystem_device == 0x7c28)) || /* Radeon 9100 (R200) */ - ((dev->pdev->device == 0x514D) && - (dev->pdev->subsystem_vendor == 0x174B) && - (dev->pdev->subsystem_device == 0x7149))) { + ((rdev->pdev->device == 0x514D) && + (rdev->pdev->subsystem_vendor == 0x174B) && + (rdev->pdev->subsystem_device == 0x7149))) { /* vbios value is bad, use the default */ found = 0; } @@ -2221,20 +2221,21 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, struct radeon_i2c_bus_rec *ddc_i2c, struct radeon_hpd *hpd) { + struct radeon_device *rdev = dev->dev_private; /* Certain IBM chipset RN50s have a BIOS reporting two VGAs, one with VGA DDC and one with CRT2 DDC. 
- kill the CRT2 DDC one */ - if (dev->pdev->device == 0x515e && - dev->pdev->subsystem_vendor == 0x1014) { + if (rdev->pdev->device == 0x515e && + rdev->pdev->subsystem_vendor == 0x1014) { if (*legacy_connector == CONNECTOR_CRT_LEGACY && ddc_i2c->mask_clk_reg == RADEON_GPIO_CRT2_DDC) return false; } /* X300 card with extra non-existent DVI port */ - if (dev->pdev->device == 0x5B60 && - dev->pdev->subsystem_vendor == 0x17af && - dev->pdev->subsystem_device == 0x201e && bios_index == 2) { + if (rdev->pdev->device == 0x5B60 && + rdev->pdev->subsystem_vendor == 0x17af && + rdev->pdev->subsystem_device == 0x201e && bios_index == 2) { if (*legacy_connector == CONNECTOR_DVI_I_LEGACY) return false; } @@ -2244,22 +2245,24 @@ static bool radeon_apply_legacy_quirks(struct drm_device *dev, static bool radeon_apply_legacy_tv_quirks(struct drm_device *dev) { + struct radeon_device *rdev = dev->dev_private; + /* Acer 5102 has non-existent TV port */ - if (dev->pdev->device == 0x5975 && - dev->pdev->subsystem_vendor == 0x1025 && - dev->pdev->subsystem_device == 0x009f) + if (rdev->pdev->device == 0x5975 && + rdev->pdev->subsystem_vendor == 0x1025 && + rdev->pdev->subsystem_device == 0x009f) return false; /* HP dc5750 has non-existent TV port */ - if (dev->pdev->device == 0x5974 && - dev->pdev->subsystem_vendor == 0x103c && - dev->pdev->subsystem_device == 0x280a) + if (rdev->pdev->device == 0x5974 && + rdev->pdev->subsystem_vendor == 0x103c && + rdev->pdev->subsystem_device == 0x280a) return false; /* MSI S270 has non-existent TV port */ - if (dev->pdev->device == 0x5955 && - dev->pdev->subsystem_vendor == 0x1462 && - dev->pdev->subsystem_device == 0x0131) + if (rdev->pdev->device == 0x5955 && + rdev->pdev->subsystem_vendor == 0x1462 && + rdev->pdev->subsystem_device == 0x0131) return false; return true; @@ -2413,9 +2416,9 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) /* RV100 board with external TDMS bit mis-set. * Actually uses internal TMDS, clear the bit. 
*/ - if (dev->pdev->device == 0x5159 && - dev->pdev->subsystem_vendor == 0x1014 && - dev->pdev->subsystem_device == 0x029A) { + if (rdev->pdev->device == 0x5159 && + rdev->pdev->subsystem_vendor == 0x1014 && + rdev->pdev->subsystem_device == 0x029A) { tmp &= ~(1 << 4); } if ((tmp >> 4) & 0x1) { @@ -2707,9 +2710,9 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev) /* boards with a thermal chip, but no overdrive table */ /* Asus 9600xt has an f75375 on the monid bus */ - if ((dev->pdev->device == 0x4152) && - (dev->pdev->subsystem_vendor == 0x1043) && - (dev->pdev->subsystem_device == 0xc002)) { + if ((rdev->pdev->device == 0x4152) && + (rdev->pdev->subsystem_vendor == 0x1043) && + (rdev->pdev->subsystem_device == 0xc002)) { i2c_bus = combios_setup_i2c_bus(rdev, DDC_MONID, 0, 0); rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); if (rdev->pm.i2c_bus) { diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index c6262fce7440..35e937d39b51 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -130,8 +130,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p) * IGP chips to avoid image corruptions */ if (p->ring == R600_RING_TYPE_UVD_INDEX && - (i <= 0 || pci_find_capability(p->rdev->ddev->pdev, - PCI_CAP_ID_AGP) || + (i <= 0 || pci_find_capability(p->rdev->pdev, PCI_CAP_ID_AGP) || p->rdev->family == CHIP_RS780 || p->rdev->family == CHIP_RS880)) { @@ -401,7 +400,8 @@ static int cmp_size_smaller_first(void *priv, struct list_head *a, struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head); /* Sort A before B if A is smaller. */ - return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; + return (int)la->robj->tbo.mem.num_pages - + (int)lb->robj->tbo.mem.num_pages; } /** diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index ebccaa5b2d0e..2cbf14fc6ece 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1562,6 +1562,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon, bool freeze) { struct radeon_device *rdev; + struct pci_dev *pdev; struct drm_crtc *crtc; struct drm_connector *connector; int i, r; @@ -1571,6 +1572,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, } rdev = dev->dev_private; + pdev = to_pci_dev(dev->dev); if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; @@ -1636,14 +1638,14 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, radeon_agp_suspend(rdev); - pci_save_state(dev->pdev); + pci_save_state(pdev); if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) { rdev->asic->asic_reset(rdev, true); - pci_restore_state(dev->pdev); + pci_restore_state(pdev); } else if (suspend) { /* Shut down the device */ - pci_disable_device(dev->pdev); - pci_set_power_state(dev->pdev, PCI_D3hot); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); } if (fbcon) { @@ -1665,6 +1667,7 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) { struct drm_connector *connector; struct radeon_device *rdev = dev->dev_private; + struct pci_dev *pdev = to_pci_dev(dev->dev); struct drm_crtc *crtc; int r; @@ -1675,9 +1678,9 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) console_lock(); } if (resume) { - pci_set_power_state(dev->pdev, PCI_D0); - pci_restore_state(dev->pdev); - if (pci_enable_device(dev->pdev)) { + pci_set_power_state(pdev, PCI_D0); + 
pci_restore_state(pdev); + if (pci_enable_device(pdev)) { if (fbcon) console_unlock(); return -1; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 3a6fedad002d..652af7a134bd 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1317,7 +1317,7 @@ radeon_user_framebuffer_create(struct drm_device *dev, obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); if (obj == NULL) { - dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, " + dev_err(dev->dev, "No GEM object associated to handle 0x%08X, " "can't create framebuffer\n", mode_cmd->handles[0]); return ERR_PTR(-ENOENT); } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index e45d7344ac2b..efeb115ae70e 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -342,14 +342,9 @@ static int radeon_pci_probe(struct pci_dev *pdev, if (ret) goto err_free; - dev->pdev = pdev; -#ifdef __alpha__ - dev->hose = pdev->sysdata; -#endif - pci_set_drvdata(pdev, dev); - if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) + if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) dev->agp = drm_agp_init(dev); if (dev->agp) { dev->agp->agp_mtrr = arch_phys_wc_add( diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index fc4212633bdf..0b206b052972 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c @@ -290,7 +290,7 @@ static int radeonfb_create(struct drm_fb_helper *helper, DRM_INFO("fb depth is %d\n", fb->format->depth); DRM_INFO(" pitch is %d\n", fb->pitches[0]); - vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); + vga_switcheroo_client_fb_set(rdev->pdev, info); return 0; out: diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index b6b21d2e7262..941826923247 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -651,7 +651,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, } if (args->offset < RADEON_VA_RESERVED_SIZE) { - dev_err(&dev->pdev->dev, + dev_err(dev->dev, "offset 0x%lX is in reserved area 0x%X\n", (unsigned long)args->offset, RADEON_VA_RESERVED_SIZE); @@ -665,7 +665,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, */ invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM; if ((args->flags & invalid_flags)) { - dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n", + dev_err(dev->dev, "invalid flags 0x%08X vs 0x%08X\n", args->flags, invalid_flags); args->operation = RADEON_VA_RESULT_ERROR; return -EINVAL; @@ -676,7 +676,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data, case RADEON_VA_UNMAP: break; default: - dev_err(&dev->pdev->dev, "unsupported operation %d\n", + dev_err(dev->dev, "unsupported operation %d\n", args->operation); args->operation = RADEON_VA_RESULT_ERROR; return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c index e543d993f73e..314d066e68e9 100644 --- a/drivers/gpu/drm/radeon/radeon_i2c.c +++ b/drivers/gpu/drm/radeon/radeon_i2c.c @@ -919,7 +919,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev, i2c->rec = *rec; i2c->adapter.owner = THIS_MODULE; i2c->adapter.class = I2C_CLASS_DDC; - i2c->adapter.dev.parent = &dev->pdev->dev; + i2c->adapter.dev.parent = dev->dev; i2c->dev = dev; i2c_set_adapdata(&i2c->adapter, i2c); mutex_init(&i2c->mutex); diff --git 
a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c index b8b7f627f0a9..84d0b1a3355f 100644 --- a/drivers/gpu/drm/radeon/radeon_irq_kms.c +++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c @@ -314,7 +314,7 @@ int radeon_irq_kms_init(struct radeon_device *rdev) INIT_WORK(&rdev->audio_work, r600_audio_update_hdmi); rdev->irq.installed = true; - r = drm_irq_install(rdev->ddev, rdev->ddev->pdev->irq); + r = drm_irq_install(rdev->ddev, rdev->pdev->irq); if (r) { rdev->irq.installed = false; flush_delayed_work(&rdev->hotplug_work); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 50cee4880bb4..2479d6ab7a36 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -76,7 +76,7 @@ void radeon_driver_unload_kms(struct drm_device *dev) } radeon_acpi_fini(rdev); - + radeon_modeset_fini(rdev); radeon_device_fini(rdev); @@ -105,6 +105,7 @@ done_free: */ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) { + struct pci_dev *pdev = to_pci_dev(dev->dev); struct radeon_device *rdev; int r, acpi_status; @@ -114,10 +115,14 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) } dev->dev_private = (void *)rdev; +#ifdef __alpha__ + rdev->hose = pdev->sysdata; +#endif + /* update BUS flag */ - if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP)) { + if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) { flags |= RADEON_IS_AGP; - } else if (pci_is_pcie(dev->pdev)) { + } else if (pci_is_pcie(pdev)) { flags |= RADEON_IS_PCIE; } else { flags |= RADEON_IS_PCI; @@ -126,7 +131,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) if ((radeon_runtime_pm != 0) && radeon_has_atpx() && ((flags & RADEON_IS_IGP) == 0) && - !pci_is_thunderbolt_attached(dev->pdev)) + !pci_is_thunderbolt_attached(pdev)) flags |= RADEON_IS_PX; /* radeon_device_init should report only fatal error @@ -135,9 +140,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) * properly initialize the GPU MC controller and permit * VRAM allocation */ - r = radeon_device_init(rdev, dev, dev->pdev, flags); + r = radeon_device_init(rdev, dev, pdev, flags); if (r) { - dev_err(&dev->pdev->dev, "Fatal error during GPU init\n"); + dev_err(dev->dev, "Fatal error during GPU init\n"); goto out; } @@ -147,7 +152,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) */ r = radeon_modeset_init(rdev); if (r) - dev_err(&dev->pdev->dev, "Fatal error during modeset init\n"); + dev_err(dev->dev, "Fatal error during modeset init\n"); /* Call ACPI methods: require modeset init * but failure is not fatal @@ -155,8 +160,7 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) if (!r) { acpi_status = radeon_acpi_init(rdev); if (acpi_status) - dev_dbg(&dev->pdev->dev, - "Error during ACPI methods call\n"); + dev_dbg(dev->dev, "Error during ACPI methods call\n"); } if (radeon_is_px(dev)) { @@ -239,7 +243,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) switch (info->request) { case RADEON_INFO_DEVICE_ID: - *value = dev->pdev->device; + *value = to_pci_dev(dev->dev)->device; break; case RADEON_INFO_NUM_GB_PIPES: *value = rdev->num_gb_pipes; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index e64fd0ce6707..7fdb77d48d6a 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -974,9 +974,9 @@ static 
void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, /* XXX: these are oem specific */ if (ASIC_IS_R300(rdev)) { - if ((dev->pdev->device == 0x4850) && - (dev->pdev->subsystem_vendor == 0x1028) && - (dev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */ + if ((rdev->pdev->device == 0x4850) && + (rdev->pdev->subsystem_vendor == 0x1028) && + (rdev->pdev->subsystem_device == 0x2001)) /* Dell Inspiron 8600 */ fp2_gen_cntl |= R300_FP2_DVO_CLOCK_MODE_SINGLE; else fp2_gen_cntl |= RADEON_FP2_PAD_FLOP_EN | R300_FP2_DVO_CLOCK_MODE_SINGLE; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 8bc5ad1d6585..9b81786782de 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -53,20 +53,19 @@ static void radeon_update_memory_usage(struct radeon_bo *bo, unsigned mem_type, int sign) { struct radeon_device *rdev = bo->rdev; - u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT; switch (mem_type) { case TTM_PL_TT: if (sign > 0) - atomic64_add(size, &rdev->gtt_usage); + atomic64_add(bo->tbo.base.size, &rdev->gtt_usage); else - atomic64_sub(size, &rdev->gtt_usage); + atomic64_sub(bo->tbo.base.size, &rdev->gtt_usage); break; case TTM_PL_VRAM: if (sign > 0) - atomic64_add(size, &rdev->vram_usage); + atomic64_add(bo->tbo.base.size, &rdev->vram_usage); else - atomic64_sub(size, &rdev->vram_usage); + atomic64_sub(bo->tbo.base.size, &rdev->vram_usage); break; } } @@ -255,7 +254,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr) } return 0; } - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); + r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap); if (r) { return r; } @@ -609,7 +608,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo) out: radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch, bo->tbo.mem.start << PAGE_SHIFT, - bo->tbo.num_pages << PAGE_SHIFT); + bo->tbo.base.size); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index d606e9a935e3..9896d8231fe5 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -109,12 +109,12 @@ static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo) static inline unsigned long radeon_bo_size(struct radeon_bo *bo) { - return bo->tbo.num_pages << PAGE_SHIFT; + return bo->tbo.base.size; } static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo) { - return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE; + return bo->tbo.base.size / RADEON_GPU_PAGE_SIZE; } static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo) diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c index dd482edc819c..ab29eb9e8667 100644 --- a/drivers/gpu/drm/radeon/radeon_prime.c +++ b/drivers/gpu/drm/radeon/radeon_prime.c @@ -35,9 +35,9 @@ struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj) { struct radeon_bo *bo = gem_to_radeon_bo(obj); - int npages = bo->tbo.num_pages; - return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, npages); + return drm_prime_pages_to_sg(obj->dev, bo->tbo.ttm->pages, + bo->tbo.ttm->num_pages); } struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h index c93f3ab3c4e3..1729cb9a95c5 100644 --- a/drivers/gpu/drm/radeon/radeon_trace.h +++ b/drivers/gpu/drm/radeon/radeon_trace.h @@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create, 
TP_fast_assign( __entry->bo = bo; - __entry->pages = bo->tbo.num_pages; + __entry->pages = bo->tbo.mem.num_pages; ), TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) ); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 28b300ed200e..c6d575f50c48 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -46,7 +46,6 @@ #include <drm/radeon_drm.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_placement.h> #include "radeon_reg.h" @@ -217,27 +216,15 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = &bo->mem; int r; - if ((old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_VRAM) || - (old_mem->mem_type == TTM_PL_VRAM && - new_mem->mem_type == TTM_PL_SYSTEM)) { - hop->fpfn = 0; - hop->lpfn = 0; - hop->mem_type = TTM_PL_TT; - hop->flags = 0; - return -EMULTIHOP; - } - if (new_mem->mem_type == TTM_PL_TT) { r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem); if (r) return r; } - radeon_bo_move_notify(bo, evict, new_mem); r = ttm_bo_wait_ctx(bo, ctx); if (r) - goto fail; + return r; /* Can't move a pinned BO */ rbo = container_of(bo, struct radeon_bo, tbo); @@ -247,12 +234,12 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, rdev = radeon_get_rdev(bo->bdev); if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { ttm_bo_move_null(bo, new_mem); - return 0; + goto out; } if (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_TT) { ttm_bo_move_null(bo, new_mem); - return 0; + goto out; } if (old_mem->mem_type == TTM_PL_TT && @@ -260,31 +247,37 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, radeon_ttm_tt_unbind(bo->bdev, bo->ttm); ttm_resource_free(bo, &bo->mem); ttm_bo_assign_mem(bo, new_mem); - return 0; + goto out; } - if (!rdev->ring[radeon_copy_ring_index(rdev)].ready || - rdev->asic->copy.copy == NULL) { - /* use memcpy */ - goto memcpy; + if (rdev->ring[radeon_copy_ring_index(rdev)].ready && + rdev->asic->copy.copy != NULL) { + if ((old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_VRAM) || + (old_mem->mem_type == TTM_PL_VRAM && + new_mem->mem_type == TTM_PL_SYSTEM)) { + hop->fpfn = 0; + hop->lpfn = 0; + hop->mem_type = TTM_PL_TT; + hop->flags = 0; + return -EMULTIHOP; + } + + r = radeon_move_blit(bo, evict, new_mem, old_mem); + } else { + r = -ENODEV; } - r = radeon_move_blit(bo, evict, new_mem, old_mem); if (r) { -memcpy: r = ttm_bo_move_memcpy(bo, ctx, new_mem); - if (r) { - goto fail; - } + if (r) + return r; } +out: /* update statistics */ - atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved); + atomic64_add(bo->base.size, &rdev->num_bytes_moved); + radeon_bo_move_notify(bo, evict, new_mem); return 0; -fail: - swap(*new_mem, bo->mem); - radeon_bo_move_notify(bo, false, new_mem); - swap(*new_mem, bo->mem); - return r; } static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) @@ -331,7 +324,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso * access, as done in ttm_bo_vm_fault(). 
*/ mem->bus.offset = (mem->bus.offset & 0x0ffffffffUL) + - rdev->ddev->hose->dense_mem_base; + rdev->hose->dense_mem_base; #endif break; default: @@ -402,8 +395,8 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt * if (r) goto release_sg; - drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, - gtt->ttm.dma_address, ttm->num_pages); + drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, + ttm->num_pages); return 0; @@ -541,7 +534,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo, else caching = ttm_cached; - if (ttm_dma_tt_init(>t->ttm, bo, page_flags, caching)) { + if (ttm_sg_tt_init(>t->ttm, bo, page_flags, caching)) { kfree(gtt); return NULL; } @@ -579,8 +572,8 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev, } if (slave && ttm->sg) { - drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, - gtt->ttm.dma_address, ttm->num_pages); + drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, + ttm->num_pages); return 0; } diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c index 17390074277a..24ad12409120 100644 --- a/drivers/gpu/drm/radeon/rs780_dpm.c +++ b/drivers/gpu/drm/radeon/rs780_dpm.c @@ -223,16 +223,15 @@ static void rs780_preset_starting_fbdiv(struct radeon_device *rdev) static void rs780_voltage_scaling_init(struct radeon_device *rdev) { struct igp_power_info *pi = rs780_get_pi(rdev); - struct drm_device *dev = rdev->ddev; u32 fv_throt_pwm_fb_div_range[3]; u32 fv_throt_pwm_range[4]; - if (dev->pdev->device == 0x9614) { + if (rdev->pdev->device == 0x9614) { fv_throt_pwm_fb_div_range[0] = RS780D_FVTHROTPWMFBDIVRANGEREG0_DFLT; fv_throt_pwm_fb_div_range[1] = RS780D_FVTHROTPWMFBDIVRANGEREG1_DFLT; fv_throt_pwm_fb_div_range[2] = RS780D_FVTHROTPWMFBDIVRANGEREG2_DFLT; - } else if ((dev->pdev->device == 0x9714) || - (dev->pdev->device == 0x9715)) { + } else if ((rdev->pdev->device == 0x9714) || + (rdev->pdev->device == 0x9715)) { fv_throt_pwm_fb_div_range[0] = RS880D_FVTHROTPWMFBDIVRANGEREG0_DFLT; fv_throt_pwm_fb_div_range[1] = RS880D_FVTHROTPWMFBDIVRANGEREG1_DFLT; fv_throt_pwm_fb_div_range[2] = RS880D_FVTHROTPWMFBDIVRANGEREG2_DFLT; diff --git a/drivers/gpu/drm/rcar-du/rcar_cmm.c b/drivers/gpu/drm/rcar-du/rcar_cmm.c index c578095b09a5..382d53f8a22e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_cmm.c +++ b/drivers/gpu/drm/rcar-du/rcar_cmm.c @@ -122,7 +122,7 @@ int rcar_cmm_enable(struct platform_device *pdev) { int ret; - ret = pm_runtime_get_sync(&pdev->dev); + ret = pm_runtime_resume_and_get(&pdev->dev); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index b5fb941e0f53..ea7e39d03545 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -730,13 +730,10 @@ static void rcar_du_crtc_atomic_enable(struct drm_crtc *crtc, */ if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) && rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) { - struct rcar_du_encoder *encoder = - rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index]; + struct drm_bridge *bridge = rcdu->lvds[rcrtc->index]; const struct drm_display_mode *mode = &crtc->state->adjusted_mode; - struct drm_bridge *bridge; - bridge = drm_bridge_chain_get_first_bridge(&encoder->base); rcar_lvds_clk_enable(bridge, mode->clock * 1000); } @@ -764,15 +761,12 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc, if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index) && rstate->outputs == BIT(RCAR_DU_OUTPUT_DPAD0)) { - 
struct rcar_du_encoder *encoder = - rcdu->encoders[RCAR_DU_OUTPUT_LVDS0 + rcrtc->index]; - struct drm_bridge *bridge; + struct drm_bridge *bridge = rcdu->lvds[rcrtc->index]; /* * Disable the LVDS clock output, see * rcar_du_crtc_atomic_enable(). */ - bridge = drm_bridge_chain_get_first_bridge(&encoder->base); rcar_lvds_clk_disable(bridge); } @@ -1144,7 +1138,6 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = { .set_crc_source = rcar_du_crtc_set_crc_source, .verify_crc_source = rcar_du_crtc_verify_crc_source, .get_crc_sources = rcar_du_crtc_get_crc_sources, - .gamma_set = drm_atomic_helper_legacy_gamma_set, }; /* ----------------------------------------------------------------------------- @@ -1257,7 +1250,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex, else primary = &rgrp->planes[swindex % 2].plane; - ret = drm_crtc_init_with_planes(rcdu->ddev, crtc, primary, NULL, + ret = drm_crtc_init_with_planes(&rcdu->ddev, crtc, primary, NULL, rcdu->info->gen <= 2 ? &crtc_funcs_gen2 : &crtc_funcs_gen3, NULL); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 600056dff374..bfbff90588cb 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -18,10 +18,11 @@ #include <linux/wait.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_fb_helper.h> -#include <drm/drm_drv.h> #include <drm/drm_gem_cma_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> #include "rcar_du_drv.h" @@ -527,14 +528,14 @@ static int rcar_du_pm_suspend(struct device *dev) { struct rcar_du_device *rcdu = dev_get_drvdata(dev); - return drm_mode_config_helper_suspend(rcdu->ddev); + return drm_mode_config_helper_suspend(&rcdu->ddev); } static int rcar_du_pm_resume(struct device *dev) { struct rcar_du_device *rcdu = dev_get_drvdata(dev); - return drm_mode_config_helper_resume(rcdu->ddev); + return drm_mode_config_helper_resume(&rcdu->ddev); } #endif @@ -549,7 +550,7 @@ static const struct dev_pm_ops rcar_du_pm_ops = { static int rcar_du_remove(struct platform_device *pdev) { struct rcar_du_device *rcdu = platform_get_drvdata(pdev); - struct drm_device *ddev = rcdu->ddev; + struct drm_device *ddev = &rcdu->ddev; drm_dev_unregister(ddev); @@ -563,14 +564,14 @@ static int rcar_du_remove(struct platform_device *pdev) static int rcar_du_probe(struct platform_device *pdev) { struct rcar_du_device *rcdu; - struct drm_device *ddev; struct resource *mem; int ret; /* Allocate and initialize the R-Car device structure. */ - rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL); - if (rcdu == NULL) - return -ENOMEM; + rcdu = devm_drm_dev_alloc(&pdev->dev, &rcar_du_driver, + struct rcar_du_device, ddev); + if (IS_ERR(rcdu)) + return PTR_ERR(rcdu); rcdu->dev = &pdev->dev; rcdu->info = of_device_get_match_data(rcdu->dev); @@ -584,13 +585,6 @@ static int rcar_du_probe(struct platform_device *pdev) return PTR_ERR(rcdu->mmio); /* DRM/KMS objects */ - ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev); - if (IS_ERR(ddev)) - return PTR_ERR(ddev); - - rcdu->ddev = ddev; - ddev->dev_private = rcdu; - ret = rcar_du_modeset_init(rcdu); if (ret < 0) { if (ret != -EPROBE_DEFER) @@ -599,25 +593,24 @@ static int rcar_du_probe(struct platform_device *pdev) goto error; } - ddev->irq_enabled = 1; + rcdu->ddev.irq_enabled = 1; /* * Register the DRM device with the core and the connectors with * sysfs. 
*/ - ret = drm_dev_register(ddev, 0); + ret = drm_dev_register(&rcdu->ddev, 0); if (ret) goto error; DRM_INFO("Device %s probed\n", dev_name(&pdev->dev)); - drm_fbdev_generic_setup(ddev, 32); + drm_fbdev_generic_setup(&rcdu->ddev, 32); return 0; error: - rcar_du_remove(pdev); - + drm_kms_helper_poll_fini(&rcdu->ddev); return ret; } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h index 61504c54e2ec..02ca2d0e1b55 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h @@ -13,6 +13,8 @@ #include <linux/kernel.h> #include <linux/wait.h> +#include <drm/drm_device.h> + #include "rcar_cmm.h" #include "rcar_du_crtc.h" #include "rcar_du_group.h" @@ -20,10 +22,9 @@ struct clk; struct device; -struct drm_device; +struct drm_bridge; struct drm_property; struct rcar_du_device; -struct rcar_du_encoder; #define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */ #define RCAR_DU_FEATURE_VSP1_SOURCE BIT(1) /* Has inputs from VSP1 */ @@ -71,6 +72,7 @@ struct rcar_du_device_info { #define RCAR_DU_MAX_CRTCS 4 #define RCAR_DU_MAX_GROUPS DIV_ROUND_UP(RCAR_DU_MAX_CRTCS, 2) #define RCAR_DU_MAX_VSPS 4 +#define RCAR_DU_MAX_LVDS 2 struct rcar_du_device { struct device *dev; @@ -78,16 +80,15 @@ struct rcar_du_device { void __iomem *mmio; - struct drm_device *ddev; + struct drm_device ddev; struct rcar_du_crtc crtcs[RCAR_DU_MAX_CRTCS]; unsigned int num_crtcs; - struct rcar_du_encoder *encoders[RCAR_DU_OUTPUT_MAX]; - struct rcar_du_group groups[RCAR_DU_MAX_GROUPS]; struct platform_device *cmms[RCAR_DU_MAX_CRTCS]; struct rcar_du_vsp vsps[RCAR_DU_MAX_VSPS]; + struct drm_bridge *lvds[RCAR_DU_MAX_LVDS]; struct { struct drm_property *colorkey; @@ -98,6 +99,11 @@ struct rcar_du_device { unsigned int vspd1_sink; }; +static inline struct rcar_du_device *to_rcar_du_device(struct drm_device *dev) +{ + return container_of(dev, struct rcar_du_device, ddev); +} + static inline bool rcar_du_has(struct rcar_du_device *rcdu, unsigned int feature) { diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index b0335da0c161..ba8c6038cd63 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -8,12 +8,13 @@ */ #include <linux/export.h> +#include <linux/slab.h> #include <drm/drm_bridge.h> #include <drm/drm_crtc.h> +#include <drm/drm_managed.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_panel.h> -#include <drm/drm_simple_kms_helper.h> #include "rcar_du_drv.h" #include "rcar_du_encoder.h" @@ -44,26 +45,25 @@ static unsigned int rcar_du_encoder_count_ports(struct device_node *node) return num_ports; } +static const struct drm_encoder_funcs rcar_du_encoder_funcs = { +}; + +static void rcar_du_encoder_release(struct drm_device *dev, void *res) +{ + struct rcar_du_encoder *renc = res; + + drm_encoder_cleanup(&renc->base); + kfree(renc); +} + int rcar_du_encoder_init(struct rcar_du_device *rcdu, enum rcar_du_output output, struct device_node *enc_node) { struct rcar_du_encoder *renc; - struct drm_encoder *encoder; struct drm_bridge *bridge; int ret; - renc = devm_kzalloc(rcdu->dev, sizeof(*renc), GFP_KERNEL); - if (renc == NULL) - return -ENOMEM; - - rcdu->encoders[output] = renc; - renc->output = output; - encoder = rcar_encoder_to_drm_encoder(renc); - - dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n", - enc_node, output); - /* * Locate the DRM bridge from the DT node. 
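As a reference for the pattern the rcar-du probe conversion above follows, here is a minimal sketch of devm_drm_dev_alloc() with an embedded drm_device and a container_of() upcast. foo_device, foo_driver and foo_probe are hypothetical stand-ins, not part of these patches.

#include <linux/platform_device.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>

struct foo_device {
	struct drm_device ddev;		/* embedded, lifetime managed by DRM */
	void __iomem *mmio;
};

static inline struct foo_device *to_foo_device(struct drm_device *dev)
{
	return container_of(dev, struct foo_device, ddev);
}

static const struct drm_driver foo_driver = {
	.name = "foo",			/* .fops, feature flags, ... elided */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo_device *foo;

	/* Allocates foo and initializes foo->ddev in one step; both are
	 * freed automatically when the last drm_dev_put() drops the ref. */
	foo = devm_drm_dev_alloc(&pdev->dev, &foo_driver,
				 struct foo_device, ddev);
	if (IS_ERR(foo))
		return PTR_ERR(foo);

	return drm_dev_register(&foo->ddev, 0);
}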
For the DPAD outputs, if the * DT node has a single port, assume that it describes a panel and @@ -74,57 +74,57 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, rcar_du_encoder_count_ports(enc_node) == 1) { struct drm_panel *panel = of_drm_find_panel(enc_node); - if (IS_ERR(panel)) { - ret = PTR_ERR(panel); - goto done; - } + if (IS_ERR(panel)) + return PTR_ERR(panel); bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel, DRM_MODE_CONNECTOR_DPI); - if (IS_ERR(bridge)) { - ret = PTR_ERR(bridge); - goto done; - } + if (IS_ERR(bridge)) + return PTR_ERR(bridge); } else { bridge = of_drm_find_bridge(enc_node); - if (!bridge) { - ret = -EPROBE_DEFER; - goto done; - } + if (!bridge) + return -EPROBE_DEFER; + + if (output == RCAR_DU_OUTPUT_LVDS0 || + output == RCAR_DU_OUTPUT_LVDS1) + rcdu->lvds[output - RCAR_DU_OUTPUT_LVDS0] = bridge; } /* - * On Gen3 skip the LVDS1 output if the LVDS1 encoder is used as a - * companion for LVDS0 in dual-link mode. + * Create and initialize the encoder. On Gen3 skip the LVDS1 output if + * the LVDS1 encoder is used as a companion for LVDS0 in dual-link + * mode. */ if (rcdu->info->gen >= 3 && output == RCAR_DU_OUTPUT_LVDS1) { - if (rcar_lvds_dual_link(bridge)) { - ret = -ENOLINK; - goto done; - } + if (rcar_lvds_dual_link(bridge)) + return -ENOLINK; } - ret = drm_simple_encoder_init(rcdu->ddev, encoder, - DRM_MODE_ENCODER_NONE); - if (ret < 0) - goto done; + renc = kzalloc(sizeof(*renc), GFP_KERNEL); + if (renc == NULL) + return -ENOMEM; - /* - * Attach the bridge to the encoder. The bridge will create the - * connector. - */ - ret = drm_bridge_attach(encoder, bridge, NULL, 0); - if (ret) { - drm_encoder_cleanup(encoder); - return ret; - } + renc->output = output; + + dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n", + enc_node, output); -done: + ret = drm_encoder_init(&rcdu->ddev, &renc->base, &rcar_du_encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); if (ret < 0) { - if (encoder->name) - encoder->funcs->destroy(encoder); - devm_kfree(rcdu->dev, renc); + kfree(renc); + return ret; } - return ret; + ret = drmm_add_action_or_reset(&rcdu->ddev, rcar_du_encoder_release, + renc); + if (ret) + return ret; + + /* + * Attach the bridge to the encoder. The bridge will create the + * connector. 
+ */ + return drm_bridge_attach(&renc->base, bridge, NULL, 0); } diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h index df9be4524301..73560563fb31 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h @@ -22,8 +22,6 @@ struct rcar_du_encoder { #define to_rcar_encoder(e) \ container_of(e, struct rcar_du_encoder, base) -#define rcar_encoder_to_drm_encoder(e) (&(e)->base) - int rcar_du_encoder_init(struct rcar_du_device *rcdu, enum rcar_du_output output, struct device_node *enc_node); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 72dda446355f..fdb8a0d127ad 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -14,6 +14,7 @@ #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_managed.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -327,7 +328,7 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc) int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { - struct rcar_du_device *rcdu = dev->dev_private; + struct rcar_du_device *rcdu = to_rcar_du_device(dev); unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); unsigned int align; @@ -349,7 +350,7 @@ static struct drm_framebuffer * rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { - struct rcar_du_device *rcdu = dev->dev_private; + struct rcar_du_device *rcdu = to_rcar_du_device(dev); const struct rcar_du_format_info *format; unsigned int chroma_pitch; unsigned int max_pitch; @@ -421,7 +422,7 @@ rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv, static int rcar_du_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) { - struct rcar_du_device *rcdu = dev->dev_private; + struct rcar_du_device *rcdu = to_rcar_du_device(dev); int ret; ret = drm_atomic_helper_check(dev, state); @@ -437,7 +438,7 @@ static int rcar_du_atomic_check(struct drm_device *dev, static void rcar_du_atomic_commit_tail(struct drm_atomic_state *old_state) { struct drm_device *dev = old_state->dev; - struct rcar_du_device *rcdu = dev->dev_private; + struct rcar_du_device *rcdu = to_rcar_du_device(dev); struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; unsigned int i; @@ -583,7 +584,7 @@ static int rcar_du_properties_init(struct rcar_du_device *rcdu) * or enable source color keying (1). */ rcdu->props.colorkey = - drm_property_create_range(rcdu->ddev, 0, "colorkey", + drm_property_create_range(&rcdu->ddev, 0, "colorkey", 0, 0x01ffffff); if (rcdu->props.colorkey == NULL) return -ENOMEM; @@ -700,10 +701,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu) int ret; cmm = of_parse_phandle(np, "renesas,cmms", i); - if (IS_ERR(cmm)) { + if (!cmm) { dev_err(rcdu->dev, "Failed to parse 'renesas,cmms' property\n"); - return PTR_ERR(cmm); + return -EINVAL; } if (!of_device_is_available(cmm)) { @@ -713,10 +714,10 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu) } pdev = of_find_device_by_node(cmm); - if (IS_ERR(pdev)) { + if (!pdev) { dev_err(rcdu->dev, "No device found for CMM%u\n", i); of_node_put(cmm); - return PTR_ERR(pdev); + return -EINVAL; } of_node_put(cmm); @@ -726,8 +727,12 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu) * disabled: return 0 and let the DU continue probing. 
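The encoder path above pairs a plain kzalloc() with a DRM-managed release action instead of devm_* allocation or hand-rolled error unwinding. A sketch of the drmm_add_action_or_reset() idiom, with foo_release and foo_init as illustrative names:

#include <linux/slab.h>
#include <drm/drm_managed.h>

static void foo_release(struct drm_device *dev, void *res)
{
	/* Runs on final drm_dev_put(), after the device is unregistered. */
	kfree(res);
}

static int foo_init(struct drm_device *dev)
{
	void *res;

	res = kzalloc(64, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	/* On failure the action runs immediately, so res cannot leak;
	 * on success it runs when the drm_device is released. */
	return drmm_add_action_or_reset(dev, foo_release, res);
}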
*/ ret = rcar_cmm_init(pdev); - if (ret) + if (ret) { + platform_device_put(pdev); return ret == -ENODEV ? 0 : ret; + } + + rcdu->cmms[i] = pdev; /* * Enforce suspend/resume ordering by making the CMM a provider @@ -739,20 +744,27 @@ static int rcar_du_cmm_init(struct rcar_du_device *rcdu) "Failed to create device link to CMM%u\n", i); return -EINVAL; } - - rcdu->cmms[i] = pdev; } return 0; } +static void rcar_du_modeset_cleanup(struct drm_device *dev, void *res) +{ + struct rcar_du_device *rcdu = to_rcar_du_device(dev); + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(rcdu->cmms); ++i) + platform_device_put(rcdu->cmms[i]); +} + int rcar_du_modeset_init(struct rcar_du_device *rcdu) { static const unsigned int mmio_offsets[] = { DU0_REG_OFFSET, DU2_REG_OFFSET }; - struct drm_device *dev = rcdu->ddev; + struct drm_device *dev = &rcdu->ddev; struct drm_encoder *encoder; unsigned int dpad0_sources; unsigned int num_encoders; @@ -766,6 +778,10 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu) if (ret) return ret; + ret = drmm_add_action(&rcdu->ddev, rcar_du_modeset_cleanup, NULL); + if (ret) + return ret; + dev->mode_config.min_width = 0; dev->mode_config.min_height = 0; dev->mode_config.normalize_zpos = true; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c index a0021fc25b27..02e5f11f38eb 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c @@ -128,7 +128,7 @@ static int rcar_du_plane_hwalloc(struct rcar_du_plane *plane, int rcar_du_atomic_check_planes(struct drm_device *dev, struct drm_atomic_state *state) { - struct rcar_du_device *rcdu = dev->dev_private; + struct rcar_du_device *rcdu = to_rcar_du_device(dev); unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, }; unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, }; bool needs_realloc = false; @@ -773,9 +773,9 @@ int rcar_du_planes_init(struct rcar_du_group *rgrp) plane->group = rgrp; - ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs, - &rcar_du_plane_funcs, formats, - ARRAY_SIZE(formats), + ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane, + crtcs, &rcar_du_plane_funcs, + formats, ARRAY_SIZE(formats), NULL, type, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c index f6a69aa116e6..53221d8473c1 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c @@ -21,6 +21,7 @@ #include <linux/dma-mapping.h> #include <linux/of_platform.h> #include <linux/scatterlist.h> +#include <linux/slab.h> #include <linux/videodev2.h> #include <media/vsp1.h> @@ -344,6 +345,15 @@ static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = { static void rcar_du_vsp_cleanup(struct drm_device *dev, void *res) { struct rcar_du_vsp *vsp = res; + unsigned int i; + + for (i = 0; i < vsp->num_planes; ++i) { + struct rcar_du_vsp_plane *plane = &vsp->planes[i]; + + drm_plane_cleanup(&plane->plane); + } + + kfree(vsp->planes); put_device(vsp->vsp); } @@ -354,6 +364,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, struct rcar_du_device *rcdu = vsp->dev; struct platform_device *pdev; unsigned int num_crtcs = hweight32(crtcs); + unsigned int num_planes; unsigned int i; int ret; @@ -364,7 +375,7 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, vsp->vsp = &pdev->dev; - ret = drmm_add_action(rcdu->ddev, rcar_du_vsp_cleanup, vsp); + ret = drmm_add_action_or_reset(&rcdu->ddev, 
rcar_du_vsp_cleanup, vsp); if (ret < 0) return ret; @@ -376,14 +387,13 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, * The VSP2D (Gen3) has 5 RPFs, but the VSP1D (Gen2) is limited to * 4 RPFs. */ - vsp->num_planes = rcdu->info->gen >= 3 ? 5 : 4; + num_planes = rcdu->info->gen >= 3 ? 5 : 4; - vsp->planes = devm_kcalloc(rcdu->dev, vsp->num_planes, - sizeof(*vsp->planes), GFP_KERNEL); + vsp->planes = kcalloc(num_planes, sizeof(*vsp->planes), GFP_KERNEL); if (!vsp->planes) return -ENOMEM; - for (i = 0; i < vsp->num_planes; ++i) { + for (i = 0; i < num_planes; ++i) { enum drm_plane_type type = i < num_crtcs ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; @@ -392,8 +402,8 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, plane->vsp = vsp; plane->index = i; - ret = drm_universal_plane_init(rcdu->ddev, &plane->plane, crtcs, - &rcar_du_vsp_plane_funcs, + ret = drm_universal_plane_init(&rcdu->ddev, &plane->plane, + crtcs, &rcar_du_vsp_plane_funcs, rcar_du_vsp_formats, ARRAY_SIZE(rcar_du_vsp_formats), NULL, type, NULL); @@ -409,8 +419,10 @@ int rcar_du_vsp_init(struct rcar_du_vsp *vsp, struct device_node *np, } else { drm_plane_create_alpha_property(&plane->plane); drm_plane_create_zpos_property(&plane->plane, 1, 1, - vsp->num_planes - 1); + num_planes - 1); } + + vsp->num_planes++; } return 0; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c index 04efa78d70b6..c79d1259e49b 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_writeback.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_writeback.c @@ -204,7 +204,7 @@ int rcar_du_writeback_init(struct rcar_du_device *rcdu, drm_connector_helper_add(&wb_conn->base, &rcar_du_wb_conn_helper_funcs); - return drm_writeback_connector_init(rcdu->ddev, wb_conn, + return drm_writeback_connector_init(&rcdu->ddev, wb_conn, &rcar_du_wb_conn_funcs, &rcar_du_wb_enc_helper_funcs, writeback_formats, diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index 310aa1546893..cb25c0e8fc9b 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -49,7 +49,7 @@ config ROCKCHIP_DW_MIPI_DSI select GENERIC_PHY_MIPI_DPHY help This selects support for Rockchip SoC specific extensions - for the Synopsys DesignWare HDMI driver. If you want to + for the Synopsys DesignWare dsi driver. If you want to enable MIPI DSI on RK3288 or RK3399 based SoC, you should select this option. 
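The rcar_cmm_enable() hunk earlier in this series swapped pm_runtime_get_sync() for pm_runtime_resume_and_get(), which drops the usage count itself when the resume fails. A minimal sketch of the resulting error handling (foo_enable is hypothetical):

#include <linux/pm_runtime.h>

static int foo_enable(struct device *dev)
{
	int ret;

	/* Unlike pm_runtime_get_sync(), a failure here has already
	 * dropped the usage count again, so no put is needed on error. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... program the hardware ... */

	pm_runtime_put(dev);
	return 0;
}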
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index e84325e56d98..24a71091759c 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -1089,7 +1089,7 @@ static int dw_mipi_dsi_rockchip_probe(struct platform_device *pdev) dsi->grf_regmap = syscon_regmap_lookup_by_phandle(np, "rockchip,grf"); if (IS_ERR(dsi->grf_regmap)) { - DRM_DEV_ERROR(dsi->dev, "Unable to get rockchip,grf\n"); + DRM_DEV_ERROR(dev, "Unable to get rockchip,grf\n"); return PTR_ERR(dsi->grf_regmap); } diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 23de359a1dec..830bdd5e9b7c 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -202,7 +202,7 @@ static int rockchip_hdmi_parse_dt(struct rockchip_hdmi *hdmi) } else if (PTR_ERR(hdmi->vpll_clk) == -EPROBE_DEFER) { return -EPROBE_DEFER; } else if (IS_ERR(hdmi->vpll_clk)) { - DRM_DEV_ERROR(hdmi->dev, "failed to get grf clock\n"); + DRM_DEV_ERROR(hdmi->dev, "failed to get vpll clock\n"); return PTR_ERR(hdmi->vpll_clk); } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index d1e05482641b..8d15cabdcb02 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -1643,7 +1643,6 @@ static const struct drm_crtc_funcs vop_crtc_funcs = { .disable_vblank = vop_crtc_disable_vblank, .set_crc_source = vop_crtc_set_crc_source, .verify_crc_source = vop_crtc_verify_crc_source, - .gamma_set = drm_atomic_helper_legacy_gamma_set, }; static void vop_fb_unref_worker(struct drm_flip_work *work, void *val) diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 8658ef82d937..654bc52d9ff3 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -544,7 +544,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master, struct device_node *port, *endpoint; int ret = 0, child_count = 0; const char *name; - u32 endpoint_id; + u32 endpoint_id = 0; lvds->drm_dev = drm_dev; port = of_graph_get_port_by_id(dev->of_node, 1); diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index b498d474ef9e..92637b70c9bf 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -60,8 +60,6 @@ #define to_drm_sched_job(sched_job) \ container_of((sched_job), struct drm_sched_job, queue_node) -static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb); - /** * drm_sched_rq_init - initialize a given run queue struct * @@ -164,6 +162,40 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) } /** + * drm_sched_job_done - complete a job + * @s_job: pointer to the job which is done + * + * Finish the job's fence and wake up the worker thread. 
+ */ +static void drm_sched_job_done(struct drm_sched_job *s_job) +{ + struct drm_sched_fence *s_fence = s_job->s_fence; + struct drm_gpu_scheduler *sched = s_fence->sched; + + atomic_dec(&sched->hw_rq_count); + atomic_dec(&sched->score); + + trace_drm_sched_process_job(s_fence); + + dma_fence_get(&s_fence->finished); + drm_sched_fence_finished(s_fence); + dma_fence_put(&s_fence->finished); + wake_up_interruptible(&sched->wake_up_worker); +} + +/** + * drm_sched_job_done_cb - the callback for a done job + * @f: fence + * @cb: fence callbacks + */ +static void drm_sched_job_done_cb(struct dma_fence *f, struct dma_fence_cb *cb) +{ + struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb); + + drm_sched_job_done(s_job); +} + +/** * drm_sched_dependency_optimized * * @fence: the dependency fence @@ -199,7 +231,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized); static void drm_sched_start_timeout(struct drm_gpu_scheduler *sched) { if (sched->timeout != MAX_SCHEDULE_TIMEOUT && - !list_empty(&sched->ring_mirror_list)) + !list_empty(&sched->pending_list)) schedule_delayed_work(&sched->work_tdr, sched->timeout); } @@ -259,7 +291,7 @@ void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched, { spin_lock(&sched->job_list_lock); - if (list_empty(&sched->ring_mirror_list)) + if (list_empty(&sched->pending_list)) cancel_delayed_work(&sched->work_tdr); else mod_delayed_work(system_wq, &sched->work_tdr, remaining); @@ -273,7 +305,7 @@ static void drm_sched_job_begin(struct drm_sched_job *s_job) struct drm_gpu_scheduler *sched = s_job->sched; spin_lock(&sched->job_list_lock); - list_add_tail(&s_job->node, &sched->ring_mirror_list); + list_add_tail(&s_job->list, &sched->pending_list); drm_sched_start_timeout(sched); spin_unlock(&sched->job_list_lock); } @@ -287,8 +319,8 @@ static void drm_sched_job_timedout(struct work_struct *work) /* Protects against concurrent deletion in drm_sched_get_cleanup_job */ spin_lock(&sched->job_list_lock); - job = list_first_entry_or_null(&sched->ring_mirror_list, - struct drm_sched_job, node); + job = list_first_entry_or_null(&sched->pending_list, + struct drm_sched_job, list); if (job) { /* @@ -296,7 +328,7 @@ static void drm_sched_job_timedout(struct work_struct *work) * drm_sched_cleanup_jobs. It will be reinserted back after sched->thread * is parked at which point it's safe. */ - list_del_init(&job->node); + list_del_init(&job->list); spin_unlock(&sched->job_list_lock); job->sched->ops->timedout_job(job); @@ -372,7 +404,7 @@ EXPORT_SYMBOL(drm_sched_increase_karma); * Stop the scheduler and also removes and frees all completed jobs. * Note: bad job will not be freed as it might be used later and so it's * callers responsibility to release it manually if it's not part of the - * mirror list any more. + * pending list any more. * */ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) @@ -393,26 +425,27 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad) * Add at the head of the queue to reflect it was the earliest * job extracted. */ - list_add(&bad->node, &sched->ring_mirror_list); + list_add(&bad->list, &sched->pending_list); /* * Iterate the job list from later to earlier one and either deactive - * their HW callbacks or remove them from mirror list if they already + * their HW callbacks or remove them from pending list if they already * signaled. * This iteration is thread safe as sched thread is stopped. 
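drm_sched_job_done_cb() above has to cope with a fence that signals before the callback is armed; dma_fence_add_callback() reports that case as -ENOENT. The same pattern in isolation, with foo_* as illustrative names:

#include <linux/dma-fence.h>
#include <linux/printk.h>

struct foo_job {
	struct dma_fence_cb cb;
	/* ... */
};

static void foo_done(struct foo_job *job)
{
	/* completion bookkeeping, e.g. waking a worker */
}

static void foo_done_cb(struct dma_fence *f, struct dma_fence_cb *cb)
{
	foo_done(container_of(cb, struct foo_job, cb));
}

static void foo_arm(struct foo_job *job, struct dma_fence *fence)
{
	int r = dma_fence_add_callback(fence, &job->cb, foo_done_cb);

	if (r == -ENOENT)		/* fence already signaled */
		foo_done(job);
	else if (r)
		pr_err("fence add callback failed (%d)\n", r);
}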
*/ - list_for_each_entry_safe_reverse(s_job, tmp, &sched->ring_mirror_list, node) { + list_for_each_entry_safe_reverse(s_job, tmp, &sched->pending_list, + list) { if (s_job->s_fence->parent && dma_fence_remove_callback(s_job->s_fence->parent, &s_job->cb)) { atomic_dec(&sched->hw_rq_count); } else { /* - * remove job from ring_mirror_list. + * remove job from pending_list. * Locking here is for concurrent resume timeout */ spin_lock(&sched->job_list_lock); - list_del_init(&s_job->node); + list_del_init(&s_job->list); spin_unlock(&sched->job_list_lock); /* @@ -463,7 +496,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) * so no new jobs are being inserted or removed. Also concurrent * GPU recovers can't run in parallel. */ - list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { + list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { struct dma_fence *fence = s_job->s_fence->parent; atomic_inc(&sched->hw_rq_count); @@ -473,14 +506,14 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) if (fence) { r = dma_fence_add_callback(fence, &s_job->cb, - drm_sched_process_job); + drm_sched_job_done_cb); if (r == -ENOENT) - drm_sched_process_job(fence, &s_job->cb); + drm_sched_job_done(s_job); else if (r) DRM_ERROR("fence add callback failed (%d)\n", r); } else - drm_sched_process_job(NULL, &s_job->cb); + drm_sched_job_done(s_job); } if (full_recovery) { @@ -494,7 +527,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery) EXPORT_SYMBOL(drm_sched_start); /** - * drm_sched_resubmit_jobs - helper to relunch job from mirror ring list + * drm_sched_resubmit_jobs - helper to relunch job from pending ring list * * @sched: scheduler instance * @@ -506,7 +539,7 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) bool found_guilty = false; struct dma_fence *fence; - list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) { + list_for_each_entry_safe(s_job, tmp, &sched->pending_list, list) { struct drm_sched_fence *s_fence = s_job->s_fence; if (!found_guilty && atomic_read(&s_job->karma) > sched->hang_limit) { @@ -566,7 +599,7 @@ int drm_sched_job_init(struct drm_sched_job *job, return -ENOMEM; job->id = atomic64_inc_return(&sched->job_id_count); - INIT_LIST_HEAD(&job->node); + INIT_LIST_HEAD(&job->list); return 0; } @@ -636,36 +669,11 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched) } /** - * drm_sched_process_job - process a job - * - * @f: fence - * @cb: fence callbacks - * - * Called after job has finished execution. - */ -static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) -{ - struct drm_sched_job *s_job = container_of(cb, struct drm_sched_job, cb); - struct drm_sched_fence *s_fence = s_job->s_fence; - struct drm_gpu_scheduler *sched = s_fence->sched; - - atomic_dec(&sched->hw_rq_count); - atomic_dec(&sched->score); - - trace_drm_sched_process_job(s_fence); - - dma_fence_get(&s_fence->finished); - drm_sched_fence_finished(s_fence); - dma_fence_put(&s_fence->finished); - wake_up_interruptible(&sched->wake_up_worker); -} - -/** * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed * * @sched: scheduler instance * - * Returns the next finished job from the mirror list (if there is one) + * Returns the next finished job from the pending list (if there is one) * ready for it to be destroyed. 
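Throughout the rename, jobs are only ever taken off pending_list under job_list_lock, using list_first_entry_or_null() plus list_del_init() so that a later deletion elsewhere is a no-op. Reduced to its essentials (foo_* names are illustrative):

#include <linux/list.h>
#include <linux/spinlock.h>

struct foo_job {
	struct list_head list;
};

static struct foo_job *foo_take_first(struct list_head *pending,
				      spinlock_t *lock)
{
	struct foo_job *job;

	spin_lock(lock);
	job = list_first_entry_or_null(pending, struct foo_job, list);
	if (job)
		/* list_del_init() leaves the node self-linked, so a
		 * second deletion is harmless. */
		list_del_init(&job->list);
	spin_unlock(lock);

	return job;
}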
*/ static struct drm_sched_job * @@ -675,7 +683,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) /* * Don't destroy jobs while the timeout worker is running OR thread - * is being parked and hence assumed to not touch ring_mirror_list + * is being parked and hence assumed to not touch pending_list */ if ((sched->timeout != MAX_SCHEDULE_TIMEOUT && !cancel_delayed_work(&sched->work_tdr)) || @@ -684,12 +692,12 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) spin_lock(&sched->job_list_lock); - job = list_first_entry_or_null(&sched->ring_mirror_list, - struct drm_sched_job, node); + job = list_first_entry_or_null(&sched->pending_list, + struct drm_sched_job, list); if (job && dma_fence_is_signaled(&job->s_fence->finished)) { - /* remove job from ring_mirror_list */ - list_del_init(&job->node); + /* remove job from pending_list */ + list_del_init(&job->list); } else { job = NULL; /* queue timeout for next job */ @@ -809,9 +817,9 @@ static int drm_sched_main(void *param) if (!IS_ERR_OR_NULL(fence)) { s_fence->parent = dma_fence_get(fence); r = dma_fence_add_callback(fence, &sched_job->cb, - drm_sched_process_job); + drm_sched_job_done_cb); if (r == -ENOENT) - drm_sched_process_job(fence, &sched_job->cb); + drm_sched_job_done(sched_job); else if (r) DRM_ERROR("fence add callback failed (%d)\n", r); @@ -820,7 +828,7 @@ static int drm_sched_main(void *param) if (IS_ERR(fence)) dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); - drm_sched_process_job(NULL, &sched_job->cb); + drm_sched_job_done(sched_job); } wake_up(&sched->job_scheduled); @@ -858,7 +866,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched, init_waitqueue_head(&sched->wake_up_worker); init_waitqueue_head(&sched->job_scheduled); - INIT_LIST_HEAD(&sched->ring_mirror_list); + INIT_LIST_HEAD(&sched->pending_list); spin_lock_init(&sched->job_list_lock); atomic_set(&sched->hw_rq_count, 0); INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout); @@ -891,6 +899,9 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) if (sched->thread) kthread_stop(sched->thread); + /* Confirm no work left behind accessing device structures */ + cancel_delayed_work_sync(&sched->work_tdr); + sched->ready = false; } EXPORT_SYMBOL(drm_sched_fini); diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index a98057431023..7476301d7142 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -330,13 +330,6 @@ static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = { .atomic_disable = sti_cursor_atomic_disable, }; -static void sti_cursor_destroy(struct drm_plane *drm_plane) -{ - DRM_DEBUG_DRIVER("\n"); - - drm_plane_cleanup(drm_plane); -} - static int sti_cursor_late_register(struct drm_plane *drm_plane) { struct sti_plane *plane = to_sti_plane(drm_plane); @@ -350,7 +343,7 @@ static int sti_cursor_late_register(struct drm_plane *drm_plane) static const struct drm_plane_funcs sti_cursor_plane_helpers_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = sti_cursor_destroy, + .destroy = drm_plane_cleanup, .reset = sti_plane_reset, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 2d5a2b5b78b8..2f4a34f14d33 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -884,13 +884,6 @@ static const struct 
drm_plane_helper_funcs sti_gdp_helpers_funcs = { .atomic_disable = sti_gdp_atomic_disable, }; -static void sti_gdp_destroy(struct drm_plane *drm_plane) -{ - DRM_DEBUG_DRIVER("\n"); - - drm_plane_cleanup(drm_plane); -} - static int sti_gdp_late_register(struct drm_plane *drm_plane) { struct sti_plane *plane = to_sti_plane(drm_plane); @@ -902,7 +895,7 @@ static int sti_gdp_late_register(struct drm_plane *drm_plane) static const struct drm_plane_funcs sti_gdp_plane_helpers_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = sti_gdp_destroy, + .destroy = drm_plane_cleanup, .reset = sti_plane_reset, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 5a4e12194a77..62f824cd5f21 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1262,13 +1262,6 @@ static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = { .atomic_disable = sti_hqvdp_atomic_disable, }; -static void sti_hqvdp_destroy(struct drm_plane *drm_plane) -{ - DRM_DEBUG_DRIVER("\n"); - - drm_plane_cleanup(drm_plane); -} - static int sti_hqvdp_late_register(struct drm_plane *drm_plane) { struct sti_plane *plane = to_sti_plane(drm_plane); @@ -1282,7 +1275,7 @@ static int sti_hqvdp_late_register(struct drm_plane *drm_plane) static const struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = sti_hqvdp_destroy, + .destroy = drm_plane_cleanup, .reset = sti_plane_reset, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 3980677435cb..7812094f93d6 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -713,7 +713,6 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = { .enable_vblank = ltdc_crtc_enable_vblank, .disable_vblank = ltdc_crtc_disable_vblank, .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, - .gamma_set = drm_atomic_helper_legacy_gamma_set, }; /* diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index 77497b45f9a2..522e51a404cc 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -805,19 +805,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master, ret = of_dma_configure(drm->dev, dev->of_node, true); if (ret) return ret; - } else { - /* - * If we don't have the interconnect property, most likely - * because of an old DT, we need to set the DMA offset by hand - * on our device since the RAM mapping is at 0 for the DMA bus, - * unlike the CPU. - * - * XXX(hch): this has no business in a driver and needs to move - * to the device tree. 
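The three sti hunks here are the same simplification: a destroy hook that only called drm_plane_cleanup() is dropped in favor of using the helper as the .destroy callback directly, since the prototypes match. Sketch:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane.h>

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	/* drm_plane_cleanup() is void (*)(struct drm_plane *), which is
	 * exactly the .destroy prototype, so no wrapper is required. */
	.destroy	= drm_plane_cleanup,
};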
- */ - ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G); - if (ret) - return ret; } backend->engine.node = dev->of_node; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index eaaf5d70e352..6b9af4c08cd6 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -569,30 +569,13 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, if (info->bus_flags & DRM_BUS_FLAG_DE_LOW) val |= SUN4I_TCON0_IO_POL_DE_NEGATIVE; - /* - * On A20 and similar SoCs, the only way to achieve Positive Edge - * (Rising Edge), is setting dclk clock phase to 2/3(240°). - * By default TCON works in Negative Edge(Falling Edge), - * this is why phase is set to 0 in that case. - * Unfortunately there's no way to logically invert dclk through - * IO_POL register. - * The only acceptable way to work, triple checked with scope, - * is using clock phase set to 0° for Negative Edge and set to 240° - * for Positive Edge. - * On A33 and similar SoCs there would be a 90° phase option, - * but it divides also dclk by 2. - * Following code is a way to avoid quirks all around TCON - * and DOTCLOCK drivers. - */ - if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE) - clk_set_phase(tcon->dclk, 240); - if (info->bus_flags & DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE) - clk_set_phase(tcon->dclk, 0); + val |= SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE; regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG, SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE | + SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE | SUN4I_TCON0_IO_POL_DE_NEGATIVE, val); diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index cfbf4e6c1679..c5ac1b02482c 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -113,6 +113,7 @@ #define SUN4I_TCON0_IO_POL_REG 0x88 #define SUN4I_TCON0_IO_POL_DCLK_PHASE(phase) ((phase & 3) << 28) #define SUN4I_TCON0_IO_POL_DE_NEGATIVE BIT(27) +#define SUN4I_TCON0_IO_POL_DCLK_DRIVE_NEGEDGE BIT(26) #define SUN4I_TCON0_IO_POL_HSYNC_POSITIVE BIT(25) #define SUN4I_TCON0_IO_POL_VSYNC_POSITIVE BIT(24) diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.c b/drivers/gpu/drm/sun4i/sun8i_csc.c index 781955dd4995..9bd62de0c288 100644 --- a/drivers/gpu/drm/sun4i/sun8i_csc.c +++ b/drivers/gpu/drm/sun4i/sun8i_csc.c @@ -46,33 +46,6 @@ static const u32 yuv2rgb[2][2][12] = { }, }; -static const u32 yvu2rgb[2][2][12] = { - [DRM_COLOR_YCBCR_LIMITED_RANGE] = { - [DRM_COLOR_YCBCR_BT601] = { - 0x000004A8, 0x00000662, 0x00000000, 0xFFFC8451, - 0x000004A8, 0xFFFFFCC0, 0xFFFFFE6F, 0x00021E4D, - 0x000004A8, 0x00000000, 0x00000811, 0xFFFBACA9, - }, - [DRM_COLOR_YCBCR_BT709] = { - 0x000004A8, 0x0000072B, 0x00000000, 0xFFFC1F99, - 0x000004A8, 0xFFFFFDDF, 0xFFFFFF26, 0x00013383, - 0x000004A8, 0x00000000, 0x00000873, 0xFFFB7BEF, - } - }, - [DRM_COLOR_YCBCR_FULL_RANGE] = { - [DRM_COLOR_YCBCR_BT601] = { - 0x00000400, 0x0000059B, 0x00000000, 0xFFFD322E, - 0x00000400, 0xFFFFFD25, 0xFFFFFEA0, 0x00021DD5, - 0x00000400, 0x00000000, 0x00000716, 0xFFFC74BD, - }, - [DRM_COLOR_YCBCR_BT709] = { - 0x00000400, 0x0000064C, 0x00000000, 0xFFFCD9B4, - 0x00000400, 0xFFFFFE21, 0xFFFFFF41, 0x00014F96, - 0x00000400, 0x00000000, 0x0000076C, 0xFFFC49EF, - } - }, -}; - /* * DE3 has a bit different CSC units. Factors are in two's complement format. 
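In the sun8i CSC hunks that follow, the deleted yvu2rgb tables are replaced by an index swap: YVU reverses the chroma order, so within each row of four coefficients the entries at offsets 1 and 2 trade places while offsets 0 and 3 stay put. A standalone sketch of that remapping (foo_write_yvu and the wr callback are hypothetical):

#include <linux/types.h>

/* Write a YUV2RGB coefficient table for a YVU source by swapping the
 * U and V columns (offsets 1 and 2 within each row of four). */
static void foo_write_yvu(void (*wr)(u32 reg, u32 val), u32 base,
			  const u32 table[12])
{
	int i;

	for (i = 0; i < 12; i++) {
		int slot = i;

		if ((i & 3) == 1)	/* U coefficient -> V slot */
			slot = i + 1;
		else if ((i & 3) == 2)	/* V coefficient -> U slot */
			slot = i - 1;

		wr(base + 4 * slot, table[i]);
	}
}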
* First three factors in a row are multiplication factors which have 17 bits @@ -96,7 +69,7 @@ static const u32 yvu2rgb[2][2][12] = { * c20 c21 c22 [d2 const2] */ -static const u32 yuv2rgb_de3[2][2][12] = { +static const u32 yuv2rgb_de3[2][3][12] = { [DRM_COLOR_YCBCR_LIMITED_RANGE] = { [DRM_COLOR_YCBCR_BT601] = { 0x0002542A, 0x00000000, 0x0003312A, 0xFFC00000, @@ -107,6 +80,11 @@ static const u32 yuv2rgb_de3[2][2][12] = { 0x0002542A, 0x00000000, 0x000395E2, 0xFFC00000, 0x0002542A, 0xFFFF92D2, 0xFFFEEF27, 0xFE000000, 0x0002542A, 0x0004398C, 0x00000000, 0xFE000000, + }, + [DRM_COLOR_YCBCR_BT2020] = { + 0x0002542A, 0x00000000, 0x00035B7B, 0xFFC00000, + 0x0002542A, 0xFFFFA017, 0xFFFEB2FC, 0xFE000000, + 0x0002542A, 0x00044896, 0x00000000, 0xFE000000, } }, [DRM_COLOR_YCBCR_FULL_RANGE] = { @@ -119,33 +97,11 @@ static const u32 yuv2rgb_de3[2][2][12] = { 0x00020000, 0x00000000, 0x0003264C, 0x00000000, 0x00020000, 0xFFFFA018, 0xFFFF1053, 0xFE000000, 0x00020000, 0x0003B611, 0x00000000, 0xFE000000, - } - }, -}; - -static const u32 yvu2rgb_de3[2][2][12] = { - [DRM_COLOR_YCBCR_LIMITED_RANGE] = { - [DRM_COLOR_YCBCR_BT601] = { - 0x0002542A, 0x0003312A, 0x00000000, 0xFFC00000, - 0x0002542A, 0xFFFE5FC3, 0xFFFF376B, 0xFE000000, - 0x0002542A, 0x00000000, 0x000408D2, 0xFE000000, }, - [DRM_COLOR_YCBCR_BT709] = { - 0x0002542A, 0x000395E2, 0x00000000, 0xFFC00000, - 0x0002542A, 0xFFFEEF27, 0xFFFF92D2, 0xFE000000, - 0x0002542A, 0x00000000, 0x0004398C, 0xFE000000, - } - }, - [DRM_COLOR_YCBCR_FULL_RANGE] = { - [DRM_COLOR_YCBCR_BT601] = { - 0x00020000, 0x0002CDD2, 0x00000000, 0x00000000, - 0x00020000, 0xFFFE925D, 0xFFFF4FCE, 0xFE000000, - 0x00020000, 0x00000000, 0x00038B43, 0xFE000000, - }, - [DRM_COLOR_YCBCR_BT709] = { - 0x00020000, 0x0003264C, 0x00000000, 0x00000000, - 0x00020000, 0xFFFF1053, 0xFFFFA018, 0xFE000000, - 0x00020000, 0x00000000, 0x0003B611, 0xFE000000, + [DRM_COLOR_YCBCR_BT2020] = { + 0x00020000, 0x00000000, 0x0002F2FE, 0x00000000, + 0x00020000, 0xFFFFABC0, 0xFFFEDB78, 0xFE000000, + 0x00020000, 0x0003C346, 0x00000000, 0xFE000000, } }, }; @@ -157,21 +113,30 @@ static void sun8i_csc_set_coefficients(struct regmap *map, u32 base, { const u32 *table; u32 base_reg; + int i; + + table = yuv2rgb[range][encoding]; switch (mode) { case SUN8I_CSC_MODE_YUV2RGB: - table = yuv2rgb[range][encoding]; + base_reg = SUN8I_CSC_COEFF(base, 0); + regmap_bulk_write(map, base_reg, table, 12); break; case SUN8I_CSC_MODE_YVU2RGB: - table = yvu2rgb[range][encoding]; + for (i = 0; i < 12; i++) { + if ((i & 3) == 1) + base_reg = SUN8I_CSC_COEFF(base, i + 1); + else if ((i & 3) == 2) + base_reg = SUN8I_CSC_COEFF(base, i - 1); + else + base_reg = SUN8I_CSC_COEFF(base, i); + regmap_write(map, base_reg, table[i]); + } break; default: DRM_WARN("Wrong CSC mode specified.\n"); return; } - - base_reg = SUN8I_CSC_COEFF(base, 0); - regmap_bulk_write(map, base_reg, table, 12); } static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer, @@ -180,22 +145,36 @@ static void sun8i_de3_ccsc_set_coefficients(struct regmap *map, int layer, enum drm_color_range range) { const u32 *table; - u32 base_reg; + u32 addr; + int i; + + table = yuv2rgb_de3[range][encoding]; switch (mode) { case SUN8I_CSC_MODE_YUV2RGB: - table = yuv2rgb_de3[range][encoding]; + addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0); + regmap_bulk_write(map, addr, table, 12); break; case SUN8I_CSC_MODE_YVU2RGB: - table = yvu2rgb_de3[range][encoding]; + for (i = 0; i < 12; i++) { + if ((i & 3) == 1) + addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, + layer, 
+ i + 1); + else if ((i & 3) == 2) + addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, + layer, + i - 1); + else + addr = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, + layer, i); + regmap_write(map, addr, table[i]); + } break; default: DRM_WARN("Wrong CSC mode specified.\n"); return; } - - base_reg = SUN50I_MIXER_BLEND_CSC_COEFF(DE3_BLD_BASE, layer, 0, 0); - regmap_bulk_write(map, base_reg, table, 12); } static void sun8i_csc_enable(struct regmap *map, u32 base, bool enable) diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index d4c08043dd81..92add2cef2e7 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -208,6 +208,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, phy_node = of_parse_phandle(dev->of_node, "phys", 0); if (!phy_node) { dev_err(dev, "Can't found PHY phandle\n"); + ret = -EINVAL; goto err_disable_clk_tmds; } diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h index 7576b523fdbb..145833a9d82d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.h +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h @@ -50,10 +50,8 @@ #define SUN8I_MIXER_BLEND_CK_MIN(base, x) ((base) + 0xe0 + 0x04 * (x)) #define SUN8I_MIXER_BLEND_OUTCTL(base) ((base) + 0xfc) #define SUN50I_MIXER_BLEND_CSC_CTL(base) ((base) + 0x100) -#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x, y) \ - ((base) + 0x110 + (layer) * 0x30 + (x) * 0x10 + 4 * (y)) -#define SUN50I_MIXER_BLEND_CSC_CONST(base, layer, i) \ - ((base) + 0x110 + (layer) * 0x30 + (i) * 0x10 + 0x0c) +#define SUN50I_MIXER_BLEND_CSC_COEFF(base, layer, x) \ + ((base) + 0x110 + (layer) * 0x30 + (x) * 4) #define SUN8I_MIXER_BLEND_PIPE_CTL_EN_MSK GENMASK(12, 8) #define SUN8I_MIXER_BLEND_PIPE_CTL_EN(pipe) BIT(8 + pipe) diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c index 76393fc976fe..8cc294a9969d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c +++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c @@ -543,6 +543,8 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm, supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) | BIT(DRM_COLOR_YCBCR_BT709); + if (mixer->cfg->is_de3) + supported_encodings |= BIT(DRM_COLOR_YCBCR_BT2020); supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | BIT(DRM_COLOR_YCBCR_FULL_RANGE); diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index ab699bf0ac5c..58c185c299f4 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -56,7 +56,7 @@ static const struct file_operations tdfx_driver_fops = { .llseek = noop_llseek, }; -static struct drm_driver driver = { +static const struct drm_driver driver = { .driver_features = DRIVER_LEGACY, .fops = &tdfx_driver_fops, .name = DRIVER_NAME, diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 19ffb0626505..e45c8414e2a3 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -90,7 +90,7 @@ static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) if (!fpriv) return -ENOMEM; - idr_init(&fpriv->contexts); + idr_init_base(&fpriv->contexts, 1); mutex_init(&fpriv->lock); filp->driver_priv = fpriv; diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index 5a4fd0dbf4cf..47d26b5d9945 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -129,7 +129,6 @@ int tegra_output_probe(struct tegra_output *output) if (!output->ddc) { err = -EPROBE_DEFER; 
- of_node_put(ddc); return err; } } diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index e88a17c2937f..cc2aa2308a51 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -397,7 +397,6 @@ struct tegra_sor; struct tegra_sor_ops { const char *name; int (*probe)(struct tegra_sor *sor); - int (*remove)(struct tegra_sor *sor); void (*audio_enable)(struct tegra_sor *sor); void (*audio_disable)(struct tegra_sor *sor); }; @@ -2942,6 +2941,24 @@ static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = { .atomic_check = tegra_sor_encoder_atomic_check, }; +static void tegra_sor_disable_regulator(void *data) +{ + struct regulator *reg = data; + + regulator_disable(reg); +} + +static int tegra_sor_enable_regulator(struct tegra_sor *sor, struct regulator *reg) +{ + int err; + + err = regulator_enable(reg); + if (err) + return err; + + return devm_add_action_or_reset(sor->dev, tegra_sor_disable_regulator, reg); +} + static int tegra_sor_hdmi_probe(struct tegra_sor *sor) { int err; @@ -2953,7 +2970,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor) return PTR_ERR(sor->avdd_io_supply); } - err = regulator_enable(sor->avdd_io_supply); + err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply); if (err < 0) { dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n", err); @@ -2967,7 +2984,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor) return PTR_ERR(sor->vdd_pll_supply); } - err = regulator_enable(sor->vdd_pll_supply); + err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply); if (err < 0) { dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n", err); @@ -2981,7 +2998,7 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor) return PTR_ERR(sor->hdmi_supply); } - err = regulator_enable(sor->hdmi_supply); + err = tegra_sor_enable_regulator(sor, sor->hdmi_supply); if (err < 0) { dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err); return err; @@ -2992,19 +3009,9 @@ static int tegra_sor_hdmi_probe(struct tegra_sor *sor) return 0; } -static int tegra_sor_hdmi_remove(struct tegra_sor *sor) -{ - regulator_disable(sor->hdmi_supply); - regulator_disable(sor->vdd_pll_supply); - regulator_disable(sor->avdd_io_supply); - - return 0; -} - static const struct tegra_sor_ops tegra_sor_hdmi_ops = { .name = "HDMI", .probe = tegra_sor_hdmi_probe, - .remove = tegra_sor_hdmi_remove, .audio_enable = tegra_sor_hdmi_audio_enable, .audio_disable = tegra_sor_hdmi_audio_disable, }; @@ -3017,7 +3024,7 @@ static int tegra_sor_dp_probe(struct tegra_sor *sor) if (IS_ERR(sor->avdd_io_supply)) return PTR_ERR(sor->avdd_io_supply); - err = regulator_enable(sor->avdd_io_supply); + err = tegra_sor_enable_regulator(sor, sor->avdd_io_supply); if (err < 0) return err; @@ -3025,25 +3032,16 @@ static int tegra_sor_dp_probe(struct tegra_sor *sor) if (IS_ERR(sor->vdd_pll_supply)) return PTR_ERR(sor->vdd_pll_supply); - err = regulator_enable(sor->vdd_pll_supply); + err = tegra_sor_enable_regulator(sor, sor->vdd_pll_supply); if (err < 0) return err; return 0; } -static int tegra_sor_dp_remove(struct tegra_sor *sor) -{ - regulator_disable(sor->vdd_pll_supply); - regulator_disable(sor->avdd_io_supply); - - return 0; -} - static const struct tegra_sor_ops tegra_sor_dp_ops = { .name = "DP", .probe = tegra_sor_dp_probe, - .remove = tegra_sor_dp_remove, }; static int tegra_sor_init(struct host1x_client *client) @@ -3145,6 +3143,7 @@ static int tegra_sor_init(struct host1x_client *client) if (err < 0) { dev_err(sor->dev, "failed to deassert SOR reset: %d\n", 
err); + clk_disable_unprepare(sor->clk); return err; } @@ -3152,12 +3151,17 @@ static int tegra_sor_init(struct host1x_client *client) } err = clk_prepare_enable(sor->clk_safe); - if (err < 0) + if (err < 0) { + clk_disable_unprepare(sor->clk); return err; + } err = clk_prepare_enable(sor->clk_dp); - if (err < 0) + if (err < 0) { + clk_disable_unprepare(sor->clk_safe); + clk_disable_unprepare(sor->clk); return err; + } return 0; } @@ -3764,17 +3768,16 @@ static int tegra_sor_probe(struct platform_device *pdev) return err; err = tegra_output_probe(&sor->output); - if (err < 0) { - dev_err(&pdev->dev, "failed to probe output: %d\n", err); - return err; - } + if (err < 0) + return dev_err_probe(&pdev->dev, err, + "failed to probe output\n"); if (sor->ops && sor->ops->probe) { err = sor->ops->probe(sor); if (err < 0) { dev_err(&pdev->dev, "failed to probe %s: %d\n", sor->ops->name, err); - goto output; + goto remove; } } @@ -3955,9 +3958,6 @@ unregister: rpm_disable: pm_runtime_disable(&pdev->dev); remove: - if (sor->ops && sor->ops->remove) - sor->ops->remove(sor); -output: tegra_output_remove(&sor->output); return err; } @@ -3976,12 +3976,6 @@ static int tegra_sor_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); - if (sor->ops && sor->ops->remove) { - err = sor->ops->remove(sor); - if (err < 0) - dev_err(&pdev->dev, "failed to remove SOR: %d\n", err); - } - tegra_output_remove(&sor->output); return 0; diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c index 561c49d8657a..a043e602199e 100644 --- a/drivers/gpu/drm/tiny/cirrus.c +++ b/drivers/gpu/drm/tiny/cirrus.c @@ -602,7 +602,6 @@ static int cirrus_pci_probe(struct pci_dev *pdev, drm_mode_config_reset(dev); - dev->pdev = pdev; pci_set_drvdata(pdev, dev); ret = drm_dev_register(dev, 0); if (ret) diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index 03c86628e4ac..8f9fa4188897 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -32,7 +32,6 @@ #define pr_fmt(fmt) "[TTM] " fmt -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> #include <linux/agp_backend.h> diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 9a03c7834b1e..b65f4b12f986 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -31,7 +31,6 @@ #define pr_fmt(fmt) "[TTM] " fmt -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> #include <linux/jiffies.h> @@ -43,6 +42,8 @@ #include <linux/atomic.h> #include <linux/dma-resv.h> +#include "ttm_module.h" + static void ttm_bo_global_kobj_release(struct kobject *kobj); /* @@ -71,9 +72,9 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_resource_manager *man; int i, mem_type; - drm_printf(&p, "No space for %p (%lu pages, %luK, %luM)\n", - bo, bo->mem.num_pages, bo->mem.size >> 10, - bo->mem.size >> 20); + drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n", + bo, bo->mem.num_pages, bo->base.size >> 10, + bo->base.size >> 20); for (i = 0; i < placement->num_placement; i++) { mem_type = placement->placement[i].mem_type; drm_printf(&p, " placement[%d]=0x%08X (%d)\n", @@ -109,40 +110,14 @@ static struct kobj_type ttm_bo_glob_kobj_type = { .default_attrs = ttm_bo_global_attrs }; -static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, - struct ttm_resource *mem) -{ - struct ttm_bo_device *bdev = bo->bdev; - struct 
ttm_resource_manager *man; - - if (!list_empty(&bo->lru) || bo->pin_count) - return; - - man = ttm_manager_type(bdev, mem->mem_type); - list_add_tail(&bo->lru, &man->lru[bo->priority]); - - if (man->use_tt && bo->ttm && - !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | - TTM_PAGE_FLAG_SWAPPED))) { - list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]); - } -} - static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - bool notify = false; - if (!list_empty(&bo->swap)) { - list_del_init(&bo->swap); - notify = true; - } - if (!list_empty(&bo->lru)) { - list_del_init(&bo->lru); - notify = true; - } + list_del_init(&bo->swap); + list_del_init(&bo->lru); - if (notify && bdev->driver->del_from_lru_notify) + if (bdev->driver->del_from_lru_notify) bdev->driver->del_from_lru_notify(bo); } @@ -155,12 +130,32 @@ static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos, } void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, + struct ttm_resource *mem, struct ttm_lru_bulk_move *bulk) { + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_resource_manager *man; + dma_resv_assert_held(bo->base.resv); - ttm_bo_del_from_lru(bo); - ttm_bo_add_mem_to_lru(bo, &bo->mem); + if (bo->pin_count) { + ttm_bo_del_from_lru(bo); + return; + } + + man = ttm_manager_type(bdev, mem->mem_type); + list_move_tail(&bo->lru, &man->lru[bo->priority]); + if (man->use_tt && bo->ttm && + !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | + TTM_PAGE_FLAG_SWAPPED))) { + struct list_head *swap; + + swap = &ttm_bo_glob.swap_lru[bo->priority]; + list_move_tail(&bo->swap, swap); + } + + if (bdev->driver->del_from_lru_notify) + bdev->driver->del_from_lru_notify(bo); if (bulk && !bo->pin_count) { switch (bo->mem.mem_type) { @@ -267,7 +262,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, goto out_err; } - ctx->bytes_moved += bo->num_pages << PAGE_SHIFT; + ctx->bytes_moved += bo->base.size; return 0; out_err: @@ -514,10 +509,9 @@ static void ttm_bo_release(struct kref *kref) * shrinkers, now that they are queued for * destruction. 
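After this refactor, ttm_bo_move_to_lru_tail() takes the resource explicitly and folds in the old add/del pair, including the pinned case; callers just hold the reservation and the global LRU lock. A sketch of the calling convention (foo_requeue is illustrative, mirroring the execbuf-util callers later in this series):

#include <drm/ttm/ttm_bo_driver.h>

static void foo_requeue(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);

	spin_lock(&ttm_bo_glob.lru_lock);
	/* Pass the resource the BO currently occupies; a NULL bulk
	 * argument skips bulk-move tracking. Pinned BOs are delisted. */
	ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
	spin_unlock(&ttm_bo_glob.lru_lock);
}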
*/ - if (bo->pin_count) { + if (WARN_ON(bo->pin_count)) { bo->pin_count = 0; - ttm_bo_del_from_lru(bo); - ttm_bo_add_mem_to_lru(bo, &bo->mem); + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); } kref_init(&bo->kref); @@ -859,8 +853,7 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, mem->placement = place->flags; spin_lock(&ttm_bo_glob.lru_lock); - ttm_bo_del_from_lru(bo); - ttm_bo_add_mem_to_lru(bo, mem); + ttm_bo_move_to_lru_tail(bo, mem, NULL); spin_unlock(&ttm_bo_glob.lru_lock); return 0; @@ -937,9 +930,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, } error: - if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) { + if (bo->mem.mem_type == TTM_PL_SYSTEM && !bo->pin_count) ttm_bo_move_to_lru_tail_unlocked(bo); - } return ret; } @@ -984,8 +976,7 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, memset(&hop, 0, sizeof(hop)); - mem.num_pages = bo->num_pages; - mem.size = mem.num_pages << PAGE_SHIFT; + mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; mem.bus.offset = 0; mem.bus.addr = NULL; @@ -1101,7 +1092,7 @@ EXPORT_SYMBOL(ttm_bo_validate); int ttm_bo_init_reserved(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, - unsigned long size, + size_t size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, @@ -1112,9 +1103,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, void (*destroy) (struct ttm_buffer_object *)) { struct ttm_mem_global *mem_glob = &ttm_mem_glob; - int ret = 0; - unsigned long num_pages; bool locked; + int ret = 0; ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx); if (ret) { @@ -1126,16 +1116,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, return -ENOMEM; } - num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; - if (num_pages == 0) { - pr_err("Illegal buffer object size\n"); - if (destroy) - (*destroy)(bo); - else - kfree(bo); - ttm_mem_global_free(mem_glob, acc_size); - return -EINVAL; - } bo->destroy = destroy ? destroy : ttm_bo_default_destroy; kref_init(&bo->kref); @@ -1144,10 +1124,8 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, INIT_LIST_HEAD(&bo->swap); bo->bdev = bdev; bo->type = type; - bo->num_pages = num_pages; - bo->mem.size = num_pages << PAGE_SHIFT; bo->mem.mem_type = TTM_PL_SYSTEM; - bo->mem.num_pages = bo->num_pages; + bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; bo->mem.mm_node = NULL; bo->mem.page_alignment = page_alignment; bo->mem.bus.offset = 0; @@ -1165,9 +1143,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, } if (!ttm_bo_uses_embedded_gem_object(bo)) { /* - * bo.gem is not initialized, so we have to setup the + * bo.base is not initialized, so we have to setup the * struct elements we want use regardless. 
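With the per-BO page count gone, sizes are kept in bytes on the embedded GEM object and page counts are derived where needed. The conversions in these hunks all reduce to the following (illustrative helpers, not part of the patches):

#include <linux/mm.h>

/* bytes -> pages, rounding a trailing partial page up */
static inline unsigned long foo_bo_num_pages(size_t size)
{
	return PAGE_ALIGN(size) >> PAGE_SHIFT;
}

/* pages -> bytes, as in ctx->bytes_moved += bo->base.size above */
static inline size_t foo_bo_bytes(unsigned long num_pages)
{
	return (size_t)num_pages << PAGE_SHIFT;
}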
*/ + bo->base.size = size; dma_resv_init(&bo->base._resv); drm_vma_node_reset(&bo->base.vma_node); } @@ -1209,7 +1188,7 @@ EXPORT_SYMBOL(ttm_bo_init_reserved); int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, - unsigned long size, + size_t size, enum ttm_bo_type type, struct ttm_placement *placement, uint32_t page_alignment, diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7ccb2295cac1..398d5013fc39 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -310,7 +310,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, kref_init(&fbo->base.kref); fbo->base.destroy = &ttm_transfered_destroy; fbo->base.acc_size = 0; - fbo->base.pin_count = 1; + fbo->base.pin_count = 0; if (bo->type != ttm_bo_type_sg) fbo->base.base.resv = &fbo->base.base._resv; @@ -319,6 +319,8 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, ret = dma_resv_trylock(&fbo->base.base._resv); WARN_ON(!ret); + ttm_bo_move_to_lru_tail_unlocked(&fbo->base); + *new_obj = &fbo->base; return 0; } @@ -429,9 +431,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, map->virtual = NULL; map->bo = bo; - if (num_pages > bo->num_pages) + if (num_pages > bo->mem.num_pages) return -EINVAL; - if (start_page > bo->num_pages) + if ((start_page + num_pages) > bo->mem.num_pages) return -EINVAL; ret = ttm_mem_io_reserve(bo->bdev, &bo->mem); @@ -483,14 +485,14 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) if (mem->bus.is_iomem) { void __iomem *vaddr_iomem; - size_t size = bo->num_pages << PAGE_SHIFT; if (mem->bus.addr) vaddr_iomem = (void __iomem *)mem->bus.addr; else if (mem->bus.caching == ttm_write_combined) - vaddr_iomem = ioremap_wc(mem->bus.offset, size); + vaddr_iomem = ioremap_wc(mem->bus.offset, + bo->base.size); else - vaddr_iomem = ioremap(mem->bus.offset, size); + vaddr_iomem = ioremap(mem->bus.offset, bo->base.size); if (!vaddr_iomem) return -ENOMEM; @@ -515,7 +517,7 @@ int ttm_bo_vmap(struct ttm_buffer_object *bo, struct dma_buf_map *map) * or to make the buffer object look contiguous. */ prot = ttm_io_prot(bo, mem, PAGE_KERNEL); - vaddr = vmap(ttm->pages, bo->num_pages, 0, prot); + vaddr = vmap(ttm->pages, ttm->num_pages, 0, prot); if (!vaddr) return -ENOMEM; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 2944fa0af493..6dc96cf66744 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -31,7 +31,6 @@ #define pr_fmt(fmt) "[TTM] " fmt -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> #include <drm/drm_vma_manager.h> @@ -199,7 +198,7 @@ static vm_fault_t ttm_bo_vm_insert_huge(struct vm_fault *vmf, /* Fault should not cross bo boundary. 
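The ttm_bo_kmap() fix above checks start_page + num_pages against bo->mem.num_pages, rejecting mappings that begin in range but run past the end. A hedged usage sketch of this era's kmap API (foo_map_pages is hypothetical):

#include <linux/string.h>
#include <drm/ttm/ttm_bo_api.h>

static int foo_map_pages(struct ttm_buffer_object *bo,
			 unsigned long start_page, unsigned long num_pages)
{
	struct ttm_bo_kmap_obj map;
	bool is_iomem;
	void *vaddr;
	int ret;

	/* Fails with -EINVAL when start_page + num_pages would run
	 * past bo->mem.num_pages, per the check above. */
	ret = ttm_bo_kmap(bo, start_page, num_pages, &map);
	if (ret)
		return ret;

	vaddr = ttm_kmap_obj_virtual(&map, &is_iomem);
	if (!is_iomem)
		memset(vaddr, 0, num_pages << PAGE_SHIFT);

	ttm_bo_kunmap(&map);
	return 0;
}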
*/ page_offset &= ~(fault_page_size - 1); - if (page_offset + fault_page_size > bo->num_pages) + if (page_offset + fault_page_size > bo->mem.num_pages) goto out_fallback; if (bo->mem.bus.is_iomem) @@ -307,7 +306,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, page_last = vma_pages(vma) + vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); - if (unlikely(page_offset >= bo->num_pages)) + if (unlikely(page_offset >= bo->mem.num_pages)) return VM_FAULT_SIGBUS; prot = ttm_io_prot(bo, &bo->mem, prot); @@ -470,7 +469,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, << PAGE_SHIFT); int ret; - if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages) + if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->mem.num_pages) return -EIO; ret = ttm_bo_reserve(bo, true, false, NULL); diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 8a8f1a6a83a6..9fa36ed59429 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -55,7 +55,7 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - ttm_bo_move_to_lru_tail(bo, NULL); + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); dma_resv_unlock(bo->base.resv); } spin_unlock(&ttm_bo_glob.lru_lock); @@ -162,7 +162,7 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, dma_resv_add_shared_fence(bo->base.resv, fence); else dma_resv_add_excl_fence(bo->base.resv, fence); - ttm_bo_move_to_lru_tail(bo, NULL); + ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL); dma_resv_unlock(bo->base.resv); } spin_unlock(&ttm_bo_glob.lru_lock); diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 5ed1fc8f2ace..a3bfbd9cea68 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -29,7 +29,6 @@ #define pr_fmt(fmt) "[TTM] " fmt #include <drm/ttm/ttm_memory.h> -#include <drm/ttm/ttm_module.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/wait.h> @@ -39,6 +38,8 @@ #include <linux/swap.h> #include <drm/ttm/ttm_pool.h> +#include "ttm_module.h" + #define TTM_MEMORY_ALLOC_RETRIES 4 struct ttm_mem_global ttm_mem_glob; diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c index 6ff40c041d79..c0906437cb1c 100644 --- a/drivers/gpu/drm/ttm/ttm_module.c +++ b/drivers/gpu/drm/ttm/ttm_module.c @@ -32,9 +32,10 @@ #include <linux/module.h> #include <linux/device.h> #include <linux/sched.h> -#include <drm/ttm/ttm_module.h> #include <drm/drm_sysfs.h> +#include "ttm_module.h" + static DECLARE_WAIT_QUEUE_HEAD(exit_q); static atomic_t device_released; diff --git a/drivers/gpu/drm/ttm/ttm_module.h b/drivers/gpu/drm/ttm/ttm_module.h new file mode 100644 index 000000000000..45fa318c1585 --- /dev/null +++ b/drivers/gpu/drm/ttm/ttm_module.h @@ -0,0 +1,40 @@ +/************************************************************************** + * + * Copyright 2008-2009 VMware, Inc., Palo Alto, CA., USA + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> + */ + +#ifndef _TTM_MODULE_H_ +#define _TTM_MODULE_H_ + +#include <linux/kernel.h> +struct kobject; + +#define TTM_PFX "[TTM] " +extern struct kobject *ttm_get_kobj(void); + +#endif /* _TTM_MODULE_H_ */ diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index 1b96780b4989..7b2f60616750 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -63,6 +63,9 @@ static atomic_long_t allocated_pages; static struct ttm_pool_type global_write_combined[MAX_ORDER]; static struct ttm_pool_type global_uncached[MAX_ORDER]; +static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER]; +static struct ttm_pool_type global_dma32_uncached[MAX_ORDER]; + static spinlock_t shrinker_lock; static struct list_head shrinker_list; static struct shrinker mm_shrinker; @@ -236,21 +239,6 @@ static struct page *ttm_pool_type_take(struct ttm_pool_type *pt) return p; } -/* Count the number of pages available in a pool_type */ -static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt) -{ - unsigned int count = 0; - struct page *p; - - spin_lock(&pt->lock); - /* Only used for debugfs, the overhead doesn't matter */ - list_for_each_entry(p, &pt->pages, lru) - ++count; - spin_unlock(&pt->lock); - - return count; -} - /* Initialize and add a pool type to the global shrinker list */ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool, enum ttm_caching caching, unsigned int order) @@ -290,8 +278,14 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool, #ifdef CONFIG_X86 switch (caching) { case ttm_write_combined: + if (pool->use_dma32) + return &global_dma32_write_combined[order]; + return &global_write_combined[order]; case ttm_uncached: + if (pool->use_dma32) + return &global_dma32_uncached[order]; + return &global_uncached[order]; default: break; @@ -534,6 +528,20 @@ void ttm_pool_fini(struct ttm_pool *pool) EXPORT_SYMBOL(ttm_pool_fini); #ifdef CONFIG_DEBUG_FS +/* Count the number of pages available in a pool_type */ +static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt) +{ + unsigned int count = 0; + struct page *p; + + spin_lock(&pt->lock); + /* Only used for debugfs, the overhead doesn't matter */ + list_for_each_entry(p, 
&pt->pages, lru) + ++count; + spin_unlock(&pt->lock); + + return count; +} /* Dump information about the different pool types */ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt, @@ -570,6 +578,11 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m) seq_puts(m, "uc\t:"); ttm_pool_debugfs_orders(global_uncached, m); + seq_puts(m, "wc 32\t:"); + ttm_pool_debugfs_orders(global_dma32_write_combined, m); + seq_puts(m, "uc 32\t:"); + ttm_pool_debugfs_orders(global_dma32_uncached, m); + for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) { seq_puts(m, "DMA "); switch (i) { @@ -640,6 +653,11 @@ int ttm_pool_mgr_init(unsigned long num_pages) ttm_pool_type_init(&global_write_combined[i], NULL, ttm_write_combined, i); ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i); + + ttm_pool_type_init(&global_dma32_write_combined[i], NULL, + ttm_write_combined, i); + ttm_pool_type_init(&global_dma32_uncached[i], NULL, + ttm_uncached, i); } mm_shrinker.count_objects = ttm_pool_shrinker_count; @@ -660,6 +678,9 @@ void ttm_pool_mgr_fini(void) for (i = 0; i < MAX_ORDER; ++i) { ttm_pool_type_fini(&global_write_combined[i]); ttm_pool_type_fini(&global_uncached[i]); + + ttm_pool_type_fini(&global_dma32_write_combined[i]); + ttm_pool_type_fini(&global_dma32_uncached[i]); } unregister_shrinker(&mm_shrinker); diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index e0952444cea9..a39305f742da 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -29,7 +29,6 @@ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com> */ -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> #include <drm/drm_mm.h> diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index da9eeffe0c6d..7f75a13163f0 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -129,7 +129,7 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm, uint32_t page_flags, enum ttm_caching caching) { - ttm->num_pages = bo->num_pages; + ttm->num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT; ttm->caching = ttm_cached; ttm->page_flags = page_flags; ttm->dma_address = NULL; @@ -162,19 +162,6 @@ void ttm_tt_fini(struct ttm_tt *ttm) } EXPORT_SYMBOL(ttm_tt_fini); -int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, - uint32_t page_flags, enum ttm_caching caching) -{ - ttm_tt_init_fields(ttm, bo, page_flags, caching); - - if (ttm_dma_tt_alloc_page_directory(ttm)) { - pr_err("Failed allocating page table\n"); - return -ENOMEM; - } - return 0; -} -EXPORT_SYMBOL(ttm_dma_tt_init); - int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, uint32_t page_flags, enum ttm_caching caching) { diff --git a/drivers/gpu/drm/tve200/tve200_display.c b/drivers/gpu/drm/tve200/tve200_display.c index 17ff24d999d1..cb0e837d3dba 100644 --- a/drivers/gpu/drm/tve200/tve200_display.c +++ b/drivers/gpu/drm/tve200/tve200_display.c @@ -11,7 +11,6 @@ */ #include <linux/clk.h> -#include <linux/version.h> #include <linux/dma-buf.h> #include <linux/of_graph.h> #include <linux/delay.h> diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index 07140e0b90a3..7fa71c8bb828 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -35,7 +35,6 @@ #include <linux/platform_device.h> #include <linux/shmem_fs.h> #include <linux/slab.h> -#include <linux/version.h> #include <drm/drm_atomic_helper.h> #include 
<drm/drm_bridge.h> diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index b5a8dd9fdf02..9269092697d8 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -38,8 +38,6 @@ static const struct drm_driver driver = { .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET, /* GEM hooks */ - .gem_create_object = drm_gem_shmem_create_object_cached, - .fops = &udl_driver_fops, DRM_GEM_SHMEM_DRIVER_OPS, diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index 8b52cb25877c..6a8731ab9d7d 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -78,7 +78,7 @@ struct drm_gem_object *v3d_create_object(struct drm_device *dev, size_t size) obj = &bo->base.base; obj->funcs = &v3d_gem_funcs; - + bo->base.map_wc = true; INIT_LIST_HEAD(&bo->unref_head); return &bo->base.base; diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 42d401fd244e..99e22beea90b 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -232,8 +232,8 @@ static int v3d_platform_drm_probe(struct platform_device *pdev) return ret; mmu_debug = V3D_READ(V3D_MMU_DEBUG_INFO); - dev->coherent_dma_mask = - DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH)); + dma_set_mask_and_coherent(dev, + DMA_BIT_MASK(30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_PA_WIDTH))); v3d->va_width = 30 + V3D_GET_FIELD(mmu_debug, V3D_MMU_VA_WIDTH); ident1 = V3D_READ(V3D_HUB_IDENT1); diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index c88686489b88..e714d5318f30 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -178,10 +178,7 @@ v3d_hub_irq(int irq, void *arg) }; const char *client = "?"; - V3D_WRITE(V3D_MMU_CTL, - V3D_READ(V3D_MMU_CTL) & (V3D_MMU_CTL_CAP_EXCEEDED | - V3D_MMU_CTL_PT_INVALID | - V3D_MMU_CTL_WRITE_VIOLATION)); + V3D_WRITE(V3D_MMU_CTL, V3D_READ(V3D_MMU_CTL)); if (v3d->ver >= 41) { axi_id = axi_id >> 5; @@ -217,7 +214,7 @@ v3d_irq_init(struct v3d_dev *v3d) V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS); V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS); - irq1 = platform_get_irq(v3d_to_pdev(v3d), 1); + irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1); if (irq1 == -EPROBE_DEFER) return irq1; if (irq1 > 0) { diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index f3eac72cb46e..e534896b6cfd 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -51,7 +51,6 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (IS_ERR(vbox)) return PTR_ERR(vbox); - vbox->ddev.pdev = pdev; pci_set_drvdata(pdev, vbox); mutex_init(&vbox->hw_mutex); @@ -109,15 +108,16 @@ static void vbox_pci_remove(struct pci_dev *pdev) static int vbox_pm_suspend(struct device *dev) { struct vbox_private *vbox = dev_get_drvdata(dev); + struct pci_dev *pdev = to_pci_dev(dev); int error; error = drm_mode_config_helper_suspend(&vbox->ddev); if (error) return error; - pci_save_state(vbox->ddev.pdev); - pci_disable_device(vbox->ddev.pdev); - pci_set_power_state(vbox->ddev.pdev, PCI_D3hot); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); return 0; } @@ -125,8 +125,9 @@ static int vbox_pm_suspend(struct device *dev) static int vbox_pm_resume(struct device *dev) { struct vbox_private *vbox = dev_get_drvdata(dev); + struct pci_dev *pdev = to_pci_dev(dev); - if (pci_enable_device(vbox->ddev.pdev)) + if (pci_enable_device(pdev)) 
return -EIO; return drm_mode_config_helper_resume(&vbox->ddev); diff --git a/drivers/gpu/drm/vboxvideo/vbox_irq.c b/drivers/gpu/drm/vboxvideo/vbox_irq.c index 631657fa554f..b3ded68603ba 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_irq.c +++ b/drivers/gpu/drm/vboxvideo/vbox_irq.c @@ -170,10 +170,12 @@ static void vbox_hotplug_worker(struct work_struct *work) int vbox_irq_init(struct vbox_private *vbox) { + struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev); + INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker); vbox_update_mode_hints(vbox); - return drm_irq_install(&vbox->ddev, vbox->ddev.pdev->irq); + return drm_irq_install(&vbox->ddev, pdev->irq); } void vbox_irq_fini(struct vbox_private *vbox) diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c index d68d9bad7674..f28779715ccd 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_main.c +++ b/drivers/gpu/drm/vboxvideo/vbox_main.c @@ -8,7 +8,9 @@ * Hans de Goede <[email protected]> */ +#include <linux/pci.h> #include <linux/vbox_err.h> + #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_damage_helper.h> @@ -30,6 +32,7 @@ void vbox_report_caps(struct vbox_private *vbox) static int vbox_accel_init(struct vbox_private *vbox) { + struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev); struct vbva_buffer *vbva; unsigned int i; @@ -41,7 +44,7 @@ static int vbox_accel_init(struct vbox_private *vbox) /* Take a command buffer for each screen from the end of usable VRAM. */ vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE; - vbox->vbva_buffers = pci_iomap_range(vbox->ddev.pdev, 0, + vbox->vbva_buffers = pci_iomap_range(pdev, 0, vbox->available_vram_size, vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE); @@ -106,6 +109,7 @@ bool vbox_check_supported(u16 id) int vbox_hw_init(struct vbox_private *vbox) { + struct pci_dev *pdev = to_pci_dev(vbox->ddev.dev); int ret = -ENOMEM; vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA); @@ -115,7 +119,7 @@ int vbox_hw_init(struct vbox_private *vbox) /* Map guest-heap at end of vram */ vbox->guest_heap = - pci_iomap_range(vbox->ddev.pdev, 0, GUEST_HEAP_OFFSET(vbox), + pci_iomap_range(pdev, 0, GUEST_HEAP_OFFSET(vbox), GUEST_HEAP_SIZE); if (!vbox->guest_heap) return -ENOMEM; diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c index f5a06675da43..0066a3c1dfc9 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c +++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c @@ -15,8 +15,9 @@ int vbox_mm_init(struct vbox_private *vbox) struct drm_vram_mm *vmm; int ret; struct drm_device *dev = &vbox->ddev; + struct pci_dev *pdev = to_pci_dev(dev->dev); - vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0), + vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(pdev, 0), vbox->available_vram_size); if (IS_ERR(vmm)) { ret = PTR_ERR(vmm); @@ -24,8 +25,8 @@ int vbox_mm_init(struct vbox_private *vbox) return ret; } - vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), - pci_resource_len(dev->pdev, 0)); + vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); return 0; } diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 469d1b4f2643..fddaeb0b09c1 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -21,7 +21,7 @@ #include "vc4_drv.h" #include "uapi/drm/vc4_drm.h" -static vm_fault_t vc4_fault(struct vm_fault *vmf); +static const struct drm_gem_object_funcs vc4_gem_object_funcs; static const char * const bo_type_names[] = { 
"kernel", @@ -376,20 +376,6 @@ out: return bo; } -static const struct vm_operations_struct vc4_vm_ops = { - .fault = vc4_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - -static const struct drm_gem_object_funcs vc4_gem_object_funcs = { - .free = vc4_free_object, - .export = vc4_prime_export, - .get_sg_table = drm_gem_cma_prime_get_sg_table, - .vmap = vc4_prime_vmap, - .vm_ops = &vc4_vm_ops, -}; - /** * vc4_create_object - Implementation of driver->gem_create_object. * @dev: DRM device @@ -538,7 +524,7 @@ static void vc4_bo_cache_free_old(struct drm_device *dev) /* Called on the last userspace/kernel unreference of the BO. Returns * it to the BO cache if possible, otherwise frees it. */ -void vc4_free_object(struct drm_gem_object *gem_bo) +static void vc4_free_object(struct drm_gem_object *gem_bo) { struct drm_device *dev = gem_bo->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); @@ -673,7 +659,7 @@ static void vc4_bo_cache_time_timer(struct timer_list *t) schedule_work(&vc4->bo_cache.time_work); } -struct dma_buf * vc4_prime_export(struct drm_gem_object *obj, int flags) +static struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags) { struct vc4_bo *bo = to_vc4_bo(obj); struct dma_buf *dmabuf; @@ -718,19 +704,9 @@ static vm_fault_t vc4_fault(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } -int vc4_mmap(struct file *filp, struct vm_area_struct *vma) +static int vc4_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { - struct drm_gem_object *gem_obj; - unsigned long vm_pgoff; - struct vc4_bo *bo; - int ret; - - ret = drm_gem_mmap(filp, vma); - if (ret) - return ret; - - gem_obj = vma->vm_private_data; - bo = to_vc4_bo(gem_obj); + struct vc4_bo *bo = to_vc4_bo(obj); if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n"); @@ -744,72 +720,23 @@ int vc4_mmap(struct file *filp, struct vm_area_struct *vma) return -EINVAL; } - /* - * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the - * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map - * the whole buffer. - */ - vma->vm_flags &= ~VM_PFNMAP; - - /* This ->vm_pgoff dance is needed to make all parties happy: - * - dma_mmap_wc() uses ->vm_pgoff as an offset within the allocated - * mem-region, hence the need to set it to zero (the value set by - * the DRM core is a virtual offset encoding the GEM object-id) - * - the mmap() core logic needs ->vm_pgoff to be restored to its - * initial value before returning from this function because it - * encodes the offset of this GEM in the dev->anon_inode pseudo-file - * and this information will be used when we invalidate userspace - * mappings with drm_vma_node_unmap() (called from vc4_gem_purge()). 
- */ - vm_pgoff = vma->vm_pgoff; - vma->vm_pgoff = 0; - ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr, - bo->base.paddr, vma->vm_end - vma->vm_start); - vma->vm_pgoff = vm_pgoff; - - if (ret) - drm_gem_vm_close(vma); - - return ret; + return drm_gem_cma_mmap(obj, vma); } -int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) -{ - struct vc4_bo *bo = to_vc4_bo(obj); - - if (bo->validated_shader && (vma->vm_flags & VM_WRITE)) { - DRM_DEBUG("mmaping of shader BOs for writing not allowed.\n"); - return -EINVAL; - } - - return drm_gem_cma_prime_mmap(obj, vma); -} - -int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map) -{ - struct vc4_bo *bo = to_vc4_bo(obj); - - if (bo->validated_shader) { - DRM_DEBUG("mmaping of shader BOs not allowed.\n"); - return -EINVAL; - } - - return drm_gem_cma_prime_vmap(obj, map); -} - -struct drm_gem_object * -vc4_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, - struct sg_table *sgt) -{ - struct drm_gem_object *obj; - - obj = drm_gem_cma_prime_import_sg_table(dev, attach, sgt); - if (IS_ERR(obj)) - return obj; +static const struct vm_operations_struct vc4_vm_ops = { + .fault = vc4_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; - return obj; -} +static const struct drm_gem_object_funcs vc4_gem_object_funcs = { + .free = vc4_free_object, + .export = vc4_prime_export, + .get_sg_table = drm_gem_cma_get_sg_table, + .vmap = drm_gem_cma_vmap, + .mmap = vc4_gem_object_mmap, + .vm_ops = &vc4_vm_ops, +}; static int vc4_grab_bin_bo(struct vc4_dev *vc4, struct vc4_file *vc4file) { diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index ea710beb8e00..269390bc586e 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -403,7 +403,9 @@ static void require_hvs_enabled(struct drm_device *dev) SCALER_DISPCTRL_ENABLE); } -static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel) +static int vc4_crtc_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state, + unsigned int channel) { struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc); struct vc4_encoder *vc4_encoder = to_vc4_encoder(encoder); @@ -435,13 +437,13 @@ static int vc4_crtc_disable(struct drm_crtc *crtc, unsigned int channel) mdelay(20); if (vc4_encoder && vc4_encoder->post_crtc_disable) - vc4_encoder->post_crtc_disable(encoder); + vc4_encoder->post_crtc_disable(encoder, state); vc4_crtc_pixelvalve_reset(crtc); vc4_hvs_stop_channel(dev, channel); if (vc4_encoder && vc4_encoder->post_crtc_powerdown) - vc4_encoder->post_crtc_powerdown(encoder); + vc4_encoder->post_crtc_powerdown(encoder, state); return 0; } @@ -468,7 +470,7 @@ int vc4_crtc_disable_at_boot(struct drm_crtc *crtc) if (channel < 0) return 0; - return vc4_crtc_disable(crtc, channel); + return vc4_crtc_disable(crtc, NULL, channel); } static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, @@ -484,7 +486,7 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, /* Disable vblank irq handling before crtc is disabled. 
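With the mmap hook moved into vc4_gem_object_funcs, the driver keeps only its policy checks and hands the actual mapping to drm_gem_cma_mmap(), dropping the hand-rolled vm_pgoff juggling removed above. A standalone analogy of that validate-then-delegate shape (all names hypothetical, not the DRM API):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	bool validated_shader;		/* stands in for bo->validated_shader */
};

static int generic_mmap(struct obj *o, bool write)
{
	(void)o; (void)write;		/* stands in for drm_gem_cma_mmap() */
	return 0;
}

static int obj_mmap(struct obj *o, bool write)
{
	/* Policy first, as in vc4_gem_object_mmap() refusing writable
	 * mappings of validated shader BOs... */
	if (o->validated_shader && write)
		return -EINVAL;
	/* ...then the shared helper does the real work. */
	return generic_mmap(o, write);
}

int main(void)
{
	struct obj shader = { .validated_shader = true };

	printf("writable map of shader BO: %d\n", obj_mmap(&shader, true));
	return 0;
}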
*/ drm_crtc_vblank_off(crtc); - vc4_crtc_disable(crtc, old_vc4_state->assigned_channel); + vc4_crtc_disable(crtc, state, old_vc4_state->assigned_channel); /* * Make sure we issue a vblank event after disabling the CRTC if @@ -503,8 +505,6 @@ static void vc4_crtc_atomic_disable(struct drm_crtc *crtc, static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { - struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, - crtc); struct drm_device *dev = crtc->dev; struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct drm_encoder *encoder = vc4_get_crtc_encoder(crtc); @@ -517,17 +517,17 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, */ drm_crtc_vblank_on(crtc); - vc4_hvs_atomic_enable(crtc, old_state); + vc4_hvs_atomic_enable(crtc, state); if (vc4_encoder->pre_crtc_configure) - vc4_encoder->pre_crtc_configure(encoder); + vc4_encoder->pre_crtc_configure(encoder, state); vc4_crtc_config_pv(crtc); CRTC_WRITE(PV_CONTROL, CRTC_READ(PV_CONTROL) | PV_CONTROL_EN); if (vc4_encoder->pre_crtc_enable) - vc4_encoder->pre_crtc_enable(encoder); + vc4_encoder->pre_crtc_enable(encoder, state); /* When feeding the transposer block the pixelvalve is unneeded and * should not be enabled. @@ -536,7 +536,7 @@ static void vc4_crtc_atomic_enable(struct drm_crtc *crtc, CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN); if (vc4_encoder->post_crtc_enable) - vc4_encoder->post_crtc_enable(encoder); + vc4_encoder->post_crtc_enable(encoder, state); } static enum drm_mode_status vc4_crtc_mode_valid(struct drm_crtc *crtc, @@ -593,7 +593,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc, struct drm_connector_state *conn_state; int ret, i; - ret = vc4_hvs_atomic_check(crtc, crtc_state); + ret = vc4_hvs_atomic_check(crtc, state); if (ret) return ret; @@ -697,7 +697,6 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) container_of(cb, struct vc4_async_flip_state, cb); struct drm_crtc *crtc = flip_state->crtc; struct drm_device *dev = crtc->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_plane *plane = crtc->primary; vc4_plane_async_set_fb(plane, flip_state->fb); @@ -729,8 +728,6 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) } kfree(flip_state); - - up(&vc4->async_modeset); } /* Implements async (non-vblank-synced) page flips. @@ -745,7 +742,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, uint32_t flags) { struct drm_device *dev = crtc->dev; - struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_plane *plane = crtc->primary; int ret = 0; struct vc4_async_flip_state *flip_state; @@ -774,15 +770,6 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, flip_state->crtc = crtc; flip_state->event = event; - /* Make sure all other async modesetes have landed. */ - ret = down_interruptible(&vc4->async_modeset); - if (ret) { - drm_framebuffer_put(fb); - vc4_bo_dec_usecnt(bo); - kfree(flip_state); - return ret; - } - /* Save the current FB before it's replaced by the new one in * drm_atomic_set_fb_for_plane(). 
We'll need the old FB in * vc4_async_page_flip_complete() to decrement the BO usecnt and keep @@ -884,7 +871,6 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = { .reset = vc4_crtc_reset, .atomic_duplicate_state = vc4_crtc_duplicate_state, .atomic_destroy_state = vc4_crtc_destroy_state, - .gamma_set = drm_atomic_helper_legacy_gamma_set, .enable_vblank = vc4_enable_vblank, .disable_vblank = vc4_disable_vblank, .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 2cd97a39c286..556ad0f02a0d 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -140,17 +140,7 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file) kfree(vc4file); } -static const struct file_operations vc4_drm_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .mmap = vc4_mmap, - .poll = drm_poll, - .read = drm_read, - .compat_ioctl = drm_compat_ioctl, - .llseek = noop_llseek, -}; +DEFINE_DRM_GEM_FOPS(vc4_drm_fops); static const struct drm_ioctl_desc vc4_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW), @@ -190,12 +180,7 @@ static struct drm_driver vc4_drm_driver = { .gem_create_object = vc4_create_object, - .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_import_sg_table = vc4_prime_import_sg_table, - .gem_prime_mmap = vc4_prime_mmap, - - .dumb_create = vc4_dumb_create, + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_dumb_create), .ioctls = vc4_drm_ioctls, .num_ioctls = ARRAY_SIZE(vc4_drm_ioctls), diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 137c382256d5..a7500716cf3f 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -77,7 +77,6 @@ struct vc4_dev { struct vc4_hvs *hvs; struct vc4_v3d *v3d; struct vc4_dpi *dpi; - struct vc4_dsi *dsi1; struct vc4_vec *vec; struct vc4_txp *txp; @@ -215,10 +214,9 @@ struct vc4_dev { struct work_struct reset_work; } hangcheck; - struct semaphore async_modeset; - struct drm_modeset_lock ctm_state_lock; struct drm_private_obj ctm_manager; + struct drm_private_obj hvs_channels; struct drm_private_obj load_tracker; /* List of vc4_debugfs_info_entry for adding to debugfs once @@ -443,12 +441,12 @@ struct vc4_encoder { enum vc4_encoder_type type; u32 clock_select; - void (*pre_crtc_configure)(struct drm_encoder *encoder); - void (*pre_crtc_enable)(struct drm_encoder *encoder); - void (*post_crtc_enable)(struct drm_encoder *encoder); + void (*pre_crtc_configure)(struct drm_encoder *encoder, struct drm_atomic_state *state); + void (*pre_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state); + void (*post_crtc_enable)(struct drm_encoder *encoder, struct drm_atomic_state *state); - void (*post_crtc_disable)(struct drm_encoder *encoder); - void (*post_crtc_powerdown)(struct drm_encoder *encoder); + void (*post_crtc_disable)(struct drm_encoder *encoder, struct drm_atomic_state *state); + void (*post_crtc_powerdown)(struct drm_encoder *encoder, struct drm_atomic_state *state); }; static inline struct vc4_encoder * @@ -531,6 +529,9 @@ struct vc4_crtc_state { unsigned int top; unsigned int bottom; } margins; + + /* Transitional state below, only valid during atomic commits */ + bool update_muxing; }; #define VC4_HVS_CHANNEL_DISABLED ((unsigned int)-1) @@ -781,13 +782,11 @@ struct vc4_validated_shader_info { /* vc4_bo.c */ 
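The vc4_encoder callback signatures above now thread the in-flight drm_atomic_state through every pre/post CRTC hook instead of each hook digging state out of encoder->crtc. A standalone sketch of the pattern (toy types, not the DRM structs; note the boot-time disable path legitimately forwards NULL):

#include <stdio.h>

struct atomic_state { int seq; };

struct encoder_hooks {
	/* optional, exactly like vc4_encoder's callbacks */
	void (*post_crtc_disable)(struct atomic_state *state);
};

static void run_disable(const struct encoder_hooks *h,
			struct atomic_state *state)
{
	/* Callers guard each optional hook and forward whatever state
	 * they were handed. */
	if (h->post_crtc_disable)
		h->post_crtc_disable(state);
}

static void log_disable(struct atomic_state *state)
{
	printf("disable, state %p\n", (void *)state);
}

int main(void)
{
	struct encoder_hooks hooks = { .post_crtc_disable = log_disable };
	struct atomic_state st = { .seq = 1 };

	run_disable(&hooks, &st);
	run_disable(&hooks, NULL);	/* the disable-at-boot case */
	return 0;
}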
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size); -void vc4_free_object(struct drm_gem_object *gem_obj); struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size, bool from_cache, enum vc4_kernel_bo_type type); int vc4_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -struct dma_buf *vc4_prime_export(struct drm_gem_object *obj, int flags); int vc4_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, @@ -802,12 +801,6 @@ int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int vc4_label_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); -int vc4_mmap(struct file *filp, struct vm_area_struct *vma); -int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); -struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, - struct sg_table *sgt); -int vc4_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map); int vc4_bo_cache_init(struct drm_device *dev); int vc4_bo_inc_usecnt(struct vc4_bo *bo); void vc4_bo_dec_usecnt(struct vc4_bo *bo); @@ -912,11 +905,10 @@ void vc4_irq_reset(struct drm_device *dev); extern struct platform_driver vc4_hvs_driver; void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int output); int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output); -int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state); -void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state); -void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state); -void vc4_hvs_atomic_flush(struct drm_crtc *crtc, - struct drm_atomic_state *state); +int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state); +void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state); +void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state); +void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state); void vc4_hvs_dump_state(struct drm_device *dev); void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel); void vc4_hvs_mask_underrun(struct drm_device *dev, int channel); diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index 19aab4e7e209..a55256ed0955 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -306,11 +306,11 @@ # define DSI0_PHY_AFEC0_RESET BIT(11) # define DSI1_PHY_AFEC0_PD_BG BIT(11) # define DSI0_PHY_AFEC0_PD BIT(10) -# define DSI1_PHY_AFEC0_PD_DLANE3 BIT(10) +# define DSI1_PHY_AFEC0_PD_DLANE1 BIT(10) # define DSI0_PHY_AFEC0_PD_BG BIT(9) # define DSI1_PHY_AFEC0_PD_DLANE2 BIT(9) # define DSI0_PHY_AFEC0_PD_DLANE1 BIT(8) -# define DSI1_PHY_AFEC0_PD_DLANE1 BIT(8) +# define DSI1_PHY_AFEC0_PD_DLANE3 BIT(8) # define DSI_PHY_AFEC0_PTATADJ_MASK VC4_MASK(7, 4) # define DSI_PHY_AFEC0_PTATADJ_SHIFT 4 # define DSI_PHY_AFEC0_CTATADJ_MASK VC4_MASK(3, 0) @@ -493,6 +493,18 @@ */ #define DSI1_ID 0x8c +struct vc4_dsi_variant { + /* Whether we're on bcm2835's DSI0 or DSI1. */ + unsigned int port; + + bool broken_axi_workaround; + + const char *debugfs_name; + const struct debugfs_reg32 *regs; + size_t nregs; + +}; + /* General DSI hardware state. 
*/ struct vc4_dsi { struct platform_device *pdev; @@ -509,8 +521,7 @@ struct vc4_dsi { u32 *reg_dma_mem; dma_addr_t reg_paddr; - /* Whether we're on bcm2835's DSI0 or DSI1. */ - int port; + const struct vc4_dsi_variant *variant; /* DSI channel for the panel we're connected to. */ u32 channel; @@ -586,10 +597,10 @@ dsi_dma_workaround_write(struct vc4_dsi *dsi, u32 offset, u32 val) #define DSI_READ(offset) readl(dsi->regs + (offset)) #define DSI_WRITE(offset, val) dsi_dma_workaround_write(dsi, offset, val) #define DSI_PORT_READ(offset) \ - DSI_READ(dsi->port ? DSI1_##offset : DSI0_##offset) + DSI_READ(dsi->variant->port ? DSI1_##offset : DSI0_##offset) #define DSI_PORT_WRITE(offset, val) \ - DSI_WRITE(dsi->port ? DSI1_##offset : DSI0_##offset, val) -#define DSI_PORT_BIT(bit) (dsi->port ? DSI1_##bit : DSI0_##bit) + DSI_WRITE(dsi->variant->port ? DSI1_##offset : DSI0_##offset, val) +#define DSI_PORT_BIT(bit) (dsi->variant->port ? DSI1_##bit : DSI0_##bit) /* VC4 DSI encoder KMS struct */ struct vc4_dsi_encoder { @@ -837,7 +848,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) ret = pm_runtime_get_sync(dev); if (ret) { - DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->port); + DRM_ERROR("Failed to runtime PM enable on DSI%d\n", dsi->variant->port); return; } @@ -871,7 +882,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) DSI_PORT_WRITE(STAT, DSI_PORT_READ(STAT)); /* Set AFE CTR00/CTR1 to release powerdown of analog. */ - if (dsi->port == 0) { + if (dsi->variant->port == 0) { u32 afec0 = (VC4_SET_FIELD(7, DSI_PHY_AFEC0_PTATADJ) | VC4_SET_FIELD(7, DSI_PHY_AFEC0_CTATADJ)); @@ -1017,7 +1028,7 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) DSI_PORT_BIT(PHYC_CLANE_ENABLE) | ((dsi->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? 0 : DSI_PORT_BIT(PHYC_HS_CLK_CONTINUOUS)) | - (dsi->port == 0 ? + (dsi->variant->port == 0 ? VC4_SET_FIELD(lpx - 1, DSI0_PHYC_ESC_CLK_LPDT) : VC4_SET_FIELD(lpx - 1, DSI1_PHYC_ESC_CLK_LPDT))); @@ -1043,13 +1054,13 @@ static void vc4_dsi_encoder_enable(struct drm_encoder *encoder) DSI_DISP1_ENABLE); /* Ungate the block. */ - if (dsi->port == 0) + if (dsi->variant->port == 0) DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI0_CTRL_CTRL0); else DSI_PORT_WRITE(CTRL, DSI_PORT_READ(CTRL) | DSI1_CTRL_EN); /* Bring AFE out of reset. 
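The DSI_PORT_READ/WRITE/BIT macros above rely on token pasting to pick the DSI0_ or DSI1_ flavour of a definition from the variant's port at runtime. A standalone sketch of the idiom (register values hypothetical):

#include <stdio.h>

#define DSI0_CTRL 0x00	/* hypothetical offsets */
#define DSI1_CTRL 0x04

struct variant { unsigned int port; };
static const struct variant *v;

/* DSI1_##reg / DSI0_##reg mirrors DSI_PORT_BIT() and friends. */
#define DSI_PORT_REG(reg) (v->port ? DSI1_##reg : DSI0_##reg)

int main(void)
{
	static const struct variant dsi1 = { .port = 1 };

	v = &dsi1;
	printf("CTRL at %#x\n", DSI_PORT_REG(CTRL));	/* prints 0x4 */
	return 0;
}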
*/ - if (dsi->port == 0) { + if (dsi->variant->port == 0) { } else { DSI_PORT_WRITE(PHY_AFEC0, DSI_PORT_READ(PHY_AFEC0) & @@ -1313,8 +1324,32 @@ static const struct drm_encoder_helper_funcs vc4_dsi_encoder_helper_funcs = { .mode_fixup = vc4_dsi_encoder_mode_fixup, }; +static const struct vc4_dsi_variant bcm2711_dsi1_variant = { + .port = 1, + .debugfs_name = "dsi1_regs", + .regs = dsi1_regs, + .nregs = ARRAY_SIZE(dsi1_regs), +}; + +static const struct vc4_dsi_variant bcm2835_dsi0_variant = { + .port = 0, + .debugfs_name = "dsi0_regs", + .regs = dsi0_regs, + .nregs = ARRAY_SIZE(dsi0_regs), +}; + +static const struct vc4_dsi_variant bcm2835_dsi1_variant = { + .port = 1, + .broken_axi_workaround = true, + .debugfs_name = "dsi1_regs", + .regs = dsi1_regs, + .nregs = ARRAY_SIZE(dsi1_regs), +}; + static const struct of_device_id vc4_dsi_dt_match[] = { - { .compatible = "brcm,bcm2835-dsi1", (void *)(uintptr_t)1 }, + { .compatible = "brcm,bcm2711-dsi1", &bcm2711_dsi1_variant }, + { .compatible = "brcm,bcm2835-dsi0", &bcm2835_dsi0_variant }, + { .compatible = "brcm,bcm2835-dsi1", &bcm2835_dsi1_variant }, {} }; @@ -1325,7 +1360,7 @@ static void dsi_handle_error(struct vc4_dsi *dsi, if (!(stat & bit)) return; - DRM_ERROR("DSI%d: %s error\n", dsi->port, type); + DRM_ERROR("DSI%d: %s error\n", dsi->variant->port, type); *ret = IRQ_HANDLED; } @@ -1398,12 +1433,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi) struct device *dev = &dsi->pdev->dev; const char *parent_name = __clk_get_name(dsi->pll_phy_clock); static const struct { - const char *dsi0_name, *dsi1_name; + const char *name; int div; } phy_clocks[] = { - { "dsi0_byte", "dsi1_byte", 8 }, - { "dsi0_ddr2", "dsi1_ddr2", 4 }, - { "dsi0_ddr", "dsi1_ddr", 2 }, + { "byte", 8 }, + { "ddr2", 4 }, + { "ddr", 2 }, }; int i; @@ -1419,8 +1454,12 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi) for (i = 0; i < ARRAY_SIZE(phy_clocks); i++) { struct clk_fixed_factor *fix = &dsi->phy_clocks[i]; struct clk_init_data init; + char clk_name[16]; int ret; + snprintf(clk_name, sizeof(clk_name), + "dsi%u_%s", dsi->variant->port, phy_clocks[i].name); + /* We just use core fixed factor clock ops for the PHY * clocks. 
The clocks are actually gated by the * PHY_AFEC0_DDRCLK_EN bits, which we should be @@ -1437,10 +1476,7 @@ vc4_dsi_init_phy_clocks(struct vc4_dsi *dsi) memset(&init, 0, sizeof(init)); init.parent_names = &parent_name; init.num_parents = 1; - if (dsi->port == 1) - init.name = phy_clocks[i].dsi1_name; - else - init.name = phy_clocks[i].dsi0_name; + init.name = clk_name; init.ops = &clk_fixed_factor_ops; ret = devm_clk_hw_register(dev, &fix->hw); @@ -1459,7 +1495,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_dsi *dsi = dev_get_drvdata(dev); struct vc4_dsi_encoder *vc4_dsi_encoder; struct drm_panel *panel; @@ -1471,7 +1506,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) if (!match) return -ENODEV; - dsi->port = (uintptr_t)match->data; + dsi->variant = match->data; vc4_dsi_encoder = devm_kzalloc(dev, sizeof(*vc4_dsi_encoder), GFP_KERNEL); @@ -1488,13 +1523,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) return PTR_ERR(dsi->regs); dsi->regset.base = dsi->regs; - if (dsi->port == 0) { - dsi->regset.regs = dsi0_regs; - dsi->regset.nregs = ARRAY_SIZE(dsi0_regs); - } else { - dsi->regset.regs = dsi1_regs; - dsi->regset.nregs = ARRAY_SIZE(dsi1_regs); - } + dsi->regset.regs = dsi->variant->regs; + dsi->regset.nregs = dsi->variant->nregs; if (DSI_PORT_READ(ID) != DSI_ID_VALUE) { dev_err(dev, "Port returned 0x%08x for ID instead of 0x%08x\n", @@ -1502,11 +1532,11 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) return -ENODEV; } - /* DSI1 has a broken AXI slave that doesn't respond to writes - * from the ARM. It does handle writes from the DMA engine, + /* DSI1 on BCM2835/6/7 has a broken AXI slave that doesn't respond to + * writes from the ARM. It does handle writes from the DMA engine, * so set up a channel for talking to it. 
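The of_device_id table now carries full vc4_dsi_variant descriptors instead of a bare port number cast through a void pointer; the port field also feeds the "dsi%u_%s" clock naming used earlier, and the broken_axi_workaround flag gates the DMA channel set up just below. A standalone sketch of the compatible-string-to-variant lookup (hand-rolled matcher standing in for the OF core):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct dsi_variant {
	unsigned int port;
	bool broken_axi_workaround;
	const char *debugfs_name;
};

static const struct dsi_variant bcm2835_dsi1 = {
	.port = 1,
	.broken_axi_workaround = true,
	.debugfs_name = "dsi1_regs",
};

static const struct {
	const char *compatible;
	const struct dsi_variant *data;
} match_table[] = {
	{ "brcm,bcm2835-dsi1", &bcm2835_dsi1 },
	{ NULL, NULL },
};

static const struct dsi_variant *match_variant(const char *compatible)
{
	unsigned int i;

	for (i = 0; match_table[i].compatible; i++)
		if (!strcmp(match_table[i].compatible, compatible))
			return match_table[i].data;
	return NULL;
}

int main(void)
{
	const struct dsi_variant *var = match_variant("brcm,bcm2835-dsi1");
	char clk_name[16];

	if (!var)
		return 1;
	snprintf(clk_name, sizeof(clk_name), "dsi%u_%s", var->port, "byte");
	printf("%s: %s, dma workaround %d\n", var->debugfs_name, clk_name,
	       var->broken_axi_workaround);
	return 0;
}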
*/ - if (dsi->port == 1) { + if (dsi->variant->broken_axi_workaround) { dsi->reg_dma_mem = dma_alloc_coherent(dev, 4, &dsi->reg_dma_paddr, GFP_KERNEL); @@ -1612,9 +1642,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) if (ret) return ret; - if (dsi->port == 1) - vc4->dsi1 = dsi; - drm_simple_encoder_init(drm, dsi->encoder, DRM_MODE_ENCODER_DSI); drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs); @@ -1630,10 +1657,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) */ list_splice_init(&dsi->encoder->bridge_chain, &dsi->bridge_chain); - if (dsi->port == 0) - vc4_debugfs_add_regset32(drm, "dsi0_regs", &dsi->regset); - else - vc4_debugfs_add_regset32(drm, "dsi1_regs", &dsi->regset); + vc4_debugfs_add_regset32(drm, dsi->variant->debugfs_name, &dsi->regset); pm_runtime_enable(dev); @@ -1643,8 +1667,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) static void vc4_dsi_unbind(struct device *dev, struct device *master, void *data) { - struct drm_device *drm = dev_get_drvdata(master); - struct vc4_dev *vc4 = to_vc4_dev(drm); struct vc4_dsi *dsi = dev_get_drvdata(dev); if (dsi->bridge) @@ -1656,9 +1678,6 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master, */ list_splice_init(&dsi->bridge_chain, &dsi->encoder->bridge_chain); drm_encoder_cleanup(dsi->encoder); - - if (dsi->port == 1) - vc4->dsi1 = NULL; } static const struct component_ops vc4_dsi_ops = { diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index b80eb9d3d9d5..2e5449b25ce4 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -76,12 +76,25 @@ #define VC5_HDMI_VERTB_VSPO_SHIFT 16 #define VC5_HDMI_VERTB_VSPO_MASK VC4_MASK(29, 16) +#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_SHIFT 8 +#define VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK VC4_MASK(10, 8) + +#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_SHIFT 0 +#define VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK VC4_MASK(3, 0) + +#define VC5_HDMI_GCP_CONFIG_GCP_ENABLE BIT(31) + +#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_SHIFT 8 +#define VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK VC4_MASK(15, 8) + # define VC4_HD_M_SW_RST BIT(2) # define VC4_HD_M_ENABLE BIT(0) #define CEC_CLOCK_FREQ 40000 #define VC4_HSM_MID_CLOCK 149985000 +#define HDMI_14_MAX_TMDS_CLK (340 * 1000 * 1000) + static int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused) { struct drm_info_node *node = (struct drm_info_node *)m->private; @@ -170,16 +183,48 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector) static void vc4_hdmi_connector_reset(struct drm_connector *connector) { - drm_atomic_helper_connector_reset(connector); + struct vc4_hdmi_connector_state *old_state = + conn_state_to_vc4_hdmi_conn_state(connector->state); + struct vc4_hdmi_connector_state *new_state = + kzalloc(sizeof(*new_state), GFP_KERNEL); + + if (connector->state) + __drm_atomic_helper_connector_destroy_state(connector->state); + + kfree(old_state); + __drm_atomic_helper_connector_reset(connector, &new_state->base); + + if (!new_state) + return; + + new_state->base.max_bpc = 8; + new_state->base.max_requested_bpc = 8; drm_atomic_helper_connector_tv_reset(connector); } +static struct drm_connector_state * +vc4_hdmi_connector_duplicate_state(struct drm_connector *connector) +{ + struct drm_connector_state *conn_state = connector->state; + struct vc4_hdmi_connector_state *vc4_state = 
conn_state_to_vc4_hdmi_conn_state(conn_state); + struct vc4_hdmi_connector_state *new_state; + + new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); + if (!new_state) + return NULL; + + new_state->pixel_rate = vc4_state->pixel_rate; + __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); + + return &new_state->base; +} + static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { .detect = vc4_hdmi_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = vc4_hdmi_connector_destroy, .reset = vc4_hdmi_connector_reset, - .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_duplicate_state = vc4_hdmi_connector_duplicate_state, .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, }; @@ -200,12 +245,20 @@ static int vc4_hdmi_connector_init(struct drm_device *dev, vc4_hdmi->ddc); drm_connector_helper_add(connector, &vc4_hdmi_connector_helper_funcs); + /* + * Some of the properties below require access to state, like bpc. + * Allocate some default initial connector state with our reset helper. + */ + if (connector->funcs->reset) + connector->funcs->reset(connector); + /* Create and attach TV margin props to this connector. */ ret = drm_mode_create_tv_margin_properties(dev); if (ret) return ret; drm_connector_attach_tv_margin_properties(connector); + drm_connector_attach_max_bpc_property(connector, 8, 12); connector->polled = (DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT); @@ -219,7 +272,8 @@ static int vc4_hdmi_connector_init(struct drm_device *dev, } static int vc4_hdmi_stop_packet(struct drm_encoder *encoder, - enum hdmi_infoframe_type type) + enum hdmi_infoframe_type type, + bool poll) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); u32 packet_id = type - 0x80; @@ -227,6 +281,9 @@ static int vc4_hdmi_stop_packet(struct drm_encoder *encoder, HDMI_WRITE(HDMI_RAM_PACKET_CONFIG, HDMI_READ(HDMI_RAM_PACKET_CONFIG) & ~BIT(packet_id)); + if (!poll) + return 0; + return wait_for(!(HDMI_READ(HDMI_RAM_PACKET_STATUS) & BIT(packet_id)), 100); } @@ -253,7 +310,7 @@ static void vc4_hdmi_write_infoframe(struct drm_encoder *encoder, if (len < 0) return; - ret = vc4_hdmi_stop_packet(encoder, frame->any.type); + ret = vc4_hdmi_stop_packet(encoder, frame->any.type, true); if (ret) { DRM_ERROR("Failed to wait for infoframe to go idle: %d\n", ret); return; @@ -356,7 +413,8 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder) vc4_hdmi_set_audio_infoframe(encoder); } -static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder) +static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder, + struct drm_atomic_state *state) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); @@ -369,7 +427,8 @@ static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder) HDMI_READ(HDMI_VID_CTL) | VC4_HD_VID_CTL_BLANKPIX); } -static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder) +static void vc4_hdmi_encoder_post_crtc_powerdown(struct drm_encoder *encoder, + struct drm_atomic_state *state) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); int ret; @@ -468,6 +527,7 @@ static void vc5_hdmi_csc_setup(struct vc4_hdmi *vc4_hdmi, bool enable) } static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, + struct drm_connector_state *state, struct drm_display_mode *mode) { bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; @@ -511,7 +571,9 @@ static void vc4_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, 
HDMI_WRITE(HDMI_VERTB0, vertb_even); HDMI_WRITE(HDMI_VERTB1, vertb); } + static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, + struct drm_connector_state *state, struct drm_display_mode *mode) { bool hsync_pos = mode->flags & DRM_MODE_FLAG_PHSYNC; @@ -531,6 +593,9 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, mode->crtc_vsync_end - interlaced, VC4_HDMI_VERTB_VBP)); + unsigned char gcp; + bool gcp_en; + u32 reg; HDMI_WRITE(HDMI_VEC_INTERFACE_XBAR, 0x354021); HDMI_WRITE(HDMI_HORZA, @@ -556,6 +621,39 @@ static void vc5_hdmi_set_timings(struct vc4_hdmi *vc4_hdmi, HDMI_WRITE(HDMI_VERTB0, vertb_even); HDMI_WRITE(HDMI_VERTB1, vertb); + switch (state->max_bpc) { + case 12: + gcp = 6; + gcp_en = true; + break; + case 10: + gcp = 5; + gcp_en = true; + break; + case 8: + default: + gcp = 4; + gcp_en = false; + break; + } + + reg = HDMI_READ(HDMI_DEEP_COLOR_CONFIG_1); + reg &= ~(VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE_MASK | + VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH_MASK); + reg |= VC4_SET_FIELD(2, VC5_HDMI_DEEP_COLOR_CONFIG_1_INIT_PACK_PHASE) | + VC4_SET_FIELD(gcp, VC5_HDMI_DEEP_COLOR_CONFIG_1_COLOR_DEPTH); + HDMI_WRITE(HDMI_DEEP_COLOR_CONFIG_1, reg); + + reg = HDMI_READ(HDMI_GCP_WORD_1); + reg &= ~VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1_MASK; + reg |= VC4_SET_FIELD(gcp, VC5_HDMI_GCP_WORD_1_GCP_SUBPACKET_BYTE_1); + HDMI_WRITE(HDMI_GCP_WORD_1, reg); + + reg = HDMI_READ(HDMI_GCP_CONFIG); + reg &= ~VC5_HDMI_GCP_CONFIG_GCP_ENABLE; + reg |= gcp_en ? VC5_HDMI_GCP_CONFIG_GCP_ENABLE : 0; + HDMI_WRITE(HDMI_GCP_CONFIG, reg); + HDMI_WRITE(HDMI_CLOCK_STOP, 0); } @@ -583,8 +681,29 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi) "VC4_HDMI_FIFO_CTL_RECENTER_DONE"); } -static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder) +static struct drm_connector_state * +vc4_hdmi_encoder_get_connector_state(struct drm_encoder *encoder, + struct drm_atomic_state *state) +{ + struct drm_connector_state *conn_state; + struct drm_connector *connector; + unsigned int i; + + for_each_new_connector_in_state(state, connector, conn_state, i) { + if (conn_state->best_encoder == encoder) + return conn_state; + } + + return NULL; +} + +static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, + struct drm_atomic_state *state) { + struct drm_connector_state *conn_state = + vc4_hdmi_encoder_get_connector_state(encoder, state); + struct vc4_hdmi_connector_state *vc4_conn_state = + conn_state_to_vc4_hdmi_conn_state(conn_state); struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); unsigned long pixel_rate, hsm_rate; @@ -596,7 +715,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder) return; } - pixel_rate = mode->clock * 1000 * ((mode->flags & DRM_MODE_FLAG_DBLCLK) ? 
2 : 1); + pixel_rate = vc4_conn_state->pixel_rate; ret = clk_set_rate(vc4_hdmi->pixel_clock, pixel_rate); if (ret) { DRM_ERROR("Failed to set pixel clock rate: %d\n", ret); @@ -664,7 +783,7 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder) vc4_hdmi->variant->reset(vc4_hdmi); if (vc4_hdmi->variant->phy_init) - vc4_hdmi->variant->phy_init(vc4_hdmi, mode); + vc4_hdmi->variant->phy_init(vc4_hdmi, vc4_conn_state); HDMI_WRITE(HDMI_SCHEDULER_CONTROL, HDMI_READ(HDMI_SCHEDULER_CONTROL) | @@ -672,10 +791,11 @@ static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder) VC4_HDMI_SCHEDULER_CONTROL_IGNORE_VSYNC_PREDICTS); if (vc4_hdmi->variant->set_timings) - vc4_hdmi->variant->set_timings(vc4_hdmi, mode); + vc4_hdmi->variant->set_timings(vc4_hdmi, conn_state, mode); } -static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder) +static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder, + struct drm_atomic_state *state) { struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi_encoder *vc4_encoder = to_vc4_hdmi_encoder(encoder); @@ -697,7 +817,8 @@ static void vc4_hdmi_encoder_pre_crtc_enable(struct drm_encoder *encoder) HDMI_WRITE(HDMI_FIFO_CTL, VC4_HDMI_FIFO_CTL_MASTER_SLAVE_N); } -static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder) +static void vc4_hdmi_encoder_post_crtc_enable(struct drm_encoder *encoder, + struct drm_atomic_state *state) { struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); @@ -759,12 +880,68 @@ static void vc4_hdmi_encoder_enable(struct drm_encoder *encoder) { } +#define WIFI_2_4GHz_CH1_MIN_FREQ 2400000000ULL +#define WIFI_2_4GHz_CH1_MAX_FREQ 2422000000ULL + +static int vc4_hdmi_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct vc4_hdmi_connector_state *vc4_state = conn_state_to_vc4_hdmi_conn_state(conn_state); + struct drm_display_mode *mode = &crtc_state->adjusted_mode; + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + unsigned long long pixel_rate = mode->clock * 1000; + unsigned long long tmds_rate; + + if (vc4_hdmi->variant->unsupported_odd_h_timings && + ((mode->hdisplay % 2) || (mode->hsync_start % 2) || + (mode->hsync_end % 2) || (mode->htotal % 2))) + return -EINVAL; + + /* + * The 1440p@60 pixel rate is in the same range as the first + * WiFi channel (between 2.4GHz and 2.422GHz with 22MHz + * bandwidth). Slightly lower the frequency to bring it out of + * the WiFi range. 
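The checks that follow scale the dotclock by the 10x TMDS bit rate, dodge the 2.4 GHz band, then add the deep-colour overhead. A standalone arithmetic sketch (taking the common 241.5 MHz 2560x1440@60 dotclock as an assumed example input, with do_div() replaced by plain 64-bit division):

#include <assert.h>

#define WIFI_CH1_MIN 2400000000ULL
#define WIFI_CH1_MAX 2422000000ULL

int main(void)
{
	unsigned long long pixel_rate = 241500ULL * 1000;	/* kHz -> Hz */
	unsigned long long tmds_rate = pixel_rate * 10;		/* 10 TMDS bits/pixel */
	unsigned long long rate_12bpc, rate_10bpc;

	/* Nudge the clock below WiFi channel 1, as the mode->clock =
	 * 238560 fallback does. */
	if (tmds_rate >= WIFI_CH1_MIN && tmds_rate <= WIFI_CH1_MAX)
		pixel_rate = 238560ULL * 1000;

	/* Deep colour: 12 bpc costs x1.5, 10 bpc costs x1.25. */
	rate_12bpc = pixel_rate * 150 / 100;
	rate_10bpc = pixel_rate * 125 / 100;

	assert(pixel_rate * 10 < WIFI_CH1_MIN);
	assert(rate_12bpc == pixel_rate + pixel_rate / 2);
	assert(rate_10bpc == pixel_rate + pixel_rate / 4);
	return 0;
}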
+ */ + tmds_rate = pixel_rate * 10; + if (vc4_hdmi->disable_wifi_frequencies && + (tmds_rate >= WIFI_2_4GHz_CH1_MIN_FREQ && + tmds_rate <= WIFI_2_4GHz_CH1_MAX_FREQ)) { + mode->clock = 238560; + pixel_rate = mode->clock * 1000; + } + + if (conn_state->max_bpc == 12) { + pixel_rate = pixel_rate * 150; + do_div(pixel_rate, 100); + } else if (conn_state->max_bpc == 10) { + pixel_rate = pixel_rate * 125; + do_div(pixel_rate, 100); + } + + if (mode->flags & DRM_MODE_FLAG_DBLCLK) + pixel_rate = pixel_rate * 2; + + if (pixel_rate > vc4_hdmi->variant->max_pixel_clock) + return -EINVAL; + + vc4_state->pixel_rate = pixel_rate; + + return 0; +} + static enum drm_mode_status vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder, const struct drm_display_mode *mode) { struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + if (vc4_hdmi->variant->unsupported_odd_h_timings && + ((mode->hdisplay % 2) || (mode->hsync_start % 2) || + (mode->hsync_end % 2) || (mode->htotal % 2))) + return MODE_H_ILLEGAL; + if ((mode->clock * 1000) > vc4_hdmi->variant->max_pixel_clock) return MODE_CLOCK_HIGH; @@ -772,6 +949,7 @@ vc4_hdmi_encoder_mode_valid(struct drm_encoder *encoder, } static const struct drm_encoder_helper_funcs vc4_hdmi_encoder_helper_funcs = { + .atomic_check = vc4_hdmi_encoder_atomic_check, .mode_valid = vc4_hdmi_encoder_mode_valid, .disable = vc4_hdmi_encoder_disable, .enable = vc4_hdmi_encoder_enable, @@ -893,7 +1071,7 @@ static void vc4_hdmi_audio_reset(struct vc4_hdmi *vc4_hdmi) int ret; vc4_hdmi->audio.streaming = false; - ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO); + ret = vc4_hdmi_stop_packet(encoder, HDMI_INFOFRAME_TYPE_AUDIO, false); if (ret) dev_err(dev, "Failed to stop audio infoframe: %d\n", ret); @@ -1693,6 +1871,9 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) vc4_hdmi->hpd_active_low = hpd_gpio_flags & OF_GPIO_ACTIVE_LOW; } + vc4_hdmi->disable_wifi_frequencies = + of_property_read_bool(dev->of_node, "wifi-2.4ghz-coexistence"); + pm_runtime_enable(dev); drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS); @@ -1807,7 +1988,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = { .encoder_type = VC4_ENCODER_TYPE_HDMI0, .debugfs_name = "hdmi0_regs", .card_name = "vc4-hdmi-0", - .max_pixel_clock = 297000000, + .max_pixel_clock = HDMI_14_MAX_TMDS_CLK, .registers = vc5_hdmi_hdmi0_fields, .num_registers = ARRAY_SIZE(vc5_hdmi_hdmi0_fields), .phy_lane_mapping = { @@ -1816,6 +1997,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = { PHY_LANE_2, PHY_LANE_CK, }, + .unsupported_odd_h_timings = true, .init_resources = vc5_hdmi_init_resources, .csc_setup = vc5_hdmi_csc_setup, @@ -1832,7 +2014,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = { .encoder_type = VC4_ENCODER_TYPE_HDMI1, .debugfs_name = "hdmi1_regs", .card_name = "vc4-hdmi-1", - .max_pixel_clock = 297000000, + .max_pixel_clock = HDMI_14_MAX_TMDS_CLK, .registers = vc5_hdmi_hdmi1_fields, .num_registers = ARRAY_SIZE(vc5_hdmi_hdmi1_fields), .phy_lane_mapping = { @@ -1841,6 +2023,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = { PHY_LANE_CK, PHY_LANE_2, }, + .unsupported_odd_h_timings = true, .init_resources = vc5_hdmi_init_resources, .csc_setup = vc5_hdmi_csc_setup, diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index 63c6f8bddf1d..4c8994cfd932 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -21,10 +21,9 @@ to_vc4_hdmi_encoder(struct drm_encoder *encoder) 
return container_of(encoder, struct vc4_hdmi_encoder, base.base); } -struct drm_display_mode; - struct vc4_hdmi; struct vc4_hdmi_register; +struct vc4_hdmi_connector_state; enum vc4_hdmi_phy_channel { PHY_LANE_0 = 0, @@ -62,6 +61,9 @@ struct vc4_hdmi_variant { */ enum vc4_hdmi_phy_channel phy_lane_mapping[4]; + /* The BCM2711 cannot deal with odd horizontal pixel timings */ + bool unsupported_odd_h_timings; + /* Callback to get the resources (memory region, interrupts, * clocks, etc) for that variant. */ @@ -75,11 +77,12 @@ struct vc4_hdmi_variant { /* Callback to configure the video timings in the HDMI block */ void (*set_timings)(struct vc4_hdmi *vc4_hdmi, + struct drm_connector_state *state, struct drm_display_mode *mode); - /* Callback to initialize the PHY according to the mode */ + /* Callback to initialize the PHY according to the connector state */ void (*phy_init)(struct vc4_hdmi *vc4_hdmi, - struct drm_display_mode *mode); + struct vc4_hdmi_connector_state *vc4_conn_state); /* Callback to disable the PHY */ void (*phy_disable)(struct vc4_hdmi *vc4_hdmi); @@ -139,6 +142,14 @@ struct vc4_hdmi { int hpd_gpio; bool hpd_active_low; + /* + * On some systems (like the RPi4), some modes are in the same + * frequency range as the WiFi channels (1440p@60Hz for + * example). Should we take evasive actions because that system + * has a wifi adapter? + */ + bool disable_wifi_frequencies; + struct cec_adapter *cec_adap; struct cec_msg cec_rx_msg; bool cec_tx_ok; @@ -169,14 +180,25 @@ encoder_to_vc4_hdmi(struct drm_encoder *encoder) return container_of(_encoder, struct vc4_hdmi, encoder); } +struct vc4_hdmi_connector_state { + struct drm_connector_state base; + unsigned long long pixel_rate; +}; + +static inline struct vc4_hdmi_connector_state * +conn_state_to_vc4_hdmi_conn_state(struct drm_connector_state *conn_state) +{ + return container_of(conn_state, struct vc4_hdmi_connector_state, base); +} + void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, - struct drm_display_mode *mode); + struct vc4_hdmi_connector_state *vc4_conn_state); void vc4_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi); void vc4_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi); void vc4_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi); void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, - struct drm_display_mode *mode); + struct vc4_hdmi_connector_state *vc4_conn_state); void vc5_hdmi_phy_disable(struct vc4_hdmi *vc4_hdmi); void vc5_hdmi_phy_rng_enable(struct vc4_hdmi *vc4_hdmi); void vc5_hdmi_phy_rng_disable(struct vc4_hdmi *vc4_hdmi); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c index 057796b54c51..36535480f8e2 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi_phy.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi_phy.c @@ -127,7 +127,8 @@ #define OSCILLATOR_FREQUENCY 54000000 -void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode) +void vc4_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, + struct vc4_hdmi_connector_state *conn_state) { /* PHY should be in reset, like * vc4_hdmi_encoder_disable() does. 
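vc4_hdmi_connector_state keeps the base drm_connector_state as its first member so conn_state_to_vc4_hdmi_conn_state() can recover the wrapper with container_of() from any base pointer the core hands back. A standalone sketch of the idiom (toy types, container_of re-derived from offsetof):

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_state { int dummy; };

struct hdmi_state {
	struct base_state base;	/* first member keeps casts cheap both ways */
	unsigned long long pixel_rate;
};

static struct hdmi_state *to_hdmi_state(struct base_state *s)
{
	return container_of(s, struct hdmi_state, base);
}

int main(void)
{
	struct hdmi_state st = { .pixel_rate = 594000000ULL };

	/* Core code traffics in &st.base; the driver downcasts to reach
	 * its private pixel_rate. */
	assert(to_hdmi_state(&st.base)->pixel_rate == 594000000ULL);
	return 0;
}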
@@ -339,11 +340,12 @@ static void vc5_hdmi_reset_phy(struct vc4_hdmi *vc4_hdmi) HDMI_WRITE(HDMI_TX_PHY_POWERDOWN_CTL, BIT(10)); } -void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, struct drm_display_mode *mode) +void vc5_hdmi_phy_init(struct vc4_hdmi *vc4_hdmi, + struct vc4_hdmi_connector_state *conn_state) { const struct phy_lane_settings *chan0_settings, *chan1_settings, *chan2_settings, *clock_settings; const struct vc4_hdmi_variant *variant = vc4_hdmi->variant; - unsigned long long pixel_freq = mode->clock * 1000; + unsigned long long pixel_freq = conn_state->pixel_rate; unsigned long long vco_freq; unsigned char word_sel; u8 vco_sel, vco_div; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h index 96d764ebfe67..401863cb8c98 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi_regs.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi_regs.h @@ -59,9 +59,12 @@ enum vc4_hdmi_field { */ HDMI_CTS_0, HDMI_CTS_1, + HDMI_DEEP_COLOR_CONFIG_1, HDMI_DVP_CTL, HDMI_FIFO_CTL, HDMI_FRAME_COUNT, + HDMI_GCP_CONFIG, + HDMI_GCP_WORD_1, HDMI_HORZA, HDMI_HORZB, HDMI_HOTPLUG, @@ -229,6 +232,9 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi0_fields[] = { VC4_HDMI_REG(HDMI_VERTB1, 0x0f8), VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c), VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0), + VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170), + VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178), + VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c), VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8), VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc), @@ -305,6 +311,9 @@ static const struct vc4_hdmi_register __maybe_unused vc5_hdmi_hdmi1_fields[] = { VC4_HDMI_REG(HDMI_VERTB1, 0x0f8), VC4_HDMI_REG(HDMI_MAI_CHANNEL_MAP, 0x09c), VC4_HDMI_REG(HDMI_MAI_CONFIG, 0x0a0), + VC4_HDMI_REG(HDMI_DEEP_COLOR_CONFIG_1, 0x170), + VC4_HDMI_REG(HDMI_GCP_CONFIG, 0x178), + VC4_HDMI_REG(HDMI_GCP_WORD_1, 0x17c), VC4_HDMI_REG(HDMI_HOTPLUG, 0x1a8), VC5_DVP_REG(HDMI_CLOCK_STOP, 0x0bc), diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index cccd341e5d67..2b3a597fa65f 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -326,10 +326,10 @@ void vc4_hvs_stop_channel(struct drm_device *dev, unsigned int chan) SCALER_DISPSTATX_EMPTY); } -int vc4_hvs_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *state) +int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) { - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_plane *plane; @@ -341,10 +341,10 @@ int vc4_hvs_atomic_check(struct drm_crtc *crtc, /* The pixelvalve can only feed one encoder (and encoders are * 1:1 with connectors.) */ - if (hweight32(state->connector_mask) > 1) + if (hweight32(crtc_state->connector_mask) > 1) return -EINVAL; - drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state) + drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) dlist_count += vc4_plane_dlist_size(plane_state); dlist_count++; /* Account for SCALER_CTL0_END. 
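Worth noting on the vc4_hvs_atomic_check() conversion above: passing the whole drm_atomic_state is the direction all atomic hooks have been moving, since both the old and the new per-object states can be looked up from the one pointer. A hedged sketch of the pattern with demo names:

#include <linux/bitops.h>
#include <drm/drm_atomic.h>

static int demo_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_state =
		drm_atomic_get_new_crtc_state(state, crtc);
	struct drm_crtc_state *old_state =
		drm_atomic_get_old_crtc_state(state, crtc);

	/* Transitions are visible without extra parameters. */
	if (old_state->enable && !new_state->enable)
		return 0;	/* nothing to validate while disabling */

	/* Same constraint the vc4 code keeps: one encoder per CRTC. */
	if (hweight32(new_state->connector_mask) > 1)
		return -EINVAL;

	return 0;
}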
*/ @@ -391,11 +391,12 @@ static void vc4_hvs_update_dlist(struct drm_crtc *crtc) } void vc4_hvs_atomic_enable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); + struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(new_crtc_state); struct drm_display_mode *mode = &crtc->state->adjusted_mode; bool oneshot = vc4_state->feed_txp; @@ -404,9 +405,10 @@ void vc4_hvs_atomic_enable(struct drm_crtc *crtc, } void vc4_hvs_atomic_disable(struct drm_crtc *crtc, - struct drm_crtc_state *old_state) + struct drm_atomic_state *state) { struct drm_device *dev = crtc->dev; + struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, crtc); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(old_state); unsigned int chan = vc4_state->assigned_channel; diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 2b951cae04ad..f09254c2497d 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -24,6 +24,8 @@ #include "vc4_drv.h" #include "vc4_regs.h" +#define HVS_NUM_CHANNELS 3 + struct vc4_ctm_state { struct drm_private_state base; struct drm_color_ctm *ctm; @@ -35,6 +37,21 @@ static struct vc4_ctm_state *to_vc4_ctm_state(struct drm_private_state *priv) return container_of(priv, struct vc4_ctm_state, base); } +struct vc4_hvs_state { + struct drm_private_state base; + + struct { + unsigned in_use: 1; + struct drm_crtc_commit *pending_commit; + } fifo_state[HVS_NUM_CHANNELS]; +}; + +static struct vc4_hvs_state * +to_vc4_hvs_state(struct drm_private_state *priv) +{ + return container_of(priv, struct vc4_hvs_state, base); +} + struct vc4_load_tracker_state { struct drm_private_state base; u64 hvs_load; @@ -113,7 +130,7 @@ static int vc4_ctm_obj_init(struct vc4_dev *vc4) drm_atomic_private_obj_init(&vc4->base, &vc4->ctm_manager, &ctm_state->base, &vc4_ctm_state_funcs); - return drmm_add_action(&vc4->base, vc4_ctm_obj_fini, NULL); + return drmm_add_action_or_reset(&vc4->base, vc4_ctm_obj_fini, NULL); } /* Converts a DRM S31.32 value to the HW S0.9 format. 
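For readers following the CTM path, the conversion that the comment above introduces works roughly as sketched below. This mirrors the driver's existing vc4_ctm_s31_32_to_s0_9() helper, which the diff leaves untouched, so treat the details as illustrative:

#include <linux/bits.h>
#include <linux/types.h>

/* DRM CTM entries are sign-magnitude S31.32; the HW wants S0.9. */
static u16 demo_s31_32_to_s0_9(u64 coeff)
{
	u16 r = (coeff & BIT_ULL(63)) ? BIT(9) : 0;	/* sign bit */

	if (coeff & GENMASK_ULL(62, 32))
		r |= GENMASK(8, 0);	/* no integer bits in S0.9: saturate */
	else
		r |= (coeff >> 23) & GENMASK(8, 0);	/* top 9 fractional bits */

	return r;
}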
*/ @@ -169,6 +186,45 @@ vc4_ctm_commit(struct vc4_dev *vc4, struct drm_atomic_state *state) VC4_SET_FIELD(ctm_state->fifo, SCALER_OLEDOFFS_DISPFIFO)); } +static struct vc4_hvs_state * +vc4_hvs_get_new_global_state(struct drm_atomic_state *state) +{ + struct vc4_dev *vc4 = to_vc4_dev(state->dev); + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_vc4_hvs_state(priv_state); +} + +static struct vc4_hvs_state * +vc4_hvs_get_old_global_state(struct drm_atomic_state *state) +{ + struct vc4_dev *vc4 = to_vc4_dev(state->dev); + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_vc4_hvs_state(priv_state); +} + +static struct vc4_hvs_state * +vc4_hvs_get_global_state(struct drm_atomic_state *state) +{ + struct vc4_dev *vc4 = to_vc4_dev(state->dev); + struct drm_private_state *priv_state; + + priv_state = drm_atomic_get_private_obj_state(state, &vc4->hvs_channels); + if (IS_ERR(priv_state)) + return ERR_CAST(priv_state); + + return to_vc4_hvs_state(priv_state); +} + static void vc4_hvs_pv_muxing_commit(struct vc4_dev *vc4, struct drm_atomic_state *state) { @@ -213,10 +269,7 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4, { struct drm_crtc_state *crtc_state; struct drm_crtc *crtc; - unsigned char dsp2_mux = 0; - unsigned char dsp3_mux = 3; - unsigned char dsp4_mux = 3; - unsigned char dsp5_mux = 3; + unsigned char mux; unsigned int i; u32 reg; @@ -224,60 +277,70 @@ static void vc5_hvs_pv_muxing_commit(struct vc4_dev *vc4, struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); - if (!crtc_state->active) + if (!vc4_state->update_muxing) continue; switch (vc4_crtc->data->hvs_output) { case 2: - dsp2_mux = (vc4_state->assigned_channel == 2) ? 0 : 1; + mux = (vc4_state->assigned_channel == 2) ? 
0 : 1; + reg = HVS_READ(SCALER_DISPECTRL); + HVS_WRITE(SCALER_DISPECTRL, + (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) | + VC4_SET_FIELD(mux, SCALER_DISPECTRL_DSP2_MUX)); break; case 3: - dsp3_mux = vc4_state->assigned_channel; + if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) + mux = 3; + else + mux = vc4_state->assigned_channel; + + reg = HVS_READ(SCALER_DISPCTRL); + HVS_WRITE(SCALER_DISPCTRL, + (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) | + VC4_SET_FIELD(mux, SCALER_DISPCTRL_DSP3_MUX)); break; case 4: - dsp4_mux = vc4_state->assigned_channel; + if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) + mux = 3; + else + mux = vc4_state->assigned_channel; + + reg = HVS_READ(SCALER_DISPEOLN); + HVS_WRITE(SCALER_DISPEOLN, + (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) | + VC4_SET_FIELD(mux, SCALER_DISPEOLN_DSP4_MUX)); + break; case 5: - dsp5_mux = vc4_state->assigned_channel; + if (vc4_state->assigned_channel == VC4_HVS_CHANNEL_DISABLED) + mux = 3; + else + mux = vc4_state->assigned_channel; + + reg = HVS_READ(SCALER_DISPDITHER); + HVS_WRITE(SCALER_DISPDITHER, + (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) | + VC4_SET_FIELD(mux, SCALER_DISPDITHER_DSP5_MUX)); break; default: break; } } - - reg = HVS_READ(SCALER_DISPECTRL); - HVS_WRITE(SCALER_DISPECTRL, - (reg & ~SCALER_DISPECTRL_DSP2_MUX_MASK) | - VC4_SET_FIELD(dsp2_mux, SCALER_DISPECTRL_DSP2_MUX)); - - reg = HVS_READ(SCALER_DISPCTRL); - HVS_WRITE(SCALER_DISPCTRL, - (reg & ~SCALER_DISPCTRL_DSP3_MUX_MASK) | - VC4_SET_FIELD(dsp3_mux, SCALER_DISPCTRL_DSP3_MUX)); - - reg = HVS_READ(SCALER_DISPEOLN); - HVS_WRITE(SCALER_DISPEOLN, - (reg & ~SCALER_DISPEOLN_DSP4_MUX_MASK) | - VC4_SET_FIELD(dsp4_mux, SCALER_DISPEOLN_DSP4_MUX)); - - reg = HVS_READ(SCALER_DISPDITHER); - HVS_WRITE(SCALER_DISPDITHER, - (reg & ~SCALER_DISPDITHER_DSP5_MUX_MASK) | - VC4_SET_FIELD(dsp5_mux, SCALER_DISPDITHER_DSP5_MUX)); } -static void -vc4_atomic_complete_commit(struct drm_atomic_state *state) +static void vc4_atomic_commit_tail(struct drm_atomic_state *state) { struct drm_device *dev = state->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_hvs *hvs = vc4->hvs; + struct drm_crtc_state *old_crtc_state; struct drm_crtc_state *new_crtc_state; struct drm_crtc *crtc; + struct vc4_hvs_state *old_hvs_state; int i; for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { @@ -293,9 +356,35 @@ vc4_atomic_complete_commit(struct drm_atomic_state *state) if (vc4->hvs->hvs5) clk_set_min_rate(hvs->core_clk, 500000000); - drm_atomic_helper_wait_for_fences(dev, state, false); + old_hvs_state = vc4_hvs_get_old_global_state(state); + if (!old_hvs_state) + return; + + for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) { + struct vc4_crtc_state *vc4_crtc_state = + to_vc4_crtc_state(old_crtc_state); + struct drm_crtc_commit *commit; + unsigned int channel = vc4_crtc_state->assigned_channel; + unsigned long done; + + if (channel == VC4_HVS_CHANNEL_DISABLED) + continue; - drm_atomic_helper_wait_for_dependencies(state); + if (!old_hvs_state->fifo_state[channel].in_use) + continue; + + commit = old_hvs_state->fifo_state[channel].pending_commit; + if (!commit) + continue; + + done = wait_for_completion_timeout(&commit->hw_done, 10 * HZ); + if (!done) + drm_err(dev, "Timed out waiting for hw_done\n"); + + done = wait_for_completion_timeout(&commit->flip_done, 10 * HZ); + if (!done) + drm_err(dev, "Timed out waiting for flip_done\n"); + } drm_atomic_helper_commit_modeset_disables(dev, state); @@ -318,125 +407,37 @@
drm_atomic_helper_cleanup_planes(dev, state); - drm_atomic_helper_commit_cleanup_done(state); - if (vc4->hvs->hvs5) clk_set_min_rate(hvs->core_clk, 0); - - drm_atomic_state_put(state); - - up(&vc4->async_modeset); } -static void commit_work(struct work_struct *work) +static int vc4_atomic_commit_setup(struct drm_atomic_state *state) { - struct drm_atomic_state *state = container_of(work, - struct drm_atomic_state, - commit_work); - vc4_atomic_complete_commit(state); -} - -/** - * vc4_atomic_commit - commit validated state object - * @dev: DRM device - * @state: the driver state object - * @nonblock: nonblocking commit - * - * This function commits a with drm_atomic_helper_check() pre-validated state - * object. This can still fail when e.g. the framebuffer reservation fails. For - * now this doesn't implement asynchronous commits. - * - * RETURNS - * Zero for success or -errno. - */ -static int vc4_atomic_commit(struct drm_device *dev, - struct drm_atomic_state *state, - bool nonblock) -{ - struct vc4_dev *vc4 = to_vc4_dev(dev); - int ret; - - if (state->async_update) { - ret = down_interruptible(&vc4->async_modeset); - if (ret) - return ret; - - ret = drm_atomic_helper_prepare_planes(dev, state); - if (ret) { - up(&vc4->async_modeset); - return ret; - } - - drm_atomic_helper_async_commit(dev, state); - - drm_atomic_helper_cleanup_planes(dev, state); - - up(&vc4->async_modeset); + struct drm_crtc_state *crtc_state; + struct vc4_hvs_state *hvs_state; + struct drm_crtc *crtc; + unsigned int i; - return 0; - } + hvs_state = vc4_hvs_get_new_global_state(state); + if (!hvs_state) + return -EINVAL; - /* We know for sure we don't want an async update here. Set - * state->legacy_cursor_update to false to prevent - * drm_atomic_helper_setup_commit() from auto-completing - * commit->flip_done. - */ - state->legacy_cursor_update = false; - ret = drm_atomic_helper_setup_commit(state, nonblock); - if (ret) - return ret; + for_each_new_crtc_in_state(state, crtc, crtc_state, i) { + struct vc4_crtc_state *vc4_crtc_state = + to_vc4_crtc_state(crtc_state); + unsigned int channel = + vc4_crtc_state->assigned_channel; - INIT_WORK(&state->commit_work, commit_work); + if (channel == VC4_HVS_CHANNEL_DISABLED) + continue; - ret = down_interruptible(&vc4->async_modeset); - if (ret) - return ret; + if (!hvs_state->fifo_state[channel].in_use) + continue; - ret = drm_atomic_helper_prepare_planes(dev, state); - if (ret) { - up(&vc4->async_modeset); - return ret; + hvs_state->fifo_state[channel].pending_commit = + drm_crtc_commit_get(crtc_state->commit); } - if (!nonblock) { - ret = drm_atomic_helper_wait_for_fences(dev, state, true); - if (ret) { - drm_atomic_helper_cleanup_planes(dev, state); - up(&vc4->async_modeset); - return ret; - } - } - - /* - * This is the point of no return - everything below never fails except - * when the hw goes bonghits. Which means we can commit the new state on - * the software side now. - */ - - BUG_ON(drm_atomic_helper_swap_state(state, false) < 0); - - /* - * Everything below can be run asynchronously without the need to grab - * any modeset locks at all under one condition: It must be guaranteed - * that the asynchronous work has either been cancelled (if the driver - * supports it, which at least requires that the framebuffers get - * cleaned up with drm_atomic_helper_cleanup_planes()) or completed - * before the new state gets committed on the software side with - * drm_atomic_helper_swap_state(). 
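Everything the deleted block implemented by hand, fence waits, ordering through a driver-wide semaphore, and the nonblocking worker, now comes from drm_atomic_helper_commit(); the driver only supplies the two hooks registered further down. A sketch of the resulting shape with demo names:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

static int demo_commit_setup(struct drm_atomic_state *state)
{
	/* Runs after drm_atomic_helper_setup_commit(): the per-CRTC
	 * drm_crtc_commit objects exist and may be referenced here. */
	return 0;
}

static void demo_commit_tail(struct drm_atomic_state *state)
{
	/* Custom ordering or waits slot in around the standard steps. */
	drm_atomic_helper_commit_modeset_disables(state->dev, state);
	drm_atomic_helper_commit_planes(state->dev, state, 0);
	drm_atomic_helper_commit_modeset_enables(state->dev, state);
	drm_atomic_helper_commit_hw_done(state);
	drm_atomic_helper_wait_for_flip_done(state->dev, state);
	drm_atomic_helper_cleanup_planes(state->dev, state);
}

static const struct drm_mode_config_helper_funcs demo_helpers = {
	.atomic_commit_setup = demo_commit_setup,
	.atomic_commit_tail = demo_commit_tail,
};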
- * - * This scheme allows new atomic state updates to be prepared and - * checked in parallel to the asynchronous completion of the previous - * update. Which is important since compositors need to figure out the - * composition of the next frame right after having submitted the - * current layout. - */ - - drm_atomic_state_get(state); - if (nonblock) - queue_work(system_unbound_wq, &state->commit_work); - else - vc4_atomic_complete_commit(state); - return 0; } @@ -657,53 +658,147 @@ static int vc4_load_tracker_obj_init(struct vc4_dev *vc4) &load_state->base, &vc4_load_tracker_state_funcs); - return drmm_add_action(&vc4->base, vc4_load_tracker_obj_fini, NULL); + return drmm_add_action_or_reset(&vc4->base, vc4_load_tracker_obj_fini, NULL); } -#define NUM_OUTPUTS 6 -#define NUM_CHANNELS 3 - -static int -vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) +static struct drm_private_state * +vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj) { - unsigned long unassigned_channels = GENMASK(NUM_CHANNELS - 1, 0); - struct drm_crtc_state *old_crtc_state, *new_crtc_state; - struct drm_crtc *crtc; - int i, ret; + struct vc4_hvs_state *old_state = to_vc4_hvs_state(obj->state); + struct vc4_hvs_state *state; + unsigned int i; - /* - * Since the HVS FIFOs are shared across all the pixelvalves and - * the TXP (and thus all the CRTCs), we need to pull the current - * state of all the enabled CRTCs so that an update to a single - * CRTC still keeps the previous FIFOs enabled and assigned to - * the same CRTCs, instead of evaluating only the CRTC being - * modified. - */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_crtc_state *crtc_state; + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); + + + for (i = 0; i < HVS_NUM_CHANNELS; i++) { + state->fifo_state[i].in_use = old_state->fifo_state[i].in_use; - if (!crtc->state->enable) + if (!old_state->fifo_state[i].pending_commit) continue; - crtc_state = drm_atomic_get_crtc_state(state, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); + state->fifo_state[i].pending_commit = + drm_crtc_commit_get(old_state->fifo_state[i].pending_commit); } + return &state->base; +} + +static void vc4_hvs_channels_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct vc4_hvs_state *hvs_state = to_vc4_hvs_state(state); + unsigned int i; + + for (i = 0; i < HVS_NUM_CHANNELS; i++) { + if (!hvs_state->fifo_state[i].pending_commit) + continue; + + drm_crtc_commit_put(hvs_state->fifo_state[i].pending_commit); + } + + kfree(hvs_state); +} + +static const struct drm_private_state_funcs vc4_hvs_state_funcs = { + .atomic_duplicate_state = vc4_hvs_channels_duplicate_state, + .atomic_destroy_state = vc4_hvs_channels_destroy_state, +}; + +static void vc4_hvs_channels_obj_fini(struct drm_device *dev, void *unused) +{ + struct vc4_dev *vc4 = to_vc4_dev(dev); + + drm_atomic_private_obj_fini(&vc4->hvs_channels); +} + +static int vc4_hvs_channels_obj_init(struct vc4_dev *vc4) +{ + struct vc4_hvs_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + drm_atomic_private_obj_init(&vc4->base, &vc4->hvs_channels, + &state->base, + &vc4_hvs_state_funcs); + + return drmm_add_action_or_reset(&vc4->base, vc4_hvs_channels_obj_fini, NULL); +} + +/* + * The BCM2711 HVS has up to 7 outputs connected to the pixelvalves and + * the TXP (and therefore all the 
CRTCs found on that platform). + * + * The naive (and our initial) implementation would just iterate over + * all the active CRTCs, try to find a suitable FIFO, and then remove it + * from the pool of available FIFOs. However, there are a few corner + * cases that need to be considered: + * + * - When running in a dual-display setup (so with two CRTCs involved), + * we can update the state of a single CRTC (for example by changing + * its mode using xrandr under X11) without affecting the other. In + * this case, the other CRTC wouldn't be in the state at all, so we + * need to consider all the running CRTCs in the DRM device to assign + * a FIFO, not just the one in the state. + * + * - To fix the above, we can't use drm_atomic_get_crtc_state on all + * enabled CRTCs to pull their CRTC state into the global state, since + * a page flip would start considering their vblank to complete. Since + * we don't have a guarantee that they are actually active, that + * vblank might never happen, and shouldn't even be considered if we + * want to do a page flip on a single CRTC. That can be tested by + * doing a modetest -v first on HDMI1 and then on HDMI0. + * + * - Since we need the pixelvalve to be disabled and enabled back when + * the FIFO is changed, we should keep the FIFO assigned for as long + * as the CRTC is enabled, only considering it free again once that + * CRTC has been disabled. This can be tested by booting X11 on a + * single display, and changing the resolution down and then back up. + */ +static int vc4_pv_muxing_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct vc4_hvs_state *hvs_new_state; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_crtc *crtc; + unsigned int unassigned_channels = 0; + unsigned int i; + + hvs_new_state = vc4_hvs_get_global_state(state); + if (!hvs_new_state) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++) + if (!hvs_new_state->fifo_state[i].in_use) + unassigned_channels |= BIT(i); + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct vc4_crtc_state *old_vc4_crtc_state = + to_vc4_crtc_state(old_crtc_state); struct vc4_crtc_state *new_vc4_crtc_state = to_vc4_crtc_state(new_crtc_state); struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); unsigned int matching_channels; + unsigned int channel; - if (old_crtc_state->enable && !new_crtc_state->enable) - new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED; - - if (!new_crtc_state->enable) + /* Nothing to do here, let's skip it */ + if (old_crtc_state->enable == new_crtc_state->enable) continue; - if (new_vc4_crtc_state->assigned_channel != VC4_HVS_CHANNEL_DISABLED) { - unassigned_channels &= ~BIT(new_vc4_crtc_state->assigned_channel); + /* Muxing will need to be modified, mark it as such */ + new_vc4_crtc_state->update_muxing = true; + + /* If we're disabling our CRTC, we put back our channel */ + if (!new_crtc_state->enable) { + channel = old_vc4_crtc_state->assigned_channel; + hvs_new_state->fifo_state[channel].in_use = false; + new_vc4_crtc_state->assigned_channel = VC4_HVS_CHANNEL_DISABLED; continue; } @@ -732,16 +827,27 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) * but it works so far. 
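The assignment step that follows this comment reduces to a first-fit search over a channel bitmask, seeded from the in_use flags in the new private object so that FIFOs claimed by CRTCs outside this commit stay claimed. A standalone sketch with demo names:

#include <linux/bitops.h>
#include <linux/errno.h>

/* Returns the assigned channel, or -EINVAL if none is free. */
static int demo_assign_channel(unsigned int *unassigned_channels,
			       unsigned int hvs_available_channels)
{
	unsigned int matching = *unassigned_channels & hvs_available_channels;
	unsigned int channel;

	if (!matching)
		return -EINVAL;

	channel = ffs(matching) - 1;	/* ffs() is 1-based */
	*unassigned_channels &= ~BIT(channel);
	return channel;
}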
*/ matching_channels = unassigned_channels & vc4_crtc->data->hvs_available_channels; - if (matching_channels) { - unsigned int channel = ffs(matching_channels) - 1; - - new_vc4_crtc_state->assigned_channel = channel; - unassigned_channels &= ~BIT(channel); - } else { + if (!matching_channels) return -EINVAL; - } + + channel = ffs(matching_channels) - 1; + new_vc4_crtc_state->assigned_channel = channel; + unassigned_channels &= ~BIT(channel); + hvs_new_state->fifo_state[channel].in_use = true; } + return 0; +} + +static int +vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) +{ + int ret; + + ret = vc4_pv_muxing_atomic_check(dev, state); + if (ret) + return ret; + ret = vc4_ctm_atomic_check(dev, state); if (ret < 0) return ret; @@ -753,9 +859,14 @@ vc4_atomic_check(struct drm_device *dev, struct drm_atomic_state *state) return vc4_load_tracker_atomic_check(state); } +static struct drm_mode_config_helper_funcs vc4_mode_config_helpers = { + .atomic_commit_setup = vc4_atomic_commit_setup, + .atomic_commit_tail = vc4_atomic_commit_tail, +}; + static const struct drm_mode_config_funcs vc4_mode_funcs = { .atomic_check = vc4_atomic_check, - .atomic_commit = vc4_atomic_commit, + .atomic_commit = drm_atomic_helper_commit, .fb_create = vc4_fb_create, }; @@ -775,8 +886,6 @@ int vc4_kms_load(struct drm_device *dev) vc4->load_tracker_enabled = true; } - sema_init(&vc4->async_modeset, 1); - /* Set support for vblank irq fast disable, before drm_vblank_init() */ dev->vblank_disable_immediate = true; @@ -796,6 +905,7 @@ int vc4_kms_load(struct drm_device *dev) } dev->mode_config.funcs = &vc4_mode_funcs; + dev->mode_config.helper_private = &vc4_mode_config_helpers; dev->mode_config.preferred_depth = 24; dev->mode_config.async_page_flip = true; dev->mode_config.allow_fb_modifiers = true; @@ -808,6 +918,10 @@ int vc4_kms_load(struct drm_device *dev) if (ret) return ret; + ret = vc4_hvs_channels_obj_init(vc4); + if (ret) + return ret; + drm_mode_config_reset(dev); drm_kms_helper_poll_init(dev); diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 6b39cc2ca18d..6bd8260aa9f2 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -1268,11 +1268,6 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = { .atomic_async_update = vc4_plane_atomic_async_update, }; -static void vc4_plane_destroy(struct drm_plane *plane) -{ - drm_plane_cleanup(plane); -} - static bool vc4_format_mod_supported(struct drm_plane *plane, uint32_t format, uint64_t modifier) @@ -1323,7 +1318,7 @@ static bool vc4_format_mod_supported(struct drm_plane *plane, static const struct drm_plane_funcs vc4_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = vc4_plane_destroy, + .destroy = drm_plane_cleanup, .set_property = NULL, .reset = vc4_plane_reset, .atomic_duplicate_state = vc4_plane_duplicate_state, diff --git a/drivers/gpu/drm/vc4/vc4_txp.c b/drivers/gpu/drm/vc4/vc4_txp.c index 34612edcabbd..c0122d83b651 100644 --- a/drivers/gpu/drm/vc4/vc4_txp.c +++ b/drivers/gpu/drm/vc4/vc4_txp.c @@ -273,8 +273,10 @@ static int vc4_txp_connector_atomic_check(struct drm_connector *conn, } static void vc4_txp_connector_atomic_commit(struct drm_connector *conn, - struct drm_connector_state *conn_state) + struct drm_atomic_state *state) { + struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(state, + conn); struct vc4_txp *txp = connector_to_vc4_txp(conn); struct 
drm_gem_cma_object *gem; struct drm_display_mode *mode; @@ -380,7 +382,6 @@ static const struct drm_crtc_funcs vc4_txp_crtc_funcs = { .reset = vc4_crtc_reset, .atomic_duplicate_state = vc4_crtc_duplicate_state, .atomic_destroy_state = vc4_crtc_destroy_state, - .gamma_set = drm_atomic_helper_legacy_gamma_set, .enable_vblank = vc4_txp_enable_vblank, .disable_vblank = vc4_txp_disable_vblank, }; @@ -393,7 +394,7 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc, struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state); int ret; - ret = vc4_hvs_atomic_check(crtc, crtc_state); + ret = vc4_hvs_atomic_check(crtc, state); if (ret) return ret; @@ -406,23 +407,19 @@ static int vc4_txp_atomic_check(struct drm_crtc *crtc, static void vc4_txp_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state) { - struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, - crtc); drm_crtc_vblank_on(crtc); - vc4_hvs_atomic_enable(crtc, old_state); + vc4_hvs_atomic_enable(crtc, state); } static void vc4_txp_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { - struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state, - crtc); struct drm_device *dev = crtc->dev; /* Disable vblank irq handling before crtc is disabled. */ drm_crtc_vblank_off(crtc); - vc4_hvs_atomic_disable(crtc, old_state); + vc4_hvs_atomic_disable(crtc, state); /* * Make sure we issue a vblank event after disabling the CRTC if diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 9a413091abb6..a0e75f1d5d01 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -356,8 +356,7 @@ static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev, } obj->pages_pin_count++; /* perma-pinned */ - drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL, - npages); + drm_prime_sg_to_page_array(obj->table, obj->pages, npages); return &obj->base; } @@ -403,8 +402,7 @@ static int vgem_prime_mmap(struct drm_gem_object *obj, if (ret) return ret; - fput(vma->vm_file); - vma->vm_file = get_file(obj->filp); + vma_set_file(vma, obj->filp); vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c index 24cc445169e2..faeae5d881fb 100644 --- a/drivers/gpu/drm/via/via_irq.c +++ b/drivers/gpu/drm/via/via_irq.c @@ -308,7 +308,7 @@ int via_driver_irq_postinstall(struct drm_device *dev) drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; u32 status; - DRM_DEBUG("via_driver_irq_postinstall\n"); + DRM_DEBUG("fun: %s\n", __func__); if (!dev_priv) return -EINVAL; @@ -364,6 +364,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv) irqwait->request.sequence += atomic_read(&cur_irq->irq_received); irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; + break; case VIA_IRQ_ABSOLUTE: break; default: diff --git a/drivers/gpu/drm/via/via_verifier.c b/drivers/gpu/drm/via/via_verifier.c index 8d8135f424ef..3d6e3a70f318 100644 --- a/drivers/gpu/drm/via/via_verifier.c +++ b/drivers/gpu/drm/via/via_verifier.c @@ -1001,8 +1001,8 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size, state = via_check_vheader6(&buf, buf_end); break; case state_command: - if ((HALCYON_HEADER2 == (cmd = *buf)) && - supported_3d) + cmd = *buf; + if ((cmd == HALCYON_HEADER2) && supported_3d) state = state_header2; else if ((cmd & HALCYON_HEADER1MASK) == 
HALCYON_HEADER1) state = state_header1; @@ -1064,7 +1064,8 @@ via_parse_command_stream(struct drm_device *dev, const uint32_t *buf, state = via_parse_vheader6(dev_priv, &buf, buf_end); break; case state_command: - if (HALCYON_HEADER2 == (cmd = *buf)) + cmd = *buf; + if (cmd == HALCYON_HEADER2) state = state_header2; else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) state = state_header1; diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig index b925b8b1da16..51ec7c3240c9 100644 --- a/drivers/gpu/drm/virtio/Kconfig +++ b/drivers/gpu/drm/virtio/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_VIRTIO_GPU tristate "Virtio GPU driver" - depends on DRM && VIRTIO && VIRTIO_MENU && MMU + depends on DRM && VIRTIO_MENU && MMU + select VIRTIO select DRM_KMS_HELPER select DRM_GEM_SHMEM_HELPER select VIRTIO_DMA_SHARED_BUFFER diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c index f336a8fa6666..c2b20e0ee030 100644 --- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c @@ -28,14 +28,13 @@ #include "virtgpu_drv.h" -static void virtio_add_bool(struct seq_file *m, const char *name, - bool value) +static void virtio_gpu_add_bool(struct seq_file *m, const char *name, + bool value) { seq_printf(m, "%-16s : %s\n", name, value ? "yes" : "no"); } -static void virtio_add_int(struct seq_file *m, const char *name, - int value) +static void virtio_gpu_add_int(struct seq_file *m, const char *name, int value) { seq_printf(m, "%-16s : %d\n", name, value); } @@ -45,13 +44,16 @@ static int virtio_gpu_features(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *)m->private; struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; - virtio_add_bool(m, "virgl", vgdev->has_virgl_3d); - virtio_add_bool(m, "edid", vgdev->has_edid); - virtio_add_bool(m, "indirect", vgdev->has_indirect); - virtio_add_bool(m, "resource uuid", vgdev->has_resource_assign_uuid); - virtio_add_bool(m, "blob resources", vgdev->has_resource_blob); - virtio_add_int(m, "cap sets", vgdev->num_capsets); - virtio_add_int(m, "scanouts", vgdev->num_scanouts); + virtio_gpu_add_bool(m, "virgl", vgdev->has_virgl_3d); + virtio_gpu_add_bool(m, "edid", vgdev->has_edid); + virtio_gpu_add_bool(m, "indirect", vgdev->has_indirect); + + virtio_gpu_add_bool(m, "resource uuid", + vgdev->has_resource_assign_uuid); + + virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob); + virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets); + virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts); if (vgdev->host_visible_region.len) { seq_printf(m, "%-16s : 0x%lx +0x%lx\n", "host visible region", (unsigned long)vgdev->host_visible_region.addr, @@ -67,8 +69,8 @@ virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data) struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; seq_printf(m, "fence %llu %lld\n", - (u64)atomic64_read(&vgdev->fence_drv.last_seq), - vgdev->fence_drv.sync_seq); + (u64)atomic64_read(&vgdev->fence_drv.last_fence_id), + vgdev->fence_drv.current_fence_id); return 0; } diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 27f13bd29c13..a21dc3ad6f88 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -54,7 +54,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd DRM_INFO("pci: %s detected at %s\n", vga ? 
"virtio-vga" : "virtio-gpu-pci", pname); - dev->pdev = pdev; if (vga) drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "virtiodrmfb"); diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index 3c0e17212c33..d9dbc4f258f3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -127,8 +127,8 @@ typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev, struct virtio_gpu_vbuffer *vbuf); struct virtio_gpu_fence_driver { - atomic64_t last_seq; - uint64_t sync_seq; + atomic64_t last_fence_id; + uint64_t current_fence_id; uint64_t context; struct list_head fences; spinlock_t lock; @@ -136,6 +136,7 @@ struct virtio_gpu_fence_driver { struct virtio_gpu_fence { struct dma_fence f; + uint64_t fence_id; struct virtio_gpu_fence_driver *drv; struct list_head node; }; @@ -257,7 +258,7 @@ struct virtio_gpu_fpriv { struct mutex context_lock; }; -/* virtio_ioctl.c */ +/* virtgpu_ioctl.c */ #define DRM_VIRTIO_NUM_IOCTLS 11 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file); @@ -420,7 +421,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, struct virtio_gpu_ctrl_hdr *cmd_hdr, struct virtio_gpu_fence *fence); void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev, - u64 last_seq); + u64 fence_id); /* virtgpu_object.c */ void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo); diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index 5b2a4146c5bd..d28e25e8409b 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -27,50 +27,48 @@ #include "virtgpu_drv.h" -#define to_virtio_fence(x) \ +#define to_virtio_gpu_fence(x) \ container_of(x, struct virtio_gpu_fence, f) -static const char *virtio_get_driver_name(struct dma_fence *f) +static const char *virtio_gpu_get_driver_name(struct dma_fence *f) { return "virtio_gpu"; } -static const char *virtio_get_timeline_name(struct dma_fence *f) +static const char *virtio_gpu_get_timeline_name(struct dma_fence *f) { return "controlq"; } -static bool virtio_fence_signaled(struct dma_fence *f) +static bool virtio_gpu_fence_signaled(struct dma_fence *f) { - struct virtio_gpu_fence *fence = to_virtio_fence(f); - - if (WARN_ON_ONCE(fence->f.seqno == 0)) - /* leaked fence outside driver before completing - * initialization with virtio_gpu_fence_emit */ - return false; - if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno) - return true; + /* leaked fence outside driver before completing + * initialization with virtio_gpu_fence_emit. 
+ */ + WARN_ON_ONCE(f->seqno == 0); return false; } -static void virtio_fence_value_str(struct dma_fence *f, char *str, int size) +static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size) { - snprintf(str, size, "%llu", f->seqno); + snprintf(str, size, "[%llu, %llu]", f->context, f->seqno); } -static void virtio_timeline_value_str(struct dma_fence *f, char *str, int size) +static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str, + int size) { - struct virtio_gpu_fence *fence = to_virtio_fence(f); + struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f); - snprintf(str, size, "%llu", (u64)atomic64_read(&fence->drv->last_seq)); + snprintf(str, size, "%llu", + (u64)atomic64_read(&fence->drv->last_fence_id)); } -static const struct dma_fence_ops virtio_fence_ops = { - .get_driver_name = virtio_get_driver_name, - .get_timeline_name = virtio_get_timeline_name, - .signaled = virtio_fence_signaled, - .fence_value_str = virtio_fence_value_str, - .timeline_value_str = virtio_timeline_value_str, +static const struct dma_fence_ops virtio_gpu_fence_ops = { + .get_driver_name = virtio_gpu_get_driver_name, + .get_timeline_name = virtio_gpu_get_timeline_name, + .signaled = virtio_gpu_fence_signaled, + .fence_value_str = virtio_gpu_fence_value_str, + .timeline_value_str = virtio_gpu_timeline_value_str, }; struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev) @@ -87,7 +85,8 @@ struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev) * unknown yet. The fence must not be used outside of the driver * until virtio_gpu_fence_emit is called. */ - dma_fence_init(&fence->f, &virtio_fence_ops, &drv->lock, drv->context, 0); + dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, drv->context, + 0); return fence; } @@ -100,7 +99,7 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, unsigned long irq_flags; spin_lock_irqsave(&drv->lock, irq_flags); - fence->f.seqno = ++drv->sync_seq; + fence->fence_id = fence->f.seqno = ++drv->current_fence_id; dma_fence_get(&fence->f); list_add_tail(&fence->node, &drv->fences); spin_unlock_irqrestore(&drv->lock, irq_flags); @@ -108,24 +107,45 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, trace_dma_fence_emit(&fence->f); cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE); - cmd_hdr->fence_id = cpu_to_le64(fence->f.seqno); + cmd_hdr->fence_id = cpu_to_le64(fence->fence_id); } void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev, - u64 last_seq) + u64 fence_id) { struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; - struct virtio_gpu_fence *fence, *tmp; + struct virtio_gpu_fence *signaled, *curr, *tmp; unsigned long irq_flags; spin_lock_irqsave(&drv->lock, irq_flags); - atomic64_set(&vgdev->fence_drv.last_seq, last_seq); - list_for_each_entry_safe(fence, tmp, &drv->fences, node) { - if (last_seq < fence->f.seqno) + atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id); + list_for_each_entry_safe(curr, tmp, &drv->fences, node) { + if (fence_id != curr->fence_id) continue; - dma_fence_signal_locked(&fence->f); - list_del(&fence->node); - dma_fence_put(&fence->f); + + signaled = curr; + + /* + * Signal any fences with a strictly smaller sequence number + * than the current signaled fence. 
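The inner walk that follows leans on dma_fence_is_later() semantics: comparisons are only meaningful within one fence context, and the test is strict, so the fence that matched fence_id is not swept up by its own pass. A small sketch:

#include <linux/dma-fence.h>

/* True when 'f' can be signaled along with 'signaled': same timeline,
 * and 'signaled' was emitted strictly after it. */
static bool demo_should_signal(struct dma_fence *signaled, struct dma_fence *f)
{
	if (signaled->context != f->context)
		return false;	/* seqnos from different contexts don't compare */

	return dma_fence_is_later(signaled, f);
}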
+ */ + list_for_each_entry_safe(curr, tmp, &drv->fences, node) { + /* dma-fence contexts must match */ + if (signaled->f.context != curr->f.context) + continue; + + if (!dma_fence_is_later(&signaled->f, &curr->f)) + continue; + + dma_fence_signal_locked(&curr->f); + list_del(&curr->node); + dma_fence_put(&curr->f); + } + + dma_fence_signal_locked(&signaled->f); + list_del(&signaled->node); + dma_fence_put(&signaled->f); + break; } spin_unlock_irqrestore(&drv->lock, irq_flags); } diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index c30c75ee83fc..8502400b2f9c 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -39,9 +39,6 @@ static int virtio_gpu_gem_create(struct drm_file *file, int ret; u32 handle; - if (vgdev->has_virgl_3d) - virtio_gpu_create_context(dev, file); - ret = virtio_gpu_object_create(vgdev, params, &obj, NULL); if (ret < 0) return ret; @@ -119,6 +116,11 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj, if (!vgdev->has_virgl_3d) goto out_notify; + /* the context might still be missing when the first ioctl is + * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE + */ + virtio_gpu_create_context(obj->dev, file); + objs = virtio_gpu_array_alloc(1); if (!objs) return -ENOMEM; diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 5417f365d1a3..23eb6d772e40 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -591,8 +591,9 @@ static int verify_blob(struct virtio_gpu_device *vgdev, return 0; } -static int virtio_gpu_resource_create_blob(struct drm_device *dev, - void *data, struct drm_file *file) +static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) { int ret = 0; uint32_t handle = 0; @@ -696,6 +697,6 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = { DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB, - virtio_gpu_resource_create_blob, + virtio_gpu_resource_create_blob_ioctl, DRM_RENDER_ALLOW), }; diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index d9ad27e00905..d69a5b6da553 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -144,7 +144,6 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, dshmem = &shmem->base.base; dshmem->base.funcs = &virtio_gpu_shmem_funcs; - dshmem->map_cached = true; return &dshmem->base; } diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c index 23c21bc4d01e..5cc34e7330fa 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vram.c +++ b/drivers/gpu/drm/virtio/virtgpu_vram.c @@ -69,6 +69,7 @@ static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = { .close = virtio_gpu_gem_object_close, .free = virtio_gpu_vram_free, .mmap = virtio_gpu_vram_mmap, + .export = virtgpu_gem_prime_export, }; bool virtio_gpu_is_vram(struct virtio_gpu_object *bo) @@ -134,6 +135,8 @@ int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev, obj = &vram->base.base.base; obj->funcs = &virtio_gpu_vram_funcs; + + params->size = PAGE_ALIGN(params->size); drm_gem_private_object_init(vgdev->ddev, obj, params->size); /* Create fake offset */ diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 1a1b5bc8e121..2173b82606f6 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -34,12 
+34,16 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 -static struct vkms_device *vkms_device; +static struct vkms_config *default_config; -bool enable_cursor = true; +static bool enable_cursor = true; module_param_named(enable_cursor, enable_cursor, bool, 0444); MODULE_PARM_DESC(enable_cursor, "Enable/Disable cursor support"); +static bool enable_writeback = true; +module_param_named(enable_writeback, enable_writeback, bool, 0444); +MODULE_PARM_DESC(enable_writeback, "Enable/Disable writeback connector support"); + DEFINE_DRM_GEM_FOPS(vkms_driver_fops); static void vkms_release(struct drm_device *dev) @@ -82,7 +86,6 @@ static const struct drm_driver vkms_driver = { .driver_features = DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM, .release = vkms_release, .fops = &vkms_driver_fops, - .gem_create_object = drm_gem_shmem_create_object_cached, DRM_GEM_SHMEM_DRIVER_OPS, .name = DRIVER_NAME, @@ -114,16 +117,20 @@ static int vkms_modeset_init(struct vkms_device *vkmsdev) dev->mode_config.max_height = YRES_MAX; dev->mode_config.cursor_width = 512; dev->mode_config.cursor_height = 512; - dev->mode_config.preferred_depth = 32; + /* FIXME: There's a confusion between bpp and depth between this and + * fbdev helpers. We have to go with 0, meaning "pick the default", + * which is XRGB8888 in all cases. */ + dev->mode_config.preferred_depth = 0; dev->mode_config.helper_private = &vkms_mode_config_helpers; return vkms_output_init(vkmsdev, 0); } -static int __init vkms_init(void) +static int vkms_create(struct vkms_config *config) { int ret; struct platform_device *pdev; + struct vkms_device *vkms_device; pdev = platform_device_register_simple(DRIVER_NAME, -1, NULL, 0); if (IS_ERR(pdev)) @@ -141,6 +148,8 @@ static int __init vkms_init(void) goto out_devres; } vkms_device->platform = pdev; + vkms_device->config = config; + config->dev = vkms_device; ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev, DMA_BIT_MASK(64)); @@ -177,21 +186,47 @@ out_unregister: return ret; } -static void __exit vkms_exit(void) +static int __init vkms_init(void) +{ + struct vkms_config *config; + + config = kmalloc(sizeof(*config), GFP_KERNEL); + if (!config) + return -ENOMEM; + + default_config = config; + + config->cursor = enable_cursor; + config->writeback = enable_writeback; + + return vkms_create(config); +} + +static void vkms_destroy(struct vkms_config *config) { struct platform_device *pdev; - if (!vkms_device) { + if (!config->dev) { DRM_INFO("vkms_device is NULL.\n"); return; } - pdev = vkms_device->platform; + pdev = config->dev->platform; - drm_dev_unregister(&vkms_device->drm); - drm_atomic_helper_shutdown(&vkms_device->drm); + drm_dev_unregister(&config->dev->drm); + drm_atomic_helper_shutdown(&config->dev->drm); devres_release_group(&pdev->dev, NULL); platform_device_unregister(pdev); + + config->dev = NULL; +} + +static void __exit vkms_exit(void) +{ + if (default_config->dev) + vkms_destroy(default_config); + + kfree(default_config); } module_init(vkms_init); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 5ed91ff08cb3..35540c7c4416 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -19,8 +19,6 @@ #define XRES_MAX 8192 #define YRES_MAX 8192 -extern bool enable_cursor; - struct vkms_composer { struct drm_framebuffer fb; struct drm_rect src, dst; @@ -82,10 +80,20 @@ struct vkms_output { spinlock_t composer_lock; }; +struct vkms_device; + +struct vkms_config { + bool writeback; + bool cursor; + /* only set when instantiated */ + struct
vkms_device *dev; +}; + struct vkms_device { struct drm_device drm; struct platform_device *platform; struct vkms_output output; + const struct vkms_config *config; }; #define drm_crtc_to_vkms_output(target) \ diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c index 4a1848b0318f..f5f6f15c362c 100644 --- a/drivers/gpu/drm/vkms/vkms_output.c +++ b/drivers/gpu/drm/vkms/vkms_output.c @@ -41,12 +41,13 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index) struct drm_crtc *crtc = &output->crtc; struct drm_plane *primary, *cursor = NULL; int ret; + int writeback; primary = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_PRIMARY, index); if (IS_ERR(primary)) return PTR_ERR(primary); - if (enable_cursor) { + if (vkmsdev->config->cursor) { cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index); if (IS_ERR(cursor)) { ret = PTR_ERR(cursor); @@ -80,9 +81,11 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index) goto err_attach; } - ret = vkms_enable_writeback_connector(vkmsdev); - if (ret) - DRM_ERROR("Failed to init writeback connector\n"); + if (vkmsdev->config->writeback) { + writeback = vkms_enable_writeback_connector(vkmsdev); + if (writeback) + DRM_ERROR("Failed to init writeback connector\n"); + } drm_mode_config_reset(dev); @@ -98,7 +101,7 @@ err_connector: drm_crtc_cleanup(crtc); err_crtc: - if (enable_cursor) + if (vkmsdev->config->cursor) drm_plane_cleanup(cursor); err_cursor: diff --git a/drivers/gpu/drm/vkms/vkms_writeback.c b/drivers/gpu/drm/vkms/vkms_writeback.c index 67f80ab1e85f..78fdc1d59186 100644 --- a/drivers/gpu/drm/vkms/vkms_writeback.c +++ b/drivers/gpu/drm/vkms/vkms_writeback.c @@ -2,6 +2,7 @@ #include <linux/dma-buf-map.h> +#include <drm/drm_atomic.h> #include <drm/drm_fourcc.h> #include <drm/drm_writeback.h> #include <drm/drm_probe_helper.h> @@ -105,8 +106,10 @@ static void vkms_wb_cleanup_job(struct drm_writeback_connector *connector, } static void vkms_wb_atomic_commit(struct drm_connector *conn, - struct drm_connector_state *state) + struct drm_atomic_state *state) { + struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state, + conn); struct vkms_device *vkmsdev = drm_device_to_vkms_device(conn->dev); struct vkms_output *output = &vkmsdev->output; struct drm_writeback_connector *wb_conn = &output->wb_connector; @@ -122,7 +125,7 @@ static void vkms_wb_atomic_commit(struct drm_connector *conn, crtc_state->active_writeback = conn_state->writeback_job->priv; crtc_state->wb_pending = true; spin_unlock_irq(&output->composer_lock); - drm_writeback_queue_job(wb_conn, state); + drm_writeback_queue_job(wb_conn, connector_state); } static const struct drm_connector_helper_funcs vkms_wb_conn_helper_funcs = { diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index 31f85f09f1fc..cc4cdca7176e 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -1,9 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \ - vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ - vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \ - vmwgfx_fence.o vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \ + vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \ + vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \ + vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \ vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \ 
vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \ vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \ diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c index 16077785ad47..0fe869d0fad1 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.c +++ b/drivers/gpu/drm/vmwgfx/ttm_object.c @@ -59,7 +59,6 @@ #define pr_fmt(fmt) "[TTM] " fmt -#include <drm/ttm/ttm_module.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/slab.h> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c index f41550797970..180f6dbc9460 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c @@ -555,7 +555,7 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) SVGA3dCmdSetShader body; } *cmd; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -564,7 +564,7 @@ static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) cmd->body.cid = bi->ctx->id; cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -587,7 +587,7 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi, SVGA3dCmdSetRenderTarget body; } *cmd; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -598,7 +598,7 @@ static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi, cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); cmd->body.target.face = 0; cmd->body.target.mipmap = 0; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -626,7 +626,7 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, } body; } *cmd; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -636,7 +636,7 @@ static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, cmd->body.s1.stage = binding->texture_stage; cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -657,7 +657,7 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind) SVGA3dCmdDXSetShader body; } *cmd; - cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id); + cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -665,7 +665,7 @@ static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind) cmd->header.size = sizeof(cmd->body); cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; cmd->body.shaderId = ((rebind) ? 
bi->res->id : SVGA3D_INVALID_ID); - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -686,7 +686,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind) SVGA3dCmdDXSetSingleConstantBuffer body; } *cmd; - cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id); + cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -703,7 +703,7 @@ static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind) cmd->body.sizeInBytes = 0; cmd->body.sid = SVGA3D_INVALID_ID; } - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -810,7 +810,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs, view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; - cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id); + cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -821,7 +821,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs, memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size); - vmw_fifo_commit(ctx->dev_priv, cmd_size); + vmw_cmd_commit(ctx->dev_priv, cmd_size); bitmap_clear(cbs->per_shader[shader_slot].dirty_sr, cbs->bind_first_slot, cbs->bind_cmd_count); @@ -846,7 +846,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs) vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS); view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; - cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id); + cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -860,7 +860,7 @@ static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs) memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size); - vmw_fifo_commit(ctx->dev_priv, cmd_size); + vmw_cmd_commit(ctx->dev_priv, cmd_size); return 0; @@ -930,7 +930,7 @@ static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs) so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget); cmd_size = sizeof(*cmd) + so_target_size; - cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id); + cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -938,7 +938,7 @@ static int vmw_emit_set_so_target(struct vmw_ctx_binding_state *cbs) cmd->header.size = sizeof(cmd->body) + so_target_size; memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size); - vmw_fifo_commit(ctx->dev_priv, cmd_size); + vmw_cmd_commit(ctx->dev_priv, cmd_size); return 0; @@ -1044,7 +1044,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs) set_vb_size = cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer); cmd_size = sizeof(*cmd) + set_vb_size; - cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id); + cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -1054,7 +1054,7 @@ static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs) memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size); - vmw_fifo_commit(ctx->dev_priv, cmd_size); + vmw_cmd_commit(ctx->dev_priv, cmd_size); bitmap_clear(cbs->dirty_vb, cbs->bind_first_slot, cbs->bind_cmd_count); @@ -1074,7 +1074,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs) vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS); view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; - cmd = 
VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id); + cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); if (!cmd) return -ENOMEM; @@ -1086,7 +1086,7 @@ static int vmw_emit_set_uav(struct vmw_ctx_binding_state *cbs) memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size); - vmw_fifo_commit(ctx->dev_priv, cmd_size); + vmw_cmd_commit(ctx->dev_priv, cmd_size); return 0; } @@ -1104,7 +1104,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs) vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_UAVIEWS); view_id_size = cbs->bind_cmd_count*sizeof(uint32); cmd_size = sizeof(*cmd) + view_id_size; - cmd = VMW_FIFO_RESERVE_DX(ctx->dev_priv, cmd_size, ctx->id); + cmd = VMW_CMD_CTX_RESERVE(ctx->dev_priv, cmd_size, ctx->id); if (!cmd) return -ENOMEM; @@ -1116,7 +1116,7 @@ static int vmw_emit_set_cs_uav(struct vmw_ctx_binding_state *cbs) memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size); - vmw_fifo_commit(ctx->dev_priv, cmd_size); + vmw_cmd_commit(ctx->dev_priv, cmd_size); return 0; } @@ -1263,7 +1263,7 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind) SVGA3dCmdDXSetIndexBuffer body; } *cmd; - cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id); + cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -1279,7 +1279,7 @@ static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind) cmd->body.offset = 0; } - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -1315,14 +1315,14 @@ static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind) SVGA3dCmdDXSetStreamOutput body; } *cmd; - cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), bi->ctx->id); + cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), bi->ctx->id); if (!cmd) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_SET_STREAMOUTPUT; cmd->header.size = sizeof(cmd->body); cmd->body.soid = rebind ? bi->res->id : SVGA3D_INVALID_ID; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index f21881e087db..9f2779ddcf08 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -482,8 +482,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, d.src_addr = NULL; d.dst_pages = dst->ttm->pages; d.src_pages = src->ttm->pages; - d.dst_num_pages = dst->num_pages; - d.src_num_pages = src->num_pages; + d.dst_num_pages = dst->mem.num_pages; + d.src_num_pages = src->mem.num_pages; d.dst_prot = ttm_io_prot(dst, &dst->mem, PAGE_KERNEL); d.src_prot = ttm_io_prot(src, &src->mem, PAGE_KERNEL); d.diff = diff; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index 263d76ae43f0..63dbc44eebe0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -223,7 +223,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv, uint32_t new_flags; place = vmw_vram_placement.placement[0]; - place.lpfn = bo->num_pages; + place.lpfn = bo->mem.num_pages; placement.num_placement = 1; placement.placement = &place; placement.num_busy_placement = 1; @@ -244,7 +244,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv, * that situation. 
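All of the vmwgfx hunks in this file share the reserve/fill/commit shape; only the names change, FIFO becoming CMD to reflect that the FIFO is just one possible command submission path. A sketch of the pattern using the renamed API; the command struct and values here are illustrative, not taken from the patch:

#include "vmwgfx_drv.h"

/* Illustrative command emission with the renamed reserve/commit API. */
struct demo_cmd {
	SVGA3dCmdHeader header;
	SVGA3dCmdSetShader body;
};

static int demo_emit_set_shader(struct vmw_private *dev_priv,
				u32 cid, u32 shid)
{
	struct demo_cmd *cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_SHADERTYPE_VS;	/* arbitrary for the sketch */
	cmd->body.shid = shid;

	/* Publishes the reserved range to the device. */
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	return 0;
}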
*/ if (bo->mem.mem_type == TTM_PL_VRAM && - bo->mem.start < bo->num_pages && + bo->mem.start < bo->mem.num_pages && bo->mem.start > 0 && buf->base.pin_count == 0) { ctx.interruptible = false; @@ -391,7 +391,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo) if (virtual) return virtual; - ret = ttm_bo_kmap(bo, 0, bo->num_pages, &vbo->map); + ret = ttm_bo_kmap(bo, 0, bo->mem.num_pages, &vbo->map); if (ret) DRM_ERROR("Buffer object map failed: %d.\n", ret); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c index a95156fc5db7..7400d617ae3c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR MIT /************************************************************************** * - * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA + * Copyright 2009-2020 VMware, Inc., Palo Alto, CA., USA * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the @@ -36,9 +36,8 @@ struct vmw_temp_set_context { SVGA3dCmdDXTempSetContext body; }; -bool vmw_fifo_have_3d(struct vmw_private *dev_priv) +bool vmw_supports_3d(struct vmw_private *dev_priv) { - u32 *fifo_mem = dev_priv->mmio_virt; uint32_t fifo_min, hwversion; const struct vmw_fifo_state *fifo = &dev_priv->fifo; @@ -62,15 +61,15 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) return false; - fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN); + fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) return false; - hwversion = vmw_mmio_read(fifo_mem + - ((fifo->capabilities & - SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ? - SVGA_FIFO_3D_HWVERSION_REVISED : - SVGA_FIFO_3D_HWVERSION)); + hwversion = vmw_fifo_mem_read(dev_priv, + ((fifo->capabilities & + SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ? 
+ SVGA_FIFO_3D_HWVERSION_REVISED : + SVGA_FIFO_3D_HWVERSION)); if (hwversion == 0) return false; @@ -87,13 +86,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv) bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) { - u32 *fifo_mem = dev_priv->mmio_virt; uint32_t caps; if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) return false; - caps = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES); + caps = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES); if (caps & SVGA_FIFO_CAP_PITCHLOCK) return true; @@ -102,7 +100,6 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv) int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) { - u32 *fifo_mem = dev_priv->mmio_virt; uint32_t max; uint32_t min; @@ -129,6 +126,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE | SVGA_REG_ENABLE_HIDE); + vmw_write(dev_priv, SVGA_REG_TRACES, 0); min = 4; @@ -139,19 +137,19 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) if (min < PAGE_SIZE) min = PAGE_SIZE; - vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN); - vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MIN, min); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_MAX, dev_priv->fifo_mem_size); wmb(); - vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD); - vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP); - vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, min); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_STOP, min); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_BUSY, 0); mb(); vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); - max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX); - min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN); - fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES); + max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX); + min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); + fifo->capabilities = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CAPABILITIES); DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n", (unsigned int) max, @@ -159,15 +157,14 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) (unsigned int) fifo->capabilities); atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); - vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE); - vmw_marker_queue_init(&fifo->marker_queue); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, dev_priv->last_read_seqno); return 0; } void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) { - u32 *fifo_mem = dev_priv->mmio_virt; + u32 *fifo_mem = dev_priv->fifo_mem; if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0) vmw_write(dev_priv, SVGA_REG_SYNC, reason); @@ -175,13 +172,11 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) { - u32 *fifo_mem = dev_priv->mmio_virt; - vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) ; - dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE); + dev_priv->last_read_seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE); vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, dev_priv->config_done_state); @@ -190,8 +185,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) vmw_write(dev_priv, SVGA_REG_TRACES, dev_priv->traces_state); - 
vmw_marker_queue_takedown(&fifo->marker_queue); - if (likely(fifo->static_buffer != NULL)) { vfree(fifo->static_buffer); fifo->static_buffer = NULL; @@ -205,11 +198,10 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes) { - u32 *fifo_mem = dev_priv->mmio_virt; - uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX); - uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD); - uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN); - uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP); + uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX); + uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD); + uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); + uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP); return ((max - next_cmd) + (stop - min) <= bytes); } @@ -298,7 +290,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) { struct vmw_fifo_state *fifo_state = &dev_priv->fifo; - u32 *fifo_mem = dev_priv->mmio_virt; + u32 *fifo_mem = dev_priv->fifo_mem; uint32_t max; uint32_t min; uint32_t next_cmd; @@ -306,9 +298,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv, int ret; mutex_lock(&fifo_state->fifo_mutex); - max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX); - min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN); - next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD); + max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX); + min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); + next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD); if (unlikely(bytes >= (max - min))) goto out_err; @@ -319,7 +311,7 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv, fifo_state->reserved_size = bytes; while (1) { - uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP); + uint32_t stop = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_STOP); bool need_bounce = false; bool reserve_in_place = false; @@ -353,8 +345,9 @@ static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv, fifo_state->using_bounce_buffer = false; if (reserveable) - vmw_mmio_write(bytes, fifo_mem + - SVGA_FIFO_RESERVED); + vmw_fifo_mem_write(dev_priv, + SVGA_FIFO_RESERVED, + bytes); return (void __force *) (fifo_mem + (next_cmd >> 2)); } else { @@ -381,7 +374,7 @@ out_err: return NULL; } -void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, +void *vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id) { void *ret; @@ -402,10 +395,11 @@ void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, } static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state, - u32 *fifo_mem, + struct vmw_private *vmw, uint32_t next_cmd, uint32_t max, uint32_t min, uint32_t bytes) { + u32 *fifo_mem = vmw->fifo_mem; uint32_t chunk_size = max - next_cmd; uint32_t rest; uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ? 
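The fullness test in vmw_fifo_is_full() above treats the FIFO as a ring over [min, max): next_cmd is the producer offset, stop the consumer offset, and free space is the tail run up to max plus the wrapped-around run from min to stop. A standalone model of that arithmetic with a hypothetical worked case; note that the reserve loop above handles the non-wrapped next_cmd < stop case separately.

        /*
         * Bytes the producer may write before catching up with the
         * consumer, mirroring the expression in vmw_fifo_is_full().
         */
        static u32 fifo_free_bytes(u32 min, u32 max, u32 next_cmd, u32 stop)
        {
                return (max - next_cmd) + (stop - min);
        }

        /*
         * Example: min = 4096, max = 65536, next_cmd = 61440, stop = 8192:
         * (65536 - 61440) + (8192 - 4096) = 8192 bytes free, so an
         * 8192-byte reservation already counts as full (the test uses <=)
         * and the caller must wait for the device or fall back to the
         * bounce buffer.
         */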
@@ -414,7 +408,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state, if (bytes < chunk_size) chunk_size = bytes; - vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED); + vmw_fifo_mem_write(vmw, SVGA_FIFO_RESERVED, bytes); mb(); memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size); rest = bytes - chunk_size; @@ -423,7 +417,7 @@ static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state, } static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state, - u32 *fifo_mem, + struct vmw_private *vmw, uint32_t next_cmd, uint32_t max, uint32_t min, uint32_t bytes) { @@ -431,12 +425,12 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state, fifo_state->dynamic_buffer : fifo_state->static_buffer; while (bytes > 0) { - vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2)); + vmw_fifo_mem_write(vmw, (next_cmd >> 2), *buffer++); next_cmd += sizeof(uint32_t); if (unlikely(next_cmd == max)) next_cmd = min; mb(); - vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD); + vmw_fifo_mem_write(vmw, SVGA_FIFO_NEXT_CMD, next_cmd); mb(); bytes -= sizeof(uint32_t); } @@ -445,10 +439,9 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state, static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) { struct vmw_fifo_state *fifo_state = &dev_priv->fifo; - u32 *fifo_mem = dev_priv->mmio_virt; - uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD); - uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX); - uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN); + uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD); + uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX); + uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; if (fifo_state->dx) @@ -462,10 +455,10 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) if (fifo_state->using_bounce_buffer) { if (reserveable) - vmw_fifo_res_copy(fifo_state, fifo_mem, + vmw_fifo_res_copy(fifo_state, dev_priv, next_cmd, max, min, bytes); else - vmw_fifo_slow_copy(fifo_state, fifo_mem, + vmw_fifo_slow_copy(fifo_state, dev_priv, next_cmd, max, min, bytes); if (fifo_state->dynamic_buffer) { @@ -481,18 +474,18 @@ static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) if (next_cmd >= max) next_cmd -= max - min; mb(); - vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_NEXT_CMD, next_cmd); } if (reserveable) - vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_RESERVED, 0); mb(); up_write(&fifo_state->rwsem); vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); mutex_unlock(&fifo_state->fifo_mutex); } -void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) +void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes) { if (dev_priv->cman) vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false); @@ -507,7 +500,7 @@ void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) * @dev_priv: Pointer to device private structure. * @bytes: Number of bytes to commit. */ -void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes) +void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes) { if (dev_priv->cman) vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true); @@ -522,7 +515,7 @@ void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes) * @dev_priv: Pointer to device private structure. 
* @interruptible: Whether to wait interruptible if function needs to sleep. */ -int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible) +int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible) { might_sleep(); @@ -532,7 +525,7 @@ int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible) return 0; } -int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) +int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) { struct vmw_fifo_state *fifo_state = &dev_priv->fifo; struct svga_fifo_cmd_fence *cmd_fence; @@ -540,7 +533,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) int ret = 0; uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence); - fm = VMW_FIFO_RESERVE(dev_priv, bytes); + fm = VMW_CMD_RESERVE(dev_priv, bytes); if (unlikely(fm == NULL)) { *seqno = atomic_read(&dev_priv->marker_seq); ret = -ENOMEM; @@ -560,15 +553,14 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) * waiting code in vmwgfx_irq.c will emulate this. */ - vmw_fifo_commit(dev_priv, 0); + vmw_cmd_commit(dev_priv, 0); return 0; } *fm++ = SVGA_CMD_FENCE; cmd_fence = (struct svga_fifo_cmd_fence *) fm; cmd_fence->fence = *seqno; - vmw_fifo_commit_flush(dev_priv, bytes); - (void) vmw_marker_push(&fifo_state->marker_queue, *seqno); + vmw_cmd_commit_flush(dev_priv, bytes); vmw_update_seqno(dev_priv, fifo_state); out_err: @@ -599,7 +591,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv, SVGA3dCmdWaitForQuery body; } *cmd; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -616,7 +608,7 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv, cmd->body.guestResult.offset = 0; } - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -645,7 +637,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv, SVGA3dCmdWaitForGBQuery body; } *cmd; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -657,7 +649,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv, cmd->body.mobid = bo->mem.start; cmd->body.offset = 0; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -681,7 +673,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv, * * Returns -ENOMEM on failure to reserve fifo space. */ -int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, +int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv, uint32_t cid) { if (dev_priv->has_mob) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c index 9a9fe10d829b..45fbc41440f1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c @@ -610,7 +610,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work) /* Send a new fence in case one was removed */ if (send_fence) { - vmw_fifo_send_fence(man->dev_priv, &dummy); + vmw_cmd_send_fence(man->dev_priv, &dummy); wake_up_all(&man->idle_queue); } @@ -1208,18 +1208,14 @@ static int vmw_cmdbuf_startstop(struct vmw_cmdbuf_man *man, u32 context, * * @man: The command buffer manager. * @size: The size of the main space pool. - * @default_size: The default size of the command buffer for small kernel - * submissions. 
* - * Set the size and allocate the main command buffer space pool, - * as well as the default size of the command buffer for - * small kernel submissions. If successful, this enables large command - * submissions. Note that this function requires that rudimentary command + * Set the size and allocate the main command buffer space pool. + * If successful, this enables large command submissions. + * Note that this function requires that rudimentary command * submission is already available and that the MOB memory manager is alive. * Returns 0 on success. Negative error code on failure. */ -int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, - size_t size, size_t default_size) +int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size) { struct vmw_private *dev_priv = man->dev_priv; bool dummy; @@ -1230,7 +1226,7 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, /* First, try to allocate a huge chunk of DMA memory */ size = PAGE_ALIGN(size); - man->map = dma_alloc_coherent(&dev_priv->dev->pdev->dev, size, + man->map = dma_alloc_coherent(dev_priv->drm.dev, size, &man->handle, GFP_KERNEL); if (man->map) { man->using_mob = false; @@ -1313,7 +1309,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv) man->num_contexts = (dev_priv->capabilities & SVGA_CAP_HP_CMD_QUEUE) ? 2 : 1; man->headers = dma_pool_create("vmwgfx cmdbuf", - &dev_priv->dev->pdev->dev, + dev_priv->drm.dev, sizeof(SVGACBHeader), 64, PAGE_SIZE); if (!man->headers) { @@ -1322,7 +1318,7 @@ struct vmw_cmdbuf_man *vmw_cmdbuf_man_create(struct vmw_private *dev_priv) } man->dheaders = dma_pool_create("vmwgfx inline cmdbuf", - &dev_priv->dev->pdev->dev, + dev_priv->drm.dev, sizeof(struct vmw_cmdbuf_dheader), 64, PAGE_SIZE); if (!man->dheaders) { @@ -1387,7 +1383,7 @@ void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man) ttm_bo_put(man->cmd_space); man->cmd_space = NULL; } else { - dma_free_coherent(&man->dev_priv->dev->pdev->dev, + dma_free_coherent(man->dev_priv->drm.dev, man->size, man->map, man->handle); } } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c index 61c246335e66..6f4d0da11ad8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c @@ -163,7 +163,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) } vmw_execbuf_release_pinned_bo(dev_priv); - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return; @@ -171,7 +171,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res) cmd->header.size = sizeof(cmd->body); cmd->body.cid = res->id; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_fifo_resource_dec(dev_priv); } @@ -265,7 +265,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, return -ENOMEM; } - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { vmw_resource_unreference(&res); return -ENOMEM; @@ -275,7 +275,7 @@ static int vmw_context_init(struct vmw_private *dev_priv, cmd->header.size = sizeof(cmd->body); cmd->body.cid = res->id; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_fifo_resource_inc(dev_priv); res->hw_destroy = vmw_hw_context_destroy; return 0; @@ -316,7 +316,7 @@ static int vmw_gb_context_create(struct vmw_resource *res) goto out_no_fifo; } - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = 
VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { ret = -ENOMEM; goto out_no_fifo; @@ -325,7 +325,7 @@ static int vmw_gb_context_create(struct vmw_resource *res) cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT; cmd->header.size = sizeof(cmd->body); cmd->body.cid = res->id; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_fifo_resource_inc(dev_priv); return 0; @@ -348,7 +348,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res, BUG_ON(bo->mem.mem_type != VMW_PL_MOB); - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -358,7 +358,7 @@ static int vmw_gb_context_bind(struct vmw_resource *res, cmd->body.mobid = bo->mem.start; cmd->body.validContents = res->backup_dirty; res->backup_dirty = false; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -392,7 +392,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); - cmd = VMW_FIFO_RESERVE(dev_priv, submit_size); + cmd = VMW_CMD_RESERVE(dev_priv, submit_size); if (unlikely(cmd == NULL)) { mutex_unlock(&dev_priv->binding_mutex); return -ENOMEM; @@ -411,7 +411,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res, cmd2->body.cid = res->id; cmd2->body.mobid = SVGA3D_INVALID_ID; - vmw_fifo_commit(dev_priv, submit_size); + vmw_cmd_commit(dev_priv, submit_size); mutex_unlock(&dev_priv->binding_mutex); /* @@ -440,14 +440,14 @@ static int vmw_gb_context_destroy(struct vmw_resource *res) if (likely(res->id == -1)) return 0; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT; cmd->header.size = sizeof(cmd->body); cmd->body.cid = res->id; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); if (dev_priv->query_cid == res->id) dev_priv->query_cid_valid = false; vmw_resource_release_id(res); @@ -483,7 +483,7 @@ static int vmw_dx_context_create(struct vmw_resource *res) goto out_no_fifo; } - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { ret = -ENOMEM; goto out_no_fifo; @@ -492,7 +492,7 @@ static int vmw_dx_context_create(struct vmw_resource *res) cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT; cmd->header.size = sizeof(cmd->body); cmd->body.cid = res->id; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_fifo_resource_inc(dev_priv); return 0; @@ -515,7 +515,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res, BUG_ON(bo->mem.mem_type != VMW_PL_MOB); - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -525,7 +525,7 @@ static int vmw_dx_context_bind(struct vmw_resource *res, cmd->body.mobid = bo->mem.start; cmd->body.validContents = res->backup_dirty; res->backup_dirty = false; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; @@ -608,7 +608,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res, submit_size = sizeof(*cmd2) + (readback ? 
sizeof(*cmd1) : 0); - cmd = VMW_FIFO_RESERVE(dev_priv, submit_size); + cmd = VMW_CMD_RESERVE(dev_priv, submit_size); if (unlikely(cmd == NULL)) { mutex_unlock(&dev_priv->binding_mutex); return -ENOMEM; @@ -627,7 +627,7 @@ static int vmw_dx_context_unbind(struct vmw_resource *res, cmd2->body.cid = res->id; cmd2->body.mobid = SVGA3D_INVALID_ID; - vmw_fifo_commit(dev_priv, submit_size); + vmw_cmd_commit(dev_priv, submit_size); mutex_unlock(&dev_priv->binding_mutex); /* @@ -656,14 +656,14 @@ static int vmw_dx_context_destroy(struct vmw_resource *res) if (likely(res->id == -1)) return 0; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT; cmd->header.size = sizeof(cmd->body); cmd->body.cid = res->id; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); if (dev_priv->query_cid == res->id) dev_priv->query_cid_valid = false; vmw_resource_release_id(res); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 984d8884357d..ba658fa9cf6c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -175,7 +175,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res) WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); dma_resv_assert_held(bo->base.resv); - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (!cmd) return -ENOMEM; @@ -188,7 +188,7 @@ static int vmw_cotable_unscrub(struct vmw_resource *res) cmd->body.mobid = bo->mem.start; cmd->body.validSizeInBytes = vcotbl->size_read_back; - vmw_fifo_commit_flush(dev_priv, sizeof(*cmd)); + vmw_cmd_commit_flush(dev_priv, sizeof(*cmd)); vcotbl->scrubbed = false; return 0; @@ -263,7 +263,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback) if (readback) submit_size += sizeof(*cmd0); - cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size); + cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size); if (!cmd1) return -ENOMEM; @@ -283,7 +283,7 @@ int vmw_cotable_scrub(struct vmw_resource *res, bool readback) cmd1->body.type = vcotbl->type; cmd1->body.mobid = SVGA3D_INVALID_ID; cmd1->body.validSizeInBytes = 0; - vmw_fifo_commit_flush(dev_priv, submit_size); + vmw_cmd_commit_flush(dev_priv, submit_size); vcotbl->scrubbed = true; /* Trigger a create() on next validate. */ @@ -349,7 +349,7 @@ static int vmw_cotable_readback(struct vmw_resource *res) struct vmw_fence_obj *fence; if (!vcotbl->scrubbed) { - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (!cmd) return -ENOMEM; @@ -358,7 +358,7 @@ static int vmw_cotable_readback(struct vmw_resource *res) cmd->body.cid = vcotbl->ctx->id; cmd->body.type = vcotbl->type; vcotbl->size_read_back = res->backup_size; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); } (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); @@ -430,7 +430,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) * Do a page by page copy of COTables. This eliminates slow vmap()s. * This should really be a TTM utility. 
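The resize path that follows copies a cotable's backing store page by page instead of vmap()ing the whole objects. A self-contained sketch of that loop using the TTM kmap helpers; it assumes both objects are reserved and idle, and it elides the partial-copy error handling of the real code:

        static int copy_bo_by_page(struct ttm_buffer_object *dst,
                                   struct ttm_buffer_object *src)
        {
                struct ttm_bo_kmap_obj src_map, dst_map;
                unsigned long i;
                bool is_iomem; /* written by ttm_kmap_obj_virtual(), unused here */
                int ret;

                for (i = 0; i < src->mem.num_pages; ++i) {
                        /* Map a single page of each object at a time. */
                        ret = ttm_bo_kmap(src, i, 1, &src_map);
                        if (ret)
                                return ret;
                        ret = ttm_bo_kmap(dst, i, 1, &dst_map);
                        if (ret) {
                                ttm_bo_kunmap(&src_map);
                                return ret;
                        }
                        memcpy(ttm_kmap_obj_virtual(&dst_map, &is_iomem),
                               ttm_kmap_obj_virtual(&src_map, &is_iomem),
                               PAGE_SIZE);
                        ttm_bo_kunmap(&dst_map);
                        ttm_bo_kunmap(&src_map);
                }
                return 0;
        }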
*/ - for (i = 0; i < old_bo->num_pages; ++i) { + for (i = 0; i < old_bo->mem.num_pages; ++i) { bool dummy; ret = ttm_bo_kmap(old_bo, i, 1, &old_map); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 216daf93022c..408847a68cf3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -32,10 +32,10 @@ #include <linux/mem_encrypt.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_sysfs.h> #include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_placement.h> #include "ttm_object.h" @@ -43,8 +43,6 @@ #include "vmwgfx_drv.h" #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" -#define VMWGFX_CHIP_SVGAII 0 -#define VMW_FB_RESERVATION 0 #define VMW_MIN_INITIAL_WIDTH 800 #define VMW_MIN_INITIAL_HEIGHT 600 @@ -253,8 +251,8 @@ static const struct drm_ioctl_desc vmw_ioctls[] = { }; static const struct pci_device_id vmw_pci_id_list[] = { - {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, - {0, 0, 0} + { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) }, + { } }; MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); @@ -425,8 +423,7 @@ static int vmw_request_device_late(struct vmw_private *dev_priv) } if (dev_priv->cman) { - ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, - 256*4096, 2*4096); + ret = vmw_cmdbuf_set_pool_size(dev_priv->cman, 256*4096); if (ret) { struct vmw_cmdbuf_man *man = dev_priv->cman; @@ -609,7 +606,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) */ static int vmw_dma_masks(struct vmw_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int ret = 0; ret = dma_set_mask_and_coherent(dev->dev, DMA_BIT_MASK(64)); @@ -644,26 +641,83 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv) #endif } -static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) +static int vmw_setup_pci_resources(struct vmw_private *dev, + unsigned long pci_id) { - struct vmw_private *dev_priv; + resource_size_t fifo_start; + resource_size_t fifo_size; int ret; + struct pci_dev *pdev = to_pci_dev(dev->drm.dev); + + pci_set_master(pdev); + + ret = pci_request_regions(pdev, "vmwgfx probe"); + if (ret) + return ret; + + dev->io_start = pci_resource_start(pdev, 0); + dev->vram_start = pci_resource_start(pdev, 1); + dev->vram_size = pci_resource_len(pdev, 1); + fifo_start = pci_resource_start(pdev, 2); + fifo_size = pci_resource_len(pdev, 2); + + DRM_INFO("FIFO at %pa size is %llu kiB\n", + &fifo_start, (uint64_t)fifo_size / 1024); + dev->fifo_mem = devm_memremap(dev->drm.dev, + fifo_start, + fifo_size, + MEMREMAP_WB); + + if (IS_ERR(dev->fifo_mem)) { + DRM_ERROR("Failed mapping FIFO memory.\n"); + return PTR_ERR(dev->fifo_mem); + } + + /* + * This is the approximate size of the vram; the exact size will only + * be known after we read SVGA_REG_VRAM_SIZE. The PCI resource + * size will be equal to or bigger than the size reported by + * SVGA_REG_VRAM_SIZE. 
+ */ + DRM_INFO("VRAM at %pa size is %llu kiB\n", + &dev->vram_start, (uint64_t)dev->vram_size / 1024); + + return 0; +} + +static int vmw_detect_version(struct vmw_private *dev) +{ uint32_t svga_id; + + vmw_write(dev, SVGA_REG_ID, SVGA_ID_2); + svga_id = vmw_read(dev, SVGA_REG_ID); + if (svga_id != SVGA_ID_2) { + DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n", + svga_id, dev->vmw_chipset); + return -ENOSYS; + } + return 0; +} + +static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) +{ + int ret; enum vmw_res_type i; bool refuse_dma = false; char host_log[100] = {0}; + struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); - dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); - if (unlikely(!dev_priv)) { - DRM_ERROR("Failed allocating a device private struct.\n"); - return -ENOMEM; - } + dev_priv->vmw_chipset = pci_id; + dev_priv->last_read_seqno = (uint32_t) -100; + dev_priv->drm.dev_private = dev_priv; - pci_set_master(dev->pdev); + ret = vmw_setup_pci_resources(dev_priv, pci_id); + if (ret) + return ret; + ret = vmw_detect_version(dev_priv); + if (ret) + return ret; - dev_priv->dev = dev; - dev_priv->vmw_chipset = chipset; - dev_priv->last_read_seqno = (uint32_t) -100; mutex_init(&dev_priv->cmdbuf_mutex); mutex_init(&dev_priv->release_mutex); mutex_init(&dev_priv->binding_mutex); @@ -673,7 +727,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) spin_lock_init(&dev_priv->hw_lock); spin_lock_init(&dev_priv->waiter_lock); spin_lock_init(&dev_priv->cap_lock); - spin_lock_init(&dev_priv->svga_lock); spin_lock_init(&dev_priv->cursor_lock); for (i = vmw_res_context; i < vmw_res_max; ++i) { @@ -688,21 +741,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) dev_priv->used_memory_size = 0; - dev_priv->io_start = pci_resource_start(dev->pdev, 0); - dev_priv->vram_start = pci_resource_start(dev->pdev, 1); - dev_priv->mmio_start = pci_resource_start(dev->pdev, 2); - dev_priv->assume_16bpp = !!vmw_assume_16bpp; dev_priv->enable_fb = enable_fbdev; - vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); - svga_id = vmw_read(dev_priv, SVGA_REG_ID); - if (svga_id != SVGA_ID_2) { - ret = -ENOSYS; - DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id); - goto out_err0; - } dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES); @@ -720,7 +762,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) } dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE); - dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); + dev_priv->fifo_mem_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE); dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH); dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT); @@ -794,7 +836,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) if (unlikely(ret != 0)) goto out_err0; - dma_set_max_seg_size(dev->dev, U32_MAX); + dma_set_max_seg_size(dev_priv->drm.dev, U32_MAX); if (dev_priv->capabilities & SVGA_CAP_GMR2) { DRM_INFO("Max GMR ids is %u\n", @@ -804,21 +846,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n", (unsigned)dev_priv->memory_size / 1024); } - DRM_INFO("Maximum display memory size is %u kiB\n", - dev_priv->prim_bb_mem / 1024); - DRM_INFO("VRAM at 0x%08x size is %u kiB\n", - dev_priv->vram_start, dev_priv->vram_size / 1024); - DRM_INFO("MMIO at 0x%08x size is %u kiB\n", - dev_priv->mmio_start, dev_priv->mmio_size / 1024); - - 
dev_priv->mmio_virt = memremap(dev_priv->mmio_start, - dev_priv->mmio_size, MEMREMAP_WB); - - if (unlikely(dev_priv->mmio_virt == NULL)) { - ret = -ENOMEM; - DRM_ERROR("Failed mapping MMIO.\n"); - goto out_err0; - } + DRM_INFO("Maximum display memory size is %llu kiB\n", + (uint64_t)dev_priv->prim_bb_mem / 1024); /* Need mmio memory to check for fifo pitchlock cap. */ if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) && @@ -826,7 +855,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) !vmw_fifo_have_pitchlock(dev_priv)) { ret = -ENOSYS; DRM_ERROR("Hardware has no pitchlock\n"); - goto out_err4; + goto out_err0; } dev_priv->tdev = ttm_object_device_init(&ttm_mem_glob, 12, @@ -835,29 +864,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) if (unlikely(dev_priv->tdev == NULL)) { DRM_ERROR("Unable to initialize TTM object management.\n"); ret = -ENOMEM; - goto out_err4; - } - - dev->dev_private = dev_priv; - - ret = pci_request_regions(dev->pdev, "vmwgfx probe"); - dev_priv->stealth = (ret != 0); - if (dev_priv->stealth) { - /** - * Request at least the mmio PCI resource. - */ - - DRM_INFO("It appears like vesafb is loaded. " - "Ignore above error if any.\n"); - ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe"); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed reserving the SVGA MMIO resource.\n"); - goto out_no_device; - } + goto out_err0; } if (dev_priv->capabilities & SVGA_CAP_IRQMASK) { - ret = vmw_irq_install(dev, dev->pdev->irq); + ret = vmw_irq_install(&dev_priv->drm, pdev->irq); if (ret != 0) { DRM_ERROR("Failed installing irq: %d\n", ret); goto out_no_irq; @@ -874,8 +885,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) DRM_FILE_PAGE_OFFSET_START, DRM_FILE_PAGE_OFFSET_SIZE); ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver, - dev_priv->dev->dev, - dev->anon_inode->i_mapping, + dev_priv->drm.dev, + dev_priv->drm.anon_inode->i_mapping, &dev_priv->vma_manager, dev_priv->map_mode == vmw_dma_alloc_coherent, false); @@ -955,7 +966,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) if (ret) goto out_no_fifo; - DRM_INFO("Atomic: %s\n", (dev->driver->driver_features & DRIVER_ATOMIC) + DRM_INFO("Atomic: %s\n", (dev_priv->drm.driver->driver_features & DRIVER_ATOMIC) ? "yes." 
: "no."); if (dev_priv->sm_type == VMW_SM_5) DRM_INFO("SM5 support available.\n"); @@ -1000,16 +1011,10 @@ out_no_bdev: vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: if (dev_priv->capabilities & SVGA_CAP_IRQMASK) - vmw_irq_uninstall(dev_priv->dev); + vmw_irq_uninstall(&dev_priv->drm); out_no_irq: - if (dev_priv->stealth) - pci_release_region(dev->pdev, 2); - else - pci_release_regions(dev->pdev); -out_no_device: + pci_release_regions(pdev); ttm_object_device_release(&dev_priv->tdev); -out_err4: - memunmap(dev_priv->mmio_virt); out_err0: for (i = vmw_res_context; i < vmw_res_max; ++i) idr_destroy(&dev_priv->res_idr[i]); @@ -1023,6 +1028,7 @@ out_err0: static void vmw_driver_unload(struct drm_device *dev) { struct vmw_private *dev_priv = vmw_priv(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); enum vmw_res_type i; unregister_pm_notifier(&dev_priv->pm_nb); @@ -1052,14 +1058,10 @@ static void vmw_driver_unload(struct drm_device *dev) vmw_release_device_late(dev_priv); vmw_fence_manager_takedown(dev_priv->fman); if (dev_priv->capabilities & SVGA_CAP_IRQMASK) - vmw_irq_uninstall(dev_priv->dev); - if (dev_priv->stealth) - pci_release_region(dev->pdev, 2); - else - pci_release_regions(dev->pdev); + vmw_irq_uninstall(&dev_priv->drm); + pci_release_regions(pdev); ttm_object_device_release(&dev_priv->tdev); - memunmap(dev_priv->mmio_virt); if (dev_priv->ctx.staged_bindings) vmw_binding_state_free(dev_priv->ctx.staged_bindings); @@ -1190,12 +1192,10 @@ static void __vmw_svga_enable(struct vmw_private *dev_priv) { struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); - spin_lock(&dev_priv->svga_lock); if (!ttm_resource_manager_used(man)) { vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE); ttm_resource_manager_set_used(man, true); } - spin_unlock(&dev_priv->svga_lock); } /** @@ -1221,14 +1221,12 @@ static void __vmw_svga_disable(struct vmw_private *dev_priv) { struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM); - spin_lock(&dev_priv->svga_lock); if (ttm_resource_manager_used(man)) { ttm_resource_manager_set_used(man, false); vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_HIDE | SVGA_REG_ENABLE_ENABLE); } - spin_unlock(&dev_priv->svga_lock); } /** @@ -1253,19 +1251,16 @@ void vmw_svga_disable(struct vmw_private *dev_priv) * to be inconsistent with the device, causing modesetting problems. * */ - vmw_kms_lost_device(dev_priv->dev); + vmw_kms_lost_device(&dev_priv->drm); ttm_write_lock(&dev_priv->reservation_sem, false); - spin_lock(&dev_priv->svga_lock); if (ttm_resource_manager_used(man)) { - ttm_resource_manager_set_used(man, false); - spin_unlock(&dev_priv->svga_lock); if (ttm_resource_manager_evict_all(&dev_priv->bdev, man)) DRM_ERROR("Failed evicting VRAM buffers.\n"); + ttm_resource_manager_set_used(man, false); vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_HIDE | SVGA_REG_ENABLE_ENABLE); - } else - spin_unlock(&dev_priv->svga_lock); + } ttm_write_unlock(&dev_priv->reservation_sem); } @@ -1275,8 +1270,6 @@ static void vmw_remove(struct pci_dev *pdev) drm_dev_unregister(dev); vmw_driver_unload(dev); - drm_dev_put(dev); - pci_disable_device(pdev); } static unsigned long @@ -1377,7 +1370,7 @@ static int vmw_pm_freeze(struct device *kdev) * No user-space processes should be running now. 
*/ ttm_suspend_unlock(&dev_priv->reservation_sem); - ret = vmw_kms_suspend(dev_priv->dev); + ret = vmw_kms_suspend(&dev_priv->drm); if (ret) { ttm_suspend_lock(&dev_priv->reservation_sem); DRM_ERROR("Failed to freeze modesetting.\n"); @@ -1409,7 +1402,7 @@ static int vmw_pm_freeze(struct device *kdev) vmw_fence_fifo_down(dev_priv->fman); __vmw_svga_disable(dev_priv); - + vmw_release_device_late(dev_priv); return 0; } @@ -1438,7 +1431,7 @@ static int vmw_pm_restore(struct device *kdev) dev_priv->suspend_locked = false; ttm_suspend_unlock(&dev_priv->reservation_sem); if (dev_priv->suspend_state) - vmw_kms_resume(dev_priv->dev); + vmw_kms_resume(&dev_priv->drm); if (dev_priv->enable_fb) vmw_fb_on(dev_priv); @@ -1507,39 +1500,36 @@ static struct pci_driver vmw_pci_driver = { static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - struct drm_device *dev; + struct vmw_private *vmw; int ret; - ret = pci_enable_device(pdev); + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb"); if (ret) return ret; - dev = drm_dev_alloc(&driver, &pdev->dev); - if (IS_ERR(dev)) { - ret = PTR_ERR(dev); - goto err_pci_disable_device; - } + ret = pcim_enable_device(pdev); + if (ret) + return ret; - dev->pdev = pdev; - pci_set_drvdata(pdev, dev); + vmw = devm_drm_dev_alloc(&pdev->dev, &driver, + struct vmw_private, drm); + if (IS_ERR(vmw)) + return PTR_ERR(vmw); - ret = vmw_driver_load(dev, ent->driver_data); - if (ret) - goto err_drm_dev_put; + vmw->drm.pdev = pdev; + pci_set_drvdata(pdev, &vmw->drm); - ret = drm_dev_register(dev, ent->driver_data); + ret = vmw_driver_load(vmw, ent->device); if (ret) - goto err_vmw_driver_unload; + return ret; - return 0; + ret = drm_dev_register(&vmw->drm, 0); + if (ret) { + vmw_driver_unload(&vmw->drm); + return ret; + } -err_vmw_driver_unload: - vmw_driver_unload(dev); -err_drm_dev_put: - drm_dev_put(dev); -err_pci_disable_device: - pci_disable_device(pdev); - return ret; + return 0; } static int __init vmwgfx_init(void) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index b45becbb00f8..5fa5bcd20cc5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -39,7 +39,6 @@ #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_execbuf_util.h> -#include <drm/ttm/ttm_module.h> #include "ttm_lock.h" #include "ttm_object.h" @@ -67,6 +66,8 @@ #define VMWGFX_CMD_BOUNCE_INIT_SIZE 32768 #define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1 +#define VMWGFX_PCI_ID_SVGA2 0x0405 + /* * Perhaps we should have sysfs entries for these. 
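The new probe path above is also what lets vmw_remove() drop its explicit drm_dev_put() and pci_disable_device() calls: pcim_enable_device() registers the disable as a device-managed action, and devm_drm_dev_alloc() embeds the drm_device in vmw_private and frees both once the last reference is dropped. Condensed (driver is this file's static struct drm_driver):

        static int example_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
        {
                struct vmw_private *vmw;
                int ret;

                /* Undone automatically when the driver unbinds. */
                ret = pcim_enable_device(pdev);
                if (ret)
                        return ret;

                /* vmw_private embeds a refcounted drm_device at ->drm. */
                vmw = devm_drm_dev_alloc(&pdev->dev, &driver,
                                         struct vmw_private, drm);
                if (IS_ERR(vmw))
                        return PTR_ERR(vmw);

                pci_set_drvdata(pdev, &vmw->drm);

                ret = drm_dev_register(&vmw->drm, 0);
                if (ret)
                        vmw_driver_unload(&vmw->drm);
                return ret;
        }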
*/ @@ -275,13 +276,6 @@ struct vmw_surface { struct list_head view_list; }; -struct vmw_marker_queue { - struct list_head head; - u64 lag; - u64 lag_time; - spinlock_t lock; -}; - struct vmw_fifo_state { unsigned long reserved_size; u32 *dynamic_buffer; @@ -291,7 +285,6 @@ struct vmw_fifo_state { uint32_t capabilities; struct mutex fifo_mutex; struct rw_semaphore rwsem; - struct vmw_marker_queue marker_queue; bool dx; }; @@ -490,19 +483,19 @@ enum vmw_sm_type { }; struct vmw_private { + struct drm_device drm; struct ttm_bo_device bdev; struct vmw_fifo_state fifo; - struct drm_device *dev; struct drm_vma_offset_manager vma_manager; - unsigned long vmw_chipset; - unsigned int io_start; - uint32_t vram_start; - uint32_t vram_size; - uint32_t prim_bb_mem; - uint32_t mmio_start; - uint32_t mmio_size; + u32 vmw_chipset; + resource_size_t io_start; + resource_size_t vram_start; + resource_size_t vram_size; + resource_size_t prim_bb_mem; + u32 *fifo_mem; + resource_size_t fifo_mem_size; uint32_t fb_max_width; uint32_t fb_max_height; uint32_t texture_max_width; @@ -511,7 +504,6 @@ struct vmw_private { uint32_t stdu_max_height; uint32_t initial_width; uint32_t initial_height; - u32 *mmio_virt; uint32_t capabilities; uint32_t capabilities2; uint32_t max_gmr_ids; @@ -591,13 +583,7 @@ struct vmw_private { struct mutex cmdbuf_mutex; struct mutex binding_mutex; - /** - * Operating mode. - */ - - bool stealth; bool enable_fb; - spinlock_t svga_lock; /** * PM management. @@ -967,30 +953,29 @@ extern int vmw_fifo_init(struct vmw_private *dev_priv, extern void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo); extern void * -vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id); -extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); -extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes); -extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, - uint32_t *seqno); +vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id); +extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes); +extern void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes); +extern int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno); +extern bool vmw_supports_3d(struct vmw_private *dev_priv); extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason); -extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv); extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv); -extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv, - uint32_t cid); -extern int vmw_fifo_flush(struct vmw_private *dev_priv, - bool interruptible); +extern int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv, + uint32_t cid); +extern int vmw_cmd_flush(struct vmw_private *dev_priv, + bool interruptible); -#define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id) \ +#define VMW_CMD_CTX_RESERVE(__priv, __bytes, __ctx_id) \ ({ \ - vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id) ? : ({ \ + vmw_cmd_ctx_reserve(__priv, __bytes, __ctx_id) ? 
: ({ \ DRM_ERROR("FIFO reserve failed at %s for %u bytes\n", \ __func__, (unsigned int) __bytes); \ NULL; \ }); \ }) -#define VMW_FIFO_RESERVE(__priv, __bytes) \ - VMW_FIFO_RESERVE_DX(__priv, __bytes, SVGA3D_INVALID_ID) +#define VMW_CMD_RESERVE(__priv, __bytes) \ + VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID) /** * TTM glue - vmwgfx_ttm_glue.c @@ -1125,19 +1110,6 @@ extern void vmw_generic_waiter_add(struct vmw_private *dev_priv, u32 flag, extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv, u32 flag, int *waiter_count); -/** - * Rudimentary fence-like objects currently used only for throttling - - * vmwgfx_marker.c - */ - -extern void vmw_marker_queue_init(struct vmw_marker_queue *queue); -extern void vmw_marker_queue_takedown(struct vmw_marker_queue *queue); -extern int vmw_marker_push(struct vmw_marker_queue *queue, - uint32_t seqno); -extern int vmw_marker_pull(struct vmw_marker_queue *queue, - uint32_t signaled_seqno); -extern int vmw_wait_lag(struct vmw_private *dev_priv, - struct vmw_marker_queue *queue, uint32_t us); /** * Kernel framebuffer - vmwgfx_fb.c @@ -1411,8 +1383,7 @@ struct vmw_cmdbuf_header; extern struct vmw_cmdbuf_man * vmw_cmdbuf_man_create(struct vmw_private *dev_priv); -extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, - size_t size, size_t default_size); +extern int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man, size_t size); extern void vmw_cmdbuf_remove_pool(struct vmw_cmdbuf_man *man); extern void vmw_cmdbuf_man_destroy(struct vmw_cmdbuf_man *man); extern int vmw_cmdbuf_idle(struct vmw_cmdbuf_man *man, bool interruptible, @@ -1581,28 +1552,29 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv) } /** - * vmw_mmio_read - Perform a MMIO read from volatile memory + * vmw_fifo_mem_read - Perform an MMIO read from the fifo memory * - * @addr: The address to read from + * @fifo_reg: The fifo register to read from * * This function is intended to be equivalent to ioread32() on * memremap'd memory, but without byteswapping. */ -static inline u32 vmw_mmio_read(u32 *addr) +static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, u32 fifo_reg) { - return READ_ONCE(*addr); + return READ_ONCE(*(vmw->fifo_mem + fifo_reg)); } /** - * vmw_mmio_write - Perform a MMIO write to volatile memory + * vmw_fifo_mem_write - Perform an MMIO write to the fifo memory * - * @addr: The address to write to + * @fifo_reg: The fifo register to write to * * This function is intended to be equivalent to iowrite32 on * memremap'd memory, but without byteswapping. 
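Because BAR 2 is memremap()'d as ordinary write-back memory rather than ioremap()'d MMIO, plain READ_ONCE()/WRITE_ONCE() accesses to u32 slots are sufficient here; no byteswapping or ordering barriers are implied. A usage sketch assembled from the cursor hunks in vmwgfx_kms.c further down (helper name illustrative; the real code also holds cursor_lock):

        static void example_move_cursor(struct vmw_private *vmw, int x, int y)
        {
                /* FIFO "registers" are u32 slots indexed from the start of BAR 2. */
                vmw_fifo_mem_write(vmw, SVGA_FIFO_CURSOR_X, x);
                vmw_fifo_mem_write(vmw, SVGA_FIFO_CURSOR_Y, y);
                vmw_fifo_mem_write(vmw, SVGA_FIFO_CURSOR_COUNT,
                                   vmw_fifo_mem_read(vmw, SVGA_FIFO_CURSOR_COUNT) + 1);
        }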
*/ -static inline void vmw_mmio_write(u32 value, u32 *addr) +static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg, + u32 value) { - WRITE_ONCE(*addr, value); + WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value); } #endif diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index e67e2e8f6e6f..462f17320708 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -724,7 +724,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) if (!dx_query_mob || dx_query_mob->dx_query_ctx) return 0; - cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id); + cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id); if (cmd == NULL) return -ENOMEM; @@ -732,7 +732,7 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res) cmd->header.size = sizeof(cmd->body); cmd->body.cid = ctx_res->id; cmd->body.mobid = dx_query_mob->base.mem.start; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_context_bind_dx_query(ctx_res, dx_query_mob); @@ -1042,7 +1042,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, if (unlikely(new_query_bo != sw_context->cur_query_bo)) { - if (unlikely(new_query_bo->base.num_pages > 4)) { + if (unlikely(new_query_bo->base.mem.num_pages > 4)) { VMW_DEBUG_USER("Query buffer too large.\n"); return -EINVAL; } @@ -1100,7 +1100,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv, BUG_ON(!ctx_entry->valid); ctx = ctx_entry->res; - ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id); + ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id); if (unlikely(ret != 0)) VMW_DEBUG_USER("Out of fifo space for dummy query.\n"); @@ -1541,7 +1541,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, return ret; /* Make sure DMA doesn't cross BO boundaries. */ - bo_size = vmw_bo->base.num_pages * PAGE_SIZE; + bo_size = vmw_bo->base.base.size; if (unlikely(cmd->body.guest.ptr.offset > bo_size)) { VMW_DEBUG_USER("Invalid DMA offset.\n"); return -EINVAL; @@ -3762,7 +3762,7 @@ int vmw_execbuf_fence_commands(struct drm_file *file_priv, /* p_handle implies file_priv. */ BUG_ON(p_handle != NULL && file_priv == NULL); - ret = vmw_fifo_send_fence(dev_priv, &sequence); + ret = vmw_cmd_send_fence(dev_priv, &sequence); if (unlikely(ret != 0)) { VMW_DEBUG_USER("Fence submission error. 
Syncing.\n"); synced = true; @@ -3876,10 +3876,10 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv, void *cmd; if (sw_context->dx_ctx_node) - cmd = VMW_FIFO_RESERVE_DX(dev_priv, command_size, + cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size, sw_context->dx_ctx_node->ctx->id); else - cmd = VMW_FIFO_RESERVE(dev_priv, command_size); + cmd = VMW_CMD_RESERVE(dev_priv, command_size); if (!cmd) return -ENOMEM; @@ -3888,7 +3888,7 @@ static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv, memcpy(cmd, kernel_commands, command_size); vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); vmw_resource_relocations_free(&sw_context->res_relocations); - vmw_fifo_commit(dev_priv, command_size); + vmw_cmd_commit(dev_priv, command_size); return 0; } @@ -4046,11 +4046,7 @@ int vmw_execbuf_process(struct drm_file *file_priv, } if (throttle_us) { - ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue, - throttle_us); - - if (ret) - goto out_free_fence_fd; + VMW_DEBUG_USER("Throttling is no longer supported.\n"); } kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands, @@ -4329,7 +4325,7 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv, if (dev_priv->query_cid_valid) { BUG_ON(fence != NULL); - ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid); + ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid); if (ret) goto out_no_emit; dev_priv->query_cid_valid = false; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c index 4d60201037d1..33f07abfc3ae 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c @@ -258,7 +258,7 @@ out_unreserve: if (w && h) { WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0, &clip, 1)); - vmw_fifo_flush(vmw_priv, false); + vmw_cmd_flush(vmw_priv, false); } out_unlock: mutex_unlock(&par->bo_mutex); @@ -481,7 +481,7 @@ static int vmw_fb_kms_detach(struct vmw_fb_par *par, DRM_ERROR("Could not unset a mode.\n"); return ret; } - drm_mode_destroy(par->vmw_priv->dev, par->set_mode); + drm_mode_destroy(&par->vmw_priv->drm, par->set_mode); par->set_mode = NULL; } @@ -567,7 +567,7 @@ static int vmw_fb_set_par(struct fb_info *info) struct drm_display_mode *mode; int ret; - mode = drm_mode_duplicate(vmw_priv->dev, &new_mode); + mode = drm_mode_duplicate(&vmw_priv->drm, &new_mode); if (!mode) { DRM_ERROR("Could not create new fb mode.\n"); return -ENOMEM; @@ -581,7 +581,7 @@ static int vmw_fb_set_par(struct fb_info *info) mode->hdisplay * DIV_ROUND_UP(var->bits_per_pixel, 8), mode->vdisplay)) { - drm_mode_destroy(vmw_priv->dev, mode); + drm_mode_destroy(&vmw_priv->drm, mode); return -EINVAL; } @@ -615,7 +615,7 @@ static int vmw_fb_set_par(struct fb_info *info) out_unlock: if (par->set_mode) - drm_mode_destroy(vmw_priv->dev, par->set_mode); + drm_mode_destroy(&vmw_priv->drm, par->set_mode); par->set_mode = mode; mutex_unlock(&par->bo_mutex); @@ -638,7 +638,7 @@ static const struct fb_ops vmw_fb_ops = { int vmw_fb_init(struct vmw_private *vmw_priv) { - struct device *device = &vmw_priv->dev->pdev->dev; + struct device *device = vmw_priv->drm.dev; struct vmw_fb_par *par; struct fb_info *info; unsigned fb_width, fb_height; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index 0f8d29397157..378ec7600154 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -141,8 +141,7 @@ static bool vmw_fence_enable_signaling(struct dma_fence *f) struct vmw_fence_manager 
*fman = fman_from_fence(fence); struct vmw_private *dev_priv = fman->dev_priv; - u32 *fifo_mem = dev_priv->mmio_virt; - u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE); + u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE); if (seqno - fence->base.seqno < VMW_FENCE_WRAP) return false; @@ -401,14 +400,12 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman, u32 passed_seqno) { u32 goal_seqno; - u32 *fifo_mem; struct vmw_fence_obj *fence; if (likely(!fman->seqno_valid)) return false; - fifo_mem = fman->dev_priv->mmio_virt; - goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL); + goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL); if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP)) return false; @@ -416,8 +413,9 @@ static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman, list_for_each_entry(fence, &fman->fence_list, head) { if (!list_empty(&fence->seq_passed_actions)) { fman->seqno_valid = true; - vmw_mmio_write(fence->base.seqno, - fifo_mem + SVGA_FIFO_FENCE_GOAL); + vmw_fifo_mem_write(fman->dev_priv, + SVGA_FIFO_FENCE_GOAL, + fence->base.seqno); break; } } @@ -445,18 +443,17 @@ static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence) { struct vmw_fence_manager *fman = fman_from_fence(fence); u32 goal_seqno; - u32 *fifo_mem; if (dma_fence_is_signaled_locked(&fence->base)) return false; - fifo_mem = fman->dev_priv->mmio_virt; - goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL); + goal_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE_GOAL); if (likely(fman->seqno_valid && goal_seqno - fence->base.seqno < VMW_FENCE_WRAP)) return false; - vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL); + vmw_fifo_mem_write(fman->dev_priv, SVGA_FIFO_FENCE_GOAL, + fence->base.seqno); fman->seqno_valid = true; return true; @@ -468,9 +465,8 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman) struct list_head action_list; bool needs_rerun; uint32_t seqno, new_seqno; - u32 *fifo_mem = fman->dev_priv->mmio_virt; - seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE); + seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE); rerun: list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { @@ -492,7 +488,7 @@ rerun: needs_rerun = vmw_fence_goal_new_locked(fman, seqno); if (unlikely(needs_rerun)) { - new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE); + new_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE); if (new_seqno != seqno) { seqno = new_seqno; goto rerun; @@ -1033,7 +1029,7 @@ int vmw_event_fence_action_queue(struct drm_file *file_priv, eaction->action.type = VMW_ACTION_EVENT; eaction->fence = vmw_fence_obj_reference(fence); - eaction->dev = fman->dev_priv->dev; + eaction->dev = &fman->dev_priv->drm; eaction->tv_sec = tv_sec; eaction->tv_usec = tv_usec; @@ -1055,7 +1051,7 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv, { struct vmw_event_fence_pending *event; struct vmw_fence_manager *fman = fman_from_fence(fence); - struct drm_device *dev = fman->dev_priv->dev; + struct drm_device *dev = &fman->dev_priv->drm; int ret; event = kzalloc(sizeof(*event), GFP_KERNEL); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c index 83c0d5a3e4fd..964ddf1ca57a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c @@ -51,7 +51,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv, uint32_t cmd_size = define_size + 
remap_size; uint32_t i; - cmd_orig = cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size); + cmd_orig = cmd = VMW_CMD_RESERVE(dev_priv, cmd_size); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -98,7 +98,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv, BUG_ON(cmd != cmd_orig + cmd_size / sizeof(*cmd)); - vmw_fifo_commit(dev_priv, cmd_size); + vmw_cmd_commit(dev_priv, cmd_size); return 0; } @@ -110,7 +110,7 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv, uint32_t define_size = sizeof(define_cmd) + 4; uint32_t *cmd; - cmd = VMW_FIFO_RESERVE(dev_priv, define_size); + cmd = VMW_CMD_RESERVE(dev_priv, define_size); if (unlikely(cmd == NULL)) return; @@ -120,7 +120,7 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv, *cmd++ = SVGA_CMD_DEFINE_GMR2; memcpy(cmd, &define_cmd, sizeof(define_cmd)); - vmw_fifo_commit(dev_priv, define_size); + vmw_cmd_commit(dev_priv, define_size); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index be325a62c178..1774960d1b89 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -29,7 +29,6 @@ */ #include "vmwgfx_drv.h" -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> #include <linux/idr.h> @@ -65,20 +64,19 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, spin_lock(&gman->lock); if (gman->max_gmr_pages > 0) { - gman->used_gmr_pages += bo->num_pages; + gman->used_gmr_pages += mem->num_pages; if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages)) goto nospace; } mem->mm_node = gman; mem->start = id; - mem->num_pages = bo->num_pages; spin_unlock(&gman->lock); return 0; nospace: - gman->used_gmr_pages -= bo->num_pages; + gman->used_gmr_pages -= mem->num_pages; spin_unlock(&gman->lock); ida_free(&gman->gmr_ida, id); return -ENOSPC; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index f681b7b4df1b..80af8772b8c2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -51,7 +51,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, param->value = vmw_overlay_num_free_overlays(dev_priv); break; case DRM_VMW_PARAM_3D: - param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0; + param->value = vmw_supports_3d(dev_priv) ? 1 : 0; break; case DRM_VMW_PARAM_HW_CAPS: param->value = dev_priv->capabilities; @@ -67,7 +67,6 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, break; case DRM_VMW_PARAM_FIFO_HW_VERSION: { - u32 *fifo_mem = dev_priv->mmio_virt; const struct vmw_fifo_state *fifo = &dev_priv->fifo; if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) { @@ -76,11 +75,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data, } param->value = - vmw_mmio_read(fifo_mem + - ((fifo->capabilities & - SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ? - SVGA_FIFO_3D_HWVERSION_REVISED : - SVGA_FIFO_3D_HWVERSION)); + vmw_fifo_mem_read(dev_priv, + ((fifo->capabilities & + SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ? 
+ SVGA_FIFO_3D_HWVERSION_REVISED : + SVGA_FIFO_3D_HWVERSION)); break; } case DRM_VMW_PARAM_MAX_SURF_MEMORY: @@ -235,7 +234,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data, if (unlikely(ret != 0)) goto out_err; } else { - fifo_mem = dev_priv->mmio_virt; + fifo_mem = dev_priv->fifo_mem; memcpy(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 75f3efee21a4..6c2a569f1fcb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c @@ -117,12 +117,10 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno) void vmw_update_seqno(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo_state) { - u32 *fifo_mem = dev_priv->mmio_virt; - uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE); + uint32_t seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE); if (dev_priv->last_read_seqno != seqno) { dev_priv->last_read_seqno = seqno; - vmw_marker_pull(&fifo_state->marker_queue, seqno); vmw_fences_update(dev_priv->fman); } } @@ -222,11 +220,9 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, } } finish_wait(&dev_priv->fence_queue, &__wait); - if (ret == 0 && fifo_idle) { - u32 *fifo_mem = dev_priv->mmio_virt; + if (ret == 0 && fifo_idle) + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, signal_seq); - vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE); - } wake_up_all(&dev_priv->fence_queue); out_err: if (fifo_idle) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index bc67f2b930e1..9a89f658e501 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -36,9 +36,6 @@ #include "vmwgfx_kms.h" -/* Might need a hrtimer here? */ -#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1) - void vmw_du_cleanup(struct vmw_display_unit *du) { drm_plane_cleanup(&du->primary); @@ -68,7 +65,7 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv, if (!image) return -EINVAL; - cmd = VMW_FIFO_RESERVE(dev_priv, cmd_size); + cmd = VMW_CMD_RESERVE(dev_priv, cmd_size); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -83,7 +80,7 @@ static int vmw_cursor_update_image(struct vmw_private *dev_priv, cmd->cursor.hotspotX = hotspotX; cmd->cursor.hotspotY = hotspotY; - vmw_fifo_commit_flush(dev_priv, cmd_size); + vmw_cmd_commit_flush(dev_priv, cmd_size); return 0; } @@ -128,15 +125,14 @@ err_unreserve: static void vmw_cursor_update_position(struct vmw_private *dev_priv, bool show, int x, int y) { - u32 *fifo_mem = dev_priv->mmio_virt; uint32_t count; spin_lock(&dev_priv->cursor_lock); - vmw_mmio_write(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON); - vmw_mmio_write(x, fifo_mem + SVGA_FIFO_CURSOR_X); - vmw_mmio_write(y, fifo_mem + SVGA_FIFO_CURSOR_Y); - count = vmw_mmio_read(fifo_mem + SVGA_FIFO_CURSOR_COUNT); - vmw_mmio_write(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 
1 : 0); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y); + count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT); + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count); spin_unlock(&dev_priv->cursor_lock); } @@ -236,7 +232,7 @@ err_unreserve: */ void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct vmw_display_unit *du; struct drm_crtc *crtc; @@ -252,7 +248,7 @@ void vmw_kms_legacy_hotspot_clear(struct vmw_private *dev_priv) void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct vmw_display_unit *du; struct drm_crtc *crtc; @@ -891,7 +887,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, bool is_bo_proxy) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct vmw_framebuffer_surface *vfbs; enum SVGA3dSurfaceFormat format; int ret; @@ -1003,11 +999,11 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer, struct drm_clip_rect norect; int ret, increment = 1; - drm_modeset_lock_all(dev_priv->dev); + drm_modeset_lock_all(&dev_priv->drm); ret = ttm_read_lock(&dev_priv->reservation_sem, true); if (unlikely(ret != 0)) { - drm_modeset_unlock_all(dev_priv->dev); + drm_modeset_unlock_all(&dev_priv->drm); return ret; } @@ -1033,10 +1029,10 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer, break; } - vmw_fifo_flush(dev_priv, false); + vmw_cmd_flush(dev_priv, false); ttm_read_unlock(&dev_priv->reservation_sem); - drm_modeset_unlock_all(dev_priv->dev); + drm_modeset_unlock_all(&dev_priv->drm); return ret; } @@ -1213,14 +1209,14 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, *mode_cmd) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct vmw_framebuffer_bo *vfbd; unsigned int requested_size; struct drm_format_name_buf format_name; int ret; requested_size = mode_cmd->height * mode_cmd->pitches[0]; - if (unlikely(requested_size > bo->base.num_pages * PAGE_SIZE)) { + if (unlikely(requested_size > bo->base.base.size)) { DRM_ERROR("Screen buffer object size is too small " "for requested mode.\n"); return -EINVAL; @@ -1319,7 +1315,7 @@ vmw_kms_new_framebuffer(struct vmw_private *dev_priv, bo && only_2d && mode_cmd->width > 64 && /* Don't create a proxy for cursor */ dev_priv->active_display_unit == vmw_du_screen_target) { - ret = vmw_create_bo_proxy(dev_priv->dev, mode_cmd, + ret = vmw_create_bo_proxy(&dev_priv->drm, mode_cmd, bo, &surface); if (ret) return ERR_PTR(ret); @@ -1768,7 +1764,7 @@ int vmw_kms_present(struct vmw_private *dev_priv, if (ret) return ret; - vmw_fifo_flush(dev_priv, false); + vmw_cmd_flush(dev_priv, false); return 0; } @@ -1780,7 +1776,7 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv) return; dev_priv->hotplug_mode_update_property = - drm_property_create_range(dev_priv->dev, + drm_property_create_range(&dev_priv->drm, DRM_MODE_PROP_IMMUTABLE, "hotplug_mode_update", 0, 1); @@ -1791,7 +1787,7 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv) int vmw_kms_init(struct vmw_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int ret; drm_mode_config_init(dev); @@ -1823,7 +1819,7 @@ int vmw_kms_close(struct vmw_private *dev_priv) * but 
since it destroys encoders and our destructor calls * drm_encoder_cleanup which takes the lock we deadlock. */ - drm_mode_config_cleanup(dev_priv->dev); + drm_mode_config_cleanup(&dev_priv->drm); if (dev_priv->active_display_unit == vmw_du_legacy) ret = vmw_kms_ldu_close_display(dev_priv); @@ -1876,11 +1872,11 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv, if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK) vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch); else if (vmw_fifo_have_pitchlock(vmw_priv)) - vmw_mmio_write(pitch, vmw_priv->mmio_virt + - SVGA_FIFO_PITCHLOCK); + vmw_fifo_mem_write(vmw_priv, SVGA_FIFO_PITCHLOCK, pitch); vmw_write(vmw_priv, SVGA_REG_WIDTH, width); vmw_write(vmw_priv, SVGA_REG_HEIGHT, height); - vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp); + if ((vmw_priv->capabilities & SVGA_CAP_8BIT_EMULATION) != 0) + vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp); if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) { DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n", @@ -1934,7 +1930,7 @@ void vmw_disable_vblank(struct drm_crtc *crtc) static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned int num_rects, struct drm_rect *rects) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct vmw_display_unit *du; struct drm_connector *con; struct drm_connector_list_iter conn_iter; @@ -2366,7 +2362,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, if (dirty->crtc) { units[num_units++] = vmw_crtc_to_du(dirty->crtc); } else { - list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, + list_for_each_entry(crtc, &dev_priv->drm.mode_config.crtc_list, head) { struct drm_plane *plane = crtc->primary; @@ -2386,7 +2382,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv, dirty->unit = unit; if (dirty->fifo_reserve_size > 0) { - dirty->cmd = VMW_FIFO_RESERVE(dev_priv, + dirty->cmd = VMW_CMD_RESERVE(dev_priv, dirty->fifo_reserve_size); if (!dirty->cmd) return -ENOMEM; @@ -2520,7 +2516,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res, if (!clips) return 0; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd) * num_clips); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd) * num_clips); if (!cmd) return -ENOMEM; @@ -2549,7 +2545,7 @@ int vmw_kms_update_proxy(struct vmw_resource *res, copy_size += sizeof(*cmd); } - vmw_fifo_commit(dev_priv, copy_size); + vmw_cmd_commit(dev_priv, copy_size); return 0; } @@ -2568,8 +2564,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, int i = 0; int ret = 0; - mutex_lock(&dev_priv->dev->mode_config.mutex); - list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list, + mutex_lock(&dev_priv->drm.mode_config.mutex); + list_for_each_entry(con, &dev_priv->drm.mode_config.connector_list, head) { if (i == unit) break; @@ -2577,7 +2573,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, ++i; } - if (&con->head == &dev_priv->dev->mode_config.connector_list) { + if (&con->head == &dev_priv->drm.mode_config.connector_list) { DRM_ERROR("Could not find initial display unit.\n"); ret = -EINVAL; goto out_unlock; @@ -2611,7 +2607,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv, } out_unlock: - mutex_unlock(&dev_priv->dev->mode_config.mutex); + mutex_unlock(&dev_priv->drm.mode_config.mutex); return ret; } @@ -2631,7 +2627,7 @@ vmw_kms_create_implicit_placement_property(struct vmw_private *dev_priv) return; dev_priv->implicit_placement_property = - drm_property_create_range(dev_priv->dev, + drm_property_create_range(&dev_priv->drm, 
DRM_MODE_PROP_IMMUTABLE, "implicit_placement", 0, 1); } @@ -2752,7 +2748,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update) goto out_unref; reserved_size = update->calc_fifo_size(update, num_hits); - cmd_start = VMW_FIFO_RESERVE(update->dev_priv, reserved_size); + cmd_start = VMW_CMD_RESERVE(update->dev_priv, reserved_size); if (!cmd_start) { ret = -ENOMEM; goto out_revert; @@ -2801,7 +2797,7 @@ int vmw_du_helper_plane_update(struct vmw_du_update_plane *update) if (reserved_size < submit_size) submit_size = 0; - vmw_fifo_commit(update->dev_priv, submit_size); + vmw_cmd_commit(update->dev_priv, submit_size); vmw_kms_helper_validation_finish(update->dev_priv, NULL, &val_ctx, update->out_fence, NULL); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index 9d1de5b5cc6a..9a9508edbc9e 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -125,7 +125,6 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv) vmw_write(dev_priv, SVGA_REG_DISPLAY_POSITION_Y, crtc->y); vmw_write(dev_priv, SVGA_REG_DISPLAY_WIDTH, crtc->mode.hdisplay); vmw_write(dev_priv, SVGA_REG_DISPLAY_HEIGHT, crtc->mode.vdisplay); - vmw_write(dev_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); i++; } @@ -355,7 +354,7 @@ static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = { static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) { struct vmw_legacy_display_unit *ldu; - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_plane *primary, *cursor; @@ -479,7 +478,7 @@ err_free: int vmw_kms_ldu_init_display(struct vmw_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int i, ret; if (dev_priv->ldu_priv) { @@ -554,7 +553,7 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv, } *cmd; fifo_size = sizeof(*cmd) * num_clips; - cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size); + cmd = VMW_CMD_RESERVE(dev_priv, fifo_size); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -567,6 +566,6 @@ int vmw_kms_ldu_do_bo_dirty(struct vmw_private *dev_priv, cmd[i].body.height = clips->y2 - clips->y1; } - vmw_fifo_commit(dev_priv, fifo_size); + vmw_cmd_commit(dev_priv, fifo_size); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c deleted file mode 100644 index e53bc639a754..000000000000 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c +++ /dev/null @@ -1,155 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/************************************************************************** - * - * Copyright 2010 VMware, Inc., Palo Alto, CA., USA - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - - -#include "vmwgfx_drv.h" - -struct vmw_marker { - struct list_head head; - uint32_t seqno; - u64 submitted; -}; - -void vmw_marker_queue_init(struct vmw_marker_queue *queue) -{ - INIT_LIST_HEAD(&queue->head); - queue->lag = 0; - queue->lag_time = ktime_get_raw_ns(); - spin_lock_init(&queue->lock); -} - -void vmw_marker_queue_takedown(struct vmw_marker_queue *queue) -{ - struct vmw_marker *marker, *next; - - spin_lock(&queue->lock); - list_for_each_entry_safe(marker, next, &queue->head, head) { - kfree(marker); - } - spin_unlock(&queue->lock); -} - -int vmw_marker_push(struct vmw_marker_queue *queue, - uint32_t seqno) -{ - struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL); - - if (unlikely(!marker)) - return -ENOMEM; - - marker->seqno = seqno; - marker->submitted = ktime_get_raw_ns(); - spin_lock(&queue->lock); - list_add_tail(&marker->head, &queue->head); - spin_unlock(&queue->lock); - - return 0; -} - -int vmw_marker_pull(struct vmw_marker_queue *queue, - uint32_t signaled_seqno) -{ - struct vmw_marker *marker, *next; - bool updated = false; - u64 now; - - spin_lock(&queue->lock); - now = ktime_get_raw_ns(); - - if (list_empty(&queue->head)) { - queue->lag = 0; - queue->lag_time = now; - updated = true; - goto out_unlock; - } - - list_for_each_entry_safe(marker, next, &queue->head, head) { - if (signaled_seqno - marker->seqno > (1 << 30)) - continue; - - queue->lag = now - marker->submitted; - queue->lag_time = now; - updated = true; - list_del(&marker->head); - kfree(marker); - } - -out_unlock: - spin_unlock(&queue->lock); - - return (updated) ? 
0 : -EBUSY; -} - -static u64 vmw_fifo_lag(struct vmw_marker_queue *queue) -{ - u64 now; - - spin_lock(&queue->lock); - now = ktime_get_raw_ns(); - queue->lag += now - queue->lag_time; - queue->lag_time = now; - spin_unlock(&queue->lock); - return queue->lag; -} - - -static bool vmw_lag_lt(struct vmw_marker_queue *queue, - uint32_t us) -{ - u64 cond = (u64) us * NSEC_PER_USEC; - - return vmw_fifo_lag(queue) <= cond; -} - -int vmw_wait_lag(struct vmw_private *dev_priv, - struct vmw_marker_queue *queue, uint32_t us) -{ - struct vmw_marker *marker; - uint32_t seqno; - int ret; - - while (!vmw_lag_lt(queue, us)) { - spin_lock(&queue->lock); - if (list_empty(&queue->head)) - seqno = atomic_read(&dev_priv->marker_seq); - else { - marker = list_first_entry(&queue->head, - struct vmw_marker, head); - seqno = marker->seqno; - } - spin_unlock(&queue->lock); - - ret = vmw_wait_seqno(dev_priv, false, seqno, true, - 3*HZ); - - if (unlikely(ret != 0)) - return ret; - - (void) vmw_marker_pull(queue, seqno); - } - return 0; -} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 7f95ed6aa224..a372980fe6a5 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -148,7 +148,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, mob->pt_level += VMW_MOBFMT_PTDEPTH_1 - SVGA3D_MOBFMT_PTDEPTH_1; } - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { ret = -ENOMEM; goto out_no_fifo; @@ -170,7 +170,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv, */ BUG_ON(mob->pt_level == VMW_MOBFMT_PTDEPTH_2); - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); otable->page_table = mob; return 0; @@ -203,7 +203,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, return; bo = otable->page_table->pt_bo; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return; @@ -215,7 +215,7 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv, cmd->body.sizeInBytes = 0; cmd->body.validSizeInBytes = 0; cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); if (bo) { int ret; @@ -558,12 +558,12 @@ void vmw_mob_unbind(struct vmw_private *dev_priv, BUG_ON(ret != 0); } - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (cmd) { cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB; cmd->header.size = sizeof(cmd->body); cmd->body.mobid = mob->id; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); } if (bo) { @@ -625,7 +625,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv, vmw_fifo_resource_inc(dev_priv); - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) goto out_no_cmd_space; @@ -636,7 +636,7 @@ int vmw_mob_bind(struct vmw_private *dev_priv, cmd->body.base = mob->pt_root_page >> PAGE_SHIFT; cmd->body.sizeInBytes = num_data_pages * PAGE_SIZE; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c index cd7ed1650d60..d6d282c13b7f 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c @@ -122,7 +122,7 @@ static int 
vmw_overlay_send_put(struct vmw_private *dev_priv, fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items; - cmds = VMW_FIFO_RESERVE(dev_priv, fifo_size); + cmds = VMW_CMD_RESERVE(dev_priv, fifo_size); /* hardware has hung, can't do anything here */ if (!cmds) return -ENOMEM; @@ -169,7 +169,7 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv, fill_flush(flush, arg->stream_id); - vmw_fifo_commit(dev_priv, fifo_size); + vmw_cmd_commit(dev_priv, fifo_size); return 0; } @@ -192,7 +192,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv, int ret; for (;;) { - cmds = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmds)); + cmds = VMW_CMD_RESERVE(dev_priv, sizeof(*cmds)); if (cmds) break; @@ -211,7 +211,7 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv, cmds->body.items[0].value = false; fill_flush(&cmds->flush, stream_id); - vmw_fifo_commit(dev_priv, sizeof(*cmds)); + vmw_cmd_commit(dev_priv, sizeof(*cmds)); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 0b76b3d17d4c..0a900afc66ff 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -232,7 +232,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) { struct vmw_bo_dirty *dirty = vbo->dirty; - pgoff_t num_pages = vbo->base.num_pages; + pgoff_t num_pages = vbo->base.mem.num_pages; size_t size, acc_size; int ret; static struct ttm_operation_ctx ctx = { @@ -413,7 +413,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf) return ret; page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); - if (unlikely(page_offset >= bo->num_pages)) { + if (unlikely(page_offset >= bo->mem.num_pages)) { ret = VM_FAULT_SIGBUS; goto out_unlock; } @@ -456,7 +456,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); - if (page_offset >= bo->num_pages || + if (page_offset >= bo->mem.num_pages || vmw_resources_clean(vbo, page_offset, page_offset + PAGE_SIZE, &allowed_prefault)) { @@ -531,7 +531,7 @@ vm_fault_t vmw_bo_vm_huge_fault(struct vm_fault *vmf, page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); - if (page_offset >= bo->num_pages || + if (page_offset >= bo->mem.num_pages || vmw_resources_clean(vbo, page_offset, page_offset + PAGE_SIZE, &allowed_prefault)) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 00b535831a7a..d1e7b9608145 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -360,7 +360,7 @@ static int vmw_resource_buf_alloc(struct vmw_resource *res, int ret; if (likely(res->backup)) { - BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size); + BUG_ON(res->backup->base.base.size < size); return 0; } @@ -827,7 +827,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob) dx_query_ctx = dx_query_mob->dx_query_ctx; dev_priv = dx_query_ctx->dev_priv; - cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id); + cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -835,7 +835,7 @@ int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob) cmd->header.size = sizeof(cmd->body); cmd->body.cid = dx_query_ctx->id; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); /* Triggers a rebind the next time affected 
context is bound */ dx_query_mob->dx_query_ctx = NULL; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 4bdad2f2d130..b0db059b8cfb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -132,7 +132,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, BUG_ON(!sou->buffer); fifo_size = sizeof(*cmd); - cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size); + cmd = VMW_CMD_RESERVE(dev_priv, fifo_size); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -153,7 +153,7 @@ static int vmw_sou_fifo_create(struct vmw_private *dev_priv, vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr); cmd->obj.backingStore.pitch = mode->hdisplay * 4; - vmw_fifo_commit(dev_priv, fifo_size); + vmw_cmd_commit(dev_priv, fifo_size); sou->defined = true; @@ -181,7 +181,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv, return 0; fifo_size = sizeof(*cmd); - cmd = VMW_FIFO_RESERVE(dev_priv, fifo_size); + cmd = VMW_CMD_RESERVE(dev_priv, fifo_size); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -189,7 +189,7 @@ static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv, cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN; cmd->body.screenId = sou->base.unit; - vmw_fifo_commit(dev_priv, fifo_size); + vmw_cmd_commit(dev_priv, fifo_size); /* Force sync */ ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ); @@ -829,7 +829,7 @@ static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = { static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) { struct vmw_screen_object_unit *sou; - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_plane *primary, *cursor; @@ -946,7 +946,7 @@ err_free: int vmw_kms_sou_init_display(struct vmw_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int i, ret; if (!(dev_priv->capabilities & SVGA_CAP_SCREEN_OBJECT_2)) { @@ -992,7 +992,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv, if (depth == 32) depth = 24; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (!cmd) return -ENOMEM; @@ -1003,7 +1003,7 @@ static int do_bo_define_gmrfb(struct vmw_private *dev_priv, cmd->body.bytesPerLine = framebuffer->base.pitches[0]; /* Buffer is reserved in vram or GMR */ vmw_bo_get_guest_ptr(&buf->base, &cmd->body.ptr); - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -1029,7 +1029,7 @@ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty) int i; if (!dirty->num_hits) { - vmw_fifo_commit(dirty->dev_priv, 0); + vmw_cmd_commit(dirty->dev_priv, 0); return; } @@ -1061,7 +1061,7 @@ static void vmw_sou_surface_fifo_commit(struct vmw_kms_dirty *dirty) blit->bottom -= sdirty->top; } - vmw_fifo_commit(dirty->dev_priv, region_size + sizeof(*cmd)); + vmw_cmd_commit(dirty->dev_priv, region_size + sizeof(*cmd)); sdirty->left = sdirty->top = S32_MAX; sdirty->right = sdirty->bottom = S32_MIN; @@ -1185,11 +1185,11 @@ out_unref: static void vmw_sou_bo_fifo_commit(struct vmw_kms_dirty *dirty) { if (!dirty->num_hits) { - vmw_fifo_commit(dirty->dev_priv, 0); + vmw_cmd_commit(dirty->dev_priv, 0); return; } - vmw_fifo_commit(dirty->dev_priv, + vmw_cmd_commit(dirty->dev_priv, sizeof(struct vmw_kms_sou_bo_blit) * dirty->num_hits); } @@ -1295,11 +1295,11 @@ out_unref: static void 
vmw_sou_readback_fifo_commit(struct vmw_kms_dirty *dirty) { if (!dirty->num_hits) { - vmw_fifo_commit(dirty->dev_priv, 0); + vmw_cmd_commit(dirty->dev_priv, 0); return; } - vmw_fifo_commit(dirty->dev_priv, + vmw_cmd_commit(dirty->dev_priv, sizeof(struct vmw_kms_sou_readback_blit) * dirty->num_hits); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c index f328aa5839a2..905ae50aaa2a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c @@ -222,7 +222,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res) goto out_no_fifo; } - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { ret = -ENOMEM; goto out_no_fifo; @@ -233,7 +233,7 @@ static int vmw_gb_shader_create(struct vmw_resource *res) cmd->body.shid = res->id; cmd->body.type = shader->type; cmd->body.sizeInBytes = shader->size; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_fifo_resource_inc(dev_priv); return 0; @@ -256,7 +256,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res, BUG_ON(bo->mem.mem_type != VMW_PL_MOB); - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -266,7 +266,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res, cmd->body.mobid = bo->mem.start; cmd->body.offsetInBytes = res->backup_offset; res->backup_dirty = false; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -284,7 +284,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res, BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -293,7 +293,7 @@ static int vmw_gb_shader_unbind(struct vmw_resource *res, cmd->body.shid = res->id; cmd->body.mobid = SVGA3D_INVALID_ID; cmd->body.offsetInBytes = 0; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); /* * Create a fence object and fence the backup buffer. 
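The VMW_FIFO_RESERVE/vmw_fifo_commit to VMW_CMD_RESERVE/vmw_cmd_commit renames in the hunks above all follow one pattern: reserve space in the device command stream, fill in a header plus body, then commit exactly the reserved size. A minimal sketch of that pairing, assuming the post-rename helpers; the vmw_send_nop() wrapper and its command layout are illustrative only, not driver code:

static int vmw_send_nop(struct vmw_private *dev_priv)
{
	struct {
		SVGA3dCmdHeader header;
		uint32 noop;
	} *cmd;

	/* Reserve space in the command stream; NULL means no space or a hung device. */
	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_NOP;
	cmd->header.size = sizeof(cmd->noop);
	cmd->noop = 0;

	/* Commit must cover exactly what was reserved. */
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	return 0;
}
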
@@ -324,7 +324,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res) mutex_lock(&dev_priv->binding_mutex); vmw_binding_res_list_scrub(&res->binding_head); - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) { mutex_unlock(&dev_priv->binding_mutex); return -ENOMEM; @@ -333,7 +333,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res) cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SHADER; cmd->header.size = sizeof(cmd->body); cmd->body.shid = res->id; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); mutex_unlock(&dev_priv->binding_mutex); vmw_resource_release_id(res); vmw_fifo_resource_dec(dev_priv); @@ -394,7 +394,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res) if (!list_empty(&shader->cotable_head) || !shader->committed) return 0; - cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), shader->ctx->id); + cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), shader->ctx->id); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -404,7 +404,7 @@ static int vmw_dx_shader_unscrub(struct vmw_resource *res) cmd->body.shid = shader->id; cmd->body.mobid = res->backup->base.mem.start; cmd->body.offsetInBytes = res->backup_offset; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_cotable_add_resource(shader->cotable, &shader->cotable_head); @@ -481,7 +481,7 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res) return 0; WARN_ON_ONCE(!shader->committed); - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -491,7 +491,7 @@ static int vmw_dx_shader_scrub(struct vmw_resource *res) cmd->body.shid = res->id; cmd->body.mobid = SVGA3D_INVALID_ID; cmd->body.offsetInBytes = 0; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); res->id = -1; list_del_init(&shader->cotable_head); @@ -856,8 +856,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, return ret; } - if ((u64)buffer->base.num_pages * PAGE_SIZE < - (u64)size + (u64)offset) { + if ((u64)buffer->base.base.size < (u64)size + (u64)offset) { VMW_DEBUG_USER("Illegal buffer- or shader size.\n"); ret = -EINVAL; goto out_bad_arg; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c index 3f97b61dd5d8..7369dd86d3a9 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c @@ -170,7 +170,7 @@ static int vmw_view_create(struct vmw_resource *res) return 0; } - cmd = VMW_FIFO_RESERVE_DX(res->dev_priv, view->cmd_size, view->ctx->id); + cmd = VMW_CMD_CTX_RESERVE(res->dev_priv, view->cmd_size, view->ctx->id); if (!cmd) { mutex_unlock(&dev_priv->binding_mutex); return -ENOMEM; @@ -181,7 +181,7 @@ static int vmw_view_create(struct vmw_resource *res) /* Sid may have changed due to surface eviction. 
*/ WARN_ON(view->srf->id == SVGA3D_INVALID_ID); cmd->body.sid = view->srf->id; - vmw_fifo_commit(res->dev_priv, view->cmd_size); + vmw_cmd_commit(res->dev_priv, view->cmd_size); res->id = view->view_id; list_add_tail(&view->srf_head, &srf->view_list); vmw_cotable_add_resource(view->cotable, &view->cotable_head); @@ -213,14 +213,14 @@ static int vmw_view_destroy(struct vmw_resource *res) if (!view->committed || res->id == -1) return 0; - cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), view->ctx->id); + cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), view->ctx->id); if (!cmd) return -ENOMEM; cmd->header.id = vmw_view_destroy_cmds[view->view_type]; cmd->header.size = sizeof(cmd->body); cmd->body.view_id = view->view_id; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); res->id = -1; list_del_init(&view->cotable_head); list_del_init(&view->srf_head); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index 5b04ec047ef3..fbe977881364 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -170,7 +170,7 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv, SVGA3dCmdDefineGBScreenTarget body; } *cmd; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -188,7 +188,7 @@ static int vmw_stdu_define_st(struct vmw_private *dev_priv, stdu->base.set_gui_x = cmd->body.xRoot; stdu->base.set_gui_y = cmd->body.yRoot; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); stdu->defined = true; stdu->display_width = mode->hdisplay; @@ -229,7 +229,7 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv, memset(&image, 0, sizeof(image)); image.sid = res ? 
res->id : SVGA3D_INVALID_ID; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -239,7 +239,7 @@ static int vmw_stdu_bind_st(struct vmw_private *dev_priv, cmd->body.stid = stdu->base.unit; cmd->body.image = image; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -293,7 +293,7 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv, return -EINVAL; } - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -301,7 +301,7 @@ static int vmw_stdu_update_st(struct vmw_private *dev_priv, 0, stdu->display_width, 0, stdu->display_height); - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); return 0; } @@ -329,7 +329,7 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv, if (unlikely(!stdu->defined)) return 0; - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(cmd == NULL)) return -ENOMEM; @@ -338,7 +338,7 @@ static int vmw_stdu_destroy_st(struct vmw_private *dev_priv, cmd->body.stid = stdu->base.unit; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); /* Force sync */ ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ); @@ -499,7 +499,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty) size_t blit_size = sizeof(*blit) * dirty->num_hits + sizeof(*suffix); if (!dirty->num_hits) { - vmw_fifo_commit(dirty->dev_priv, 0); + vmw_cmd_commit(dirty->dev_priv, 0); return; } @@ -512,7 +512,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty) cmd->body.host.mipmap = 0; cmd->body.transfer = ddirty->transfer; suffix->suffixSize = sizeof(*suffix); - suffix->maximumOffset = ddirty->buf->base.num_pages * PAGE_SIZE; + suffix->maximumOffset = ddirty->buf->base.base.size; if (ddirty->transfer == SVGA3D_WRITE_HOST_VRAM) { blit_size += sizeof(struct vmw_stdu_update); @@ -522,7 +522,7 @@ static void vmw_stdu_bo_fifo_commit(struct vmw_kms_dirty *dirty) ddirty->top, ddirty->bottom); } - vmw_fifo_commit(dirty->dev_priv, sizeof(*cmd) + blit_size); + vmw_cmd_commit(dirty->dev_priv, sizeof(*cmd) + blit_size); stdu->display_srf->res.res_dirty = true; ddirty->left = ddirty->top = S32_MAX; @@ -628,7 +628,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty) dev_priv = vmw_priv(stdu->base.crtc.dev); - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (!cmd) goto out_cleanup; @@ -636,7 +636,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty) region.x1, region.x2, region.y1, region.y2); - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); } out_cleanup: @@ -795,7 +795,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty) size_t commit_size; if (!dirty->num_hits) { - vmw_fifo_commit(dirty->dev_priv, 0); + vmw_cmd_commit(dirty->dev_priv, 0); return; } @@ -817,7 +817,7 @@ static void vmw_kms_stdu_surface_fifo_commit(struct vmw_kms_dirty *dirty) vmw_stdu_populate_update(update, stdu->base.unit, sdirty->left, sdirty->right, sdirty->top, sdirty->bottom); - vmw_fifo_commit(dirty->dev_priv, commit_size); + vmw_cmd_commit(dirty->dev_priv, commit_size); sdirty->left = sdirty->top = S32_MAX; sdirty->right = sdirty->bottom = S32_MIN; @@ -1238,7 +1238,7 @@ static uint32_t 
vmw_stdu_bo_populate_update(struct vmw_du_update_plane *update, vfbbo = container_of(update->vfb, typeof(*vfbbo), base); suffix->suffixSize = sizeof(*suffix); - suffix->maximumOffset = vfbbo->buffer->base.num_pages * PAGE_SIZE; + suffix->maximumOffset = vfbbo->buffer->base.base.size; vmw_stdu_populate_update(&suffix[1], stdu->base.unit, bb->x1, bb->x2, bb->y1, bb->y2); @@ -1713,7 +1713,7 @@ static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = { static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) { struct vmw_screen_target_display_unit *stdu; - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; struct drm_connector *connector; struct drm_encoder *encoder; struct drm_plane *primary, *cursor; @@ -1861,7 +1861,7 @@ static void vmw_stdu_destroy(struct vmw_screen_target_display_unit *stdu) */ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv) { - struct drm_device *dev = dev_priv->dev; + struct drm_device *dev = &dev_priv->drm; int i, ret; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c index 193192456663..1dd042a20a66 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_streamoutput.c @@ -99,7 +99,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res) if (!list_empty(&so->cotable_head) || !so->committed ) return 0; - cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id); + cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id); if (!cmd) return -ENOMEM; @@ -109,7 +109,7 @@ static int vmw_dx_streamoutput_unscrub(struct vmw_resource *res) cmd->body.mobid = res->backup->base.mem.start; cmd->body.offsetInBytes = res->backup_offset; cmd->body.sizeInBytes = so->size; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); vmw_cotable_add_resource(so->cotable, &so->cotable_head); @@ -172,7 +172,7 @@ static int vmw_dx_streamoutput_scrub(struct vmw_resource *res) WARN_ON_ONCE(!so->committed); - cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), so->ctx->id); + cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), so->ctx->id); if (!cmd) return -ENOMEM; @@ -182,7 +182,7 @@ static int vmw_dx_streamoutput_scrub(struct vmw_resource *res) cmd->body.mobid = SVGA3D_INVALID_ID; cmd->body.offsetInBytes = 0; cmd->body.sizeInBytes = so->size; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); res->id = -1; list_del_init(&so->cotable_head); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 3914bfee0533..f6cab77075a0 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -372,12 +372,12 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) if (res->id != -1) { - cmd = VMW_FIFO_RESERVE(dev_priv, vmw_surface_destroy_size()); + cmd = VMW_CMD_RESERVE(dev_priv, vmw_surface_destroy_size()); if (unlikely(!cmd)) return; vmw_surface_destroy_encode(res->id, cmd); - vmw_fifo_commit(dev_priv, vmw_surface_destroy_size()); + vmw_cmd_commit(dev_priv, vmw_surface_destroy_size()); /* * used_memory_size_atomic, or separate lock @@ -440,14 +440,14 @@ static int vmw_legacy_srf_create(struct vmw_resource *res) */ submit_size = vmw_surface_define_size(srf); - cmd = VMW_FIFO_RESERVE(dev_priv, submit_size); + cmd = VMW_CMD_RESERVE(dev_priv, submit_size); if (unlikely(!cmd)) { ret = -ENOMEM; goto out_no_fifo; } vmw_surface_define_encode(srf, cmd); - vmw_fifo_commit(dev_priv, 
submit_size); + vmw_cmd_commit(dev_priv, submit_size); vmw_fifo_resource_inc(dev_priv); /* @@ -492,14 +492,14 @@ static int vmw_legacy_srf_dma(struct vmw_resource *res, BUG_ON(!val_buf->bo); submit_size = vmw_surface_dma_size(srf); - cmd = VMW_FIFO_RESERVE(dev_priv, submit_size); + cmd = VMW_CMD_RESERVE(dev_priv, submit_size); if (unlikely(!cmd)) return -ENOMEM; vmw_bo_get_guest_ptr(val_buf->bo, &ptr); vmw_surface_dma_encode(srf, cmd, &ptr, bind); - vmw_fifo_commit(dev_priv, submit_size); + vmw_cmd_commit(dev_priv, submit_size); /* * Create a fence object and fence the backup buffer. @@ -578,12 +578,12 @@ static int vmw_legacy_srf_destroy(struct vmw_resource *res) */ submit_size = vmw_surface_destroy_size(); - cmd = VMW_FIFO_RESERVE(dev_priv, submit_size); + cmd = VMW_CMD_RESERVE(dev_priv, submit_size); if (unlikely(!cmd)) return -ENOMEM; vmw_surface_destroy_encode(res->id, cmd); - vmw_fifo_commit(dev_priv, submit_size); + vmw_cmd_commit(dev_priv, submit_size); /* * Surface memory usage accounting. @@ -1121,7 +1121,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res) submit_len = sizeof(*cmd); } - cmd = VMW_FIFO_RESERVE(dev_priv, submit_len); + cmd = VMW_CMD_RESERVE(dev_priv, submit_len); cmd2 = (typeof(cmd2))cmd; cmd3 = (typeof(cmd3))cmd; cmd4 = (typeof(cmd4))cmd; @@ -1188,7 +1188,7 @@ static int vmw_gb_surface_create(struct vmw_resource *res) cmd->body.size.depth = metadata->base_size.depth; } - vmw_fifo_commit(dev_priv, submit_len); + vmw_cmd_commit(dev_priv, submit_len); return 0; @@ -1219,7 +1219,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res, submit_size = sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); - cmd1 = VMW_FIFO_RESERVE(dev_priv, submit_size); + cmd1 = VMW_CMD_RESERVE(dev_priv, submit_size); if (unlikely(!cmd1)) return -ENOMEM; @@ -1233,7 +1233,7 @@ static int vmw_gb_surface_bind(struct vmw_resource *res, cmd2->header.size = sizeof(cmd2->body); cmd2->body.sid = res->id; } - vmw_fifo_commit(dev_priv, submit_size); + vmw_cmd_commit(dev_priv, submit_size); if (res->backup->dirty && res->backup_dirty) { /* We've just made a full upload. Clear dirty regions. */ @@ -1272,7 +1272,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res, BUG_ON(bo->mem.mem_type != VMW_PL_MOB); submit_size = sizeof(*cmd3) + (readback ? sizeof(*cmd1) : sizeof(*cmd2)); - cmd = VMW_FIFO_RESERVE(dev_priv, submit_size); + cmd = VMW_CMD_RESERVE(dev_priv, submit_size); if (unlikely(!cmd)) return -ENOMEM; @@ -1295,7 +1295,7 @@ static int vmw_gb_surface_unbind(struct vmw_resource *res, cmd3->body.sid = res->id; cmd3->body.mobid = SVGA3D_INVALID_ID; - vmw_fifo_commit(dev_priv, submit_size); + vmw_cmd_commit(dev_priv, submit_size); /* * Create a fence object and fence the backup buffer. 
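Several hunks in this file also move size validation from page counts to byte sizes: checks of the form num_pages * PAGE_SIZE become reads of the byte size carried by the GEM object embedded in the TTM BO (base.base.size). A short sketch of the migrated check, assuming that layout; vmw_check_backup_fits() is an illustrative helper, not driver code:

static int vmw_check_backup_fits(const struct vmw_buffer_object *vbo,
				 u64 offset, u64 size)
{
	/* Before: (u64)vbo->base.num_pages * PAGE_SIZE < size + offset */
	if ((u64)vbo->base.base.size < size + offset)
		return -EINVAL;	/* backing store too small */
	return 0;
}
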
@@ -1328,7 +1328,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) vmw_view_surface_list_destroy(dev_priv, &srf->view_list); vmw_binding_res_list_scrub(&res->binding_head); - cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd)); + cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd)); if (unlikely(!cmd)) { mutex_unlock(&dev_priv->binding_mutex); return -ENOMEM; @@ -1337,7 +1337,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res) cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE; cmd->header.size = sizeof(cmd->body); cmd->body.sid = res->id; - vmw_fifo_commit(dev_priv, sizeof(*cmd)); + vmw_cmd_commit(dev_priv, sizeof(*cmd)); mutex_unlock(&dev_priv->binding_mutex); vmw_resource_release_id(res); vmw_fifo_resource_dec(dev_priv); @@ -1550,8 +1550,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, &res->backup, &user_srf->backup_base); if (ret == 0) { - if (res->backup->base.num_pages * PAGE_SIZE < - res->backup_size) { + if (res->backup->base.base.size < res->backup_size) { VMW_DEBUG_USER("Surface backup buffer too small.\n"); vmw_bo_unreference(&res->backup); ret = -EINVAL; @@ -1614,7 +1613,7 @@ vmw_gb_surface_define_internal(struct drm_device *dev, if (res->backup) { rep->buffer_map_handle = drm_vma_node_offset_addr(&res->backup->base.base.vma_node); - rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; + rep->buffer_size = res->backup->base.base.size; rep->buffer_handle = backup_handle; } else { rep->buffer_map_handle = 0; @@ -1692,7 +1691,7 @@ vmw_gb_surface_reference_internal(struct drm_device *dev, rep->crep.buffer_handle = backup_handle; rep->crep.buffer_map_handle = drm_vma_node_offset_addr(&srf->res.backup->base.base.vma_node); - rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE; + rep->crep.buffer_size = srf->res.backup->base.base.size; rep->creq.version = drm_vmw_gb_surface_v1; rep->creq.svga3d_flags_upper_32_bits = @@ -1896,7 +1895,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res) goto out; alloc_size = num_dirty * ((has_dx) ? sizeof(*cmd1) : sizeof(*cmd2)); - cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size); + cmd = VMW_CMD_RESERVE(dev_priv, alloc_size); if (!cmd) return -ENOMEM; @@ -1932,7 +1931,7 @@ static int vmw_surface_dirty_sync(struct vmw_resource *res) } } - vmw_fifo_commit(dev_priv, alloc_size); + vmw_cmd_commit(dev_priv, alloc_size); out: memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) * dirty->num_subres); @@ -2032,14 +2031,14 @@ static int vmw_surface_clean(struct vmw_resource *res) } *cmd; alloc_size = sizeof(*cmd); - cmd = VMW_FIFO_RESERVE(dev_priv, alloc_size); + cmd = VMW_CMD_RESERVE(dev_priv, alloc_size); if (!cmd) return -ENOMEM; cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; cmd->header.size = sizeof(cmd->body); cmd->body.sid = res->id; - vmw_fifo_commit(dev_priv, alloc_size); + vmw_cmd_commit(dev_priv, alloc_size); return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index 155ca3a5c7e5..e8e79de255cf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -5,7 +5,6 @@ * Copyright (C) 2007-2019 Vmware, Inc. All rights reservedd. 
*/ #include "vmwgfx_drv.h" -#include <drm/ttm/ttm_module.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 6a04261ce760..dbb068830d80 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -309,7 +309,7 @@ void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt, */ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt) { - struct device *dev = vmw_tt->dev_priv->dev->dev; + struct device *dev = vmw_tt->dev_priv->drm.dev; dma_unmap_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0); vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents; @@ -330,7 +330,7 @@ static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt) */ static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt) { - struct device *dev = vmw_tt->dev_priv->dev->dev; + struct device *dev = vmw_tt->dev_priv->drm.dev; return dma_map_sgtable(dev, &vmw_tt->sgt, DMA_BIDIRECTIONAL, 0); } @@ -385,7 +385,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0, (unsigned long) vsgt->num_pages << PAGE_SHIFT, - dma_get_max_seg_size(dev_priv->dev->dev), + dma_get_max_seg_size(dev_priv->drm.dev), NULL, 0, GFP_KERNEL); if (IS_ERR(sg)) { ret = PTR_ERR(sg); @@ -611,8 +611,8 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo, vmw_be->mob = NULL; if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) - ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags, - ttm_cached); + ret = ttm_sg_tt_init(&vmw_be->dma_ttm, bo, page_flags, + ttm_cached); else ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags, ttm_cached); diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index 74db5a840bed..b293c67230ef 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -220,8 +220,8 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev, xen_obj->sgt_imported = sgt; - ret = drm_prime_sg_to_page_addr_arrays(sgt, xen_obj->pages, - NULL, xen_obj->num_pages); + ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages, + xen_obj->num_pages); if (ret < 0) return ERR_PTR(ret); diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c index c8f7b21fa09e..78d787afe594 100644 --- a/drivers/gpu/drm/zte/zx_plane.c +++ b/drivers/gpu/drm/zte/zx_plane.c @@ -438,15 +438,10 @@ static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = { .atomic_disable = zx_plane_atomic_disable, }; -static void zx_plane_destroy(struct drm_plane *plane) -{ - drm_plane_cleanup(plane); -} - static const struct drm_plane_funcs zx_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = zx_plane_destroy, + .destroy = drm_plane_cleanup, .reset = drm_atomic_helper_plane_reset, .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c index b4a31d506fcc..e617f60afeea 100644 --- a/drivers/gpu/ipu-v3/ipu-di.c +++ b/drivers/gpu/ipu-v3/ipu-di.c @@ -310,10 +310,6 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di, /* unused */ } , { /* unused */ - } , { - /* unused */ - } , { - /* unused */ }, }; /* can't use #7 and #8 for line active and pixel active counters */ diff 
--git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 1401fd52f37a..365e6ddbe90f 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -1038,7 +1038,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev) mutex_lock(&vgasr_mutex); vga_switcheroo_power_switch(pdev, VGA_SWITCHEROO_ON); mutex_unlock(&vgasr_mutex); - pci_wakeup_bus(pdev->bus); + pci_resume_bus(pdev->bus); return dev->bus->pm->runtime_resume(dev); }
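Earlier in this series, open-coded reads and writes through dev_priv->mmio_virt (vmw_mmio_read/vmw_mmio_write plus pointer arithmetic) are replaced by vmw_fifo_mem_read()/vmw_fifo_mem_write() taking the device and a register index. A plausible shape for those accessors, assuming fifo_mem is the mapped FIFO area indexed in 32-bit words as before; a sketch, not the authoritative definition:

static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, u32 fifo_reg)
{
	/* fifo_reg is a u32 index (SVGA_FIFO_*), not a byte offset. */
	return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}

static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
				      u32 value)
{
	WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}
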
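The many dev_priv->dev to &dev_priv->drm rewrites follow from embedding the DRM device in the driver-private structure instead of holding a pointer to a separately allocated one. A hedged sketch of that layout and the devm-managed allocation that usually accompanies it; vmw_priv_alloc() is illustrative only:

struct vmw_private {
	struct drm_device drm;	/* embedded, so &dev_priv->drm replaces dev_priv->dev */
	/* ... driver state ... */
};

static struct vmw_private *vmw_priv_alloc(struct pci_dev *pdev,
					  const struct drm_driver *driver)
{
	/* Allocates vmw_private with the embedded drm_device initialized. */
	return devm_drm_dev_alloc(&pdev->dev, driver,
				  struct vmw_private, drm);
}
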
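The fence and IRQ hunks above keep vmwgfx's unsigned wrap-around comparison, (a - b) < window, when deciding whether a 32-bit seqno has been reached. A tiny worked illustration of why the subtraction form survives the counter wrapping; the window value here is illustrative, the driver uses VMW_FENCE_WRAP:

static bool seqno_passed_example(u32 cur, u32 target)
{
	/*
	 * Example: cur = 2, target = 0xfffffffe. Unsigned subtraction
	 * gives 4, well inside the window, so the target counts as
	 * passed even though cur < target numerically; a plain
	 * cur >= target test would get this wrong right after the
	 * counter wraps.
	 */
	return (cur - target) < (1u << 30);
}
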
