Merge tag 'drm-misc-next-2022-10-20' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 6.2:

UAPI Changes:
  - Documentation for page-flip flags

Cross-subsystem Changes:
  - dma-buf: Add unlocked variant of vmapping and attachment-mapping
    functions

Core Changes:
  - atomic-helpers: CRTC primary plane test fixes
  - connector: TV API consistency and cmdline parsing improvements
  - crtc-helpers: Introduce drm_crtc_helper_atomic_check() helper
  - edid: Fixes for HFVSDB parsing
  - fourcc: Addition of the Vivante tiled modifier
  - makefile: Sort and reorganize the object files
  - mode_config: Remove fb_base from drm_mode_config_funcs
  - sched: Add a module parameter to change the scheduling policy and
    fix fence refcounting
  - tests: Sort the KUnit tests in the Makefile, improvements to the
    DP-MST tests
  - ttm: Remove unnecessary drm_mm_clean() call

Driver Changes:
  - New driver: ofdrm
  - Move all drivers to a common dma-buf locking convention
  - bridge:
    - adv7533: Remove dynamic lane switching
    - it6505: Runtime PM support
    - ps8640: Handle AUX defer messages
    - tc358775: Drop soft-reset over I2C
  - ast: Atomic gamma LUT support, conversion to SHMEM, various
    improvements
  - lcdif: Support for YUV planes
  - mgag200: Fix PLL setup on some revisions
  - udl: Modesetting improvements, hot-unplug support
  - vc4: Fix support for PAL-M

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20221020072405.g3o4hxuk75gmeumw@houat
commit b837d3db9a (Dave Airlie, 2022-10-25 11:42:01 +10:00)
110 changed files with 3854 additions and 1358 deletions

@ -119,6 +119,12 @@ DMA Buffer ioctls
.. kernel-doc:: include/uapi/linux/dma-buf.h
DMA-BUF locking convention
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/dma-buf/dma-buf.c
   :doc: locking convention
Kernel Functions and Structures Reference
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


@ -6692,6 +6692,7 @@ L: dri-devel@lists.freedesktop.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/drm_aperture.c
F: drivers/gpu/drm/tiny/ofdrm.c
F: drivers/gpu/drm/tiny/simpledrm.c
F: drivers/video/aperture.c
F: include/drm/drm_aperture.h


@ -657,7 +657,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->file = file;
mutex_init(&dmabuf->lock);
INIT_LIST_HEAD(&dmabuf->attachments);
mutex_lock(&db_list.lock);
@ -795,6 +794,70 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
return sg_table;
}
/**
* DOC: locking convention
*
* In order to avoid deadlock situations between dma-buf exports and importers,
* all dma-buf API users must follow the common dma-buf locking convention.
*
* Convention for importers
*
* 1. Importers must hold the dma-buf reservation lock when calling these
* functions:
*
* - dma_buf_pin()
* - dma_buf_unpin()
* - dma_buf_map_attachment()
* - dma_buf_unmap_attachment()
* - dma_buf_vmap()
* - dma_buf_vunmap()
*
* 2. Importers must not hold the dma-buf reservation lock when calling these
* functions:
*
* - dma_buf_attach()
* - dma_buf_dynamic_attach()
* - dma_buf_detach()
* - dma_buf_export()
* - dma_buf_fd()
* - dma_buf_get()
* - dma_buf_put()
* - dma_buf_mmap()
* - dma_buf_begin_cpu_access()
* - dma_buf_end_cpu_access()
* - dma_buf_map_attachment_unlocked()
* - dma_buf_unmap_attachment_unlocked()
* - dma_buf_vmap_unlocked()
* - dma_buf_vunmap_unlocked()
*
* Convention for exporters
*
* 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
* reservation and exporter can take the lock:
*
* - &dma_buf_ops.attach()
* - &dma_buf_ops.detach()
* - &dma_buf_ops.release()
* - &dma_buf_ops.begin_cpu_access()
* - &dma_buf_ops.end_cpu_access()
*
* 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
* reservation and exporter can't take the lock:
*
* - &dma_buf_ops.pin()
* - &dma_buf_ops.unpin()
* - &dma_buf_ops.map_dma_buf()
* - &dma_buf_ops.unmap_dma_buf()
* - &dma_buf_ops.mmap()
* - &dma_buf_ops.vmap()
* - &dma_buf_ops.vunmap()
*
* 3. Exporters must hold the dma-buf reservation lock when calling these
* functions:
*
* - dma_buf_move_notify()
*/
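To make the importer rules above concrete, here is a minimal sketch of an importer using the new unlocked wrappers (illustrative only; the function name and calling context are hypothetical, not part of this patch):

static int example_import(struct dma_buf *dmabuf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* Rule 2 for importers: attach/detach without the reservation lock. */
	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return PTR_ERR(attach);

	/* The _unlocked wrappers take and drop dmabuf->resv internally. */
	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		dma_buf_detach(dmabuf, attach);
		return PTR_ERR(sgt);
	}

	/* ... program the device to DMA to/from the buffer ... */

	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
	dma_buf_detach(dmabuf, attach);
	return 0;
}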
/**
* dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
* @dmabuf: [in] buffer to attach device to.
@ -859,8 +922,8 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
dma_buf_is_dynamic(dmabuf)) {
struct sg_table *sgt;
dma_resv_lock(attach->dmabuf->resv, NULL);
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_lock(attach->dmabuf->resv, NULL);
ret = dmabuf->ops->pin(attach);
if (ret)
goto err_unlock;
@ -873,8 +936,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
ret = PTR_ERR(sgt);
goto err_unpin;
}
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
dma_resv_unlock(attach->dmabuf->resv);
attach->sgt = sgt;
attach->dir = DMA_BIDIRECTIONAL;
}
@ -890,8 +952,7 @@ err_unpin:
dmabuf->ops->unpin(attach);
err_unlock:
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
dma_resv_unlock(attach->dmabuf->resv);
dma_buf_detach(dmabuf, attach);
return ERR_PTR(ret);
@ -937,21 +998,19 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
if (WARN_ON(!dmabuf || !attach))
return;
dma_resv_lock(attach->dmabuf->resv, NULL);
if (attach->sgt) {
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_lock(attach->dmabuf->resv, NULL);
__unmap_dma_buf(attach, attach->sgt, attach->dir);
if (dma_buf_is_dynamic(attach->dmabuf)) {
if (dma_buf_is_dynamic(attach->dmabuf))
dmabuf->ops->unpin(attach);
dma_resv_unlock(attach->dmabuf->resv);
}
}
dma_resv_lock(dmabuf->resv, NULL);
list_del(&attach->node);
dma_resv_unlock(dmabuf->resv);
if (dmabuf->ops->detach)
dmabuf->ops->detach(dmabuf, attach);
@ -1042,8 +1101,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
if (dma_buf_attachment_is_dynamic(attach))
dma_resv_assert_held(attach->dmabuf->resv);
dma_resv_assert_held(attach->dmabuf->resv);
if (attach->sgt) {
/*
@ -1058,7 +1116,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_assert_held(attach->dmabuf->resv);
if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
r = attach->dmabuf->ops->pin(attach);
if (r)
@ -1100,6 +1157,34 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
/**
* dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
* mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
* dma_buf_ops.
* @attach: [in] attachment whose scatterlist is to be returned
* @direction: [in] direction of DMA transfer
*
* Unlocked variant of dma_buf_map_attachment().
*/
struct sg_table *
dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
might_sleep();
if (WARN_ON(!attach || !attach->dmabuf))
return ERR_PTR(-EINVAL);
dma_resv_lock(attach->dmabuf->resv, NULL);
sg_table = dma_buf_map_attachment(attach, direction);
dma_resv_unlock(attach->dmabuf->resv);
return sg_table;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
/**
* dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
* deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
@ -1119,15 +1204,11 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
if (dma_buf_attachment_is_dynamic(attach))
dma_resv_assert_held(attach->dmabuf->resv);
dma_resv_assert_held(attach->dmabuf->resv);
if (attach->sgt == sg_table)
return;
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_assert_held(attach->dmabuf->resv);
__unmap_dma_buf(attach, sg_table, direction);
if (dma_buf_is_dynamic(attach->dmabuf) &&
@ -1136,6 +1217,31 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
/**
* dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
* deallocate the scatterlist associated. Is a wrapper for unmap_dma_buf() of
* dma_buf_ops.
* @attach: [in] attachment to unmap buffer from
* @sg_table: [in] scatterlist info of the buffer to unmap
* @direction: [in] direction of DMA transfer
*
* Unlocked variant of dma_buf_unmap_attachment().
*/
void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
struct sg_table *sg_table,
enum dma_data_direction direction)
{
might_sleep();
if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
return;
dma_resv_lock(attach->dmabuf->resv, NULL);
dma_buf_unmap_attachment(attach, sg_table, direction);
dma_resv_unlock(attach->dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
/**
* dma_buf_move_notify - notify attachments that DMA-buf is moving
*
@ -1347,6 +1453,8 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
unsigned long pgoff)
{
int ret;
if (WARN_ON(!dmabuf || !vma))
return -EINVAL;
@ -1367,7 +1475,11 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
vma_set_file(vma, dmabuf->file);
vma->vm_pgoff = pgoff;
return dmabuf->ops->mmap(dmabuf, vma);
dma_resv_lock(dmabuf->resv, NULL);
ret = dmabuf->ops->mmap(dmabuf, vma);
dma_resv_unlock(dmabuf->resv);
return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
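With this change, the exporter's &dma_buf_ops.mmap callback always runs under the reservation lock, matching rule 2 of the exporter convention above. A hedged exporter-side sketch (hypothetical exporter, not from this patch):

static int example_exporter_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	/* The caller, dma_buf_mmap(), already holds dmabuf->resv. */
	dma_resv_assert_held(dmabuf->resv);

	/* ... install vma->vm_ops or remap the exporter's backing pages ... */
	return 0;
}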
@ -1390,41 +1502,67 @@ EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
{
struct iosys_map ptr;
int ret = 0;
int ret;
iosys_map_clear(map);
if (WARN_ON(!dmabuf))
return -EINVAL;
dma_resv_assert_held(dmabuf->resv);
if (!dmabuf->ops->vmap)
return -EINVAL;
mutex_lock(&dmabuf->lock);
if (dmabuf->vmapping_counter) {
dmabuf->vmapping_counter++;
BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
*map = dmabuf->vmap_ptr;
goto out_unlock;
return 0;
}
BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
ret = dmabuf->ops->vmap(dmabuf, &ptr);
if (WARN_ON_ONCE(ret))
goto out_unlock;
return ret;
dmabuf->vmap_ptr = ptr;
dmabuf->vmapping_counter = 1;
*map = dmabuf->vmap_ptr;
out_unlock:
mutex_unlock(&dmabuf->lock);
return ret;
return 0;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
/**
* dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
* address space. Same restrictions as for vmap and friends apply.
* @dmabuf: [in] buffer to vmap
* @map: [out] returns the vmap pointer
*
* Unlocked version of dma_buf_vmap()
*
* Returns 0 on success, or a negative errno code otherwise.
*/
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
{
int ret;
iosys_map_clear(map);
if (WARN_ON(!dmabuf))
return -EINVAL;
dma_resv_lock(dmabuf->resv, NULL);
ret = dma_buf_vmap(dmabuf, map);
dma_resv_unlock(dmabuf->resv);
return ret;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, DMA_BUF);
/**
* dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
* @dmabuf: [in] buffer to vunmap
@ -1435,20 +1573,36 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
if (WARN_ON(!dmabuf))
return;
dma_resv_assert_held(dmabuf->resv);
BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
BUG_ON(dmabuf->vmapping_counter == 0);
BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
mutex_lock(&dmabuf->lock);
if (--dmabuf->vmapping_counter == 0) {
if (dmabuf->ops->vunmap)
dmabuf->ops->vunmap(dmabuf, map);
iosys_map_clear(&dmabuf->vmap_ptr);
}
mutex_unlock(&dmabuf->lock);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
/**
* dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
* @dmabuf: [in] buffer to vunmap
* @map: [in] vmap pointer to vunmap
*/
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
{
if (WARN_ON(!dmabuf))
return;
dma_resv_lock(dmabuf->resv, NULL);
dma_buf_vunmap(dmabuf, map);
dma_resv_unlock(dmabuf->resv);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, DMA_BUF);
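Taken together, the two wrappers let a caller that does not hold the reservation lock set up and tear down a kernel mapping; a minimal sketch (illustrative, the function name is made up):

static int example_cpu_access(struct dma_buf *dmabuf)
{
	struct iosys_map map;
	int ret;

	/* Takes and drops dmabuf->resv around dma_buf_vmap(). */
	ret = dma_buf_vmap_unlocked(dmabuf, &map);
	if (ret)
		return ret;

	/* ... CPU access through map.vaddr (or map.vaddr_iomem) ... */

	dma_buf_vunmap_unlocked(dmabuf, &map);
	return 0;
}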
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{


@ -5,32 +5,71 @@
CFLAGS-$(CONFIG_DRM_USE_DYNAMIC_DEBUG) += -DDYNAMIC_DEBUG_MODULE
drm-y := drm_aperture.o drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o \
drm_drv.o \
drm_sysfs.o drm_mm.o \
drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o drm_displayid.o \
drm_trace_points.o drm_prime.o \
drm_vma_manager.o \
drm_modeset_lock.o drm_atomic.o drm_bridge.o \
drm_framebuffer.o drm_connector.o drm_blend.o \
drm_encoder.o drm_mode_object.o drm_property.o \
drm_plane.o drm_color_mgmt.o drm_print.o \
drm_dumb_buffers.o drm_mode_config.o drm_vblank.o \
drm_syncobj.o drm_lease.o drm_writeback.o drm_client.o \
drm_client_modeset.o drm_atomic_uapi.o \
drm_managed.o drm_vblank_work.o
drm-$(CONFIG_DRM_LEGACY) += drm_agpsupport.o drm_bufs.o drm_context.o drm_dma.o \
drm_hashtab.o drm_irq.o drm_legacy_misc.o drm_lock.o \
drm_memory.o drm_scatter.o drm_vm.o
drm-y := \
drm_aperture.o \
drm_atomic.o \
drm_atomic_uapi.o \
drm_auth.o \
drm_blend.o \
drm_bridge.o \
drm_cache.o \
drm_client.o \
drm_client_modeset.o \
drm_color_mgmt.o \
drm_connector.o \
drm_crtc.o \
drm_displayid.o \
drm_drv.o \
drm_dumb_buffers.o \
drm_edid.o \
drm_encoder.o \
drm_file.o \
drm_fourcc.o \
drm_framebuffer.o \
drm_gem.o \
drm_ioctl.o \
drm_lease.o \
drm_managed.o \
drm_mm.o \
drm_mode_config.o \
drm_mode_object.o \
drm_modes.o \
drm_modeset_lock.o \
drm_plane.o \
drm_prime.o \
drm_print.o \
drm_property.o \
drm_syncobj.o \
drm_sysfs.o \
drm_trace_points.o \
drm_vblank.o \
drm_vblank_work.o \
drm_vma_manager.o \
drm_writeback.o
drm-$(CONFIG_DRM_LEGACY) += \
drm_agpsupport.o \
drm_bufs.o \
drm_context.o \
drm_dma.o \
drm_hashtab.o \
drm_irq.o \
drm_legacy_misc.o \
drm_lock.o \
drm_memory.o \
drm_scatter.o \
drm_vm.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
drm-$(CONFIG_PCI) += drm_pci.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DEBUG_FS) += \
drm_debugfs.o \
drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
drm-$(CONFIG_DRM_PRIVACY_SCREEN) += drm_privacy_screen.o drm_privacy_screen_x86.o
drm-$(CONFIG_DRM_PRIVACY_SCREEN) += \
drm_privacy_screen.o \
drm_privacy_screen_x86.o
obj-$(CONFIG_DRM) += drm.o
obj-$(CONFIG_DRM_NOMODESET) += drm_nomodeset.o
@ -59,16 +98,24 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
# Modesetting helpers
#
drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o \
drm_encoder_slave.o drm_flip_work.o \
drm_probe_helper.o \
drm_plane_helper.o drm_atomic_helper.o \
drm_kms_helper_common.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
drm_gem_atomic_helper.o \
drm_gem_framebuffer_helper.o \
drm_atomic_state_helper.o drm_damage_helper.o \
drm_format_helper.o drm_self_refresh_helper.o drm_rect.o
drm_kms_helper-y := \
drm_atomic_helper.o \
drm_atomic_state_helper.o \
drm_bridge_connector.o \
drm_crtc_helper.o \
drm_damage_helper.o \
drm_encoder_slave.o \
drm_flip_work.o \
drm_format_helper.o \
drm_gem_atomic_helper.o \
drm_gem_framebuffer_helper.o \
drm_kms_helper_common.o \
drm_modeset_helper.o \
drm_plane_helper.o \
drm_probe_helper.o \
drm_rect.o \
drm_self_refresh_helper.o \
drm_simple_kms_helper.o
drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o


@ -498,8 +498,6 @@ static int amdgpu_vkms_sw_init(void *handle)
adev_to_drm(adev)->mode_config.preferred_depth = 24;
adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;


@ -2800,8 +2800,6 @@ static int dce_v10_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;


@ -2918,8 +2918,6 @@ static int dce_v11_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;


@ -2675,7 +2675,6 @@ static int dce_v6_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.preferred_depth = 24;
adev_to_drm(adev)->mode_config.prefer_shadow = 1;
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)


@ -2701,8 +2701,6 @@ static int dce_v8_0_sw_init(void *handle)
adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;


@ -3816,8 +3816,6 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
/* indicates support for immediate flip */
adev_to_drm(adev)->mode_config.async_page_flip = true;
adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;


@ -66,8 +66,8 @@ void armada_gem_free_object(struct drm_gem_object *obj)
if (dobj->obj.import_attach) {
/* We only ever display imported data */
if (dobj->sgt)
dma_buf_unmap_attachment(dobj->obj.import_attach,
dobj->sgt, DMA_TO_DEVICE);
dma_buf_unmap_attachment_unlocked(dobj->obj.import_attach,
dobj->sgt, DMA_TO_DEVICE);
drm_prime_gem_destroy(&dobj->obj, NULL);
}
@ -539,8 +539,8 @@ int armada_gem_map_import(struct armada_gem_object *dobj)
{
int ret;
dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
DMA_TO_DEVICE);
dobj->sgt = dma_buf_map_attachment_unlocked(dobj->obj.import_attach,
DMA_TO_DEVICE);
if (IS_ERR(dobj->sgt)) {
ret = PTR_ERR(dobj->sgt);
dobj->sgt = NULL;


@ -2,10 +2,8 @@
config DRM_AST
tristate "AST server chips"
depends on DRM && PCI && MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
select DRM_VRAM_HELPER
select DRM_TTM
select DRM_TTM_HELPER
help
Say yes for experimental AST GPU driver. Do not enable
this driver without having a working -modesetting,

View File

@ -33,7 +33,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>
@ -63,7 +63,7 @@ static const struct drm_driver ast_driver = {
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
DRM_GEM_VRAM_DRIVER
DRM_GEM_SHMEM_DRIVER_OPS
};
/*


@ -87,7 +87,7 @@ enum ast_tx_chip {
#define AST_DRAM_8Gx16 8
/*
* Cursor plane
* Hardware cursor
*/
#define AST_MAX_HWC_WIDTH 64
@ -96,8 +96,6 @@ enum ast_tx_chip {
#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2)
#define AST_HWC_SIGNATURE_SIZE 32
#define AST_DEFAULT_HWC_NUM 2
/* define for signature structure */
#define AST_HWC_SIGNATURE_CHECKSUM 0x00
#define AST_HWC_SIGNATURE_SizeX 0x04
@ -107,22 +105,21 @@ enum ast_tx_chip {
#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
struct ast_cursor_plane {
/*
* Planes
*/
struct ast_plane {
struct drm_plane base;
struct {
struct drm_gem_vram_object *gbo;
struct iosys_map map;
u64 off;
} hwc[AST_DEFAULT_HWC_NUM];
unsigned int next_hwc_index;
void __iomem *vaddr;
u64 offset;
unsigned long size;
};
static inline struct ast_cursor_plane *
to_ast_cursor_plane(struct drm_plane *plane)
static inline struct ast_plane *to_ast_plane(struct drm_plane *plane)
{
return container_of(plane, struct ast_cursor_plane, base);
return container_of(plane, struct ast_plane, base);
}
/*
@ -175,8 +172,13 @@ struct ast_private {
uint32_t dram_type;
uint32_t mclk;
struct drm_plane primary_plane;
struct ast_cursor_plane cursor_plane;
void __iomem *vram;
unsigned long vram_base;
unsigned long vram_size;
unsigned long vram_fb_available;
struct ast_plane primary_plane;
struct ast_plane cursor_plane;
struct drm_crtc crtc;
struct {
struct {


@ -32,7 +32,6 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include "ast_drv.h"
@ -461,8 +460,8 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
/* map reserved buffer */
ast->dp501_fw_buf = NULL;
if (dev->vram_mm->vram_size < pci_resource_len(pdev, 0)) {
ast->dp501_fw_buf = pci_iomap_range(pdev, 0, dev->vram_mm->vram_size, 0);
if (ast->vram_size < pci_resource_len(pdev, 0)) {
ast->dp501_fw_buf = pci_iomap_range(pdev, 0, ast->vram_size, 0);
if (!ast->dp501_fw_buf)
drm_info(dev, "failed to map reserved buffer!\n");
}


@ -28,7 +28,6 @@
#include <linux/pci.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
@ -80,7 +79,6 @@ int ast_mm_init(struct ast_private *ast)
struct pci_dev *pdev = to_pci_dev(dev->dev);
resource_size_t base, size;
u32 vram_size;
int ret;
base = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
@ -91,11 +89,13 @@ int ast_mm_init(struct ast_private *ast)
vram_size = ast_get_vram_size(ast);
ret = drmm_vram_helper_init(dev, base, vram_size);
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
return ret;
}
ast->vram = devm_ioremap_wc(dev->dev, base, vram_size);
if (!ast->vram)
return -ENOMEM;
ast->vram_base = base;
ast->vram_size = vram_size;
ast->vram_fb_available = vram_size;
return 0;
}


@ -36,11 +36,13 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
@ -48,6 +50,8 @@
#include "ast_drv.h"
#include "ast_tables.h"
#define AST_LUT_SIZE 256
static inline void ast_load_palette_index(struct ast_private *ast,
u8 index, u8 red, u8 green,
u8 blue)
@ -62,20 +66,46 @@ static inline void ast_load_palette_index(struct ast_private *ast,
ast_io_read8(ast, AST_IO_SEQ_PORT);
}
static void ast_crtc_load_lut(struct ast_private *ast, struct drm_crtc *crtc)
static void ast_crtc_set_gamma_linear(struct ast_private *ast,
const struct drm_format_info *format)
{
u16 *r, *g, *b;
int i;
if (!crtc->enabled)
return;
switch (format->format) {
case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
for (i = 0; i < AST_LUT_SIZE; i++)
ast_load_palette_index(ast, i, i, i, i);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
&format->format);
break;
}
}
r = crtc->gamma_store;
g = r + crtc->gamma_size;
b = g + crtc->gamma_size;
static void ast_crtc_set_gamma(struct ast_private *ast,
const struct drm_format_info *format,
struct drm_color_lut *lut)
{
int i;
for (i = 0; i < 256; i++)
ast_load_palette_index(ast, i, *r++ >> 8, *g++ >> 8, *b++ >> 8);
switch (format->format) {
case DRM_FORMAT_C8: /* In this case, gamma table is used as color palette */
case DRM_FORMAT_RGB565:
case DRM_FORMAT_XRGB8888:
for (i = 0; i < AST_LUT_SIZE; i++)
ast_load_palette_index(ast, i,
lut[i].red >> 8,
lut[i].green >> 8,
lut[i].blue >> 8);
break;
default:
drm_warn_once(&ast->base, "Unsupported format %p4cc for gamma correction\n",
&format->format);
break;
}
}
static bool ast_get_vbios_mode_info(const struct drm_format_info *format,
@ -537,6 +567,29 @@ static void ast_wait_for_vretrace(struct ast_private *ast)
} while (!(vgair1 & AST_IO_VGAIR1_VREFRESH) && time_before(jiffies, timeout));
}
/*
* Planes
*/
static int ast_plane_init(struct drm_device *dev, struct ast_plane *ast_plane,
void __iomem *vaddr, u64 offset, unsigned long size,
uint32_t possible_crtcs,
const struct drm_plane_funcs *funcs,
const uint32_t *formats, unsigned int format_count,
const uint64_t *format_modifiers,
enum drm_plane_type type)
{
struct drm_plane *plane = &ast_plane->base;
ast_plane->vaddr = vaddr;
ast_plane->offset = offset;
ast_plane->size = size;
return drm_universal_plane_init(dev, plane, possible_crtcs, funcs,
formats, format_count, format_modifiers,
type, NULL);
}
/*
* Primary plane
*/
@ -550,52 +603,62 @@ static const uint32_t ast_primary_plane_formats[] = {
static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_crtc_state *crtc_state;
struct ast_crtc_state *ast_crtc_state;
struct drm_device *dev = plane->dev;
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_crtc_state *new_crtc_state = NULL;
struct ast_crtc_state *new_ast_crtc_state;
int ret;
if (!new_plane_state->crtc)
return 0;
if (new_plane_state->crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
false, true);
if (ret)
if (ret) {
return ret;
} else if (!new_plane_state->visible) {
if (drm_WARN_ON(dev, new_plane_state->crtc)) /* cannot legally happen */
return -EINVAL;
else
return 0;
}
if (!new_plane_state->visible)
return 0;
new_ast_crtc_state = to_ast_crtc_state(new_crtc_state);
ast_crtc_state = to_ast_crtc_state(crtc_state);
ast_crtc_state->format = new_plane_state->fb->format;
new_ast_crtc_state->format = new_plane_state->fb->format;
return 0;
}
static void
ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
static void ast_handle_damage(struct ast_plane *ast_plane, struct iosys_map *src,
struct drm_framebuffer *fb,
const struct drm_rect *clip)
{
struct iosys_map dst = IOSYS_MAP_INIT_VADDR(ast_plane->vaddr);
iosys_map_incr(&dst, drm_fb_clip_offset(fb->pitches[0], fb->format, clip));
drm_fb_memcpy(&dst, fb->pitches, src, fb, clip);
}
static void ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_device *dev = plane->dev;
struct ast_private *ast = to_ast_private(dev);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_gem_vram_object *gbo;
s64 gpu_addr;
struct drm_framebuffer *fb = new_state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_framebuffer *old_fb = old_plane_state->fb;
struct ast_plane *ast_plane = to_ast_plane(plane);
struct drm_rect damage;
struct drm_atomic_helper_damage_iter iter;
if (!old_fb || (fb->format != old_fb->format)) {
struct drm_crtc_state *crtc_state = new_state->crtc->state;
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
@ -603,20 +666,28 @@ ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
ast_set_vbios_color_reg(ast, fb->format, vbios_mode_info);
}
gbo = drm_gem_vram_of_gem(fb->obj[0]);
gpu_addr = drm_gem_vram_offset(gbo);
if (drm_WARN_ON_ONCE(dev, gpu_addr < 0))
return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. */
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
ast_handle_damage(ast_plane, shadow_plane_state->data, fb, &damage);
}
ast_set_offset_reg(ast, fb);
ast_set_start_address_crt1(ast, (u32)gpu_addr);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
/*
* Some BMCs stop scanning out the video signal after the driver
* reprogrammed the offset or scanout address. This stalls display
* output for several seconds and makes the display unusable.
* Therefore only update the offset if it changes and reprogram the
* address after enabling the plane.
*/
if (!old_fb || old_fb->pitches[0] != fb->pitches[0])
ast_set_offset_reg(ast, fb);
if (!old_fb) {
ast_set_start_address_crt1(ast, (u32)ast_plane->offset);
ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x1, 0xdf, 0x00);
}
}
static void
ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
static void ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ast_private *ast = to_ast_private(plane->dev);
@ -624,7 +695,7 @@ ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
}
static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = {
DRM_GEM_VRAM_PLANE_HELPER_FUNCS,
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ast_primary_plane_helper_atomic_check,
.atomic_update = ast_primary_plane_helper_atomic_update,
.atomic_disable = ast_primary_plane_helper_atomic_disable,
@ -634,27 +705,30 @@ static const struct drm_plane_funcs ast_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int ast_primary_plane_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
struct drm_plane *primary_plane = &ast->primary_plane;
struct ast_plane *ast_primary_plane = &ast->primary_plane;
struct drm_plane *primary_plane = &ast_primary_plane->base;
void __iomem *vaddr = ast->vram;
u64 offset = ast->vram_base;
unsigned long cursor_size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
unsigned long size = ast->vram_fb_available - cursor_size;
int ret;
ret = drm_universal_plane_init(dev, primary_plane, 0x01,
&ast_primary_plane_funcs,
ast_primary_plane_formats,
ARRAY_SIZE(ast_primary_plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
ret = ast_plane_init(dev, ast_primary_plane, vaddr, offset, size,
0x01, &ast_primary_plane_funcs,
ast_primary_plane_formats, ARRAY_SIZE(ast_primary_plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY);
if (ret) {
drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
drm_err(dev, "ast_plane_init() failed: %d\n", ret);
return ret;
}
drm_plane_helper_add(primary_plane, &ast_primary_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(primary_plane);
return 0;
}
@ -774,99 +848,79 @@ static const uint32_t ast_cursor_plane_formats[] = {
static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc_state *crtc_state;
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_framebuffer *new_fb = new_plane_state->fb;
struct drm_crtc_state *new_crtc_state = NULL;
int ret;
if (!new_plane_state->crtc)
return 0;
if (new_plane_state->crtc)
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
ret = drm_atomic_helper_check_plane_state(new_plane_state, new_crtc_state,
DRM_PLANE_NO_SCALING,
DRM_PLANE_NO_SCALING,
true, true);
if (ret)
if (ret || !new_plane_state->visible)
return ret;
if (!new_plane_state->visible)
return 0;
if (fb->width > AST_MAX_HWC_WIDTH || fb->height > AST_MAX_HWC_HEIGHT)
if (new_fb->width > AST_MAX_HWC_WIDTH || new_fb->height > AST_MAX_HWC_HEIGHT)
return -EINVAL;
return 0;
}
static void
ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
static void ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane);
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
struct drm_framebuffer *fb = new_state->fb;
struct ast_plane *ast_plane = to_ast_plane(plane);
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct ast_private *ast = to_ast_private(plane->dev);
struct iosys_map dst_map =
ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].map;
u64 dst_off =
ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].off;
struct iosys_map src_map = shadow_plane_state->data[0];
struct drm_rect damage;
const u8 *src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
u64 dst_off = ast_plane->offset;
u8 __iomem *dst = ast_plane->vaddr; /* TODO: Use mapping abstraction properly */
u8 __iomem *sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
unsigned int offset_x, offset_y;
u16 x, y;
u8 x_offset, y_offset;
u8 __iomem *dst;
u8 __iomem *sig;
const u8 *src;
src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
/*
* Do data transfer to HW cursor BO. If a new cursor image was installed,
* point the scanout engine to dst_gbo's offset and page-flip the HWC buffers.
* Do data transfer to hardware buffer and point the scanout
* engine to the offset.
*/
ast_update_cursor_image(dst, src, fb->width, fb->height);
if (new_state->fb != old_state->fb) {
if (drm_atomic_helper_damage_merged(old_plane_state, plane_state, &damage)) {
ast_update_cursor_image(dst, src, fb->width, fb->height);
ast_set_cursor_base(ast, dst_off);
++ast_cursor_plane->next_hwc_index;
ast_cursor_plane->next_hwc_index %= ARRAY_SIZE(ast_cursor_plane->hwc);
}
/*
* Update location in HWC signature and registers.
*/
writel(new_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
writel(new_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
writel(plane_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
writel(plane_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
offset_x = AST_MAX_HWC_WIDTH - fb->width;
offset_y = AST_MAX_HWC_HEIGHT - fb->height;
if (new_state->crtc_x < 0) {
x_offset = (-new_state->crtc_x) + offset_x;
if (plane_state->crtc_x < 0) {
x_offset = (-plane_state->crtc_x) + offset_x;
x = 0;
} else {
x_offset = offset_x;
x = new_state->crtc_x;
x = plane_state->crtc_x;
}
if (new_state->crtc_y < 0) {
y_offset = (-new_state->crtc_y) + offset_y;
if (plane_state->crtc_y < 0) {
y_offset = (-plane_state->crtc_y) + offset_y;
y = 0;
} else {
y_offset = offset_y;
y = new_state->crtc_y;
y = plane_state->crtc_y;
}
ast_set_cursor_location(ast, x, y, x_offset, y_offset);
@ -875,9 +929,8 @@ ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
ast_set_cursor_enabled(ast, true);
}
static void
ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
static void ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct ast_private *ast = to_ast_private(plane->dev);
@ -891,41 +944,22 @@ static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
.atomic_disable = ast_cursor_plane_helper_atomic_disable,
};
static void ast_cursor_plane_destroy(struct drm_plane *plane)
{
struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane);
size_t i;
struct drm_gem_vram_object *gbo;
struct iosys_map map;
for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) {
gbo = ast_cursor_plane->hwc[i].gbo;
map = ast_cursor_plane->hwc[i].map;
drm_gem_vram_vunmap(gbo, &map);
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
drm_plane_cleanup(plane);
}
static const struct drm_plane_funcs ast_cursor_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = ast_cursor_plane_destroy,
.destroy = drm_plane_cleanup,
DRM_GEM_SHADOW_PLANE_FUNCS,
};
static int ast_cursor_plane_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
struct ast_plane *ast_cursor_plane = &ast->cursor_plane;
struct drm_plane *cursor_plane = &ast_cursor_plane->base;
size_t size, i;
struct drm_gem_vram_object *gbo;
struct iosys_map map;
size_t size;
void __iomem *vaddr;
u64 offset;
int ret;
s64 off;
/*
* Allocate backing storage for cursors. The BOs are permanently
@ -934,60 +968,26 @@ static int ast_cursor_plane_init(struct ast_private *ast)
size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) {
gbo = drm_gem_vram_create(dev, size, 0);
if (IS_ERR(gbo)) {
ret = PTR_ERR(gbo);
goto err_hwc;
}
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
if (ret)
goto err_drm_gem_vram_put;
ret = drm_gem_vram_vmap(gbo, &map);
if (ret)
goto err_drm_gem_vram_unpin;
off = drm_gem_vram_offset(gbo);
if (off < 0) {
ret = off;
goto err_drm_gem_vram_vunmap;
}
ast_cursor_plane->hwc[i].gbo = gbo;
ast_cursor_plane->hwc[i].map = map;
ast_cursor_plane->hwc[i].off = off;
}
if (ast->vram_fb_available < size)
return -ENOMEM;
/*
* Create the cursor plane. The plane's destroy callback will release
* the backing storages' BO memory.
*/
vaddr = ast->vram + ast->vram_fb_available - size;
offset = ast->vram_base + ast->vram_fb_available - size;
ret = drm_universal_plane_init(dev, cursor_plane, 0x01,
&ast_cursor_plane_funcs,
ast_cursor_plane_formats,
ARRAY_SIZE(ast_cursor_plane_formats),
NULL, DRM_PLANE_TYPE_CURSOR, NULL);
ret = ast_plane_init(dev, ast_cursor_plane, vaddr, offset, size,
0x01, &ast_cursor_plane_funcs,
ast_cursor_plane_formats, ARRAY_SIZE(ast_cursor_plane_formats),
NULL, DRM_PLANE_TYPE_CURSOR);
if (ret) {
drm_err(dev, "drm_universal_plane failed(): %d\n", ret);
goto err_hwc;
drm_err(dev, "ast_plane_init() failed: %d\n", ret);
return ret;
}
drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(cursor_plane);
ast->vram_fb_available -= size;
return 0;
err_hwc:
while (i) {
--i;
gbo = ast_cursor_plane->hwc[i].gbo;
map = ast_cursor_plane->hwc[i].map;
err_drm_gem_vram_vunmap:
drm_gem_vram_vunmap(gbo, &map);
err_drm_gem_vram_unpin:
drm_gem_vram_unpin(gbo);
err_drm_gem_vram_put:
drm_gem_vram_put(gbo);
}
return ret;
}
/*
@ -1026,9 +1026,11 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
ast_set_color_reg(ast, format);
ast_set_vbios_color_reg(ast, format, vbios_mode_info);
if (crtc->state->gamma_lut)
ast_crtc_set_gamma(ast, format, crtc->state->gamma_lut->data);
else
ast_crtc_set_gamma_linear(ast, format);
}
ast_crtc_load_lut(ast, crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
@ -1123,47 +1125,50 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
struct drm_device *dev = crtc->dev;
struct ast_crtc_state *ast_state;
const struct drm_format_info *format;
bool succ;
int ret;
ret = drm_atomic_helper_check_crtc_state(crtc_state, false);
if (!crtc_state->enable)
return 0;
ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state);
if (ret)
return ret;
if (!crtc_state->enable)
goto out;
ast_state = to_ast_crtc_state(crtc_state);
format = ast_state->format;
if (drm_WARN_ON_ONCE(dev, !format))
return -EINVAL; /* BUG: We didn't set format in primary check(). */
/*
* The gamma LUT has to be reloaded after changing the primary
* plane's color format.
*/
if (old_ast_crtc_state->format != format)
crtc_state->color_mgmt_changed = true;
if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) {
if (crtc_state->gamma_lut->length !=
AST_LUT_SIZE * sizeof(struct drm_color_lut)) {
drm_err(dev, "Wrong size for gamma_lut %zu\n",
crtc_state->gamma_lut->length);
return -EINVAL;
}
}
succ = ast_get_vbios_mode_info(format, &crtc_state->mode,
&crtc_state->adjusted_mode,
&ast_state->vbios_mode_info);
if (!succ)
return -EINVAL;
out:
return drm_atomic_add_affected_planes(state, crtc);
}
static void ast_crtc_helper_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
/*
* Concurrent operations could possibly trigger a call to
* drm_connector_helper_funcs.get_modes by trying to read the
* display modes. Protect access to I/O registers by acquiring
* the I/O-register lock. Released in atomic_flush().
*/
mutex_lock(&ast->ioregs_lock);
return 0;
}
static void
@ -1172,35 +1177,34 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
{
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
crtc);
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
/*
* The gamma LUT has to be reloaded after changing the primary
* plane's color format.
*/
if (old_ast_crtc_state->format != ast_crtc_state->format)
ast_crtc_load_lut(ast, crtc);
if (crtc_state->enable && crtc_state->color_mgmt_changed) {
if (crtc_state->gamma_lut)
ast_crtc_set_gamma(ast,
ast_crtc_state->format,
crtc_state->gamma_lut->data);
else
ast_crtc_set_gamma_linear(ast, ast_crtc_state->format);
}
//Set Aspeed Display-Port
if (ast->tx_chip_types & AST_TX_ASTDP_BIT)
ast_dp_set_mode(crtc, vbios_mode_info);
mutex_unlock(&ast->ioregs_lock);
}
static void
ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
static void ast_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
struct drm_crtc_state *crtc_state = crtc->state;
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info =
&ast_crtc_state->vbios_mode_info;
@ -1217,12 +1221,9 @@ ast_crtc_helper_atomic_enable(struct drm_crtc *crtc,
ast_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}
static void
ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
static void ast_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
crtc);
struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
struct drm_device *dev = crtc->dev;
struct ast_private *ast = to_ast_private(dev);
@ -1250,7 +1251,6 @@ ast_crtc_helper_atomic_disable(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs ast_crtc_helper_funcs = {
.mode_valid = ast_crtc_helper_mode_valid,
.atomic_check = ast_crtc_helper_atomic_check,
.atomic_begin = ast_crtc_helper_atomic_begin,
.atomic_flush = ast_crtc_helper_atomic_flush,
.atomic_enable = ast_crtc_helper_atomic_enable,
.atomic_disable = ast_crtc_helper_atomic_disable,
@ -1317,13 +1317,15 @@ static int ast_crtc_init(struct drm_device *dev)
struct drm_crtc *crtc = &ast->crtc;
int ret;
ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane,
ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane.base,
&ast->cursor_plane.base, &ast_crtc_funcs,
NULL);
if (ret)
return ret;
drm_mode_crtc_set_gamma_size(crtc, 256);
drm_mode_crtc_set_gamma_size(crtc, AST_LUT_SIZE);
drm_crtc_enable_color_mgmt(crtc, 0, false, AST_LUT_SIZE);
drm_crtc_helper_add(crtc, &ast_crtc_helper_funcs);
return 0;
@ -1718,13 +1720,46 @@ static int ast_astdp_output_init(struct ast_private *ast)
* Mode config
*/
static void ast_mode_config_helper_atomic_commit_tail(struct drm_atomic_state *state)
{
struct ast_private *ast = to_ast_private(state->dev);
/*
* Concurrent operations could possibly trigger a call to
* drm_connector_helper_funcs.get_modes by trying to read the
* display modes. Protect access to I/O registers by acquiring
* the I/O-register lock. Released in atomic_flush().
*/
mutex_lock(&ast->ioregs_lock);
drm_atomic_helper_commit_tail_rpm(state);
mutex_unlock(&ast->ioregs_lock);
}
static const struct drm_mode_config_helper_funcs ast_mode_config_helper_funcs = {
.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
.atomic_commit_tail = ast_mode_config_helper_atomic_commit_tail,
};
static enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
struct ast_private *ast = to_ast_private(dev);
unsigned long fbsize, fbpages, max_fbpages;
max_fbpages = (ast->vram_fb_available) >> PAGE_SHIFT;
fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
if (fbpages > max_fbpages)
return MODE_MEM;
return MODE_OK;
}
static const struct drm_mode_config_funcs ast_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.mode_valid = drm_vram_helper_mode_valid,
.fb_create = drm_gem_fb_create_with_dirty,
.mode_valid = ast_mode_config_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@ -1732,7 +1767,6 @@ static const struct drm_mode_config_funcs ast_mode_config_funcs = {
int ast_mode_config_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
ret = drmm_mode_config_init(dev);
@ -1743,8 +1777,6 @@ int ast_mode_config_init(struct ast_private *ast)
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
dev->mode_config.fb_base = pci_resource_start(pdev, 0);
if (ast->chip == AST2100 ||
ast->chip == AST2200 ||
@ -1761,7 +1793,6 @@ int ast_mode_config_init(struct ast_private *ast)
dev->mode_config.helper_private = &ast_mode_config_helper_funcs;
ret = ast_primary_plane_init(ast);
if (ret)
return ret;


@ -402,7 +402,8 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
const struct drm_display_mode *mode);
int adv7533_patch_registers(struct adv7511 *adv);
int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);


@ -697,7 +697,7 @@ adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
}
static enum drm_mode_status adv7511_mode_valid(struct adv7511 *adv7511,
struct drm_display_mode *mode)
const struct drm_display_mode *mode)
{
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
@ -791,9 +791,6 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_mode_set(adv7511, adj_mode);
drm_mode_copy(&adv7511->curr_mode, adj_mode);
/*
@ -913,6 +910,18 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
adv7511_mode_set(adv, mode, adj_mode);
}
static enum drm_mode_status adv7511_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
const struct drm_display_mode *mode)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
if (adv->type == ADV7533 || adv->type == ADV7535)
return adv7533_mode_valid(adv, mode);
else
return adv7511_mode_valid(adv, mode);
}
static int adv7511_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
@ -960,6 +969,7 @@ static const struct drm_bridge_funcs adv7511_bridge_funcs = {
.enable = adv7511_bridge_enable,
.disable = adv7511_bridge_disable,
.mode_set = adv7511_bridge_mode_set,
.mode_valid = adv7511_bridge_mode_valid,
.attach = adv7511_bridge_attach,
.detect = adv7511_bridge_detect,
.get_edid = adv7511_bridge_get_edid,


@ -100,26 +100,27 @@ void adv7533_dsi_power_off(struct adv7511 *adv)
regmap_write(adv->regmap_cec, 0x27, 0x0b);
}
void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode)
enum drm_mode_status adv7533_mode_valid(struct adv7511 *adv,
const struct drm_display_mode *mode)
{
int lanes;
struct mipi_dsi_device *dsi = adv->dsi;
int lanes, ret;
if (adv->num_dsi_lanes != 4)
return;
if (mode->clock > 80000)
lanes = 4;
else
lanes = 3;
if (lanes != dsi->lanes) {
mipi_dsi_detach(dsi);
dsi->lanes = lanes;
ret = mipi_dsi_attach(dsi);
if (ret)
dev_err(&dsi->dev, "failed to change host lanes\n");
}
/*
* TODO: add support for dynamic switching of lanes
* by using the bridge pre_enable() op. Until then, filter
* out the modes which need a different number of lanes
* than what was configured in the device tree.
*/
if (lanes != dsi->lanes)
return MODE_BAD;
return MODE_OK;
}
int adv7533_patch_registers(struct adv7511 *adv)


@ -421,6 +421,7 @@ struct it6505 {
struct notifier_block event_nb;
struct extcon_dev *extcon;
struct work_struct extcon_wq;
int extcon_state;
enum drm_connector_status connector_status;
enum link_train_status link_state;
struct work_struct link_works;
@ -2685,31 +2686,41 @@ static void it6505_extcon_work(struct work_struct *work)
{
struct it6505 *it6505 = container_of(work, struct it6505, extcon_wq);
struct device *dev = &it6505->client->dev;
int state = extcon_get_state(it6505->extcon, EXTCON_DISP_DP);
unsigned int pwroffretry = 0;
int state, ret;
if (it6505->enable_drv_hold)
return;
mutex_lock(&it6505->extcon_lock);
state = extcon_get_state(it6505->extcon, EXTCON_DISP_DP);
DRM_DEV_DEBUG_DRIVER(dev, "EXTCON_DISP_DP = 0x%02x", state);
if (state > 0) {
if (state == it6505->extcon_state || unlikely(state < 0))
goto unlock;
it6505->extcon_state = state;
if (state) {
DRM_DEV_DEBUG_DRIVER(dev, "start to power on");
msleep(100);
it6505_poweron(it6505);
ret = pm_runtime_get_sync(dev);
/*
* On system resume, extcon_work can be triggered before
* pm_runtime_force_resume re-enables runtime power management.
* Handling the error here to make sure the bridge is powered on.
*/
if (ret)
it6505_poweron(it6505);
} else {
DRM_DEV_DEBUG_DRIVER(dev, "start to power off");
while (it6505_poweroff(it6505) && pwroffretry++ < 5) {
DRM_DEV_DEBUG_DRIVER(dev, "power off fail %d times",
pwroffretry);
}
pm_runtime_put_sync(dev);
drm_helper_hpd_irq_event(it6505->bridge.dev);
memset(it6505->dpcd, 0, sizeof(it6505->dpcd));
DRM_DEV_DEBUG_DRIVER(dev, "power off it6505 success!");
}
unlock:
mutex_unlock(&it6505->extcon_lock);
}
@ -2980,6 +2991,28 @@ static void it6505_bridge_atomic_disable(struct drm_bridge *bridge,
}
}
static void it6505_bridge_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = &it6505->client->dev;
DRM_DEV_DEBUG_DRIVER(dev, "start");
pm_runtime_get_sync(dev);
}
static void it6505_bridge_atomic_post_disable(struct drm_bridge *bridge,
struct drm_bridge_state *old_state)
{
struct it6505 *it6505 = bridge_to_it6505(bridge);
struct device *dev = &it6505->client->dev;
DRM_DEV_DEBUG_DRIVER(dev, "start");
pm_runtime_put_sync(dev);
}
static enum drm_connector_status
it6505_bridge_detect(struct drm_bridge *bridge)
{
@ -3014,6 +3047,8 @@ static const struct drm_bridge_funcs it6505_bridge_funcs = {
.mode_valid = it6505_bridge_mode_valid,
.atomic_enable = it6505_bridge_atomic_enable,
.atomic_disable = it6505_bridge_atomic_disable,
.atomic_pre_enable = it6505_bridge_atomic_pre_enable,
.atomic_post_disable = it6505_bridge_atomic_post_disable,
.detect = it6505_bridge_detect,
.get_edid = it6505_bridge_get_edid,
};
@ -3032,8 +3067,10 @@ static __maybe_unused int it6505_bridge_suspend(struct device *dev)
return it6505_poweroff(it6505);
}
static SIMPLE_DEV_PM_OPS(it6505_bridge_pm_ops, it6505_bridge_suspend,
it6505_bridge_resume);
static const struct dev_pm_ops it6505_bridge_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(it6505_bridge_suspend, it6505_bridge_resume, NULL)
};
static int it6505_init_pdata(struct it6505 *it6505)
{
@ -3315,6 +3352,7 @@ static int it6505_i2c_probe(struct i2c_client *client,
DRM_DEV_DEBUG_DRIVER(dev, "it6505 device name: %s", dev_name(dev));
debugfs_init(it6505);
pm_runtime_enable(dev);
it6505->bridge.funcs = &it6505_bridge_funcs;
it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort;


@ -286,7 +286,6 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux,
}
switch (data & SWAUX_STATUS_MASK) {
/* Ignore the DEFER cases as they are already handled in hardware */
case SWAUX_STATUS_NACK:
case SWAUX_STATUS_I2C_NACK:
/*
@ -303,6 +302,14 @@ static ssize_t ps8640_aux_transfer_msg(struct drm_dp_aux *aux,
case SWAUX_STATUS_ACKM:
len = data & SWAUX_M_MASK;
break;
case SWAUX_STATUS_DEFER:
case SWAUX_STATUS_I2C_DEFER:
if (is_native_aux)
msg->reply |= DP_AUX_NATIVE_REPLY_DEFER;
else
msg->reply |= DP_AUX_I2C_REPLY_DEFER;
len = data & SWAUX_M_MASK;
break;
case SWAUX_STATUS_INVALID:
return -EOPNOTSUPP;
case SWAUX_STATUS_TIMEOUT:


@ -408,7 +408,7 @@ static void tc_bridge_enable(struct drm_bridge *bridge)
(val >> 8) & 0xFF, val & 0xFF);
d2l_write(tc->i2c, SYSRST, SYS_RST_REG | SYS_RST_DSIRX | SYS_RST_BM |
SYS_RST_LCD | SYS_RST_I2CM | SYS_RST_I2CS);
SYS_RST_LCD | SYS_RST_I2CM);
usleep_range(30000, 40000);
d2l_write(tc->i2c, PPI_TX_RX_TA, TTA_GET | TTA_SURE);


@ -3,13 +3,15 @@
obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
drm_display_helper-y := drm_display_helper_mod.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += drm_dp_dual_mode_helper.o \
drm_dp_helper.o \
drm_dp_mst_topology.o \
drm_dsc_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
drm_dp_dual_mode_helper.o \
drm_dp_helper.o \
drm_dp_mst_topology.o \
drm_dsc_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDCP_HELPER) += drm_hdcp_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += drm_hdmi_helper.o \
drm_scdc_helper.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_HDMI_HELPER) += \
drm_hdmi_helper.o \
drm_scdc_helper.o
drm_display_helper-$(CONFIG_DRM_DP_AUX_CHARDEV) += drm_dp_aux_dev.o
drm_display_helper-$(CONFIG_DRM_DP_CEC) += drm_dp_cec.o


@ -924,59 +924,35 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
EXPORT_SYMBOL(drm_atomic_helper_check_plane_state);
/**
* drm_atomic_helper_check_crtc_state() - Check CRTC state for validity
* drm_atomic_helper_check_crtc_primary_plane() - Check CRTC state for primary plane
* @crtc_state: CRTC state to check
* @can_disable_primary_planes: can the CRTC be enabled without a primary plane?
*
* Checks that a desired CRTC update is valid. Drivers that provide
* their own CRTC handling rather than helper-provided implementations may
* still wish to call this function to avoid duplication of error checking
* code.
*
* Note that @can_disable_primary_planes only tests if the CRTC can be
* enabled without a primary plane. To test if a primary plane can be updated
* without a CRTC, use drm_atomic_helper_check_plane_state() in the plane's
* atomic check.
* Checks that a CRTC has at least one primary plane attached to it, which is
* a requirement on some hardware. Note that this only involves the CRTC side
* of the test. To test if the primary plane is visible or if it can be updated
* without the CRTC being enabled, use drm_atomic_helper_check_plane_state() in
* the plane's atomic check.
*
* RETURNS:
* Zero if update appears valid, error code on failure
* 0 if a primary plane is attached to the CRTC, or an error code otherwise
*/
int drm_atomic_helper_check_crtc_state(struct drm_crtc_state *crtc_state,
bool can_disable_primary_planes)
int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state)
{
struct drm_device *dev = crtc_state->crtc->dev;
struct drm_atomic_state *state = crtc_state->state;
if (!crtc_state->enable)
return 0;
struct drm_crtc *crtc = crtc_state->crtc;
struct drm_device *dev = crtc->dev;
struct drm_plane *plane;
/* needs at least one primary plane to be enabled */
if (!can_disable_primary_planes) {
bool has_primary_plane = false;
struct drm_plane *plane;
drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
struct drm_plane_state *plane_state;
if (plane->type != DRM_PLANE_TYPE_PRIMARY)
continue;
plane_state = drm_atomic_get_plane_state(state, plane);
if (IS_ERR(plane_state))
return PTR_ERR(plane_state);
if (plane_state->fb && plane_state->crtc) {
has_primary_plane = true;
break;
}
}
if (!has_primary_plane) {
drm_dbg_kms(dev, "Cannot enable CRTC without a primary plane.\n");
return -EINVAL;
}
drm_for_each_plane_mask(plane, dev, crtc_state->plane_mask) {
if (plane->type == DRM_PLANE_TYPE_PRIMARY)
return 0;
}
return 0;
drm_dbg_atomic(dev, "[CRTC:%d:%s] primary plane missing\n", crtc->base.id, crtc->name);
return -EINVAL;
}
EXPORT_SYMBOL(drm_atomic_helper_check_crtc_state);
EXPORT_SYMBOL(drm_atomic_helper_check_crtc_primary_plane);
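/*
 * A hedged sketch of a driver-side caller, assuming a CRTC that must
 * not be enabled without its primary plane (driver names hypothetical;
 * the drm_simple_kms_helper and mgag200 hunks later in this series are
 * the in-tree users):
 */
static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_atomic_state *state)
{
	struct drm_crtc_state *new_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	/* A CRTC that stays off trivially passes the test. */
	if (!new_state->enable)
		return 0;

	return drm_atomic_helper_check_crtc_primary_plane(new_state);
}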
/**
* drm_atomic_helper_check_planes - validate state object for planes changes


@ -464,12 +464,12 @@ void drm_atomic_helper_connector_reset(struct drm_connector *connector)
EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
/**
* drm_atomic_helper_connector_tv_reset - Resets TV connector properties
* drm_atomic_helper_connector_tv_margins_reset - Resets TV connector properties
* @connector: DRM connector
*
* Resets the TV-related properties attached to a connector.
*/
void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector)
void drm_atomic_helper_connector_tv_margins_reset(struct drm_connector *connector)
{
struct drm_cmdline_mode *cmdline = &connector->cmdline_mode;
struct drm_connector_state *state = connector->state;
@ -479,7 +479,7 @@ void drm_atomic_helper_connector_tv_reset(struct drm_connector *connector)
state->tv.margins.top = cmdline->tv_margins.top;
state->tv.margins.bottom = cmdline->tv_margins.bottom;
}
EXPORT_SYMBOL(drm_atomic_helper_connector_tv_reset);
EXPORT_SYMBOL(drm_atomic_helper_connector_tv_margins_reset);
/**
* __drm_atomic_helper_connector_duplicate_state - copy atomic connector state


@ -687,6 +687,8 @@ static int drm_atomic_connector_set_property(struct drm_connector *connector,
*/
return -EINVAL;
} else if (property == config->tv_select_subconnector_property) {
state->tv.select_subconnector = val;
} else if (property == config->tv_subconnector_property) {
state->tv.subconnector = val;
} else if (property == config->tv_left_margin_property) {
state->tv.margins.left = val;
@ -795,6 +797,8 @@ drm_atomic_connector_get_property(struct drm_connector *connector,
else
*val = connector->dpms;
} else if (property == config->tv_select_subconnector_property) {
*val = state->tv.select_subconnector;
} else if (property == config->tv_subconnector_property) {
*val = state->tv.subconnector;
} else if (property == config->tv_left_margin_property) {
*val = state->tv.margins.left;


@ -323,7 +323,7 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer,
* fd_install step out of the driver backend hooks, to make that
* final step optional for internal users.
*/
ret = drm_gem_vmap(buffer->gem, map);
ret = drm_gem_vmap_unlocked(buffer->gem, map);
if (ret)
return ret;
@ -345,7 +345,7 @@ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer)
{
struct iosys_map *map = &buffer->map;
drm_gem_vunmap(buffer->gem, map);
drm_gem_vunmap_unlocked(buffer->gem, map);
}
EXPORT_SYMBOL(drm_client_buffer_vunmap);


@ -434,6 +434,32 @@ done:
}
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
/**
* drm_crtc_helper_atomic_check() - Helper to check CRTC atomic-state
* @crtc: CRTC to check
* @state: atomic state object
*
* Provides a default CRTC-state check handler for CRTCs that only have
* one primary plane attached to them.
*
* This is often the case for the CRTC of simple framebuffers. See also
* drm_plane_helper_atomic_check() for the respective plane-state check
* helper function.
*
* RETURNS:
* Zero on success, or an errno code otherwise.
*/
int drm_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (!new_crtc_state->enable)
return 0;
return drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
}
EXPORT_SYMBOL(drm_crtc_helper_atomic_check);
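/*
 * The helper drops straight into a CRTC's helper vtable; a hedged
 * sketch for a driver whose CRTC carries a single full-screen primary
 * plane (names hypothetical):
 */
static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.atomic_check = drm_crtc_helper_atomic_check,
	/* .mode_set_nofb, .atomic_enable, ... as the driver requires */
};

/* ... at init time: */
drm_crtc_helper_add(&foo->crtc, &foo_crtc_helper_funcs);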
static void
drm_crtc_helper_disable(struct drm_crtc *crtc)
{


@ -1572,15 +1572,6 @@ struct drm_edid {
const struct edid *edid;
};
static bool version_greater(const struct drm_edid *drm_edid,
u8 version, u8 revision)
{
const struct edid *edid = drm_edid->edid;
return edid->version > version ||
(edid->version == version && edid->revision > revision);
}
static int edid_hfeeodb_extension_block_count(const struct edid *edid);
static int edid_hfeeodb_block_count(const struct edid *edid)
@ -2984,7 +2975,7 @@ is_rb(const struct detailed_timing *descriptor, void *data)
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.formula.cvt.flags) != 15);
if (descriptor->data.other_data.data.range.flags == DRM_EDID_CVT_SUPPORT_FLAG &&
descriptor->data.other_data.data.range.formula.cvt.flags & 0x10)
descriptor->data.other_data.data.range.formula.cvt.flags & DRM_EDID_CVT_FLAGS_REDUCED_BLANKING)
*res = true;
}
@ -3012,7 +3003,7 @@ find_gtf2(const struct detailed_timing *descriptor, void *data)
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.flags) != 10);
if (descriptor->data.other_data.data.range.flags == 0x02)
if (descriptor->data.other_data.data.range.flags == DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG)
*res = descriptor;
}
@ -3077,20 +3068,53 @@ drm_gtf2_2j(const struct drm_edid *drm_edid)
return descriptor ? descriptor->data.other_data.data.range.formula.gtf2.j : 0;
}
static void
get_timing_level(const struct detailed_timing *descriptor, void *data)
{
int *res = data;
if (!is_display_descriptor(descriptor, EDID_DETAIL_MONITOR_RANGE))
return;
BUILD_BUG_ON(offsetof(typeof(*descriptor), data.other_data.data.range.flags) != 10);
switch (descriptor->data.other_data.data.range.flags) {
case DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG:
*res = LEVEL_GTF;
break;
case DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG:
*res = LEVEL_GTF2;
break;
case DRM_EDID_CVT_SUPPORT_FLAG:
*res = LEVEL_CVT;
break;
default:
break;
}
}
/* Get standard timing level (CVT/GTF/DMT). */
static int standard_timing_level(const struct drm_edid *drm_edid)
{
const struct edid *edid = drm_edid->edid;
if (edid->revision >= 2) {
if (edid->revision >= 4 && (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF))
return LEVEL_CVT;
if (drm_gtf2_hbreak(drm_edid))
return LEVEL_GTF2;
if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
return LEVEL_GTF;
if (edid->revision >= 4) {
/*
* If the range descriptor doesn't
* indicate otherwise default to CVT
*/
int ret = LEVEL_CVT;
drm_for_each_detailed_block(drm_edid, get_timing_level, &ret);
return ret;
} else if (edid->revision >= 3 && drm_gtf2_hbreak(drm_edid)) {
return LEVEL_GTF2;
} else if (edid->revision >= 2) {
return LEVEL_GTF;
} else {
return LEVEL_DMT;
}
return LEVEL_DMT;
}
/*
@ -3113,6 +3137,35 @@ static int drm_mode_hsync(const struct drm_display_mode *mode)
return DIV_ROUND_CLOSEST(mode->clock, mode->htotal);
}
static struct drm_display_mode *
drm_gtf2_mode(struct drm_device *dev,
const struct drm_edid *drm_edid,
int hsize, int vsize, int vrefresh_rate)
{
struct drm_display_mode *mode;
/*
* This is potentially wrong if there's ever a monitor with
* more than one ranges section, each claiming a different
* secondary GTF curve. Please don't do that.
*/
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
if (!mode)
return NULL;
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(drm_edid)) {
drm_mode_destroy(dev, mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
vrefresh_rate, 0, 0,
drm_gtf2_m(drm_edid),
drm_gtf2_2c(drm_edid),
drm_gtf2_k(drm_edid),
drm_gtf2_2j(drm_edid));
}
return mode;
}
/*
* Take the standard timing params (in this case width, aspect, and refresh)
* and convert them into a real mode using CVT/GTF/DMT.
@ -3201,23 +3254,7 @@ static struct drm_display_mode *drm_mode_std(struct drm_connector *connector,
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
break;
case LEVEL_GTF2:
/*
* This is potentially wrong if there's ever a monitor with
* more than one ranges section, each claiming a different
* secondary GTF curve. Please don't do that.
*/
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
if (!mode)
return NULL;
if (drm_mode_hsync(mode) > drm_gtf2_hbreak(drm_edid)) {
drm_mode_destroy(dev, mode);
mode = drm_gtf_mode_complex(dev, hsize, vsize,
vrefresh_rate, 0, 0,
drm_gtf2_m(drm_edid),
drm_gtf2_2c(drm_edid),
drm_gtf2_k(drm_edid),
drm_gtf2_2j(drm_edid));
}
mode = drm_gtf2_mode(dev, drm_edid, hsize, vsize, vrefresh_rate);
break;
case LEVEL_CVT:
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
@ -3415,7 +3452,7 @@ range_pixel_clock(const struct edid *edid, const u8 *t)
return 0;
/* 1.4 with CVT support gives us real precision, yay */
if (edid->revision >= 4 && t[10] == 0x04)
if (edid->revision >= 4 && t[10] == DRM_EDID_CVT_SUPPORT_FLAG)
return (t[9] * 10000) - ((t[12] >> 2) * 250);
/* 1.3 is pathetic, so fuzz up a bit */
@ -3441,7 +3478,7 @@ static bool mode_in_range(const struct drm_display_mode *mode,
return false;
/* 1.4 max horizontal check */
if (edid->revision >= 4 && t[10] == 0x04)
if (edid->revision >= 4 && t[10] == DRM_EDID_CVT_SUPPORT_FLAG)
if (t[13] && mode->hdisplay > 8 * (t[13] + (256 * (t[12]&0x3))))
return false;
@ -3533,6 +3570,35 @@ static int drm_gtf_modes_for_range(struct drm_connector *connector,
return modes;
}
static int drm_gtf2_modes_for_range(struct drm_connector *connector,
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
{
int i, modes = 0;
struct drm_display_mode *newmode;
struct drm_device *dev = connector->dev;
for (i = 0; i < ARRAY_SIZE(extra_modes); i++) {
const struct minimode *m = &extra_modes[i];
newmode = drm_gtf2_mode(dev, drm_edid, m->w, m->h, m->r);
if (!newmode)
return modes;
drm_mode_fixup_1366x768(newmode);
if (!mode_in_range(newmode, drm_edid, timing) ||
!valid_inferred_mode(connector, newmode)) {
drm_mode_destroy(dev, newmode);
continue;
}
drm_mode_probed_add(connector, newmode);
modes++;
}
return modes;
}
static int drm_cvt_modes_for_range(struct drm_connector *connector,
const struct drm_edid *drm_edid,
const struct detailed_timing *timing)
@ -3577,25 +3643,29 @@ do_inferred_modes(const struct detailed_timing *timing, void *c)
closure->drm_edid,
timing);
if (!version_greater(closure->drm_edid, 1, 1))
if (closure->drm_edid->edid->revision < 2)
return; /* GTF not defined yet */
switch (range->flags) {
case 0x02: /* secondary gtf, XXX could do more */
case 0x00: /* default gtf */
case DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG:
closure->modes += drm_gtf2_modes_for_range(closure->connector,
closure->drm_edid,
timing);
break;
case DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG:
closure->modes += drm_gtf_modes_for_range(closure->connector,
closure->drm_edid,
timing);
break;
case 0x04: /* cvt, only in 1.4+ */
if (!version_greater(closure->drm_edid, 1, 3))
case DRM_EDID_CVT_SUPPORT_FLAG:
if (closure->drm_edid->edid->revision < 4)
break;
closure->modes += drm_cvt_modes_for_range(closure->connector,
closure->drm_edid,
timing);
break;
case 0x01: /* just the ranges, no formula */
case DRM_EDID_RANGE_LIMITS_ONLY_FLAG:
default:
break;
}
@ -3609,7 +3679,7 @@ static int add_inferred_modes(struct drm_connector *connector,
.drm_edid = drm_edid,
};
if (version_greater(drm_edid, 1, 0))
if (drm_edid->edid->revision >= 1)
drm_for_each_detailed_block(drm_edid, do_inferred_modes, &closure);
return closure.modes;
@ -3686,7 +3756,7 @@ static int add_established_modes(struct drm_connector *connector,
}
}
if (version_greater(drm_edid, 1, 0))
if (edid->revision >= 1)
drm_for_each_detailed_block(drm_edid, do_established_modes,
&closure);
@ -3741,7 +3811,7 @@ static int add_standard_modes(struct drm_connector *connector,
}
}
if (version_greater(drm_edid, 1, 0))
if (drm_edid->edid->revision >= 1)
drm_for_each_detailed_block(drm_edid, do_standard_modes,
&closure);
@ -3821,7 +3891,7 @@ add_cvt_modes(struct drm_connector *connector, const struct drm_edid *drm_edid)
.drm_edid = drm_edid,
};
if (version_greater(drm_edid, 1, 2))
if (drm_edid->edid->revision >= 3)
drm_for_each_detailed_block(drm_edid, do_cvt_mode, &closure);
/* XXX should also look for CVT codes in VTB blocks */
@ -3873,13 +3943,14 @@ static int add_detailed_modes(struct drm_connector *connector,
struct detailed_mode_closure closure = {
.connector = connector,
.drm_edid = drm_edid,
.preferred = true,
.quirks = quirks,
};
if (closure.preferred && !version_greater(drm_edid, 1, 3))
if (drm_edid->edid->revision >= 4)
closure.preferred = true; /* first detailed timing is always preferred */
else
closure.preferred =
(drm_edid->edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
drm_edid->edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING;
drm_for_each_detailed_block(drm_edid, do_detailed_mode, &closure);
@ -5752,12 +5823,87 @@ static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector,
hdmi->y420_dc_modes = dc_mask;
}
static void drm_parse_dsc_info(struct drm_hdmi_dsc_cap *hdmi_dsc,
const u8 *hf_scds)
{
hdmi_dsc->v_1p2 = hf_scds[11] & DRM_EDID_DSC_1P2;
if (!hdmi_dsc->v_1p2)
return;
hdmi_dsc->native_420 = hf_scds[11] & DRM_EDID_DSC_NATIVE_420;
hdmi_dsc->all_bpp = hf_scds[11] & DRM_EDID_DSC_ALL_BPP;
if (hf_scds[11] & DRM_EDID_DSC_16BPC)
hdmi_dsc->bpc_supported = 16;
else if (hf_scds[11] & DRM_EDID_DSC_12BPC)
hdmi_dsc->bpc_supported = 12;
else if (hf_scds[11] & DRM_EDID_DSC_10BPC)
hdmi_dsc->bpc_supported = 10;
else
/* Supports min 8 BPC if DSC 1.2 is supported */
hdmi_dsc->bpc_supported = 8;
if (cea_db_payload_len(hf_scds) >= 12 && hf_scds[12]) {
u8 dsc_max_slices;
u8 dsc_max_frl_rate;
dsc_max_frl_rate = (hf_scds[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
&hdmi_dsc->max_frl_rate_per_lane);
dsc_max_slices = hf_scds[12] & DRM_EDID_DSC_MAX_SLICES;
switch (dsc_max_slices) {
case 1:
hdmi_dsc->max_slices = 1;
hdmi_dsc->clk_per_slice = 340;
break;
case 2:
hdmi_dsc->max_slices = 2;
hdmi_dsc->clk_per_slice = 340;
break;
case 3:
hdmi_dsc->max_slices = 4;
hdmi_dsc->clk_per_slice = 340;
break;
case 4:
hdmi_dsc->max_slices = 8;
hdmi_dsc->clk_per_slice = 340;
break;
case 5:
hdmi_dsc->max_slices = 8;
hdmi_dsc->clk_per_slice = 400;
break;
case 6:
hdmi_dsc->max_slices = 12;
hdmi_dsc->clk_per_slice = 400;
break;
case 7:
hdmi_dsc->max_slices = 16;
hdmi_dsc->clk_per_slice = 400;
break;
case 0:
default:
hdmi_dsc->max_slices = 0;
hdmi_dsc->clk_per_slice = 0;
}
}
if (cea_db_payload_len(hf_scds) >= 13 && hf_scds[13])
hdmi_dsc->total_chunk_kbytes = hf_scds[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES;
}
/* Sink Capability Data Structure */
static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
const u8 *hf_scds)
{
struct drm_display_info *display = &connector->display_info;
struct drm_hdmi_info *hdmi = &display->hdmi;
struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
int max_tmds_clock = 0;
u8 max_frl_rate = 0;
bool dsc_support = false;
display->has_hdmi_infoframe = true;
@ -5777,14 +5923,13 @@ static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
*/
if (hf_scds[5]) {
/* max clock is 5000 KHz times block value */
u32 max_tmds_clock = hf_scds[5] * 5000;
struct drm_scdc *scdc = &hdmi->scdc;
/* max clock is 5000 KHz times block value */
max_tmds_clock = hf_scds[5] * 5000;
if (max_tmds_clock > 340000) {
display->max_tmds_clock = max_tmds_clock;
DRM_DEBUG_KMS("HF-VSDB: max TMDS clock %d kHz\n",
display->max_tmds_clock);
}
if (scdc->supported) {
@ -5797,74 +5942,21 @@ static void drm_parse_hdmi_forum_scds(struct drm_connector *connector,
}
if (hf_scds[7]) {
u8 max_frl_rate;
u8 dsc_max_frl_rate;
u8 dsc_max_slices;
struct drm_hdmi_dsc_cap *hdmi_dsc = &hdmi->dsc_cap;
DRM_DEBUG_KMS("hdmi_21 sink detected. parsing edid\n");
max_frl_rate = (hf_scds[7] & DRM_EDID_MAX_FRL_RATE_MASK) >> 4;
drm_get_max_frl_rate(max_frl_rate, &hdmi->max_lanes,
&hdmi->max_frl_rate_per_lane);
hdmi_dsc->v_1p2 = hf_scds[11] & DRM_EDID_DSC_1P2;
if (hdmi_dsc->v_1p2) {
hdmi_dsc->native_420 = hf_scds[11] & DRM_EDID_DSC_NATIVE_420;
hdmi_dsc->all_bpp = hf_scds[11] & DRM_EDID_DSC_ALL_BPP;
if (hf_scds[11] & DRM_EDID_DSC_16BPC)
hdmi_dsc->bpc_supported = 16;
else if (hf_scds[11] & DRM_EDID_DSC_12BPC)
hdmi_dsc->bpc_supported = 12;
else if (hf_scds[11] & DRM_EDID_DSC_10BPC)
hdmi_dsc->bpc_supported = 10;
else
hdmi_dsc->bpc_supported = 0;
dsc_max_frl_rate = (hf_scds[12] & DRM_EDID_DSC_MAX_FRL_RATE_MASK) >> 4;
drm_get_max_frl_rate(dsc_max_frl_rate, &hdmi_dsc->max_lanes,
&hdmi_dsc->max_frl_rate_per_lane);
hdmi_dsc->total_chunk_kbytes = hf_scds[13] & DRM_EDID_DSC_TOTAL_CHUNK_KBYTES;
dsc_max_slices = hf_scds[12] & DRM_EDID_DSC_MAX_SLICES;
switch (dsc_max_slices) {
case 1:
hdmi_dsc->max_slices = 1;
hdmi_dsc->clk_per_slice = 340;
break;
case 2:
hdmi_dsc->max_slices = 2;
hdmi_dsc->clk_per_slice = 340;
break;
case 3:
hdmi_dsc->max_slices = 4;
hdmi_dsc->clk_per_slice = 340;
break;
case 4:
hdmi_dsc->max_slices = 8;
hdmi_dsc->clk_per_slice = 340;
break;
case 5:
hdmi_dsc->max_slices = 8;
hdmi_dsc->clk_per_slice = 400;
break;
case 6:
hdmi_dsc->max_slices = 12;
hdmi_dsc->clk_per_slice = 400;
break;
case 7:
hdmi_dsc->max_slices = 16;
hdmi_dsc->clk_per_slice = 400;
break;
case 0:
default:
hdmi_dsc->max_slices = 0;
hdmi_dsc->clk_per_slice = 0;
}
}
}
drm_parse_ycbcr420_deep_color_info(connector, hf_scds);
if (cea_db_payload_len(hf_scds) >= 11 && hf_scds[11]) {
drm_parse_dsc_info(hdmi_dsc, hf_scds);
dsc_support = true;
}
drm_dbg_kms(connector->dev,
"HF-VSDB: max TMDS clock: %d KHz, HDMI 2.1 support: %s, DSC 1.2 support: %s\n",
max_tmds_clock, str_yes_no(max_frl_rate), str_yes_no(dsc_support));
}
static void drm_parse_hdmi_deep_color_info(struct drm_connector *connector,
@ -6033,10 +6125,13 @@ void get_monitor_range(const struct detailed_timing *timing, void *c)
return;
/*
* Check for flag range limits only. If flag == 1 then
* no additional timing information provided.
* Default GTF, GTF Secondary curve and CVT are not
* supported
* These limits are used to determine the VRR refresh
* rate range. Only the "range limits only" variant
* of the range descriptor seems to guarantee that
* any and all timings are accepted by the sink, as
* opposed to just timings conforming to the indicated
* formula (GTF/GTF2/CVT). Thus other variants of the
* range descriptor are not accepted here.
*/
if (range->flags != DRM_EDID_RANGE_LIMITS_ONLY_FLAG)
return;
@ -6061,7 +6156,10 @@ static void drm_get_monitor_range(struct drm_connector *connector,
.drm_edid = drm_edid,
};
if (!version_greater(drm_edid, 1, 1))
if (drm_edid->edid->revision < 4)
return;
if (!(drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ))
return;
drm_for_each_detailed_block(drm_edid, get_monitor_range, &closure);
@ -6390,7 +6488,7 @@ static int _drm_edid_connector_update(struct drm_connector *connector,
num_modes += add_cea_modes(connector, drm_edid);
num_modes += add_alternate_cea_modes(connector, drm_edid);
num_modes += add_displayid_detailed_modes(connector, drm_edid);
if (drm_edid->edid->features & DRM_EDID_FEATURE_DEFAULT_GTF)
if (drm_edid->edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ)
num_modes += add_inferred_modes(connector, drm_edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
@ -6837,7 +6935,7 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
* by non-zero YQ when receiving RGB. There doesn't seem to be any
* good way to tell which version of CEA-861 the sink supports, so
* we limit non-zero YQ to HDMI 2.0 sinks only as HDMI 2.0 is based
* on on CEA-861-F.
* on CEA-861-F.
*/
if (!is_hdmi2_sink(connector) ||
rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)


@ -660,6 +660,11 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
drm_fb_xrgb8888_to_rgb565(dst, dst_pitch, src, fb, clip, false);
return 0;
}
} else if (dst_format == (DRM_FORMAT_RGB565 | DRM_FORMAT_BIG_ENDIAN)) {
if (fb_format == DRM_FORMAT_RGB565) {
drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
return 0;
}
} else if (dst_format == DRM_FORMAT_RGB888) {
if (fb_format == DRM_FORMAT_XRGB8888) {
drm_fb_xrgb8888_to_rgb888(dst, dst_pitch, src, fb, clip);
@ -678,6 +683,11 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d
drm_fb_xrgb8888_to_xrgb2101010(dst, dst_pitch, src, fb, clip);
return 0;
}
} else if (dst_format == DRM_FORMAT_BGRX8888) {
if (fb_format == DRM_FORMAT_XRGB8888) {
drm_fb_swab(dst, dst_pitch, src, fb, clip, false);
return 0;
}
}
drm_warn_once(fb->dev, "No conversion helper from %p4cc to %p4cc found.\n",

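The two new drm_fb_swab() branches above work because each format pair differs only in per-pixel byte order. For the XRGB8888 to BGRX8888 case, a little-endian XRGB8888 pixel sits in memory as the bytes {B, G, R, X}; a 32-bit byte swap yields {X, R, G, B}, which is exactly the little-endian BGRX8888 layout:

    0xXXRRGGBB (XRGB8888) --swab32--> 0xBBGGRRXX (BGRX8888)

Likewise RGB565 and its DRM_FORMAT_BIG_ENDIAN variant differ only by a 16-bit swap.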

@ -1158,6 +1158,8 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
dma_resv_assert_held(obj->resv);
if (!obj->funcs->vmap)
return -EOPNOTSUPP;
@ -1173,6 +1175,8 @@ EXPORT_SYMBOL(drm_gem_vmap);
void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_assert_held(obj->resv);
if (iosys_map_is_null(map))
return;
@ -1184,6 +1188,26 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map)
}
EXPORT_SYMBOL(drm_gem_vunmap);
int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
int ret;
dma_resv_lock(obj->resv, NULL);
ret = drm_gem_vmap(obj, map);
dma_resv_unlock(obj->resv);
return ret;
}
EXPORT_SYMBOL(drm_gem_vmap_unlocked);
void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map)
{
dma_resv_lock(obj->resv, NULL);
drm_gem_vunmap(obj, map);
dma_resv_unlock(obj->resv);
}
EXPORT_SYMBOL(drm_gem_vunmap_unlocked);
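/*
 * A hedged usage sketch for callers that do not already hold the
 * object's reservation lock (names hypothetical; assumes the mapping
 * lands in system memory):
 */
struct iosys_map map;
int ret;

ret = drm_gem_vmap_unlocked(obj, &map);
if (ret)
	return ret;

if (!map.is_iomem)	/* only deref vaddr for system memory */
	memcpy(buf, map.vaddr, len);

drm_gem_vunmap_unlocked(obj, &map);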
/**
* drm_gem_lock_reservations - Sets up the ww context and acquires
* the lock on an array of GEM objects.


@ -230,7 +230,7 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
if (gem_obj->import_attach) {
if (dma_obj->vaddr)
dma_buf_vunmap(gem_obj->import_attach->dmabuf, &map);
dma_buf_vunmap_unlocked(gem_obj->import_attach->dmabuf, &map);
drm_prime_gem_destroy(gem_obj, dma_obj->sgt);
} else if (dma_obj->vaddr) {
if (dma_obj->map_noncoherent)
@ -581,7 +581,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
struct iosys_map map;
int ret;
ret = dma_buf_vmap(attach->dmabuf, &map);
ret = dma_buf_vmap_unlocked(attach->dmabuf, &map);
if (ret) {
DRM_ERROR("Failed to vmap PRIME buffer\n");
return ERR_PTR(ret);
@ -589,7 +589,7 @@ drm_gem_dma_prime_import_sg_table_vmap(struct drm_device *dev,
obj = drm_gem_dma_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj)) {
dma_buf_vunmap(attach->dmabuf, &map);
dma_buf_vunmap_unlocked(attach->dmabuf, &map);
return obj;
}


@ -354,7 +354,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
ret = -EINVAL;
goto err_drm_gem_vunmap;
}
ret = drm_gem_vmap(obj, &map[i]);
ret = drm_gem_vmap_unlocked(obj, &map[i]);
if (ret)
goto err_drm_gem_vunmap;
}
@ -376,7 +376,7 @@ err_drm_gem_vunmap:
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
drm_gem_vunmap(obj, &map[i]);
drm_gem_vunmap_unlocked(obj, &map[i]);
}
return ret;
}
@ -403,7 +403,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map)
continue;
if (iosys_map_is_null(&map[i]))
continue;
drm_gem_vunmap(obj, &map[i]);
drm_gem_vunmap_unlocked(obj, &map[i]);
}
}
EXPORT_SYMBOL(drm_gem_fb_vunmap);


@ -64,13 +64,8 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem,
struct iosys_map *map)
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
int ret;
dma_resv_lock(gem->resv, NULL);
ret = ttm_bo_vmap(bo, map);
dma_resv_unlock(gem->resv);
return ret;
return ttm_bo_vmap(bo, map);
}
EXPORT_SYMBOL(drm_gem_ttm_vmap);
@ -87,9 +82,7 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
{
struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
dma_resv_lock(gem->resv, NULL);
ttm_bo_vunmap(bo, map);
dma_resv_unlock(gem->resv);
}
EXPORT_SYMBOL(drm_gem_ttm_vunmap);


@ -1801,19 +1801,23 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
name = mode_option;
/* Try to locate the bpp and refresh specifiers, if any */
bpp_ptr = strchr(name, '-');
if (bpp_ptr)
bpp_off = bpp_ptr - name;
refresh_ptr = strchr(name, '@');
if (refresh_ptr)
refresh_off = refresh_ptr - name;
/* Locate the start of named options */
options_ptr = strchr(name, ',');
if (options_ptr)
options_off = options_ptr - name;
else
options_off = strlen(name);
/* Try to locate the bpp and refresh specifiers, if any */
bpp_ptr = strnchr(name, options_off, '-');
while (bpp_ptr && !isdigit(bpp_ptr[1]))
bpp_ptr = strnchr(bpp_ptr + 1, options_off, '-');
if (bpp_ptr)
bpp_off = bpp_ptr - name;
refresh_ptr = strnchr(name, options_off, '@');
if (refresh_ptr)
refresh_off = refresh_ptr - name;
/* Locate the end of the name / resolution, and parse it */
if (bpp_ptr) {

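A worked illustration of the new scan order, on a hypothetical option string:

    720x480@60,tv_mode=NTSC-J

The ',' is located first and fixes options_off; the bpp ('-') and refresh ('@') scans are then bounded by strnchr() to the text before the options, and a '-' only counts as a bpp separator when a digit follows it. The dash inside the named option can therefore no longer be mistaken for a bpp specifier, as it could be with the plain strchr() calls this hunk replaces.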

@ -298,7 +298,9 @@ EXPORT_SYMBOL(drm_plane_helper_destroy);
* scale and positioning are not expected to change since the plane is always
* a fullscreen scanout buffer.
*
* This is often the case for the primary plane of simple framebuffers.
* This is often the case for the primary plane of simple framebuffers. See
* also drm_crtc_helper_atomic_check() for the respective CRTC-state check
* helper function.
*
* RETURNS:
* Zero on success, or an errno code otherwise.


@ -940,7 +940,7 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto fail_detach;
@ -958,7 +958,7 @@ struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
return obj;
fail_unmap:
dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
@ -1056,7 +1056,7 @@ void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
attach = obj->import_attach;
if (sg)
dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
dma_buf_unmap_attachment_unlocked(attach, sg, DMA_BIDIRECTIONAL);
dma_buf = attach->dmabuf;
dma_buf_detach(attach->dmabuf, attach);
/* remove the reference */


@ -102,10 +102,14 @@ static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
int ret;
ret = drm_atomic_helper_check_crtc_state(crtc_state, false);
if (!crtc_state->enable)
goto out;
ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state);
if (ret)
return ret;
out:
return drm_atomic_add_affected_planes(state, crtc);
}


@ -65,7 +65,7 @@ static void etnaviv_gem_prime_release(struct etnaviv_gem_object *etnaviv_obj)
struct iosys_map map = IOSYS_MAP_INIT_VADDR(etnaviv_obj->vaddr);
if (etnaviv_obj->vaddr)
dma_buf_vunmap(etnaviv_obj->base.import_attach->dmabuf, &map);
dma_buf_vunmap_unlocked(etnaviv_obj->base.import_attach->dmabuf, &map);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:


@ -286,7 +286,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
info->fbops = &psbfb_unaccel_ops;
info->fix.smem_start = dev->mode_config.fb_base;
info->fix.smem_start = dev_priv->fb_base;
info->fix.smem_len = size;
info->fix.ywrapstep = 0;
info->fix.ypanstep = 0;
@ -296,7 +296,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
info->screen_size = size;
if (dev_priv->gtt.stolen_size) {
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].base = dev_priv->fb_base;
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
@ -527,7 +527,7 @@ void psb_modeset_init(struct drm_device *dev)
/* set memory base */
/* Oaktrail and Poulsbo should use BAR 2*/
pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev->mode_config.fb_base));
pci_read_config_dword(pdev, PSB_BSM, (u32 *)&(dev_priv->fb_base));
/* num pipes is 2 for PSB but 1 for Mrst */
for (i = 0; i < dev_priv->num_pipe; i++)


@ -523,6 +523,7 @@ struct drm_psb_private {
uint32_t blc_adj2;
struct drm_fb_helper *fb_helper;
resource_size_t fb_base;
bool dsr_enable;
u32 dsr_fb_update;


@ -355,7 +355,7 @@ static void gud_connector_reset(struct drm_connector *connector)
drm_atomic_helper_connector_reset(connector);
connector->state->tv = gconn->initial_tv_state;
/* Set margins from command line */
drm_atomic_helper_connector_tv_reset(connector);
drm_atomic_helper_connector_tv_margins_reset(connector);
if (gconn->initial_brightness >= 0)
connector->state->tv.brightness = gconn->initial_brightness;
}


@ -105,7 +105,6 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
dev->mode_config.max_width = 1920;
dev->mode_config.max_height = 1200;
dev->mode_config.fb_base = priv->fb_base;
dev->mode_config.preferred_depth = 32;
dev->mode_config.prefer_shadow = 1;
@ -212,7 +211,7 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv)
{
struct drm_device *dev = &priv->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
resource_size_t addr, size, ioaddr, iosize;
resource_size_t ioaddr, iosize;
ioaddr = pci_resource_start(pdev, 1);
iosize = pci_resource_len(pdev, 1);
@ -222,16 +221,6 @@ static int hibmc_hw_map(struct hibmc_drm_private *priv)
return -ENOMEM;
}
addr = pci_resource_start(pdev, 0);
size = pci_resource_len(pdev, 0);
priv->fb_map = devm_ioremap(dev->dev, addr, size);
if (!priv->fb_map) {
drm_err(dev, "Cannot map framebuffer\n");
return -ENOMEM;
}
priv->fb_base = addr;
priv->fb_size = size;
return 0;
}
@ -271,7 +260,8 @@ static int hibmc_load(struct drm_device *dev)
if (ret)
goto err;
ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), priv->fb_size);
ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
goto err;


@ -32,9 +32,6 @@ struct hibmc_connector {
struct hibmc_drm_private {
/* hw */
void __iomem *mmio;
void __iomem *fb_map;
resource_size_t fb_base;
resource_size_t fb_size;
/* drm */
struct drm_device dev;


@ -72,7 +72,7 @@ static int i915_gem_dmabuf_vmap(struct dma_buf *dma_buf,
struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
void *vaddr;
vaddr = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WB);
vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
if (IS_ERR(vaddr))
return PTR_ERR(vaddr);


@ -290,7 +290,21 @@ void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
__i915_gem_object_free_mmaps(obj);
atomic_set(&obj->mm.pages_pin_count, 0);
/*
* dma_buf_unmap_attachment() requires reservation to be
* locked. The imported GEM shouldn't share reservation lock
* and ttm_bo_cleanup_memtype_use() shouldn't be invoked for
* dma-buf, so it's safe to take the lock.
*/
if (obj->base.import_attach)
i915_gem_object_lock(obj, NULL);
__i915_gem_object_put_pages(obj);
if (obj->base.import_attach)
i915_gem_object_unlock(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
}


@ -213,7 +213,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
goto out_import;
}
st = dma_buf_map_attachment(import_attach, DMA_BIDIRECTIONAL);
st = dma_buf_map_attachment_unlocked(import_attach, DMA_BIDIRECTIONAL);
if (IS_ERR(st)) {
err = PTR_ERR(st);
goto out_detach;
@ -226,7 +226,7 @@ static int igt_dmabuf_import_same_driver(struct drm_i915_private *i915,
timeout = -ETIME;
}
err = timeout > 0 ? 0 : timeout;
dma_buf_unmap_attachment(import_attach, st, DMA_BIDIRECTIONAL);
dma_buf_unmap_attachment_unlocked(import_attach, st, DMA_BIDIRECTIONAL);
out_detach:
dma_buf_detach(dmabuf, import_attach);
out_import:
@ -296,7 +296,7 @@ static int igt_dmabuf_import(void *arg)
goto out_obj;
}
err = dma_buf_vmap(dmabuf, &map);
err = dma_buf_vmap_unlocked(dmabuf, &map);
dma_map = err ? NULL : map.vaddr;
if (!dma_map) {
pr_err("dma_buf_vmap failed\n");
@ -337,7 +337,7 @@ static int igt_dmabuf_import(void *arg)
err = 0;
out_dma_map:
dma_buf_vunmap(dmabuf, &map);
dma_buf_vunmap_unlocked(dmabuf, &map);
out_obj:
i915_gem_object_put(obj);
out_dmabuf:
@ -358,7 +358,7 @@ static int igt_dmabuf_import_ownership(void *arg)
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
err = dma_buf_vmap(dmabuf, &map);
err = dma_buf_vmap_unlocked(dmabuf, &map);
ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
@ -367,7 +367,7 @@ static int igt_dmabuf_import_ownership(void *arg)
}
memset(ptr, 0xc5, PAGE_SIZE);
dma_buf_vunmap(dmabuf, &map);
dma_buf_vunmap_unlocked(dmabuf, &map);
obj = to_intel_bo(i915_gem_prime_import(&i915->drm, dmabuf));
if (IS_ERR(obj)) {
@ -418,7 +418,7 @@ static int igt_dmabuf_export_vmap(void *arg)
}
i915_gem_object_put(obj);
err = dma_buf_vmap(dmabuf, &map);
err = dma_buf_vmap_unlocked(dmabuf, &map);
ptr = err ? NULL : map.vaddr;
if (!ptr) {
pr_err("dma_buf_vmap failed\n");
@ -435,7 +435,7 @@ static int igt_dmabuf_export_vmap(void *arg)
memset(ptr, 0xc5, dmabuf->size);
err = 0;
dma_buf_vunmap(dmabuf, &map);
dma_buf_vunmap_unlocked(dmabuf, &map);
out:
dma_buf_put(dmabuf);
return err;


@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
} else {
buffer_chunk->size = lima_bo_size(bo);
ret = drm_gem_shmem_vmap(&bo->base, &map);
ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
if (ret) {
kvfree(et);
goto out;
@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size);
drm_gem_shmem_vunmap(&bo->base, &map);
drm_gem_vunmap_unlocked(&bo->base.base, &map);
}
buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;


@ -284,7 +284,8 @@ static void mgag200_g200se_04_pixpllc_atomic_update(struct drm_crtc *crtc,
pixpllcp = pixpllc->p - 1;
pixpllcs = pixpllc->s;
xpixpllcm = pixpllcm | ((pixpllcn & BIT(8)) >> 1);
// For G200SE A, BIT(7) should be set unconditionally.
xpixpllcm = BIT(7) | pixpllcm;
xpixpllcn = pixpllcn;
xpixpllcp = (pixpllcs << 3) | pixpllcp;


@ -579,13 +579,13 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st
struct drm_property_blob *new_gamma_lut = new_crtc_state->gamma_lut;
int ret;
ret = drm_atomic_helper_check_crtc_state(new_crtc_state, false);
if (ret)
return ret;
if (!new_crtc_state->enable)
return 0;
ret = drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
if (ret)
return ret;
if (new_crtc_state->mode_changed) {
if (funcs->pixpllc_atomic_check) {
ret = funcs->pixpllc_atomic_check(crtc, new_state);
@ -601,7 +601,7 @@ int mgag200_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_st
}
}
return drm_atomic_add_affected_planes(new_state, crtc);
return 0;
}
void mgag200_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *old_state)
@ -824,7 +824,6 @@ int mgag200_mode_config_init(struct mga_device *mdev, resource_size_t vram_avail
dev->mode_config.max_width = MGAG200_MAX_FB_WIDTH;
dev->mode_config.max_height = MGAG200_MAX_FB_HEIGHT;
dev->mode_config.preferred_depth = 24;
dev->mode_config.fb_base = mdev->vram_res->start;
dev->mode_config.funcs = &mgag200_mode_config_funcs;
dev->mode_config.helper_private = &mgag200_mode_config_helper_funcs;


@ -109,8 +109,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(fbi, helper, sizes);
dev->mode_config.fb_base = paddr;
fbi->screen_base = msm_gem_get_vaddr(bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);


@ -15,6 +15,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_dma_helper.h>
@ -31,13 +32,126 @@
/* -----------------------------------------------------------------------------
* CRTC
*/
/*
* For conversion from YCbCr to RGB, the CSC operates as follows:
*
* |R| |A1 A2 A3| |Y + D1|
* |G| = |B1 B2 B3| * |Cb + D2|
* |B| |C1 C2 C3| |Cr + D3|
*
* The A, B and C coefficients are expressed as Q2.8 fixed point values, and
* the D coefficients as Q0.8. Despite the reference manual stating the
* opposite, the D1, D2 and D3 offset values are added to Y, Cb and Cr, not
* subtracted. They must thus be programmed with negative values.
*/
static const u32 lcdif_yuv2rgb_coeffs[3][2][6] = {
[DRM_COLOR_YCBCR_BT601] = {
[DRM_COLOR_YCBCR_LIMITED_RANGE] = {
/*
* BT.601 limited range:
*
* |R| |1.1644 0.0000 1.5960| |Y - 16 |
* |G| = |1.1644 -0.3917 -0.8129| * |Cb - 128|
* |B| |1.1644 2.0172 0.0000| |Cr - 128|
*/
CSC0_COEF0_A1(0x12a) | CSC0_COEF0_A2(0x000),
CSC0_COEF1_A3(0x199) | CSC0_COEF1_B1(0x12a),
CSC0_COEF2_B2(0x79c) | CSC0_COEF2_B3(0x730),
CSC0_COEF3_C1(0x12a) | CSC0_COEF3_C2(0x204),
CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x1f0),
CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180),
},
[DRM_COLOR_YCBCR_FULL_RANGE] = {
/*
* BT.601 full range:
*
* |R| |1.0000 0.0000 1.4020| |Y - 0 |
* |G| = |1.0000 -0.3441 -0.7141| * |Cb - 128|
* |B| |1.0000 1.7720 0.0000| |Cr - 128|
*/
CSC0_COEF0_A1(0x100) | CSC0_COEF0_A2(0x000),
CSC0_COEF1_A3(0x167) | CSC0_COEF1_B1(0x100),
CSC0_COEF2_B2(0x7a8) | CSC0_COEF2_B3(0x749),
CSC0_COEF3_C1(0x100) | CSC0_COEF3_C2(0x1c6),
CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x000),
CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180),
},
},
[DRM_COLOR_YCBCR_BT709] = {
[DRM_COLOR_YCBCR_LIMITED_RANGE] = {
/*
* Rec.709 limited range:
*
* |R| |1.1644 0.0000 1.7927| |Y - 16 |
* |G| = |1.1644 -0.2132 -0.5329| * |Cb - 128|
* |B| |1.1644 2.1124 0.0000| |Cr - 128|
*/
CSC0_COEF0_A1(0x12a) | CSC0_COEF0_A2(0x000),
CSC0_COEF1_A3(0x1cb) | CSC0_COEF1_B1(0x12a),
CSC0_COEF2_B2(0x7c9) | CSC0_COEF2_B3(0x778),
CSC0_COEF3_C1(0x12a) | CSC0_COEF3_C2(0x21d),
CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x1f0),
CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180),
},
[DRM_COLOR_YCBCR_FULL_RANGE] = {
/*
* Rec.709 full range:
*
* |R| |1.0000 0.0000 1.5748| |Y - 0 |
* |G| = |1.0000 -0.1873 -0.4681| * |Cb - 128|
* |B| |1.0000 1.8556 0.0000| |Cr - 128|
*/
CSC0_COEF0_A1(0x100) | CSC0_COEF0_A2(0x000),
CSC0_COEF1_A3(0x193) | CSC0_COEF1_B1(0x100),
CSC0_COEF2_B2(0x7d0) | CSC0_COEF2_B3(0x788),
CSC0_COEF3_C1(0x100) | CSC0_COEF3_C2(0x1db),
CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x000),
CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180),
},
},
[DRM_COLOR_YCBCR_BT2020] = {
[DRM_COLOR_YCBCR_LIMITED_RANGE] = {
/*
* BT.2020 limited range:
*
* |R| |1.1644 0.0000 1.6787| |Y - 16 |
* |G| = |1.1644 -0.1874 -0.6505| * |Cb - 128|
* |B| |1.1644 2.1418 0.0000| |Cr - 128|
*/
CSC0_COEF0_A1(0x12a) | CSC0_COEF0_A2(0x000),
CSC0_COEF1_A3(0x1ae) | CSC0_COEF1_B1(0x12a),
CSC0_COEF2_B2(0x7d0) | CSC0_COEF2_B3(0x759),
CSC0_COEF3_C1(0x12a) | CSC0_COEF3_C2(0x224),
CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x1f0),
CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180),
},
[DRM_COLOR_YCBCR_FULL_RANGE] = {
/*
* BT.2020 full range:
*
* |R| |1.0000 0.0000 1.4746| |Y - 0 |
* |G| = |1.0000 -0.1646 -0.5714| * |Cb - 128|
* |B| |1.0000 1.8814 0.0000| |Cr - 128|
*/
CSC0_COEF0_A1(0x100) | CSC0_COEF0_A2(0x000),
CSC0_COEF1_A3(0x179) | CSC0_COEF1_B1(0x100),
CSC0_COEF2_B2(0x7d6) | CSC0_COEF2_B3(0x76e),
CSC0_COEF3_C1(0x100) | CSC0_COEF3_C2(0x1e2),
CSC0_COEF4_C3(0x000) | CSC0_COEF4_D1(0x000),
CSC0_COEF5_D2(0x180) | CSC0_COEF5_D3(0x180),
},
},
};
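/*
 * The coefficient encodings above follow mechanically from the
 * matrices; a hedged sketch, assuming Q2.8 coefficients are stored as
 * 11-bit two's complement and the D offsets as signed 9-bit integers
 * (helper names hypothetical):
 */
#include <math.h>
#include <stdint.h>

/* Encode a matrix coefficient as Q2.8 two's complement (11 bits). */
static uint32_t csc_coef_q2_8(double v)
{
	return ((uint32_t)(int32_t)lround(v * 256.0)) & 0x7ff;
}

/* Encode a pre-scaling offset as a signed 9-bit integer. */
static uint32_t csc_offset_s9(int v)
{
	return (uint32_t)v & 0x1ff;
}

/*
 * csc_coef_q2_8(1.1644)  == 0x12a  (A1, BT.601 limited range)
 * csc_coef_q2_8(-0.3917) == 0x79c  (B2, BT.601 limited range)
 * csc_offset_s9(-16)     == 0x1f0  (D1, luma offset, limited range)
 * csc_offset_s9(-128)    == 0x180  (D2/D3, chroma offsets)
 */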
static void lcdif_set_formats(struct lcdif_drm_private *lcdif,
struct drm_plane_state *plane_state,
const u32 bus_format)
{
struct drm_device *drm = lcdif->drm;
const u32 format = lcdif->crtc.primary->state->fb->format->format;
writel(CSC0_CTRL_BYPASS, lcdif->base + LCDC_V8_CSC0_CTRL);
const u32 format = plane_state->fb->format->format;
bool in_yuv = false;
bool out_yuv = false;
switch (bus_format) {
case MEDIA_BUS_FMT_RGB565_1X16:
@ -51,24 +165,7 @@ static void lcdif_set_formats(struct lcdif_drm_private *lcdif,
case MEDIA_BUS_FMT_UYVY8_1X16:
writel(DISP_PARA_LINE_PATTERN_UYVY_H,
lcdif->base + LCDC_V8_DISP_PARA);
/* CSC: BT.601 Full Range RGB to YCbCr coefficients. */
writel(CSC0_COEF0_A2(0x096) | CSC0_COEF0_A1(0x04c),
lcdif->base + LCDC_V8_CSC0_COEF0);
writel(CSC0_COEF1_B1(0x7d5) | CSC0_COEF1_A3(0x01d),
lcdif->base + LCDC_V8_CSC0_COEF1);
writel(CSC0_COEF2_B3(0x080) | CSC0_COEF2_B2(0x7ac),
lcdif->base + LCDC_V8_CSC0_COEF2);
writel(CSC0_COEF3_C2(0x795) | CSC0_COEF3_C1(0x080),
lcdif->base + LCDC_V8_CSC0_COEF3);
writel(CSC0_COEF4_D1(0x000) | CSC0_COEF4_C3(0x7ec),
lcdif->base + LCDC_V8_CSC0_COEF4);
writel(CSC0_COEF5_D3(0x080) | CSC0_COEF5_D2(0x080),
lcdif->base + LCDC_V8_CSC0_COEF5);
writel(CSC0_CTRL_CSC_MODE_RGB2YCbCr,
lcdif->base + LCDC_V8_CSC0_CTRL);
out_yuv = true;
break;
default:
dev_err(drm->dev, "Unknown media bus format 0x%x\n", bus_format);
@ -76,6 +173,7 @@ static void lcdif_set_formats(struct lcdif_drm_private *lcdif,
}
switch (format) {
/* RGB Formats */
case DRM_FORMAT_RGB565:
writel(CTRLDESCL0_5_BPP_16_RGB565,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
@ -100,10 +198,84 @@ static void lcdif_set_formats(struct lcdif_drm_private *lcdif,
writel(CTRLDESCL0_5_BPP_32_ARGB8888,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
break;
/* YUV Formats */
case DRM_FORMAT_YUYV:
writel(CTRLDESCL0_5_BPP_YCbCr422 | CTRLDESCL0_5_YUV_FORMAT_VY2UY1,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
in_yuv = true;
break;
case DRM_FORMAT_YVYU:
writel(CTRLDESCL0_5_BPP_YCbCr422 | CTRLDESCL0_5_YUV_FORMAT_UY2VY1,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
in_yuv = true;
break;
case DRM_FORMAT_UYVY:
writel(CTRLDESCL0_5_BPP_YCbCr422 | CTRLDESCL0_5_YUV_FORMAT_Y2VY1U,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
in_yuv = true;
break;
case DRM_FORMAT_VYUY:
writel(CTRLDESCL0_5_BPP_YCbCr422 | CTRLDESCL0_5_YUV_FORMAT_Y2UY1V,
lcdif->base + LCDC_V8_CTRLDESCL0_5);
in_yuv = true;
break;
default:
dev_err(drm->dev, "Unknown pixel format 0x%x\n", format);
break;
}
/*
* The CSC differentiates between "YCbCr" and "YUV", but the reference
* manual doesn't detail how they differ. Experiments showed that the
* luminance value is unaffected; only the calculations involving chroma
* values differ. The YCbCr mode behaves as expected, with chroma values
* being offset by 128. The YUV mode isn't fully understood.
*/
if (!in_yuv && out_yuv) {
/* RGB -> YCbCr */
writel(CSC0_CTRL_CSC_MODE_RGB2YCbCr,
lcdif->base + LCDC_V8_CSC0_CTRL);
/*
* CSC: BT.601 Limited Range RGB to YCbCr coefficients.
*
* |Y | | 0.2568 0.5041 0.0979| |R| |16 |
* |Cb| = |-0.1482 -0.2910 0.4392| * |G| + |128|
* |Cr| | 0.4392 0.4392 -0.3678| |B| |128|
*/
writel(CSC0_COEF0_A2(0x081) | CSC0_COEF0_A1(0x041),
lcdif->base + LCDC_V8_CSC0_COEF0);
writel(CSC0_COEF1_B1(0x7db) | CSC0_COEF1_A3(0x019),
lcdif->base + LCDC_V8_CSC0_COEF1);
writel(CSC0_COEF2_B3(0x070) | CSC0_COEF2_B2(0x7b6),
lcdif->base + LCDC_V8_CSC0_COEF2);
writel(CSC0_COEF3_C2(0x7a2) | CSC0_COEF3_C1(0x070),
lcdif->base + LCDC_V8_CSC0_COEF3);
writel(CSC0_COEF4_D1(0x010) | CSC0_COEF4_C3(0x7ee),
lcdif->base + LCDC_V8_CSC0_COEF4);
writel(CSC0_COEF5_D3(0x080) | CSC0_COEF5_D2(0x080),
lcdif->base + LCDC_V8_CSC0_COEF5);
} else if (in_yuv && !out_yuv) {
/* YCbCr -> RGB */
const u32 *coeffs =
lcdif_yuv2rgb_coeffs[plane_state->color_encoding]
[plane_state->color_range];
writel(CSC0_CTRL_CSC_MODE_YCbCr2RGB,
lcdif->base + LCDC_V8_CSC0_CTRL);
writel(coeffs[0], lcdif->base + LCDC_V8_CSC0_COEF0);
writel(coeffs[1], lcdif->base + LCDC_V8_CSC0_COEF1);
writel(coeffs[2], lcdif->base + LCDC_V8_CSC0_COEF2);
writel(coeffs[3], lcdif->base + LCDC_V8_CSC0_COEF3);
writel(coeffs[4], lcdif->base + LCDC_V8_CSC0_COEF4);
writel(coeffs[5], lcdif->base + LCDC_V8_CSC0_COEF5);
} else {
/* RGB -> RGB, YCbCr -> YCbCr: bypass colorspace converter. */
writel(CSC0_CTRL_BYPASS, lcdif->base + LCDC_V8_CSC0_CTRL);
}
}
static void lcdif_set_mode(struct lcdif_drm_private *lcdif, u32 bus_flags)
@ -188,6 +360,7 @@ static void lcdif_reset_block(struct lcdif_drm_private *lcdif)
}
static void lcdif_crtc_mode_set_nofb(struct lcdif_drm_private *lcdif,
struct drm_plane_state *plane_state,
struct drm_bridge_state *bridge_state,
const u32 bus_format)
{
@ -210,7 +383,7 @@ static void lcdif_crtc_mode_set_nofb(struct lcdif_drm_private *lcdif,
/* Mandatory eLCDIF reset as per the Reference Manual */
lcdif_reset_block(lcdif);
lcdif_set_formats(lcdif, bus_format);
lcdif_set_formats(lcdif, plane_state, bus_format);
lcdif_set_mode(lcdif, bus_flags);
}
@ -293,7 +466,7 @@ static void lcdif_crtc_atomic_enable(struct drm_crtc *crtc,
pm_runtime_get_sync(drm->dev);
lcdif_crtc_mode_set_nofb(lcdif, bridge_state, bus_format);
lcdif_crtc_mode_set_nofb(lcdif, new_pstate, bridge_state, bus_format);
/* Write cur_buf as well to avoid an initial corrupt frame */
paddr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
@ -437,12 +610,19 @@ static const struct drm_plane_funcs lcdif_plane_funcs = {
};
static const u32 lcdif_primary_plane_formats[] = {
/* RGB */
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB1555,
DRM_FORMAT_XRGB4444,
DRM_FORMAT_XRGB8888,
/* Packed YCbCr */
DRM_FORMAT_YUYV,
DRM_FORMAT_YVYU,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
};
static const u64 lcdif_modifiers[] = {
@ -456,6 +636,11 @@ static const u64 lcdif_modifiers[] = {
int lcdif_kms_init(struct lcdif_drm_private *lcdif)
{
const u32 supported_encodings = BIT(DRM_COLOR_YCBCR_BT601) |
BIT(DRM_COLOR_YCBCR_BT709) |
BIT(DRM_COLOR_YCBCR_BT2020);
const u32 supported_ranges = BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
BIT(DRM_COLOR_YCBCR_FULL_RANGE);
struct drm_encoder *encoder = &lcdif->encoder;
struct drm_crtc *crtc = &lcdif->crtc;
int ret;
@ -471,6 +656,14 @@ int lcdif_kms_init(struct lcdif_drm_private *lcdif)
if (ret)
return ret;
ret = drm_plane_create_color_properties(&lcdif->planes.primary,
supported_encodings,
supported_ranges,
DRM_COLOR_YCBCR_BT601,
DRM_COLOR_YCBCR_LIMITED_RANGE);
if (ret)
return ret;
drm_crtc_helper_add(crtc, &lcdif_crtc_helper_funcs);
ret = drm_crtc_init_with_planes(lcdif->drm, crtc,
&lcdif->planes.primary, NULL,


@ -130,7 +130,7 @@
#define CTRL_FETCH_START_OPTION_BPV BIT(9)
#define CTRL_FETCH_START_OPTION_RESV GENMASK(9, 8)
#define CTRL_FETCH_START_OPTION_MASK GENMASK(9, 8)
#define CTRL_NEG BIT(4)
#define CTRL_NEG BIT(4)
#define CTRL_INV_PXCK BIT(3)
#define CTRL_INV_DE BIT(2)
#define CTRL_INV_VS BIT(1)
@ -138,9 +138,9 @@
#define DISP_PARA_DISP_ON BIT(31)
#define DISP_PARA_SWAP_EN BIT(30)
#define DISP_PARA_LINE_PATTERN_UYVY_H (GENMASK(29, 28) | BIT(26))
#define DISP_PARA_LINE_PATTERN_RGB565 GENMASK(28, 26)
#define DISP_PARA_LINE_PATTERN_RGB888 0
#define DISP_PARA_LINE_PATTERN_UYVY_H (0xd << 26)
#define DISP_PARA_LINE_PATTERN_RGB565 (0x7 << 26)
#define DISP_PARA_LINE_PATTERN_RGB888 (0x0 << 26)
#define DISP_PARA_LINE_PATTERN_MASK GENMASK(29, 26)
#define DISP_PARA_DISP_MODE_MASK GENMASK(25, 24)
#define DISP_PARA_BGND_R_MASK GENMASK(23, 16)
@ -186,7 +186,7 @@
#define INT_ENABLE_D1_PLANE_PANIC_EN BIT(0)
#define CTRLDESCL0_1_HEIGHT(n) (((n) & 0xffff) << 16)
#define CTRLDESCL0_1_HEIGHT_MASK GENMASK(31, 16)
#define CTRLDESCL0_1_HEIGHT_MASK GENMASK(31, 16)
#define CTRLDESCL0_1_WIDTH(n) ((n) & 0xffff)
#define CTRLDESCL0_1_WIDTH_MASK GENMASK(15, 0)
@ -198,21 +198,24 @@
#define CTRLDESCL0_5_EN BIT(31)
#define CTRLDESCL0_5_SHADOW_LOAD_EN BIT(30)
#define CTRLDESCL0_5_BPP_16_RGB565 BIT(26)
#define CTRLDESCL0_5_BPP_16_ARGB1555 (BIT(26) | BIT(24))
#define CTRLDESCL0_5_BPP_16_ARGB4444 (BIT(26) | BIT(25))
#define CTRLDESCL0_5_BPP_YCbCr422 (BIT(26) | BIT(25) | BIT(24))
#define CTRLDESCL0_5_BPP_24_RGB888 BIT(27)
#define CTRLDESCL0_5_BPP_32_ARGB8888 (BIT(27) | BIT(24))
#define CTRLDESCL0_5_BPP_32_ABGR8888 (BIT(27) | BIT(25))
#define CTRLDESCL0_5_BPP_16_RGB565 (0x4 << 24)
#define CTRLDESCL0_5_BPP_16_ARGB1555 (0x5 << 24)
#define CTRLDESCL0_5_BPP_16_ARGB4444 (0x6 << 24)
#define CTRLDESCL0_5_BPP_YCbCr422 (0x7 << 24)
#define CTRLDESCL0_5_BPP_24_RGB888 (0x8 << 24)
#define CTRLDESCL0_5_BPP_32_ARGB8888 (0x9 << 24)
#define CTRLDESCL0_5_BPP_32_ABGR8888 (0xa << 24)
#define CTRLDESCL0_5_BPP_MASK GENMASK(27, 24)
#define CTRLDESCL0_5_YUV_FORMAT_Y2VY1U 0
#define CTRLDESCL0_5_YUV_FORMAT_Y2UY1V BIT(14)
#define CTRLDESCL0_5_YUV_FORMAT_VY2UY1 BIT(15)
#define CTRLDESCL0_5_YUV_FORMAT_UY2VY1 (BIT(15) | BIT(14))
#define CTRLDESCL0_5_YUV_FORMAT_Y2VY1U (0x0 << 14)
#define CTRLDESCL0_5_YUV_FORMAT_Y2UY1V (0x1 << 14)
#define CTRLDESCL0_5_YUV_FORMAT_VY2UY1 (0x2 << 14)
#define CTRLDESCL0_5_YUV_FORMAT_UY2VY1 (0x3 << 14)
#define CTRLDESCL0_5_YUV_FORMAT_MASK GENMASK(15, 14)
#define CSC0_CTRL_CSC_MODE_RGB2YCbCr GENMASK(2, 1)
#define CSC0_CTRL_CSC_MODE_YUV2RGB (0x0 << 1)
#define CSC0_CTRL_CSC_MODE_YCbCr2RGB (0x1 << 1)
#define CSC0_CTRL_CSC_MODE_RGB2YUV (0x2 << 1)
#define CSC0_CTRL_CSC_MODE_RGB2YCbCr (0x3 << 1)
#define CSC0_CTRL_CSC_MODE_MASK GENMASK(2, 1)
#define CSC0_CTRL_BYPASS BIT(0)


@ -131,7 +131,7 @@ nv50_dmac_kick(struct nvif_push *push)
{
struct nv50_dmac *dmac = container_of(push, typeof(*dmac), _push);
dmac->cur = push->cur - (u32 *)dmac->_push.mem.object.map.ptr;
dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
if (dmac->put != dmac->cur) {
/* Push buffer fetches are not coherent with BAR1, we need to ensure
* writes have been flushed right through to VRAM before writing PUT.
@ -194,7 +194,7 @@ nv50_dmac_wait(struct nvif_push *push, u32 size)
if (WARN_ON(size > dmac->max))
return -EINVAL;
dmac->cur = push->cur - (u32 *)dmac->_push.mem.object.map.ptr;
dmac->cur = push->cur - (u32 __iomem *)dmac->_push.mem.object.map.ptr;
if (dmac->cur + size >= dmac->max) {
int ret = nv50_dmac_wind(dmac);
if (ret)


@ -672,7 +672,6 @@ nouveau_display_create(struct drm_device *dev)
drm_mode_create_dvi_i_properties(dev);
dev->mode_config.funcs = &nouveau_mode_config_funcs;
dev->mode_config.fb_base = device->func->resource_addr(device, 1);
dev->mode_config.min_width = 0;
dev->mode_config.min_height = 0;


@ -137,6 +137,8 @@ nv04_fbcon_accel_init(struct fb_info *info)
struct nouveau_channel *chan = drm->channel;
struct nvif_device *device = &drm->client.device;
struct nvif_push *push = chan->chan.push;
struct nvkm_device *nvkm_device = nvxx_device(&drm->client.device);
resource_size_t fb_base = nvkm_device->func->resource_addr(nvkm_device, 1);
int surface_fmt, pattern_fmt, rect_fmt;
int ret;
@ -210,8 +212,8 @@ nv04_fbcon_accel_init(struct fb_info *info)
0x0188, chan->vram.handle);
PUSH_NVSQ(push, NV042, 0x0300, surface_fmt,
0x0304, info->fix.line_length | (info->fix.line_length << 16),
0x0308, info->fix.smem_start - dev->mode_config.fb_base,
0x030c, info->fix.smem_start - dev->mode_config.fb_base);
0x0308, info->fix.smem_start - fb_base,
0x030c, info->fix.smem_start - fb_base);
PUSH_NVSQ(push, NV043, 0x0000, nfbdev->rop.handle);
PUSH_NVSQ(push, NV043, 0x0300, 0x55);


@ -177,8 +177,6 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(fbi, helper, sizes);
dev->mode_config.fb_base = dma_addr;
fbi->screen_buffer = omap_gem_vaddr(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
fbi->fix.smem_start = dma_addr;


@ -125,7 +125,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
get_dma_buf(dma_buf);
sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
goto fail_detach;
@ -142,7 +142,7 @@ struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
return obj;
fail_unmap:
dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_TO_DEVICE);
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);


@ -331,9 +331,16 @@ static const struct of_device_id db7430_match[] = {
};
MODULE_DEVICE_TABLE(of, db7430_match);
static const struct spi_device_id db7430_ids[] = {
{ "lms397kf04" },
{ },
};
MODULE_DEVICE_TABLE(spi, db7430_ids);
static struct spi_driver db7430_driver = {
.probe = db7430_probe,
.remove = db7430_remove,
.id_table = db7430_ids,
.driver = {
.name = "db7430-panel",
.of_match_table = db7430_match,

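For context: SPI devices instantiated from the devicetree report a modalias of the form "spi:<name>" rather than an "of:"-prefixed one, so module autoloading needs a matching spi_device_id table even when an OF match table exists; the tpg110 and ws2401 hunks below apply the same fix. On a hypothetical board, the device instance would expose:

    $ cat /sys/bus/spi/devices/spi0.0/modalias
    spi:lms397kf04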

@ -463,9 +463,16 @@ static const struct of_device_id tpg110_match[] = {
};
MODULE_DEVICE_TABLE(of, tpg110_match);
static const struct spi_device_id tpg110_ids[] = {
{ "tpg110" },
{ },
};
MODULE_DEVICE_TABLE(spi, tpg110_ids);
static struct spi_driver tpg110_driver = {
.probe = tpg110_probe,
.remove = tpg110_remove,
.id_table = tpg110_ids,
.driver = {
.name = "tpo-tpg110-panel",
.of_match_table = tpg110_match,


@ -425,9 +425,16 @@ static const struct of_device_id ws2401_match[] = {
};
MODULE_DEVICE_TABLE(of, ws2401_match);
static const struct spi_device_id ws2401_ids[] = {
{ "lms380kf01" },
{ },
};
MODULE_DEVICE_TABLE(spi, ws2401_ids);
static struct spi_driver ws2401_driver = {
.probe = ws2401_probe,
.remove = ws2401_remove,
.id_table = ws2401_ids,
.driver = {
.name = "ws2401-panel",
.of_match_table = ws2401_match,


@ -209,7 +209,7 @@ void panfrost_core_dump(struct panfrost_job *job)
goto dump_header;
}
ret = drm_gem_shmem_vmap(&bo->base, &map);
ret = drm_gem_vmap_unlocked(&bo->base.base, &map);
if (ret) {
dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n");
iter.hdr->bomap.valid = 0;
@ -236,7 +236,7 @@ void panfrost_core_dump(struct panfrost_job *job)
vaddr = map.vaddr;
memcpy(iter.data, vaddr, bo->base.base.size);
drm_gem_shmem_vunmap(&bo->base, &map);
drm_gem_vunmap_unlocked(&bo->base.base, &map);
iter.hdr->bomap.valid = 1;


@ -106,7 +106,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
goto err_close_bo;
}
ret = drm_gem_shmem_vmap(bo, &map);
ret = drm_gem_vmap_unlocked(&bo->base, &map);
if (ret)
goto err_put_mapping;
perfcnt->buf = map.vaddr;
@ -165,7 +165,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
return 0;
err_vunmap:
drm_gem_shmem_vunmap(bo, &map);
drm_gem_vunmap_unlocked(&bo->base, &map);
err_put_mapping:
panfrost_gem_mapping_put(perfcnt->mapping);
err_close_bo:
@ -195,7 +195,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF));
perfcnt->user = NULL;
drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base, &map);
drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map);
perfcnt->buf = NULL;
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);


@ -1261,8 +1261,6 @@ int qxl_modeset_init(struct qxl_device *qdev)
qdev->ddev.mode_config.max_width = 8192;
qdev->ddev.mode_config.max_height = 8192;
qdev->ddev.mode_config.fb_base = qdev->vram_base;
drm_mode_create_suggested_offset_properties(&qdev->ddev);
qxl_mode_create_hotplug_mode_update_property(qdev);


@@ -168,9 +168,16 @@ int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map)
bo->map_count++;
goto out;
}
r = ttm_bo_vmap(&bo->tbo, &bo->map);
r = __qxl_bo_pin(bo);
if (r)
return r;
r = ttm_bo_vmap(&bo->tbo, &bo->map);
if (r) {
__qxl_bo_unpin(bo);
return r;
}
bo->map_count = 1;
/* TODO: Remove kptr in favor of map everywhere. */
@@ -192,12 +199,6 @@ int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map)
if (r)
return r;
r = __qxl_bo_pin(bo);
if (r) {
qxl_bo_unreserve(bo);
return r;
}
r = qxl_bo_vmap_locked(bo, map);
qxl_bo_unreserve(bo);
return r;
@@ -247,6 +248,7 @@ void qxl_bo_vunmap_locked(struct qxl_bo *bo)
return;
bo->kptr = NULL;
ttm_bo_vunmap(&bo->tbo, &bo->map);
__qxl_bo_unpin(bo);
}
int qxl_bo_vunmap(struct qxl_bo *bo)
@@ -258,7 +260,6 @@ int qxl_bo_vunmap(struct qxl_bo *bo)
return r;
qxl_bo_vunmap_locked(bo);
__qxl_bo_unpin(bo);
qxl_bo_unreserve(bo);
return 0;
}


@@ -59,7 +59,7 @@ int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map)
struct qxl_bo *bo = gem_to_qxl_bo(obj);
int ret;
ret = qxl_bo_vmap(bo, map);
ret = qxl_bo_vmap_locked(bo, map);
if (ret < 0)
return ret;
@@ -71,5 +71,5 @@ void qxl_gem_prime_vunmap(struct drm_gem_object *obj,
{
struct qxl_bo *bo = gem_to_qxl_bo(obj);
qxl_bo_vunmap(bo);
qxl_bo_vunmap_locked(bo);
}
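Under the common dma-buf locking convention, the prime vmap/vunmap callbacks are now invoked with the object's reservation lock already held, so they must use the _locked variants. The pin/unpin bookkeeping moves into qxl_bo_vmap_locked()/qxl_bo_vunmap_locked() in the previous hunks so that both the prime path and the reserving wrappers keep the buffer pinned for the lifetime of the mapping.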


@@ -1604,8 +1604,6 @@ int radeon_modeset_init(struct radeon_device *rdev)
rdev->ddev->mode_config.fb_modifiers_not_supported = true;
rdev->ddev->mode_config.fb_base = rdev->mc.aper_base;
ret = radeon_modeset_create_props(rdev);
if (ret) {
return ret;


@@ -276,7 +276,7 @@ static int radeonfb_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_info(info, &rfbdev->helper, sizes);
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].base = rdev->mc.aper_base;
info->apertures->ranges[0].size = rdev->mc.aper_size;
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */


@@ -73,6 +73,7 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
entity->priority = priority;
entity->sched_list = num_sched_list > 1 ? sched_list : NULL;
entity->last_scheduled = NULL;
RB_CLEAR_NODE(&entity->rb_tree_node);
if (num_sched_list)
entity->rq = &sched_list[0]->sched_rq[entity->priority];
@@ -207,6 +208,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
finish_cb);
dma_fence_put(f);
INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work);
schedule_work(&job->work);
}
@@ -234,8 +236,10 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
struct drm_sched_fence *s_fence = job->s_fence;
/* Wait for all dependencies to avoid data corruptions */
while ((f = drm_sched_job_dependency(job, entity)))
while ((f = drm_sched_job_dependency(job, entity))) {
dma_fence_wait(f, false);
dma_fence_put(f);
}
drm_sched_fence_scheduled(s_fence);
dma_fence_set_error(&s_fence->finished, -ESRCH);
@@ -250,6 +254,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
continue;
}
dma_fence_get(entity->last_scheduled);
r = dma_fence_add_callback(entity->last_scheduled,
&job->finish_cb,
drm_sched_entity_kill_jobs_cb);
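Three reference-counting details are tightened here: drm_sched_job_dependency() returns a reference on each dependency fence, which the kill-jobs loop now drops with dma_fence_put() after waiting; entity->last_scheduled takes an explicit dma_fence_get() before the callback is armed, so the fence cannot vanish while the callback is outstanding; and the work item is initialised in the callback itself, right before it is scheduled.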
@@ -444,6 +449,19 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
smp_wmb();
spsc_queue_pop(&entity->job_queue);
/*
* Update the entity's location in the min heap according to
* the timestamp of the next job, if any.
*/
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO) {
struct drm_sched_job *next;
next = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
if (next)
drm_sched_rq_update_fifo(entity, next->submit_ts);
}
return sched_job;
}
@@ -508,6 +526,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
atomic_inc(entity->rq->sched->score);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
sched_job->submit_ts = ktime_get();
/* first job wakes up scheduler */
if (first) {
@@ -519,8 +538,13 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
DRM_ERROR("Trying to push to a killed entity\n");
return;
}
drm_sched_rq_add_entity(entity->rq, entity);
spin_unlock(&entity->rq_lock);
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
drm_sched_rq_update_fifo(entity, sched_job->submit_ts);
drm_sched_wakeup(entity->rq->sched);
}
}


@@ -62,6 +62,55 @@
#define to_drm_sched_job(sched_job) \
container_of((sched_job), struct drm_sched_job, queue_node)
int drm_sched_policy = DRM_SCHED_POLICY_RR;
/**
* DOC: sched_policy (int)
* Used to override the default scheduling policy for entities in a run queue.
*/
MODULE_PARM_DESC(sched_policy, "Specify schedule policy for entities on a runqueue, " __stringify(DRM_SCHED_POLICY_RR) " = Round Robin (default), " __stringify(DRM_SCHED_POLICY_FIFO) " = use FIFO.");
module_param_named(sched_policy, drm_sched_policy, int, 0444);
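Since the parameter is registered with 0444 permissions it is read-only at runtime, so the policy has to be chosen at boot; assuming the scheduler is built as the gpu_sched module, a kernel command line containing gpu_sched.sched_policy=1 would select FIFO.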
static __always_inline bool drm_sched_entity_compare_before(struct rb_node *a,
const struct rb_node *b)
{
struct drm_sched_entity *ent_a = rb_entry((a), struct drm_sched_entity, rb_tree_node);
struct drm_sched_entity *ent_b = rb_entry((b), struct drm_sched_entity, rb_tree_node);
return ktime_before(ent_a->oldest_job_waiting, ent_b->oldest_job_waiting);
}
static inline void drm_sched_rq_remove_fifo_locked(struct drm_sched_entity *entity)
{
struct drm_sched_rq *rq = entity->rq;
if (!RB_EMPTY_NODE(&entity->rb_tree_node)) {
rb_erase_cached(&entity->rb_tree_node, &rq->rb_tree_root);
RB_CLEAR_NODE(&entity->rb_tree_node);
}
}
void drm_sched_rq_update_fifo(struct drm_sched_entity *entity, ktime_t ts)
{
/*
* Both locks need to be grabbed, one to protect from entity->rq change
* for entity from within concurrent drm_sched_entity_select_rq and the
* other to update the rb tree structure.
*/
spin_lock(&entity->rq_lock);
spin_lock(&entity->rq->lock);
drm_sched_rq_remove_fifo_locked(entity);
entity->oldest_job_waiting = ts;
rb_add_cached(&entity->rb_tree_node, &entity->rq->rb_tree_root,
drm_sched_entity_compare_before);
spin_unlock(&entity->rq->lock);
spin_unlock(&entity->rq_lock);
}
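The cached rb-tree keeps entities ordered by the submit timestamp of their oldest pending job: insertion and removal are O(log n), while rb_first_cached() hands back the next entity to schedule in O(1). Requeuing the entity on every push and pop, as the entity-side hunks above do, is what preserves this ordering as timestamps advance.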
/**
* drm_sched_rq_init - initialize a given run queue struct
*
@@ -75,6 +124,7 @@ static void drm_sched_rq_init(struct drm_gpu_scheduler *sched,
{
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->entities);
rq->rb_tree_root = RB_ROOT_CACHED;
rq->current_entity = NULL;
rq->sched = sched;
}
@@ -92,9 +142,12 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
{
if (!list_empty(&entity->list))
return;
spin_lock(&rq->lock);
atomic_inc(rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
spin_unlock(&rq->lock);
}
@@ -111,23 +164,30 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
{
if (list_empty(&entity->list))
return;
spin_lock(&rq->lock);
atomic_dec(rq->sched->score);
list_del_init(&entity->list);
if (rq->current_entity == entity)
rq->current_entity = NULL;
if (drm_sched_policy == DRM_SCHED_POLICY_FIFO)
drm_sched_rq_remove_fifo_locked(entity);
spin_unlock(&rq->lock);
}
/**
* drm_sched_rq_select_entity - Select an entity which could provide a job to run
* drm_sched_rq_select_entity_rr - Select an entity which could provide a job to run
*
* @rq: scheduler run queue to check.
*
* Try to find a ready entity, returns NULL if none found.
*/
static struct drm_sched_entity *
drm_sched_rq_select_entity(struct drm_sched_rq *rq)
drm_sched_rq_select_entity_rr(struct drm_sched_rq *rq)
{
struct drm_sched_entity *entity;
@@ -163,6 +223,34 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq)
return NULL;
}
/**
* drm_sched_rq_select_entity_fifo - Select an entity which provides a job to run
*
* @rq: scheduler run queue to check.
*
* Find oldest waiting ready entity, returns NULL if none found.
*/
static struct drm_sched_entity *
drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
{
struct rb_node *rb;
spin_lock(&rq->lock);
for (rb = rb_first_cached(&rq->rb_tree_root); rb; rb = rb_next(rb)) {
struct drm_sched_entity *entity;
entity = rb_entry(rb, struct drm_sched_entity, rb_tree_node);
if (drm_sched_entity_is_ready(entity)) {
rq->current_entity = entity;
reinit_completion(&entity->entity_idle);
break;
}
}
spin_unlock(&rq->lock);
return rb ? rb_entry(rb, struct drm_sched_entity, rb_tree_node) : NULL;
}
/**
* drm_sched_job_done - complete a job
* @s_job: pointer to the job which is done
@@ -803,7 +891,9 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
/* Kernel run queue has higher priority than normal run queue */
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
entity = drm_sched_rq_select_entity(&sched->sched_rq[i]);
entity = drm_sched_policy == DRM_SCHED_POLICY_FIFO ?
drm_sched_rq_select_entity_fifo(&sched->sched_rq[i]) :
drm_sched_rq_select_entity_rr(&sched->sched_rq[i]);
if (entity)
break;
}


@@ -20,6 +20,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
@@ -578,21 +579,24 @@ static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_atomic_helper_damage_iter iter;
struct drm_device *drm = plane->dev;
struct drm_rect src_clip, dst_clip;
struct drm_rect dst_clip;
struct drm_rect damage;
int idx;
if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &src_clip))
return;
dst_clip = plane_state->dst;
if (!drm_rect_intersect(&dst_clip, &src_clip))
return;
if (!drm_dev_enter(drm, &idx))
return;
ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &dst_clip);
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
dst_clip = plane_state->dst;
if (!drm_rect_intersect(&dst_clip, &damage))
continue;
ssd130x_fb_blit_rect(plane_state->fb, &shadow_plane_state->data[0], &dst_clip);
}
drm_dev_exit(idx);
}
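Iterating over the individual damage rectangles instead of flushing their merged bounding box avoids pushing untouched pixels over the slow bus when two small, distant regions change in the same commit; each rectangle is clipped against the plane's destination before it is blitted.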
@@ -642,19 +646,6 @@ static enum drm_mode_status ssd130x_crtc_helper_mode_valid(struct drm_crtc *crtc
return MODE_OK;
}
static int ssd130x_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *new_state)
{
struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
int ret;
ret = drm_atomic_helper_check_crtc_state(new_crtc_state, false);
if (ret)
return ret;
return drm_atomic_add_affected_planes(new_state, crtc);
}
/*
* The CRTC is always enabled. Screen updates are performed by
* the primary plane's atomic_update function. Disabling clears
@@ -662,7 +653,7 @@ static int ssd130x_crtc_helper_atomic_check(struct drm_crtc *crtc,
*/
static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
.mode_valid = ssd130x_crtc_helper_mode_valid,
.atomic_check = ssd130x_crtc_helper_atomic_check,
.atomic_check = drm_crtc_helper_atomic_check,
};
static void ssd130x_crtc_reset(struct drm_crtc *crtc)


@@ -280,7 +280,6 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
}
}
drm->mode_config.fb_base = (resource_size_t)bo->iova;
info->screen_base = (void __iomem *)bo->vaddr + offset;
info->screen_size = size;
info->fix.smem_start = (unsigned long)(bo->iova + offset);


@@ -84,7 +84,7 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_
goto free;
}
map->sgt = dma_buf_map_attachment(map->attach, direction);
map->sgt = dma_buf_map_attachment_unlocked(map->attach, direction);
if (IS_ERR(map->sgt)) {
dma_buf_detach(buf, map->attach);
err = PTR_ERR(map->sgt);
@@ -160,7 +160,8 @@ free:
static void tegra_bo_unpin(struct host1x_bo_mapping *map)
{
if (map->attach) {
dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
dma_buf_unmap_attachment_unlocked(map->attach, map->sgt,
map->direction);
dma_buf_detach(map->attach->dmabuf, map->attach);
} else {
dma_unmap_sgtable(map->dev, map->sgt, map->direction, 0);
@@ -181,7 +182,7 @@ static void *tegra_bo_mmap(struct host1x_bo *bo)
if (obj->vaddr) {
return obj->vaddr;
} else if (obj->gem.import_attach) {
ret = dma_buf_vmap(obj->gem.import_attach->dmabuf, &map);
ret = dma_buf_vmap_unlocked(obj->gem.import_attach->dmabuf, &map);
return ret ? NULL : map.vaddr;
} else {
return vmap(obj->pages, obj->num_pages, VM_MAP,
@@ -197,7 +198,7 @@ static void tegra_bo_munmap(struct host1x_bo *bo, void *addr)
if (obj->vaddr)
return;
else if (obj->gem.import_attach)
dma_buf_vunmap(obj->gem.import_attach->dmabuf, &map);
dma_buf_vunmap_unlocked(obj->gem.import_attach->dmabuf, &map);
else
vunmap(addr);
}
@@ -461,7 +462,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
get_dma_buf(buf);
bo->sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
bo->sgt = dma_buf_map_attachment_unlocked(attach, DMA_TO_DEVICE);
if (IS_ERR(bo->sgt)) {
err = PTR_ERR(bo->sgt);
goto detach;
@@ -479,7 +480,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
detach:
if (!IS_ERR_OR_NULL(bo->sgt))
dma_buf_unmap_attachment(attach, bo->sgt, DMA_TO_DEVICE);
dma_buf_unmap_attachment_unlocked(attach, bo->sgt, DMA_TO_DEVICE);
dma_buf_detach(buf, attach);
dma_buf_put(buf);
@@ -508,8 +509,8 @@ void tegra_bo_free_object(struct drm_gem_object *gem)
tegra_bo_iommu_unmap(tegra, bo);
if (gem->import_attach) {
dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
DMA_TO_DEVICE);
dma_buf_unmap_attachment_unlocked(gem->import_attach, bo->sgt,
DMA_TO_DEVICE);
drm_prime_gem_destroy(gem, NULL);
} else {
tegra_bo_free(gem->dev, bo);

View File

@@ -1,5 +1,13 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_KUNIT_TEST) += drm_format_helper_test.o drm_damage_helper_test.o \
drm_cmdline_parser_test.o drm_rect_test.o drm_format_test.o drm_plane_helper_test.o \
drm_dp_mst_helper_test.o drm_framebuffer_test.o drm_buddy_test.o drm_mm_test.o
obj-$(CONFIG_DRM_KUNIT_TEST) += \
drm_buddy_test.o \
drm_cmdline_parser_test.o \
drm_damage_helper_test.o \
drm_dp_mst_helper_test.o \
drm_format_helper_test.o \
drm_format_test.o \
drm_framebuffer_test.o \
drm_mm_test.o \
drm_plane_helper_test.o \
drm_rect_test.o


@@ -5,44 +5,280 @@
* Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
*/
#define PREFIX_STR "[drm_dp_mst_helper]"
#include <kunit/test.h>
#include <linux/random.h>
#include <drm/display/drm_dp_mst_helper.h>
#include <drm/drm_print.h>
#include "../display/drm_dp_mst_topology_internal.h"
struct drm_dp_mst_calc_pbn_mode_test {
const int clock;
const int bpp;
const bool dsc;
const int expected;
};
static const struct drm_dp_mst_calc_pbn_mode_test drm_dp_mst_calc_pbn_mode_cases[] = {
{
.clock = 154000,
.bpp = 30,
.dsc = false,
.expected = 689
},
{
.clock = 234000,
.bpp = 30,
.dsc = false,
.expected = 1047
},
{
.clock = 297000,
.bpp = 24,
.dsc = false,
.expected = 1063
},
{
.clock = 332880,
.bpp = 24,
.dsc = true,
.expected = 50
},
{
.clock = 324540,
.bpp = 24,
.dsc = true,
.expected = 49
},
};
static void drm_test_dp_mst_calc_pbn_mode(struct kunit *test)
{
int pbn, i;
const struct {
int rate;
int bpp;
int expected;
bool dsc;
} test_params[] = {
{ 154000, 30, 689, false },
{ 234000, 30, 1047, false },
{ 297000, 24, 1063, false },
{ 332880, 24, 50, true },
{ 324540, 24, 49, true },
};
const struct drm_dp_mst_calc_pbn_mode_test *params = test->param_value;
for (i = 0; i < ARRAY_SIZE(test_params); i++) {
pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
test_params[i].bpp,
test_params[i].dsc);
KUNIT_EXPECT_EQ_MSG(test, pbn, test_params[i].expected,
"Expected PBN %d for clock %d bpp %d, got %d\n",
test_params[i].expected, test_params[i].rate,
test_params[i].bpp, pbn);
}
KUNIT_EXPECT_EQ(test, drm_dp_calc_pbn_mode(params->clock, params->bpp, params->dsc),
params->expected);
}
static void dp_mst_calc_pbn_mode_desc(const struct drm_dp_mst_calc_pbn_mode_test *t, char *desc)
{
sprintf(desc, "Clock %d BPP %d DSC %s", t->clock, t->bpp, t->dsc ? "enabled" : "disabled");
}
KUNIT_ARRAY_PARAM(drm_dp_mst_calc_pbn_mode, drm_dp_mst_calc_pbn_mode_cases,
dp_mst_calc_pbn_mode_desc);
static u8 data[] = { 0xff, 0x00, 0xdd };
struct drm_dp_mst_sideband_msg_req_test {
const char *desc;
const struct drm_dp_sideband_msg_req_body in;
};
static const struct drm_dp_mst_sideband_msg_req_test drm_dp_mst_sideband_msg_req_cases[] = {
{
.desc = "DP_ENUM_PATH_RESOURCES with port number",
.in = {
.req_type = DP_ENUM_PATH_RESOURCES,
.u.port_num.port_number = 5,
},
},
{
.desc = "DP_POWER_UP_PHY with port number",
.in = {
.req_type = DP_POWER_UP_PHY,
.u.port_num.port_number = 5,
},
},
{
.desc = "DP_POWER_DOWN_PHY with port number",
.in = {
.req_type = DP_POWER_DOWN_PHY,
.u.port_num.port_number = 5,
},
},
{
.desc = "DP_ALLOCATE_PAYLOAD with SDP stream sinks",
.in = {
.req_type = DP_ALLOCATE_PAYLOAD,
.u.allocate_payload.number_sdp_streams = 3,
.u.allocate_payload.sdp_stream_sink = { 1, 2, 3 },
},
},
{
.desc = "DP_ALLOCATE_PAYLOAD with port number",
.in = {
.req_type = DP_ALLOCATE_PAYLOAD,
.u.allocate_payload.port_number = 0xf,
},
},
{
.desc = "DP_ALLOCATE_PAYLOAD with VCPI",
.in = {
.req_type = DP_ALLOCATE_PAYLOAD,
.u.allocate_payload.vcpi = 0x7f,
},
},
{
.desc = "DP_ALLOCATE_PAYLOAD with PBN",
.in = {
.req_type = DP_ALLOCATE_PAYLOAD,
.u.allocate_payload.pbn = U16_MAX,
},
},
{
.desc = "DP_QUERY_PAYLOAD with port number",
.in = {
.req_type = DP_QUERY_PAYLOAD,
.u.query_payload.port_number = 0xf,
},
},
{
.desc = "DP_QUERY_PAYLOAD with VCPI",
.in = {
.req_type = DP_QUERY_PAYLOAD,
.u.query_payload.vcpi = 0x7f,
},
},
{
.desc = "DP_REMOTE_DPCD_READ with port number",
.in = {
.req_type = DP_REMOTE_DPCD_READ,
.u.dpcd_read.port_number = 0xf,
},
},
{
.desc = "DP_REMOTE_DPCD_READ with DPCD address",
.in = {
.req_type = DP_REMOTE_DPCD_READ,
.u.dpcd_read.dpcd_address = 0xfedcb,
},
},
{
.desc = "DP_REMOTE_DPCD_READ with max number of bytes",
.in = {
.req_type = DP_REMOTE_DPCD_READ,
.u.dpcd_read.num_bytes = U8_MAX,
},
},
{
.desc = "DP_REMOTE_DPCD_WRITE with port number",
.in = {
.req_type = DP_REMOTE_DPCD_WRITE,
.u.dpcd_write.port_number = 0xf,
},
},
{
.desc = "DP_REMOTE_DPCD_WRITE with DPCD address",
.in = {
.req_type = DP_REMOTE_DPCD_WRITE,
.u.dpcd_write.dpcd_address = 0xfedcb,
},
},
{
.desc = "DP_REMOTE_DPCD_WRITE with data array",
.in = {
.req_type = DP_REMOTE_DPCD_WRITE,
.u.dpcd_write.num_bytes = ARRAY_SIZE(data),
.u.dpcd_write.bytes = data,
},
},
{
.desc = "DP_REMOTE_I2C_READ with port number",
.in = {
.req_type = DP_REMOTE_I2C_READ,
.u.i2c_read.port_number = 0xf,
},
},
{
.desc = "DP_REMOTE_I2C_READ with I2C device ID",
.in = {
.req_type = DP_REMOTE_I2C_READ,
.u.i2c_read.read_i2c_device_id = 0x7f,
},
},
{
.desc = "DP_REMOTE_I2C_READ with transactions array",
.in = {
.req_type = DP_REMOTE_I2C_READ,
.u.i2c_read.num_transactions = 3,
.u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3,
.u.i2c_read.transactions = {
{ .bytes = data, .num_bytes = ARRAY_SIZE(data), .i2c_dev_id = 0x7f,
.i2c_transaction_delay = 0xf, },
{ .bytes = data, .num_bytes = ARRAY_SIZE(data), .i2c_dev_id = 0x7e,
.i2c_transaction_delay = 0xe, },
{ .bytes = data, .num_bytes = ARRAY_SIZE(data), .i2c_dev_id = 0x7d,
.i2c_transaction_delay = 0xd, },
},
},
},
{
.desc = "DP_REMOTE_I2C_WRITE with port number",
.in = {
.req_type = DP_REMOTE_I2C_WRITE,
.u.i2c_write.port_number = 0xf,
},
},
{
.desc = "DP_REMOTE_I2C_WRITE with I2C device ID",
.in = {
.req_type = DP_REMOTE_I2C_WRITE,
.u.i2c_write.write_i2c_device_id = 0x7f,
},
},
{
.desc = "DP_REMOTE_I2C_WRITE with data array",
.in = {
.req_type = DP_REMOTE_I2C_WRITE,
.u.i2c_write.num_bytes = ARRAY_SIZE(data),
.u.i2c_write.bytes = data,
},
},
{
.desc = "DP_QUERY_STREAM_ENC_STATUS with stream ID",
.in = {
.req_type = DP_QUERY_STREAM_ENC_STATUS,
.u.enc_status.stream_id = 1,
},
},
{
.desc = "DP_QUERY_STREAM_ENC_STATUS with client ID",
.in = {
.req_type = DP_QUERY_STREAM_ENC_STATUS,
.u.enc_status.client_id = { 0x4f, 0x7f, 0xb4, 0x00, 0x8c, 0x0d, 0x67 },
},
},
{
.desc = "DP_QUERY_STREAM_ENC_STATUS with stream event",
.in = {
.req_type = DP_QUERY_STREAM_ENC_STATUS,
.u.enc_status.stream_event = 3,
},
},
{
.desc = "DP_QUERY_STREAM_ENC_STATUS with valid stream event",
.in = {
.req_type = DP_QUERY_STREAM_ENC_STATUS,
.u.enc_status.valid_stream_event = 0,
},
},
{
.desc = "DP_QUERY_STREAM_ENC_STATUS with stream behavior",
.in = {
.req_type = DP_QUERY_STREAM_ENC_STATUS,
.u.enc_status.stream_behavior = 3,
},
},
{
.desc = "DP_QUERY_STREAM_ENC_STATUS with a valid stream behavior",
.in = {
.req_type = DP_QUERY_STREAM_ENC_STATUS,
.u.enc_status.valid_stream_behavior = 1,
}
},
};
static bool
sideband_msg_req_equal(const struct drm_dp_sideband_msg_req_body *in,
const struct drm_dp_sideband_msg_req_body *out)
@@ -118,41 +354,41 @@ sideband_msg_req_equal(const struct drm_dp_sideband_msg_req_body *in,
return true;
}
static bool
sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in)
static void drm_test_dp_mst_msg_printf(struct drm_printer *p, struct va_format *vaf)
{
struct kunit *test = p->arg;
kunit_err(test, "%pV", vaf);
}
static void drm_test_dp_mst_sideband_msg_req_decode(struct kunit *test)
{
const struct drm_dp_mst_sideband_msg_req_test *params = test->param_value;
const struct drm_dp_sideband_msg_req_body *in = &params->in;
struct drm_dp_sideband_msg_req_body *out;
struct drm_printer p = drm_err_printer(PREFIX_STR);
struct drm_dp_sideband_msg_tx *txmsg;
int i, ret;
bool result = true;
struct drm_printer p = {
.printfn = drm_test_dp_mst_msg_printf,
.arg = test
};
int i;
out = kzalloc(sizeof(*out), GFP_KERNEL);
if (!out)
return false;
out = kunit_kzalloc(test, sizeof(*out), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, out);
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
kfree(out);
return false;
}
txmsg = kunit_kzalloc(test, sizeof(*txmsg), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, txmsg);
drm_dp_encode_sideband_req(in, txmsg);
ret = drm_dp_decode_sideband_req(txmsg, out);
if (ret < 0) {
drm_printf(&p, "Failed to decode sideband request: %d\n",
ret);
result = false;
goto out;
}
KUNIT_EXPECT_GE_MSG(test, drm_dp_decode_sideband_req(txmsg, out), 0,
"Failed to decode sideband request");
if (!sideband_msg_req_equal(in, out)) {
drm_printf(&p, "Encode/decode failed, expected:\n");
KUNIT_FAIL(test, "Encode/decode failed");
kunit_err(test, "Expected:");
drm_dp_dump_sideband_msg_req_body(in, 1, &p);
drm_printf(&p, "Got:\n");
kunit_err(test, "Got:");
drm_dp_dump_sideband_msg_req_body(out, 1, &p);
result = false;
goto out;
}
switch (in->req_type) {
@@ -167,112 +403,21 @@ sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in)
kfree(out->u.i2c_write.bytes);
break;
}
/* Clear everything but the req_type for the input */
memset(&in->u, 0, sizeof(in->u));
out:
kfree(out);
kfree(txmsg);
return result;
}
static void drm_test_dp_mst_sideband_msg_req_decode(struct kunit *test)
static void
drm_dp_mst_sideband_msg_req_desc(const struct drm_dp_mst_sideband_msg_req_test *t, char *desc)
{
struct drm_dp_sideband_msg_req_body in = { 0 };
u8 data[] = { 0xff, 0x0, 0xdd };
int i;
in.req_type = DP_ENUM_PATH_RESOURCES;
in.u.port_num.port_number = 5;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_POWER_UP_PHY;
in.u.port_num.port_number = 5;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_POWER_DOWN_PHY;
in.u.port_num.port_number = 5;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_ALLOCATE_PAYLOAD;
in.u.allocate_payload.number_sdp_streams = 3;
for (i = 0; i < in.u.allocate_payload.number_sdp_streams; i++)
in.u.allocate_payload.sdp_stream_sink[i] = i + 1;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.allocate_payload.port_number = 0xf;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.allocate_payload.vcpi = 0x7f;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.allocate_payload.pbn = U16_MAX;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_QUERY_PAYLOAD;
in.u.query_payload.port_number = 0xf;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.query_payload.vcpi = 0x7f;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_REMOTE_DPCD_READ;
in.u.dpcd_read.port_number = 0xf;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.dpcd_read.dpcd_address = 0xfedcb;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.dpcd_read.num_bytes = U8_MAX;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_REMOTE_DPCD_WRITE;
in.u.dpcd_write.port_number = 0xf;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.dpcd_write.dpcd_address = 0xfedcb;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.dpcd_write.num_bytes = ARRAY_SIZE(data);
in.u.dpcd_write.bytes = data;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_REMOTE_I2C_READ;
in.u.i2c_read.port_number = 0xf;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.i2c_read.read_i2c_device_id = 0x7f;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.i2c_read.num_transactions = 3;
in.u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3;
for (i = 0; i < in.u.i2c_read.num_transactions; i++) {
in.u.i2c_read.transactions[i].bytes = data;
in.u.i2c_read.transactions[i].num_bytes = ARRAY_SIZE(data);
in.u.i2c_read.transactions[i].i2c_dev_id = 0x7f & ~i;
in.u.i2c_read.transactions[i].i2c_transaction_delay = 0xf & ~i;
}
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_REMOTE_I2C_WRITE;
in.u.i2c_write.port_number = 0xf;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.i2c_write.write_i2c_device_id = 0x7f;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.i2c_write.num_bytes = ARRAY_SIZE(data);
in.u.i2c_write.bytes = data;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.req_type = DP_QUERY_STREAM_ENC_STATUS;
in.u.enc_status.stream_id = 1;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
get_random_bytes(in.u.enc_status.client_id,
sizeof(in.u.enc_status.client_id));
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.enc_status.stream_event = 3;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.enc_status.valid_stream_event = 0;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.enc_status.stream_behavior = 3;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
in.u.enc_status.valid_stream_behavior = 1;
KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
strcpy(desc, t->desc);
}
KUNIT_ARRAY_PARAM(drm_dp_mst_sideband_msg_req, drm_dp_mst_sideband_msg_req_cases,
drm_dp_mst_sideband_msg_req_desc);
static struct kunit_case drm_dp_mst_helper_tests[] = {
KUNIT_CASE(drm_test_dp_mst_calc_pbn_mode),
KUNIT_CASE(drm_test_dp_mst_sideband_msg_req_decode),
KUNIT_CASE_PARAM(drm_test_dp_mst_calc_pbn_mode, drm_dp_mst_calc_pbn_mode_gen_params),
KUNIT_CASE_PARAM(drm_test_dp_mst_sideband_msg_req_decode,
drm_dp_mst_sideband_msg_req_gen_params),
{ }
};
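KUNIT_ARRAY_PARAM() turns a case array and a description callback into a <name>_gen_params generator, and KUNIT_CASE_PARAM() wires it to the test function, which then receives one element per run through test->param_value. A minimal self-contained sketch of the same pattern, with hypothetical names unrelated to the DP-MST tests:

#include <kunit/test.h>

struct sum_case {
	int a, b, expected;
};

static const struct sum_case sum_cases[] = {
	{ .a = 1, .b = 2, .expected = 3 },
	{ .a = -1, .b = 1, .expected = 0 },
};

/* Printed as the subtest name for each parameter. */
static void sum_case_desc(const struct sum_case *t, char *desc)
{
	snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%d + %d", t->a, t->b);
}

KUNIT_ARRAY_PARAM(sum, sum_cases, sum_case_desc);

static void sum_test(struct kunit *test)
{
	const struct sum_case *params = test->param_value;

	KUNIT_EXPECT_EQ(test, params->a + params->b, params->expected);
}

static struct kunit_case sum_test_cases[] = {
	KUNIT_CASE_PARAM(sum_test, sum_gen_params),
	{ }
};

static struct kunit_suite sum_test_suite = {
	.name = "sum-example",
	.test_cases = sum_test_cases,
};

kunit_test_suite(sum_test_suite);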


@@ -51,6 +51,19 @@ config DRM_GM12U320
This is a KMS driver for projectors which use the GM12U320 chipset
for video transfer over USB2/3, such as the Acer C120 mini projector.
config DRM_OFDRM
tristate "Open Firmware display driver"
depends on DRM && OF && (PPC || COMPILE_TEST)
select APERTURE_HELPERS
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
DRM driver for Open Firmware framebuffers.
This driver assumes that the display hardware has been initialized
by the Open Firmware before the kernel boots. Scanout buffer, size,
and display format must be provided via device tree.
config DRM_PANEL_MIPI_DBI
tristate "DRM support for MIPI DBI compatible panels"
depends on DRM && SPI


@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_BOCHS) += bochs.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_DRM_OFDRM) += ofdrm.o
obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o
obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o


@@ -543,7 +543,6 @@ static int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.max_width = 8192;
bochs->dev->mode_config.max_height = 8192;
bochs->dev->mode_config.fb_base = bochs->fb_base;
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
bochs->dev->mode_config.prefer_shadow_fbdev = 1;

drivers/gpu/drm/tiny/ofdrm.c (new file, 1424 lines): diff suppressed because it is too large.


@@ -11,6 +11,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -545,19 +546,6 @@ static enum drm_mode_status simpledrm_crtc_helper_mode_valid(struct drm_crtc *cr
return drm_crtc_helper_mode_valid_fixed(crtc, mode, &sdev->mode);
}
static int simpledrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
struct drm_atomic_state *new_state)
{
struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(new_state, crtc);
int ret;
ret = drm_atomic_helper_check_crtc_state(new_crtc_state, false);
if (ret)
return ret;
return drm_atomic_add_affected_planes(new_state, crtc);
}
/*
* The CRTC is always enabled. Screen updates are performed by
* the primary plane's atomic_update function. Disabling clears
@@ -565,7 +553,7 @@ static int simpledrm_crtc_helper_atomic_check(struct drm_crtc *crtc,
*/
static const struct drm_crtc_helper_funcs simpledrm_crtc_helper_funcs = {
.mode_valid = simpledrm_crtc_helper_mode_valid,
.atomic_check = simpledrm_crtc_helper_atomic_check,
.atomic_check = drm_crtc_helper_atomic_check,
};
static const struct drm_crtc_funcs simpledrm_crtc_funcs = {


@@ -229,7 +229,6 @@ int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
return ret;
spin_lock(&rman->lock);
drm_mm_clean(mm);
drm_mm_takedown(mm);
spin_unlock(&rman->lock);


@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_main.o udl_transfer.o
udl-y := udl_drv.o udl_modeset.o udl_main.o udl_transfer.o
obj-$(CONFIG_DRM_UDL) := udl.o


@@ -1,139 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2012 Red Hat
* based in parts on udlfb.c:
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_probe_helper.h>
#include "udl_connector.h"
#include "udl_drv.h"
static int udl_get_edid_block(void *data, u8 *buf, unsigned int block,
size_t len)
{
int ret, i;
u8 *read_buff;
struct udl_device *udl = data;
struct usb_device *udev = udl_to_usb_device(udl);
read_buff = kmalloc(2, GFP_KERNEL);
if (!read_buff)
return -1;
for (i = 0; i < len; i++) {
int bval = (i + block * EDID_LENGTH) << 8;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x02, (0x80 | (0x02 << 5)), bval,
0xA1, read_buff, 2, 1000);
if (ret < 1) {
DRM_ERROR("Read EDID byte %d failed err %x\n", i, ret);
kfree(read_buff);
return -1;
}
buf[i] = read_buff[1];
}
kfree(read_buff);
return 0;
}
static int udl_get_modes(struct drm_connector *connector)
{
struct udl_drm_connector *udl_connector =
container_of(connector,
struct udl_drm_connector,
connector);
drm_connector_update_edid_property(connector, udl_connector->edid);
if (udl_connector->edid)
return drm_add_edid_modes(connector, udl_connector->edid);
return 0;
}
static enum drm_mode_status udl_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct udl_device *udl = to_udl(connector->dev);
if (!udl->sku_pixel_limit)
return 0;
if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
return MODE_VIRTUAL_Y;
return 0;
}
static enum drm_connector_status
udl_detect(struct drm_connector *connector, bool force)
{
struct udl_device *udl = to_udl(connector->dev);
struct udl_drm_connector *udl_connector =
container_of(connector,
struct udl_drm_connector,
connector);
/* cleanup previous edid */
if (udl_connector->edid != NULL) {
kfree(udl_connector->edid);
udl_connector->edid = NULL;
}
udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
if (!udl_connector->edid)
return connector_status_disconnected;
return connector_status_connected;
}
static void udl_connector_destroy(struct drm_connector *connector)
{
struct udl_drm_connector *udl_connector =
container_of(connector,
struct udl_drm_connector,
connector);
drm_connector_cleanup(connector);
kfree(udl_connector->edid);
kfree(connector);
}
static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
.get_modes = udl_get_modes,
.mode_valid = udl_mode_valid,
};
static const struct drm_connector_funcs udl_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = udl_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = udl_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
struct drm_connector *udl_connector_init(struct drm_device *dev)
{
struct udl_drm_connector *udl_connector;
struct drm_connector *connector;
udl_connector = kzalloc(sizeof(struct udl_drm_connector), GFP_KERNEL);
if (!udl_connector)
return ERR_PTR(-ENOMEM);
connector = &udl_connector->connector;
drm_connector_init(dev, connector, &udl_connector_funcs,
DRM_MODE_CONNECTOR_VGA);
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD |
DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
return connector;
}


@@ -1,15 +0,0 @@
#ifndef __UDL_CONNECTOR_H__
#define __UDL_CONNECTOR_H__
#include <drm/drm_crtc.h>
struct edid;
struct udl_drm_connector {
struct drm_connector connector;
/* last udl_detect edid */
struct edid *edid;
};
#endif //__UDL_CONNECTOR_H__


@@ -14,10 +14,13 @@
#include <linux/mm_types.h>
#include <linux/usb.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <drm/drm_encoder.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_plane.h>
struct drm_mode_create_dumb;
@@ -46,21 +49,31 @@ struct urb_list {
size_t size;
};
struct udl_connector {
struct drm_connector connector;
/* last udl_detect edid */
struct edid *edid;
};
static inline struct udl_connector *to_udl_connector(struct drm_connector *connector)
{
return container_of(connector, struct udl_connector, connector);
}
struct udl_device {
struct drm_device drm;
struct device *dev;
struct device *dmadev;
struct drm_simple_display_pipe display_pipe;
struct drm_plane primary_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct mutex gem_lock;
int sku_pixel_limit;
struct urb_list urbs;
char mode_buf[1024];
uint32_t mode_buf_len;
};
#define to_udl(x) container_of(x, struct udl_device, drm)
@@ -89,23 +102,4 @@ int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
int udl_drop_usb(struct drm_device *dev);
int udl_select_std_channel(struct udl_device *udl);
#define CMD_WRITE_RAW8 "\xAF\x60" /**< 8 bit raw write command. */
#define CMD_WRITE_RL8 "\xAF\x61" /**< 8 bit run length command. */
#define CMD_WRITE_COPY8 "\xAF\x62" /**< 8 bit copy command. */
#define CMD_WRITE_RLX8 "\xAF\x63" /**< 8 bit extended run length command. */
#define CMD_WRITE_RAW16 "\xAF\x68" /**< 16 bit raw write command. */
#define CMD_WRITE_RL16 "\xAF\x69" /**< 16 bit run length command. */
#define CMD_WRITE_COPY16 "\xAF\x6A" /**< 16 bit copy command. */
#define CMD_WRITE_RLX16 "\xAF\x6B" /**< 16 bit extended run length command. */
/* On/Off for driving the DisplayLink framebuffer to the display */
#define UDL_REG_BLANK_MODE 0x1f
#define UDL_BLANK_MODE_ON 0x00 /* hsync and vsync on, visible */
#define UDL_BLANK_MODE_BLANKED 0x01 /* hsync and vsync on, blanked */
#define UDL_BLANK_MODE_VSYNC_OFF 0x03 /* vsync off, blanked */
#define UDL_BLANK_MODE_HSYNC_OFF 0x05 /* hsync off, blanked */
#define UDL_BLANK_MODE_POWERDOWN 0x07 /* powered off; requires modeset */
#endif


@@ -8,70 +8,91 @@
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*/
#include <linux/bitfield.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>
#include "udl_drv.h"
#define UDL_COLOR_DEPTH_16BPP 0
#include "udl_proto.h"
/*
* All DisplayLink bulk operations start with 0xAF, followed by specific code
* All operations are written to buffers which then later get sent to device
* All DisplayLink bulk operations start with 0xaf (UDL_MSG_BULK), followed by
* a specific command code. All operations are written to a command buffer, which
* the driver sends to the device.
*/
static char *udl_set_register(char *buf, u8 reg, u8 val)
{
*buf++ = 0xAF;
*buf++ = 0x20;
*buf++ = UDL_MSG_BULK;
*buf++ = UDL_CMD_WRITEREG;
*buf++ = reg;
*buf++ = val;
return buf;
}
static char *udl_vidreg_lock(char *buf)
{
return udl_set_register(buf, 0xFF, 0x00);
return udl_set_register(buf, UDL_REG_VIDREG, UDL_VIDREG_LOCK);
}
static char *udl_vidreg_unlock(char *buf)
{
return udl_set_register(buf, 0xFF, 0xFF);
return udl_set_register(buf, UDL_REG_VIDREG, UDL_VIDREG_UNLOCK);
}
static char *udl_set_blank_mode(char *buf, u8 mode)
{
return udl_set_register(buf, UDL_REG_BLANK_MODE, mode);
return udl_set_register(buf, UDL_REG_BLANKMODE, mode);
}
static char *udl_set_color_depth(char *buf, u8 selection)
{
return udl_set_register(buf, 0x00, selection);
return udl_set_register(buf, UDL_REG_COLORDEPTH, selection);
}
static char *udl_set_base16bpp(char *wrptr, u32 base)
static char *udl_set_base16bpp(char *buf, u32 base)
{
/* the base pointer is 16 bits wide, 0x20 is hi byte. */
wrptr = udl_set_register(wrptr, 0x20, base >> 16);
wrptr = udl_set_register(wrptr, 0x21, base >> 8);
return udl_set_register(wrptr, 0x22, base);
/* the base pointer is 24 bits wide, 0x20 is hi byte. */
u8 reg20 = FIELD_GET(UDL_BASE_ADDR2_MASK, base);
u8 reg21 = FIELD_GET(UDL_BASE_ADDR1_MASK, base);
u8 reg22 = FIELD_GET(UDL_BASE_ADDR0_MASK, base);
buf = udl_set_register(buf, UDL_REG_BASE16BPP_ADDR2, reg20);
buf = udl_set_register(buf, UDL_REG_BASE16BPP_ADDR1, reg21);
buf = udl_set_register(buf, UDL_REG_BASE16BPP_ADDR0, reg22);
return buf;
}
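As a worked example with an illustrative address (not one the driver uses): for base = 0x123456, FIELD_GET(UDL_BASE_ADDR2_MASK, base) yields 0x12 for register 0x20, FIELD_GET(UDL_BASE_ADDR1_MASK, base) yields 0x34 for register 0x21, and FIELD_GET(UDL_BASE_ADDR0_MASK, base) yields 0x56 for register 0x22, replacing the open-coded shifts of the old version.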
/*
* DisplayLink HW has separate 16bpp and 8bpp framebuffers.
* In 24bpp modes, the low 3:2:3 RGB bits go in the 8bpp framebuffer
*/
static char *udl_set_base8bpp(char *wrptr, u32 base)
static char *udl_set_base8bpp(char *buf, u32 base)
{
wrptr = udl_set_register(wrptr, 0x26, base >> 16);
wrptr = udl_set_register(wrptr, 0x27, base >> 8);
return udl_set_register(wrptr, 0x28, base);
/* the base pointer is 24 bits wide, 0x26 is hi byte. */
u8 reg26 = FIELD_GET(UDL_BASE_ADDR2_MASK, base);
u8 reg27 = FIELD_GET(UDL_BASE_ADDR1_MASK, base);
u8 reg28 = FIELD_GET(UDL_BASE_ADDR0_MASK, base);
buf = udl_set_register(buf, UDL_REG_BASE8BPP_ADDR2, reg26);
buf = udl_set_register(buf, UDL_REG_BASE8BPP_ADDR1, reg27);
buf = udl_set_register(buf, UDL_REG_BASE8BPP_ADDR0, reg28);
return buf;
}
static char *udl_set_register_16(char *wrptr, u8 reg, u16 value)
@@ -122,84 +143,46 @@ static char *udl_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
}
/*
* This takes a standard fbdev screeninfo struct and all of its monitor mode
* details and converts them into the DisplayLink equivalent register commands.
ERR(vreg(dev, 0x00, (color_depth == 16) ? 0 : 1));
ERR(vreg_lfsr16(dev, 0x01, xDisplayStart));
ERR(vreg_lfsr16(dev, 0x03, xDisplayEnd));
ERR(vreg_lfsr16(dev, 0x05, yDisplayStart));
ERR(vreg_lfsr16(dev, 0x07, yDisplayEnd));
ERR(vreg_lfsr16(dev, 0x09, xEndCount));
ERR(vreg_lfsr16(dev, 0x0B, hSyncStart));
ERR(vreg_lfsr16(dev, 0x0D, hSyncEnd));
ERR(vreg_big_endian(dev, 0x0F, hPixels));
ERR(vreg_lfsr16(dev, 0x11, yEndCount));
ERR(vreg_lfsr16(dev, 0x13, vSyncStart));
ERR(vreg_lfsr16(dev, 0x15, vSyncEnd));
ERR(vreg_big_endian(dev, 0x17, vPixels));
ERR(vreg_little_endian(dev, 0x1B, pixelClock5KHz));
ERR(vreg(dev, 0x1F, 0));
ERR(vbuf(dev, WRITE_VIDREG_UNLOCK, DSIZEOF(WRITE_VIDREG_UNLOCK)));
* Takes a DRM display mode and converts it into the DisplayLink
* equivalent register commands.
*/
static char *udl_set_vid_cmds(char *wrptr, struct drm_display_mode *mode)
static char *udl_set_display_mode(char *buf, struct drm_display_mode *mode)
{
u16 xds, yds;
u16 xde, yde;
u16 yec;
u16 reg01 = mode->crtc_htotal - mode->crtc_hsync_start;
u16 reg03 = reg01 + mode->crtc_hdisplay;
u16 reg05 = mode->crtc_vtotal - mode->crtc_vsync_start;
u16 reg07 = reg05 + mode->crtc_vdisplay;
u16 reg09 = mode->crtc_htotal - 1;
u16 reg0b = 1; /* libdlo hardcodes hsync start to 1 */
u16 reg0d = mode->crtc_hsync_end - mode->crtc_hsync_start + 1;
u16 reg0f = mode->hdisplay;
u16 reg11 = mode->crtc_vtotal;
u16 reg13 = 0; /* libdlo hardcodes vsync start to 0 */
u16 reg15 = mode->crtc_vsync_end - mode->crtc_vsync_start;
u16 reg17 = mode->crtc_vdisplay;
u16 reg1b = mode->clock / 5;
/* x display start */
xds = mode->crtc_htotal - mode->crtc_hsync_start;
wrptr = udl_set_register_lfsr16(wrptr, 0x01, xds);
/* x display end */
xde = xds + mode->crtc_hdisplay;
wrptr = udl_set_register_lfsr16(wrptr, 0x03, xde);
buf = udl_set_register_lfsr16(buf, UDL_REG_XDISPLAYSTART, reg01);
buf = udl_set_register_lfsr16(buf, UDL_REG_XDISPLAYEND, reg03);
buf = udl_set_register_lfsr16(buf, UDL_REG_YDISPLAYSTART, reg05);
buf = udl_set_register_lfsr16(buf, UDL_REG_YDISPLAYEND, reg07);
buf = udl_set_register_lfsr16(buf, UDL_REG_XENDCOUNT, reg09);
buf = udl_set_register_lfsr16(buf, UDL_REG_HSYNCSTART, reg0b);
buf = udl_set_register_lfsr16(buf, UDL_REG_HSYNCEND, reg0d);
buf = udl_set_register_16(buf, UDL_REG_HPIXELS, reg0f);
buf = udl_set_register_lfsr16(buf, UDL_REG_YENDCOUNT, reg11);
buf = udl_set_register_lfsr16(buf, UDL_REG_VSYNCSTART, reg13);
buf = udl_set_register_lfsr16(buf, UDL_REG_VSYNCEND, reg15);
buf = udl_set_register_16(buf, UDL_REG_VPIXELS, reg17);
buf = udl_set_register_16be(buf, UDL_REG_PIXELCLOCK5KHZ, reg1b);
/* y display start */
yds = mode->crtc_vtotal - mode->crtc_vsync_start;
wrptr = udl_set_register_lfsr16(wrptr, 0x05, yds);
/* y display end */
yde = yds + mode->crtc_vdisplay;
wrptr = udl_set_register_lfsr16(wrptr, 0x07, yde);
/* x end count is active + blanking - 1 */
wrptr = udl_set_register_lfsr16(wrptr, 0x09,
mode->crtc_htotal - 1);
/* libdlo hardcodes hsync start to 1 */
wrptr = udl_set_register_lfsr16(wrptr, 0x0B, 1);
/* hsync end is width of sync pulse + 1 */
wrptr = udl_set_register_lfsr16(wrptr, 0x0D,
mode->crtc_hsync_end - mode->crtc_hsync_start + 1);
/* hpixels is active pixels */
wrptr = udl_set_register_16(wrptr, 0x0F, mode->hdisplay);
/* yendcount is vertical active + vertical blanking */
yec = mode->crtc_vtotal;
wrptr = udl_set_register_lfsr16(wrptr, 0x11, yec);
/* libdlo hardcodes vsync start to 0 */
wrptr = udl_set_register_lfsr16(wrptr, 0x13, 0);
/* vsync end is width of vsync pulse */
wrptr = udl_set_register_lfsr16(wrptr, 0x15, mode->crtc_vsync_end - mode->crtc_vsync_start);
/* vpixels is active pixels */
wrptr = udl_set_register_16(wrptr, 0x17, mode->crtc_vdisplay);
wrptr = udl_set_register_16be(wrptr, 0x1B,
mode->clock / 5);
return wrptr;
return buf;
}
static char *udl_dummy_render(char *wrptr)
{
*wrptr++ = 0xAF;
*wrptr++ = 0x6A; /* copy */
*wrptr++ = UDL_MSG_BULK;
*wrptr++ = UDL_CMD_WRITECOPY16;
*wrptr++ = 0x00; /* from addr */
*wrptr++ = 0x00;
*wrptr++ = 0x00;
@ -210,31 +193,6 @@ static char *udl_dummy_render(char *wrptr)
return wrptr;
}
static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct udl_device *udl = to_udl(dev);
struct urb *urb;
char *buf;
int retval;
if (udl->mode_buf_len == 0) {
DRM_ERROR("No mode set\n");
return -EINVAL;
}
urb = udl_get_urb(dev);
if (!urb)
return -ENOMEM;
buf = (char *)urb->transfer_buffer;
memcpy(buf, udl->mode_buf, udl->mode_buf_len);
retval = udl_submit_urb(dev, urb, udl->mode_buf_len);
DRM_DEBUG("write mode info %d\n", udl->mode_buf_len);
return retval;
}
static long udl_log_cpp(unsigned int cpp)
{
if (WARN_ON(!is_power_of_2(cpp)))
@@ -258,15 +216,9 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
return ret;
log_bpp = ret;
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return ret;
urb = udl_get_urb(dev);
if (!urb) {
ret = -ENOMEM;
goto out_drm_gem_fb_end_cpu_access;
}
if (!urb)
return -ENOMEM;
cmd = urb->transfer_buffer;
for (i = clip->y1; i < clip->y2; i++) {
@@ -278,145 +230,339 @@ static int udl_handle_damage(struct drm_framebuffer *fb,
&cmd, byte_offset, dev_byte_offset,
byte_width);
if (ret)
goto out_drm_gem_fb_end_cpu_access;
return ret;
}
if (cmd > (char *)urb->transfer_buffer) {
/* Send partial buffer remaining before exiting */
int len;
if (cmd < (char *)urb->transfer_buffer + urb->transfer_buffer_length)
*cmd++ = 0xAF;
*cmd++ = UDL_MSG_BULK;
len = cmd - (char *)urb->transfer_buffer;
ret = udl_submit_urb(dev, urb, len);
} else {
udl_urb_completion(urb);
}
ret = 0;
out_drm_gem_fb_end_cpu_access:
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
return ret;
return 0;
}
/*
* Simple display pipeline
* Primary plane
*/
static const uint32_t udl_simple_display_pipe_formats[] = {
static const uint32_t udl_primary_plane_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
};
static enum drm_mode_status
udl_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode)
{
return MODE_OK;
}
static const uint64_t udl_primary_plane_fmtmods[] = {
DRM_FORMAT_MOD_LINEAR,
DRM_FORMAT_MOD_INVALID
};
static void
udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
struct drm_crtc_state *crtc_state,
struct drm_plane_state *plane_state)
static void udl_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *state)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
struct drm_framebuffer *fb = plane_state->fb;
struct udl_device *udl = to_udl(dev);
struct drm_display_mode *mode = &crtc_state->mode;
struct drm_device *dev = plane->dev;
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_rect clip = DRM_RECT_INIT(0, 0, fb->width, fb->height);
char *buf;
char *wrptr;
int color_depth = UDL_COLOR_DEPTH_16BPP;
struct drm_framebuffer *fb = plane_state->fb;
struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
int ret, idx;
buf = (char *)udl->mode_buf;
if (!fb)
return; /* no framebuffer; plane is disabled */
/* This first section has to do with setting the base address on the
* controller associated with the display. There are 2 base
* pointers, currently, we only use the 16 bpp segment.
*/
wrptr = udl_vidreg_lock(buf);
wrptr = udl_set_color_depth(wrptr, color_depth);
/* set base for 16bpp segment to 0 */
wrptr = udl_set_base16bpp(wrptr, 0);
/* set base for 8bpp segment to end of fb */
wrptr = udl_set_base8bpp(wrptr, 2 * mode->vdisplay * mode->hdisplay);
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
return;
wrptr = udl_set_vid_cmds(wrptr, mode);
wrptr = udl_set_blank_mode(wrptr, UDL_BLANK_MODE_ON);
wrptr = udl_vidreg_unlock(wrptr);
if (!drm_dev_enter(dev, &idx))
goto out_drm_gem_fb_end_cpu_access;
wrptr = udl_dummy_render(wrptr);
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
udl_handle_damage(fb, &shadow_plane_state->data[0], &damage);
}
udl->mode_buf_len = wrptr - buf;
drm_dev_exit(idx);
udl_handle_damage(fb, &shadow_plane_state->data[0], &clip);
/* enable display */
udl_crtc_write_mode_to_hw(crtc);
out_drm_gem_fb_end_cpu_access:
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
}
static void
udl_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
static const struct drm_plane_helper_funcs udl_primary_plane_helper_funcs = {
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = drm_plane_helper_atomic_check,
.atomic_update = udl_primary_plane_helper_atomic_update,
};
static const struct drm_plane_funcs udl_primary_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
DRM_GEM_SHADOW_PLANE_FUNCS,
};
/*
* CRTC
*/
static int udl_crtc_helper_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc_state *new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
if (!new_crtc_state->enable)
return 0;
return drm_atomic_helper_check_crtc_primary_plane(new_crtc_state);
}
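drm_atomic_helper_check_crtc_primary_plane() verifies that an enabled primary plane backs the active CRTC, the prerequisite these simple scanout devices share; the ssd130x and simpledrm hunks above drop their open-coded checks in favour of the new drm_crtc_helper_atomic_check() helper for the same reason.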
static void udl_crtc_helper_atomic_enable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_crtc *crtc = &pipe->crtc;
struct drm_device *dev = crtc->dev;
struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
struct drm_display_mode *mode = &crtc_state->mode;
struct urb *urb;
char *buf;
int idx;
if (!drm_dev_enter(dev, &idx))
return;
urb = udl_get_urb(dev);
if (!urb)
return;
goto out;
buf = (char *)urb->transfer_buffer;
buf = udl_vidreg_lock(buf);
buf = udl_set_blank_mode(buf, UDL_BLANK_MODE_POWERDOWN);
buf = udl_set_color_depth(buf, UDL_COLORDEPTH_16BPP);
/* set base for 16bpp segment to 0 */
buf = udl_set_base16bpp(buf, 0);
/* set base for 8bpp segment to end of fb */
buf = udl_set_base8bpp(buf, 2 * mode->vdisplay * mode->hdisplay);
buf = udl_set_display_mode(buf, mode);
buf = udl_set_blank_mode(buf, UDL_BLANKMODE_ON);
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);
udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
out:
drm_dev_exit(idx);
}
static void
udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state)
static void udl_crtc_helper_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state)
{
struct drm_plane_state *state = pipe->plane.state;
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
struct drm_framebuffer *fb = state->fb;
struct drm_rect rect;
struct drm_device *dev = crtc->dev;
struct urb *urb;
char *buf;
int idx;
if (!fb)
if (!drm_dev_enter(dev, &idx))
return;
if (drm_atomic_helper_damage_merged(old_plane_state, state, &rect))
udl_handle_damage(fb, &shadow_plane_state->data[0], &rect);
urb = udl_get_urb(dev);
if (!urb)
goto out;
buf = (char *)urb->transfer_buffer;
buf = udl_vidreg_lock(buf);
buf = udl_set_blank_mode(buf, UDL_BLANKMODE_POWERDOWN);
buf = udl_vidreg_unlock(buf);
buf = udl_dummy_render(buf);
udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
out:
drm_dev_exit(idx);
}
static const struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
.mode_valid = udl_simple_display_pipe_mode_valid,
.enable = udl_simple_display_pipe_enable,
.disable = udl_simple_display_pipe_disable,
.update = udl_simple_display_pipe_update,
DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
static const struct drm_crtc_helper_funcs udl_crtc_helper_funcs = {
.atomic_check = udl_crtc_helper_atomic_check,
.atomic_enable = udl_crtc_helper_atomic_enable,
.atomic_disable = udl_crtc_helper_atomic_disable,
};
static const struct drm_crtc_funcs udl_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
/*
* Encoder
*/
static const struct drm_encoder_funcs udl_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
/*
* Connector
*/
static int udl_connector_helper_get_modes(struct drm_connector *connector)
{
struct udl_connector *udl_connector = to_udl_connector(connector);
drm_connector_update_edid_property(connector, udl_connector->edid);
if (udl_connector->edid)
return drm_add_edid_modes(connector, udl_connector->edid);
return 0;
}
static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
.get_modes = udl_connector_helper_get_modes,
};
static int udl_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len)
{
struct udl_device *udl = data;
struct drm_device *dev = &udl->drm;
struct usb_device *udev = udl_to_usb_device(udl);
u8 *read_buff;
int ret;
size_t i;
read_buff = kmalloc(2, GFP_KERNEL);
if (!read_buff)
return -ENOMEM;
for (i = 0; i < len; i++) {
int bval = (i + block * EDID_LENGTH) << 8;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x02, (0x80 | (0x02 << 5)), bval,
0xA1, read_buff, 2, USB_CTRL_GET_TIMEOUT);
if (ret < 0) {
drm_err(dev, "Read EDID byte %zu failed err %x\n", i, ret);
goto err_kfree;
} else if (ret < 1) {
ret = -EIO;
drm_err(dev, "Read EDID byte %zu failed\n", i);
goto err_kfree;
}
buf[i] = read_buff[1];
}
kfree(read_buff);
return 0;
err_kfree:
kfree(read_buff);
return ret;
}
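Each control message fetches two bytes, of which only the second (read_buff[1]) is an EDID byte; the byte's absolute offset, (i + block * EDID_LENGTH), is carried in the high byte of the wValue field. The rewrite also distinguishes USB transfer errors (ret < 0) from short reads, which now map to -EIO instead of the old catch-all -1.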
static enum drm_connector_status udl_connector_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct udl_device *udl = to_udl(dev);
struct udl_connector *udl_connector = to_udl_connector(connector);
enum drm_connector_status status = connector_status_disconnected;
int idx;
/* cleanup previous EDID */
kfree(udl_connector->edid);
udl_connector->edid = NULL;
if (!drm_dev_enter(dev, &idx))
return connector_status_disconnected;
udl_connector->edid = drm_do_get_edid(connector, udl_get_edid_block, udl);
if (udl_connector->edid)
status = connector_status_connected;
drm_dev_exit(idx);
return status;
}
static void udl_connector_destroy(struct drm_connector *connector)
{
struct udl_connector *udl_connector = to_udl_connector(connector);
drm_connector_cleanup(connector);
kfree(udl_connector->edid);
kfree(udl_connector);
}
static const struct drm_connector_funcs udl_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.detect = udl_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = udl_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
struct drm_connector *udl_connector_init(struct drm_device *dev)
{
struct udl_connector *udl_connector;
struct drm_connector *connector;
int ret;
udl_connector = kzalloc(sizeof(*udl_connector), GFP_KERNEL);
if (!udl_connector)
return ERR_PTR(-ENOMEM);
connector = &udl_connector->connector;
ret = drm_connector_init(dev, connector, &udl_connector_funcs, DRM_MODE_CONNECTOR_VGA);
if (ret)
goto err_kfree;
drm_connector_helper_add(connector, &udl_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_HPD |
DRM_CONNECTOR_POLL_CONNECT |
DRM_CONNECTOR_POLL_DISCONNECT;
return connector;
err_kfree:
kfree(udl_connector);
return ERR_PTR(ret);
}
/*
* Modesetting
*/
static const struct drm_mode_config_funcs udl_mode_funcs = {
static enum drm_mode_status udl_mode_config_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
struct udl_device *udl = to_udl(dev);
if (udl->sku_pixel_limit) {
if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
return MODE_MEM;
}
return MODE_OK;
}
static const struct drm_mode_config_funcs udl_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.mode_valid = udl_mode_config_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
int udl_modeset_init(struct drm_device *dev)
{
size_t format_count = ARRAY_SIZE(udl_simple_display_pipe_formats);
struct udl_device *udl = to_udl(dev);
struct drm_plane *primary_plane;
struct drm_crtc *crtc;
struct drm_encoder *encoder;
struct drm_connector *connector;
int ret;
@@ -426,28 +572,42 @@ int udl_modeset_init(struct drm_device *dev)
dev->mode_config.min_width = 640;
dev->mode_config.min_height = 480;
dev->mode_config.max_width = 2048;
dev->mode_config.max_height = 2048;
dev->mode_config.prefer_shadow = 0;
dev->mode_config.preferred_depth = 16;
dev->mode_config.funcs = &udl_mode_config_funcs;
dev->mode_config.funcs = &udl_mode_funcs;
primary_plane = &udl->primary_plane;
ret = drm_universal_plane_init(dev, primary_plane, 0,
&udl_primary_plane_funcs,
udl_primary_plane_formats,
ARRAY_SIZE(udl_primary_plane_formats),
udl_primary_plane_fmtmods,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ret;
drm_plane_helper_add(primary_plane, &udl_primary_plane_helper_funcs);
drm_plane_enable_fb_damage_clips(primary_plane);
crtc = &udl->crtc;
ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
&udl_crtc_funcs, NULL);
if (ret)
return ret;
drm_crtc_helper_add(crtc, &udl_crtc_helper_funcs);
encoder = &udl->encoder;
ret = drm_encoder_init(dev, encoder, &udl_encoder_funcs, DRM_MODE_ENCODER_DAC, NULL);
if (ret)
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
connector = udl_connector_init(dev);
if (IS_ERR(connector))
return PTR_ERR(connector);
ret = drm_connector_attach_encoder(connector, encoder);
if (ret)
return ret;
drm_mode_config_reset(dev);
return 0;
}


@ -0,0 +1,68 @@
/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef UDL_PROTO_H
#define UDL_PROTO_H
#include <linux/bits.h>
#define UDL_MSG_BULK 0xaf
/* Register access */
#define UDL_CMD_WRITEREG 0x20 /* See register constants below */
/* Framebuffer access */
#define UDL_CMD_WRITERAW8 0x60 /* 8 bit raw write command. */
#define UDL_CMD_WRITERL8 0x61 /* 8 bit run length command. */
#define UDL_CMD_WRITECOPY8 0x62 /* 8 bit copy command. */
#define UDL_CMD_WRITERLX8 0x63 /* 8 bit extended run length command. */
#define UDL_CMD_WRITERAW16 0x68 /* 16 bit raw write command. */
#define UDL_CMD_WRITERL16 0x69 /* 16 bit run length command. */
#define UDL_CMD_WRITECOPY16 0x6a /* 16 bit copy command. */
#define UDL_CMD_WRITERLX16 0x6b /* 16 bit extended run length command. */
/* Color depth */
#define UDL_REG_COLORDEPTH 0x00
#define UDL_COLORDEPTH_16BPP 0
#define UDL_COLORDEPTH_24BPP 1
/* Display-mode settings */
#define UDL_REG_XDISPLAYSTART 0x01
#define UDL_REG_XDISPLAYEND 0x03
#define UDL_REG_YDISPLAYSTART 0x05
#define UDL_REG_YDISPLAYEND 0x07
#define UDL_REG_XENDCOUNT 0x09
#define UDL_REG_HSYNCSTART 0x0b
#define UDL_REG_HSYNCEND 0x0d
#define UDL_REG_HPIXELS 0x0f
#define UDL_REG_YENDCOUNT 0x11
#define UDL_REG_VSYNCSTART 0x13
#define UDL_REG_VSYNCEND 0x15
#define UDL_REG_VPIXELS 0x17
#define UDL_REG_PIXELCLOCK5KHZ 0x1b
/* On/Off for driving the DisplayLink framebuffer to the display */
#define UDL_REG_BLANKMODE 0x1f
#define UDL_BLANKMODE_ON 0x00 /* hsync and vsync on, visible */
#define UDL_BLANKMODE_BLANKED 0x01 /* hsync and vsync on, blanked */
#define UDL_BLANKMODE_VSYNC_OFF 0x03 /* vsync off, blanked */
#define UDL_BLANKMODE_HSYNC_OFF 0x05 /* hsync off, blanked */
#define UDL_BLANKMODE_POWERDOWN 0x07 /* powered off; requires modeset */
/* Framebuffer address */
#define UDL_REG_BASE16BPP_ADDR2 0x20
#define UDL_REG_BASE16BPP_ADDR1 0x21
#define UDL_REG_BASE16BPP_ADDR0 0x22
#define UDL_REG_BASE8BPP_ADDR2 0x26
#define UDL_REG_BASE8BPP_ADDR1 0x27
#define UDL_REG_BASE8BPP_ADDR0 0x28
#define UDL_BASE_ADDR0_MASK GENMASK(7, 0)
#define UDL_BASE_ADDR1_MASK GENMASK(15, 8)
#define UDL_BASE_ADDR2_MASK GENMASK(23, 16)
/* Lock/unlock video registers */
#define UDL_REG_VIDREG 0xff
#define UDL_VIDREG_LOCK 0x00
#define UDL_VIDREG_UNLOCK 0xff
#endif
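
To illustrate how these constants combine on the wire (the helper names below are ours for illustration, not part of this header): each register write is a four-byte bulk sequence, and the 24-bit framebuffer start address is split across three one-byte registers:

#include <linux/bitfield.h>

/* sketch: emit one 4-byte register write into a bulk command buffer */
static u8 *example_set_register(u8 *buf, u8 reg, u8 val)
{
	*buf++ = UDL_MSG_BULK;     /* 0xaf: start of bulk command */
	*buf++ = UDL_CMD_WRITEREG; /* 0x20: register write */
	*buf++ = reg;
	*buf++ = val;
	return buf;
}

/* sketch: program the 16bpp framebuffer base, one byte per register */
static u8 *example_set_base16bpp(u8 *buf, u32 base)
{
	buf = example_set_register(buf, UDL_REG_BASE16BPP_ADDR2,
				   FIELD_GET(UDL_BASE_ADDR2_MASK, base));
	buf = example_set_register(buf, UDL_REG_BASE16BPP_ADDR1,
				   FIELD_GET(UDL_BASE_ADDR1_MASK, base));
	return example_set_register(buf, UDL_REG_BASE16BPP_ADDR0,
				    FIELD_GET(UDL_BASE_ADDR0_MASK, base));
}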


@ -10,6 +10,7 @@
#include <asm/unaligned.h>
#include "udl_drv.h"
#include "udl_proto.h"
#define MAX_CMD_PIXELS 255
@ -89,8 +90,8 @@ static void udl_compress_hline16(
const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
uint16_t pixel_val16;
*cmd++ = UDL_MSG_BULK;
*cmd++ = UDL_CMD_WRITERLX16;
*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
*cmd++ = (uint8_t) ((dev_addr >> 8) & 0xFF);
*cmd++ = (uint8_t) ((dev_addr) & 0xFF);
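/* header so far: 0xaf (bulk marker), 0x6b (16-bit extended RLE command),
 * then the 24-bit device address, most-significant byte first */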
@ -152,7 +153,7 @@ static void udl_compress_hline16(
if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
/* Fill leftover bytes with no-ops */
if (cmd_buffer_end > cmd)
memset(cmd, UDL_MSG_BULK, cmd_buffer_end - cmd);
cmd = (uint8_t *) cmd_buffer_end;
}


@ -542,7 +542,7 @@ static void vc4_hdmi_connector_reset(struct drm_connector *connector)
new_state->base.max_bpc = 8;
new_state->base.max_requested_bpc = 8;
new_state->output_format = VC4_HDMI_OUTPUT_RGB;
drm_atomic_helper_connector_tv_margins_reset(connector);
}
static struct drm_connector_state *


@ -69,6 +69,7 @@
#define VEC_CONFIG0_STD_MASK GENMASK(1, 0)
#define VEC_CONFIG0_NTSC_STD 0
#define VEC_CONFIG0_PAL_BDGHI_STD 1
#define VEC_CONFIG0_PAL_M_STD 2
#define VEC_CONFIG0_PAL_N_STD 3
#define VEC_SCHPH 0x108
@ -255,10 +256,9 @@ static const struct vc4_vec_tv_mode vc4_vec_tv_modes[] = {
.config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
[VC4_VEC_TV_MODE_PAL_M] = {
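/* PAL-M is 525-line/59.94 Hz: NTSC timing with PAL chroma encoding,
 * so use the dedicated VEC standard rather than a custom subcarrier */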
.mode = &ntsc_mode,
.config0 = VEC_CONFIG0_PAL_M_STD,
.config1 = VEC_CONFIG1_C_CVBS_CVBS,
},
};


@ -26,7 +26,8 @@ int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
if (umem_dmabuf->sgt)
goto wait_fence;
sgt = dma_buf_map_attachment_unlocked(umem_dmabuf->attach,
DMA_BIDIRECTIONAL);
if (IS_ERR(sgt))
return PTR_ERR(sgt);
@ -102,8 +103,8 @@ void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf)
umem_dmabuf->last_sg_trim = 0;
}
dma_buf_unmap_attachment_unlocked(umem_dmabuf->attach, umem_dmabuf->sgt,
DMA_BIDIRECTIONAL);
umem_dmabuf->sgt = NULL;
}
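
The pattern here is representative of the whole series: importers that used to call the locked variants outside any reservation-lock context now use the _unlocked entry points, which take and drop the buffer's dma_resv lock internally. A minimal round-trip with the new API, assuming an already-created attachment, looks roughly like:

#include <linux/dma-buf.h>
#include <linux/err.h>

/* sketch: one map/use/unmap round-trip with the unlocked API;
 * assumes 'attach' was set up earlier with dma_buf_attach() */
static int example_use_attachment(struct dma_buf_attachment *attach)
{
	struct sg_table *sgt;

	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* ... hand sgt to the device ... */

	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
	return 0;
}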


@ -101,7 +101,7 @@ static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
if (buf->db_attach) {
struct iosys_map map;
if (!dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map))
buf->vaddr = map.vaddr;
return buf->vaddr;
@ -382,18 +382,12 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
struct vb2_dc_attachment *attach = db_attach->priv;
struct sg_table *sgt;
sgt = &attach->sgt;
/* return previously mapped sg table */
if (attach->dma_dir == dma_dir)
return sgt;
/* release any previous cache */
if (attach->dma_dir != DMA_NONE) {
@ -409,14 +403,11 @@ static struct sg_table *vb2_dc_dmabuf_ops_map(
if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
DMA_ATTR_SKIP_CPU_SYNC)) {
pr_err("failed to map scatterlist\n");
return ERR_PTR(-EIO);
}
attach->dma_dir = dma_dir;
return sgt;
}
@ -711,7 +702,7 @@ static int vb2_dc_map_dmabuf(void *mem_priv)
}
/* get the associated scatterlist for this buffer */
sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
if (IS_ERR(sgt)) {
pr_err("Error getting dmabuf scatterlist\n");
return -EINVAL;
@ -722,7 +713,8 @@ static int vb2_dc_map_dmabuf(void *mem_priv)
if (contig_size < buf->size) {
pr_err("contiguous chunk is too small %lu/%lu\n",
contig_size, buf->size);
dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt,
buf->dma_dir);
return -EFAULT;
}
@ -750,10 +742,10 @@ static void vb2_dc_unmap_dmabuf(void *mem_priv)
}
if (buf->vaddr) {
dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
buf->vaddr = NULL;
}
dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);
buf->dma_addr = 0;
buf->dma_sgt = NULL;
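
The dropped "stealing dmabuf mutex" blocks rely on the other half of the new convention: the dma-buf core now calls the exporter's map/unmap callbacks with the buffer's reservation lock already held, so per-attachment serialization comes for free. An exporter can make that assumption explicit (sketch; the callback name is hypothetical):

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>

/* sketch, hypothetical exporter callback: under the new convention the
 * dma-buf core already holds the reservation lock around map/unmap */
static struct sg_table *example_ops_map(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	dma_resv_assert_held(attach->dmabuf->resv);

	/* ... reuse a cached sg_table or dma_map_sgtable() a fresh one ... */
	return ERR_PTR(-ENOSYS); /* placeholder for the real mapping logic */
}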


@ -309,7 +309,7 @@ static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
if (!buf->vaddr) {
if (buf->db_attach) {
ret = dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map);
buf->vaddr = ret ? NULL : map.vaddr;
} else {
buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
@ -424,18 +424,12 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
struct vb2_dma_sg_attachment *attach = db_attach->priv;
struct sg_table *sgt;
sgt = &attach->sgt;
/* return previously mapped sg table */
if (attach->dma_dir == dma_dir)
return sgt;
/* release any previous cache */
if (attach->dma_dir != DMA_NONE) {
@ -446,14 +440,11 @@ static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
/* mapping to the client with new direction */
if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
pr_err("failed to map scatterlist\n");
return ERR_PTR(-EIO);
}
attach->dma_dir = dma_dir;
return sgt;
}
@ -565,7 +556,7 @@ static int vb2_dma_sg_map_dmabuf(void *mem_priv)
}
/* get the associated scatterlist for this buffer */
sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
if (IS_ERR(sgt)) {
pr_err("Error getting dmabuf scatterlist\n");
return -EINVAL;
@ -594,10 +585,10 @@ static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
}
if (buf->vaddr) {
dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
buf->vaddr = NULL;
}
dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);
buf->dma_sgt = NULL;
}


@ -267,18 +267,12 @@ static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
struct vb2_vmalloc_attachment *attach = db_attach->priv;
struct sg_table *sgt;
sgt = &attach->sgt;
/* return previously mapped sg table */
if (attach->dma_dir == dma_dir)
return sgt;
/* release any previous cache */
if (attach->dma_dir != DMA_NONE) {
@ -289,14 +283,11 @@ static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
/* mapping to the client with new direction */
if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
pr_err("failed to map scatterlist\n");
return ERR_PTR(-EIO);
}
attach->dma_dir = dma_dir;
return sgt;
}
@ -376,7 +367,7 @@ static int vb2_vmalloc_map_dmabuf(void *mem_priv)
struct iosys_map map;
int ret;
ret = dma_buf_vmap_unlocked(buf->dbuf, &map);
if (ret)
return -EFAULT;
buf->vaddr = map.vaddr;
@ -389,7 +380,7 @@ static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
struct vb2_vmalloc_buf *buf = mem_priv;
struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
dma_buf_vunmap_unlocked(buf->dbuf, &map);
buf->vaddr = NULL;
}
@ -399,7 +390,7 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
if (buf->vaddr)
dma_buf_vunmap_unlocked(buf->dbuf, &map);
kfree(buf);
}


@ -38,7 +38,7 @@ static void tegra_vde_release_entry(struct tegra_vde_cache_entry *entry)
if (entry->vde->domain)
tegra_vde_iommu_unmap(entry->vde, entry->iova);
dma_buf_unmap_attachment_unlocked(entry->a, entry->sgt, entry->dma_dir);
dma_buf_detach(dmabuf, entry->a);
dma_buf_put(dmabuf);
@ -102,7 +102,7 @@ int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
goto err_unlock;
}
sgt = dma_buf_map_attachment_unlocked(attachment, dma_dir);
if (IS_ERR(sgt)) {
dev_err(dev, "Failed to get dmabufs sg_table\n");
err = PTR_ERR(sgt);
@ -152,7 +152,7 @@ ref:
err_free:
kfree(entry);
err_unmap:
dma_buf_unmap_attachment_unlocked(attachment, sgt, dma_dir);
err_detach:
dma_buf_detach(dmabuf, attachment);
err_unlock:


@ -310,8 +310,8 @@ static void fastrpc_free_map(struct kref *ref)
return;
}
}
dma_buf_unmap_attachment_unlocked(map->attach, map->table,
DMA_BIDIRECTIONAL);
dma_buf_detach(map->buf, map->attach);
dma_buf_put(map->buf);
}
@ -726,7 +726,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
goto attach_err;
}
map->table = dma_buf_map_attachment_unlocked(map->attach, DMA_BIDIRECTIONAL);
if (IS_ERR(map->table)) {
err = PTR_ERR(map->table);
goto map_err;


@ -455,6 +455,7 @@ config FB_ATARI
config FB_OF
bool "Open Firmware frame buffer device support"
depends on (FB = y) && PPC && (!PPC_PSERIES || PCI)
depends on !DRM_OFDRM
select APERTURE_HELPERS
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA

View File

@ -600,7 +600,7 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
gntdev_dmabuf->u.imp.attach = attach;
sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
if (IS_ERR(sgt)) {
ret = ERR_CAST(sgt);
goto fail_detach;
@ -658,7 +658,7 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
fail_end_access:
dmabuf_imp_end_foreign_access(gntdev_dmabuf->u.imp.refs, count);
fail_unmap:
dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
dma_buf_detach(dma_buf, attach);
fail_free_obj:
@ -708,8 +708,8 @@ static int dmabuf_imp_release(struct gntdev_dmabuf_priv *priv, u32 fd)
attach = gntdev_dmabuf->u.imp.attach;
if (gntdev_dmabuf->u.imp.sgt)
dma_buf_unmap_attachment_unlocked(attach, gntdev_dmabuf->u.imp.sgt,
DMA_BIDIRECTIONAL);
dma_buf = attach->dmabuf;
dma_buf_detach(attach->dmabuf, attach);
dma_buf_put(dma_buf);


@ -58,10 +58,9 @@ int drm_atomic_helper_check_plane_state(struct drm_plane_state *plane_state,
int max_scale,
bool can_position,
bool can_update_disabled);
int drm_atomic_helper_check_planes(struct drm_device *dev,
struct drm_atomic_state *state);
int drm_atomic_helper_check_crtc_primary_plane(struct drm_crtc_state *crtc_state);
int drm_atomic_helper_check(struct drm_device *dev,
struct drm_atomic_state *state);
void drm_atomic_helper_commit_tail(struct drm_atomic_state *state);
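
Callers of the removed drm_atomic_helper_check_crtc_state() move to the new, narrower helper; a CRTC .atomic_check using it might look like this (driver function name is hypothetical):

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

/* sketch, hypothetical driver: require an enabled primary plane whenever
 * the CRTC is enabled, using the new narrower helper */
static int example_crtc_atomic_check(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);
	int ret;

	if (!crtc_state->enable)
		return 0;

	ret = drm_atomic_helper_check_crtc_primary_plane(crtc_state);
	if (ret)
		return ret;

	return drm_atomic_add_affected_planes(state, crtc);
}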

Some files were not shown because too many files have changed in this diff.