Merge tag 'drm-misc-next-2021-03-03' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.13:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
  - %p4cc printk format modifier
  - atomic: introduce drm_crtc_commit_wait, rework atomic plane state
    helpers to take the drm_atomic_state structure
  - dma-buf: heaps rework to return a struct dma_buf
  - simple-kms: Add plane state helpers
  - ttm: debugfs support, removal of sysfs

Driver Changes:
  - Convert drivers to shadow plane helpers
  - arc: Move to drm/tiny
  - ast: cursor plane reworks
  - gma500: Remove TTM and medfield support
  - mxsfb: imx8mm support
  - panfrost: MMU IRQ handling rework
  - qxl: rework to better handle resources deallocation, locking
  - sun4i: Add alpha properties for UI and VI layers
  - vc4: RPi4 CEC support
  - vmwgfx: doc cleanup

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20210303100600.dgnkadonzuvfnu22@gilmour
commit 51c3b916a4 by Dave Airlie, 2021-03-16 16:45:12 +10:00
286 changed files with 5484 additions and 4843 deletions


@@ -567,6 +567,24 @@ For printing netdev_features_t.
Passed by reference.
V4L2 and DRM FourCC code (pixel format)
---------------------------------------
::

        %p4cc

Print a FourCC code used by V4L2 or DRM, including format endianness and
its numerical value as hexadecimal.

Passed by reference.

Examples::

        %p4cc   BG12 little-endian (0x32314742)
        %p4cc   Y10  little-endian (0x20303159)
        %p4cc   NV12 big-endian (0xb231564e)
Thanks
======
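A quick usage sketch of the new specifier (hypothetical driver code, not part
of this patch; %p4cc dereferences its argument, so the FourCC value is passed
by address)::

        #include <linux/types.h>
        #include <linux/printk.h>
        #include <drm/drm_fourcc.h>

        static void demo_print_fourcc(void)
        {
                u32 fourcc = DRM_FORMAT_XRGB8888;       /* FourCC 'XR24' */

                /* %p4cc takes a pointer to the u32, not the value itself */
                printk(KERN_INFO "pixel format: %p4cc\n", &fourcc);
        }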


@@ -109,7 +109,7 @@ required:
- resets
- ddc
additionalProperties: false
unevaluatedProperties: false
examples:
- |


@@ -0,0 +1,110 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/fsl,lcdif.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Freescale/NXP i.MX LCD Interface (LCDIF)

maintainers:
  - Marek Vasut <marex@denx.de>
  - Stefan Agner <stefan@agner.ch>

description: |
  (e)LCDIF display controller found in the Freescale/NXP i.MX SoCs.

properties:
  compatible:
    oneOf:
      - enum:
          - fsl,imx23-lcdif
          - fsl,imx28-lcdif
          - fsl,imx6sx-lcdif
      - items:
          - enum:
              - fsl,imx6sl-lcdif
              - fsl,imx6sll-lcdif
              - fsl,imx6ul-lcdif
              - fsl,imx7d-lcdif
              - fsl,imx8mm-lcdif
              - fsl,imx8mq-lcdif
          - const: fsl,imx6sx-lcdif

  reg:
    maxItems: 1

  clocks:
    items:
      - description: Pixel clock
      - description: Bus clock
      - description: Display AXI clock
    minItems: 1

  clock-names:
    items:
      - const: pix
      - const: axi
      - const: disp_axi
    minItems: 1

  interrupts:
    maxItems: 1

  port:
    $ref: /schemas/graph.yaml#/properties/port
    description: The LCDIF output port

required:
  - compatible
  - reg
  - clocks
  - interrupts
  - port

additionalProperties: false

allOf:
  - if:
      properties:
        compatible:
          contains:
            const: fsl,imx6sx-lcdif
    then:
      properties:
        clocks:
          minItems: 2
          maxItems: 3
        clock-names:
          minItems: 2
          maxItems: 3
      required:
        - clock-names
    else:
      properties:
        clocks:
          maxItems: 1
        clock-names:
          maxItems: 1

examples:
  - |
    #include <dt-bindings/clock/imx6sx-clock.h>
    #include <dt-bindings/interrupt-controller/arm-gic.h>

    display-controller@2220000 {
        compatible = "fsl,imx6sx-lcdif";
        reg = <0x02220000 0x4000>;
        interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&clks IMX6SX_CLK_LCDIF1_PIX>,
                 <&clks IMX6SX_CLK_LCDIF_APB>,
                 <&clks IMX6SX_CLK_DISPLAY_AXI>;
        clock-names = "pix", "axi", "disp_axi";

        port {
            endpoint {
                remote-endpoint = <&panel_in>;
            };
        };
    };

...


@@ -1,87 +0,0 @@
* Freescale MXS LCD Interface (LCDIF)
New bindings:
=============
Required properties:
- compatible: Should be "fsl,imx23-lcdif" for i.MX23.
Should be "fsl,imx28-lcdif" for i.MX28.
Should be "fsl,imx6sx-lcdif" for i.MX6SX.
Should be "fsl,imx8mq-lcdif" for i.MX8MQ.
- reg: Address and length of the register set for LCDIF
- interrupts: Should contain LCDIF interrupt
- clocks: A list of phandle + clock-specifier pairs, one for each
entry in 'clock-names'.
- clock-names: A list of clock names. For MXSFB it should contain:
- "pix" for the LCDIF block clock
- (MX6SX-only) "axi", "disp_axi" for the bus interface clock
Required sub-nodes:
- port: The connection to an encoder chip.
Example:
lcdif1: display-controller@2220000 {
compatible = "fsl,imx6sx-lcdif", "fsl,imx28-lcdif";
reg = <0x02220000 0x4000>;
interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_LCDIF1_PIX>,
<&clks IMX6SX_CLK_LCDIF_APB>,
<&clks IMX6SX_CLK_DISPLAY_AXI>;
clock-names = "pix", "axi", "disp_axi";
port {
parallel_out: endpoint {
remote-endpoint = <&panel_in_parallel>;
};
};
};
Deprecated bindings:
====================
Required properties:
- compatible: Should be "fsl,imx23-lcdif" for i.MX23.
Should be "fsl,imx28-lcdif" for i.MX28.
- reg: Address and length of the register set for LCDIF
- interrupts: Should contain LCDIF interrupts
- display: phandle to display node (see below for details)
* display node
Required properties:
- bits-per-pixel: <16> for RGB565, <32> for RGB888/666.
- bus-width: number of data lines. Could be <8>, <16>, <18> or <24>.
Required sub-node:
- display-timings: Refer to binding doc display-timing.txt for details.
Examples:
lcdif@80030000 {
compatible = "fsl,imx28-lcdif";
reg = <0x80030000 2000>;
interrupts = <38 86>;
display: display {
bits-per-pixel = <32>;
bus-width = <24>;
display-timings {
native-mode = <&timing0>;
timing0: timing0 {
clock-frequency = <33500000>;
hactive = <800>;
vactive = <480>;
hfront-porch = <164>;
hback-porch = <89>;
hsync-len = <10>;
vback-porch = <23>;
vfront-porch = <10>;
vsync-len = <10>;
hsync-active = <0>;
vsync-active = <0>;
de-active = <1>;
pixelclk-active = <0>;
};
};
};
};


@@ -80,6 +80,18 @@ Atomic State Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
:export:
GEM Atomic Helper Reference
---------------------------
.. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c
:doc: overview
.. kernel-doc:: include/drm/drm_gem_atomic_helper.h
:internal:
.. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c
:export:
Simple KMS Helper Reference
===========================


@@ -459,52 +459,6 @@ Contact: Emil Velikov, respective driver maintainers
Level: Intermediate
Plumb drm_atomic_state all over
-------------------------------
Currently various atomic functions take just a single or a handful of
object states (eg. plane state). While that single object state can
suffice for some simple cases, we often have to dig out additional
object states for dealing with various dependencies between the individual
objects or the hardware they represent. The process of digging out the
additional states is rather non-intuitive and error prone.
To fix that most functions should rather take the overall
drm_atomic_state as one of their parameters. The other parameters
would generally be the object(s) we mainly want to interact with.
For example, instead of
.. code-block:: c
int (*atomic_check)(struct drm_plane *plane, struct drm_plane_state *state);
we would have something like
.. code-block:: c
int (*atomic_check)(struct drm_plane *plane, struct drm_atomic_state *state);
The implementation can then trivially gain access to any required object
state(s) via drm_atomic_get_plane_state(), drm_atomic_get_new_plane_state(),
drm_atomic_get_old_plane_state(), and their equivalents for
other object types.
Additionally many drivers currently access the object->state pointer
directly in their commit functions. That is not going to work if we
eg. want to allow deeper commit pipelines as those pointers could
then point to the states corresponding to a future commit instead of
the current commit we're trying to process. Also non-blocking commits
execute locklessly so there are serious concerns with dereferencing
the object->state pointers without holding the locks that protect them.
Use of drm_atomic_get_new_plane_state(), drm_atomic_get_old_plane_state(),
etc. avoids these problems as well since they relate to a specific
commit via the passed in drm_atomic_state.
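In practice the conversion looks like the driver hunks later in this pull; a
minimal sketch of the end result (the foo_ naming is hypothetical)

.. code-block:: c

   static int foo_plane_atomic_check(struct drm_plane *plane,
                                     struct drm_atomic_state *state)
   {
           /* Dig the per-object states out of the overall atomic state. */
           struct drm_plane_state *new_plane_state =
                   drm_atomic_get_new_plane_state(state, plane);
           struct drm_crtc_state *crtc_state;

           if (!new_plane_state->crtc || !new_plane_state->fb)
                   return 0;

           crtc_state = drm_atomic_get_new_crtc_state(state,
                                                      new_plane_state->crtc);
           if (!crtc_state)
                   return -EINVAL;

           /* hardware-specific checks go here */
           return 0;
   }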
Contact: Ville Syrjälä, Daniel Vetter
Level: Intermediate
Use struct dma_buf_map throughout codebase
------------------------------------------
@@ -596,20 +550,24 @@ Contact: Daniel Vetter
Level: Intermediate
KMS cleanups
------------
Object lifetime fixes
---------------------
Some of these date from the very introduction of KMS in 2008 ...
There are two related issues here:
- Make ->funcs and ->helper_private vtables optional. There's a bunch of empty
function tables in drivers, but before we can remove them we need to make sure
that all the users in helpers and drivers do correctly check for a NULL
vtable.
- Cleanup up the various ->destroy callbacks, which often are all the same
simple code.
- Clean up the various ->destroy callbacks. A lot of them just wrap the
drm_*_cleanup implementations and can be removed. Some tack a kfree() at the
end, for which we could add drm_*_cleanup_kfree(). And then there's the (for
historical reasons) misnamed drm_primary_helper_destroy() function.
- Lots of drivers erroneously allocate DRM modeset objects using devm_kzalloc,
which results in use-after-free issues on driver unload. This can be serious
trouble even for drivers for hardware integrated on the SoC due to
EPROBE_DEFER backoff.
Both these problems can be solved by switching over to drmm_kzalloc(), and the
various convenience wrappers provided, e.g. drmm_crtc_alloc_with_planes(),
drmm_universal_plane_alloc(), ... and so on.
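A minimal sketch of the managed-allocation side (struct foo_plane is
hypothetical; a fuller conversion would use drmm_universal_plane_alloc()
directly)

.. code-block:: c

   struct foo_plane {
           struct drm_plane base;
   };

   static struct foo_plane *foo_plane_create(struct drm_device *drm)
   {
           /*
            * Unlike devm_kzalloc() memory, which is freed at driver
            * unbind while userspace may still hold references, this
            * allocation lives until the last drm_dev_put().
            */
           struct foo_plane *fp = drmm_kzalloc(drm, sizeof(*fp), GFP_KERNEL);

           if (!fp)
                   return ERR_PTR(-ENOMEM);

           return fp;
   }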
Contact: Daniel Vetter
Level: Intermediate
@@ -666,8 +624,6 @@ See the documentation of :ref:`VKMS <vkms>` for more details. This is an ideal
internship task, since it only requires a virtual machine and can be sized to
fit the available time.
Contact: Daniel Vetter
Level: See details
Backlight Refactoring


@@ -1323,7 +1323,7 @@ ARC PGU DRM DRIVER
M: Alexey Brodkin <abrodkin@synopsys.com>
S: Supported
F: Documentation/devicetree/bindings/display/snps,arcpgu.txt
F: drivers/gpu/drm/arc/
F: drivers/gpu/drm/tiny/arcpgu.c
ARCNET NETWORK LAYER
M: Michael Grzeschik <m.grzeschik@pengutronix.de>
@@ -12267,7 +12267,7 @@ M: Stefan Agner <stefan@agner.ch>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/mxsfb.txt
F: Documentation/devicetree/bindings/display/fsl,lcdif.yaml
F: drivers/gpu/drm/mxsfb/
MYLEX DAC960 PCI RAID Controller


@@ -202,6 +202,18 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
return heap->priv;
}
/**
* dma_heap_get_name() - get heap name
* @heap: DMA-Heap to retrieve the name of
*
* Returns:
* The char* for the heap name.
*/
const char *dma_heap_get_name(struct dma_heap *heap)
{
return heap->name;
}
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
{
struct dma_heap *heap, *h, *err_ret;


@@ -339,6 +339,7 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
buffer->pagecount = pagecount;
/* create the dmabuf */
exp_info.exp_name = dma_heap_get_name(heap);
exp_info.ops = &cma_heap_buf_ops;
exp_info.size = buffer->len;
exp_info.flags = fd_flags;


@@ -390,6 +390,7 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
}
/* create the dmabuf */
exp_info.exp_name = dma_heap_get_name(heap);
exp_info.ops = &system_heap_buf_ops;
exp_info.size = buffer->len;
exp_info.flags = fd_flags;


@@ -352,8 +352,6 @@ source "drivers/gpu/drm/vc4/Kconfig"
source "drivers/gpu/drm/etnaviv/Kconfig"
source "drivers/gpu/drm/arc/Kconfig"
source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"


@@ -44,7 +44,8 @@ drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
drm_scdc_helper.o drm_gem_framebuffer_helper.o \
drm_scdc_helper.o drm_gem_atomic_helper.o \
drm_gem_framebuffer_helper.o \
drm_atomic_state_helper.o drm_damage_helper.o \
drm_format_helper.o drm_self_refresh_helper.o
@@ -110,7 +111,6 @@ obj-y += panel/
obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-$(CONFIG_DRM_ARCPGU)+= arc/
obj-y += hisilicon/
obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/


@@ -1066,7 +1066,7 @@ static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
return &adev->ddev;
}
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
{
return container_of(bdev, struct amdgpu_device, mman.bdev);
}


@@ -40,13 +40,13 @@ static atomic_t fence_seq = ATOMIC_INIT(0);
* All the BOs in a process share an eviction fence. When process X wants
* to map VRAM memory but TTM can't find enough space, TTM will attempt to
* evict BOs from its LRU list. TTM checks if the BO is valuable to evict
* by calling ttm_bo_driver->eviction_valuable().
* by calling ttm_device_funcs->eviction_valuable().
*
* ttm_bo_driver->eviction_valuable() - will return false if the BO belongs
* ttm_device_funcs->eviction_valuable() - will return false if the BO belongs
* to process X. Otherwise, it will return true to indicate BO can be
* evicted by TTM.
*
* If ttm_bo_driver->eviction_valuable returns true, then TTM will continue
* If ttm_device_funcs->eviction_valuable returns true, then TTM will continue
the eviction process for that BO by calling ttm_bo_evict --> amdgpu_bo_move
* --> amdgpu_copy_buffer(). This sets up job in GPU scheduler.
*
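(A minimal driver-side sketch of the renamed hook, using hypothetical foo_
names; ttm_bo_eviction_valuable() is TTM's generic default:)

        static bool foo_eviction_valuable(struct ttm_buffer_object *bo,
                                          const struct ttm_place *place)
        {
                /*
                 * Return false to veto eviction of this BO, e.g. for BOs
                 * belonging to the process being served, as described above.
                 */
                return ttm_bo_eviction_valuable(bo, place);
        }

        static struct ttm_device_funcs foo_bo_driver = {
                .eviction_valuable = foo_eviction_valuable,
        };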


@@ -119,6 +119,16 @@ void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
*/
#define ESTIMATE_PT_SIZE(mem_size) ((mem_size) >> 14)
static size_t amdgpu_amdkfd_acc_size(uint64_t size)
{
size >>= PAGE_SHIFT;
size *= sizeof(dma_addr_t) + sizeof(void *);
return __roundup_pow_of_two(sizeof(struct amdgpu_bo)) +
__roundup_pow_of_two(sizeof(struct ttm_tt)) +
PAGE_ALIGN(size);
}
static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
uint64_t size, u32 domain, bool sg)
{
@@ -127,8 +137,7 @@ static int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
size_t acc_size, system_mem_needed, ttm_mem_needed, vram_needed;
int ret = 0;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
sizeof(struct amdgpu_bo));
acc_size = amdgpu_amdkfd_acc_size(size);
vram_needed = 0;
if (domain == AMDGPU_GEM_DOMAIN_GTT) {
@@ -175,8 +184,7 @@ static void unreserve_mem_limit(struct amdgpu_device *adev,
{
size_t acc_size;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
sizeof(struct amdgpu_bo));
acc_size = amdgpu_amdkfd_acc_size(size);
spin_lock(&kfd_mem_limit.mem_limit_lock);
if (domain == AMDGPU_GEM_DOMAIN_GTT) {


@@ -487,7 +487,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission, amdgpu_job_hang_limit,
timeout, ring->name);
timeout, NULL, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);


@@ -71,7 +71,7 @@
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
struct page *dummy_page = ttm_bo_glob.dummy_read_page;
struct page *dummy_page = ttm_glob.dummy_read_page;
if (adev->dummy_page_addr)
return 0;


@@ -28,7 +28,7 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
@@ -41,7 +41,7 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
DRM_ERROR("ring %s timeout, but soft recovered\n",
s_job->sched->name);
return;
return DRM_GPU_SCHED_STAT_NOMINAL;
}
amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
@@ -53,10 +53,12 @@ static void amdgpu_job_timedout(struct drm_sched_job *s_job)
if (amdgpu_device_should_recover_gpu(ring->adev)) {
amdgpu_device_gpu_recover(ring->adev, job);
return DRM_GPU_SCHED_STAT_NOMINAL;
} else {
drm_sched_suspend_timeout(&ring->sched);
if (amdgpu_sriov_vf(adev))
adev->virt.tdr_debug = true;
return DRM_GPU_SCHED_STAT_NOMINAL;
}
}


@@ -523,7 +523,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
};
struct amdgpu_bo *bo;
unsigned long page_align, size = bp->size;
size_t acc_size;
int r;
/* Note that GDS/GWS/OA allocates 1 page per byte/resource. */
@@ -546,9 +545,6 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
*bo_ptr = NULL;
acc_size = ttm_bo_dma_acc_size(&adev->mman.bdev, size,
sizeof(struct amdgpu_bo));
bo = kzalloc(sizeof(struct amdgpu_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
@@ -577,8 +573,8 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
bo->tbo.priority = 1;
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
&bo->placement, page_align, &ctx, acc_size,
NULL, bp->resv, &amdgpu_bo_destroy);
&bo->placement, page_align, &ctx, NULL,
bp->resv, &amdgpu_bo_destroy);
if (unlikely(r != 0))
return r;


@@ -61,10 +61,10 @@
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm);
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
@@ -646,7 +646,7 @@ out:
*
* Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
*/
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct drm_mm_node *mm_node = mem->mm_node;
@@ -893,7 +893,7 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
*
* Called by amdgpu_ttm_backend_bind()
**/
static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -931,7 +931,7 @@ release_sg:
/*
* amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
*/
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -1015,7 +1015,7 @@ gart_bind_fail:
* Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
* This handles binding GTT memory to the device address space.
*/
static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
@@ -1155,7 +1155,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
* Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
* ttm_tt_destroy().
*/
static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -1180,7 +1180,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
gtt->bound = false;
}
static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1234,7 +1234,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
* Map the pages of a ttm_tt object to an address space visible
* to the underlying device.
*/
static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_operation_ctx *ctx)
{
@@ -1278,7 +1278,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
* Unmaps pages of a ttm_tt object from the device address space and
* unpopulates the page array backing it.
*/
static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1603,7 +1603,7 @@ amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
amdgpu_bo_move_notify(bo, false, NULL);
}
static struct ttm_bo_driver amdgpu_bo_driver = {
static struct ttm_device_funcs amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
@@ -1785,7 +1785,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
/* No others user of address space so set it to 0 */
r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
adev->need_swiotlb,
@@ -1926,7 +1926,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_bo_device_release(&adev->mman.bdev);
ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
}
@@ -2002,7 +2002,7 @@ unlock:
return ret;
}
static struct vm_operations_struct amdgpu_ttm_vm_ops = {
static const struct vm_operations_struct amdgpu_ttm_vm_ops = {
.fault = amdgpu_ttm_fault,
.open = ttm_bo_vm_open,
.close = ttm_bo_vm_close,


@@ -60,7 +60,7 @@ struct amdgpu_gtt_mgr {
};
struct amdgpu_mman {
struct ttm_bo_device bdev;
struct ttm_device bdev;
bool initialized;
void __iomem *aper_base_kaddr;


@@ -638,15 +638,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
spin_lock(&ttm_bo_glob.lru_lock);
spin_lock(&ttm_glob.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
spin_unlock(&ttm_bo_glob.lru_lock);
spin_unlock(&ttm_glob.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
spin_lock(&ttm_bo_glob.lru_lock);
spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -660,7 +660,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
&bo->shadow->tbo.mem,
&vm->lru_bulk_move);
}
spin_unlock(&ttm_bo_glob.lru_lock);
spin_unlock(&ttm_glob.lru_lock);
vm->bulk_moveable = true;
}


@@ -1862,7 +1862,6 @@ static int dce_v10_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1981,8 +1980,8 @@
#endif
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
drm_get_format_name(target_fb->format->format, &format_name));
DRM_ERROR("Unsupported screen format %p4cc\n",
&target_fb->format->format);
return -EINVAL;
}


@@ -1904,7 +1904,6 @@ static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 tmp, viewport_w, viewport_h;
int r;
bool bypass_lut = false;
struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -2023,8 +2022,8 @@
#endif
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
drm_get_format_name(target_fb->format->format, &format_name));
DRM_ERROR("Unsupported screen format %p4cc\n",
&target_fb->format->format);
return -EINVAL;
}


@@ -1820,7 +1820,6 @@ static int dce_v6_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1929,8 +1928,8 @@
#endif
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
drm_get_format_name(target_fb->format->format, &format_name));
DRM_ERROR("Unsupported screen format %p4cc\n",
&target_fb->format->format);
return -EINVAL;
}


@@ -1791,7 +1791,6 @@ static int dce_v8_0_crtc_do_set_base(struct drm_crtc *crtc,
u32 viewport_w, viewport_h;
int r;
bool bypass_lut = false;
struct drm_format_name_buf format_name;
/* no fb bound */
if (!atomic && !crtc->primary->fb) {
@@ -1902,8 +1901,8 @@
#endif
break;
default:
DRM_ERROR("Unsupported screen format %s\n",
drm_get_format_name(target_fb->format->format, &format_name));
DRM_ERROR("Unsupported screen format %p4cc\n",
&target_fb->format->format);
return -EINVAL;
}


@@ -4583,7 +4583,6 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
const struct drm_framebuffer *fb = plane_state->fb;
const struct amdgpu_framebuffer *afb =
to_amdgpu_framebuffer(plane_state->fb);
struct drm_format_name_buf format_name;
int ret;
memset(plane_info, 0, sizeof(*plane_info));
@@ -4631,8 +4630,8 @@
break;
default:
DRM_ERROR(
"Unsupported screen format %s\n",
drm_get_format_name(fb->format->format, &format_name));
"Unsupported screen format %p4cc\n",
&fb->format->format);
return -EINVAL;
}
@@ -6515,8 +6514,10 @@ static int dm_plane_helper_check_state(struct drm_plane_state *state,
}
static int dm_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct amdgpu_device *adev = drm_to_adev(plane->dev);
struct dc *dc = adev->dm.dc;
struct dm_plane_state *dm_plane_state;
@@ -6524,23 +6525,24 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *new_crtc_state;
int ret;
trace_amdgpu_dm_plane_atomic_check(state);
trace_amdgpu_dm_plane_atomic_check(new_plane_state);
dm_plane_state = to_dm_plane_state(state);
dm_plane_state = to_dm_plane_state(new_plane_state);
if (!dm_plane_state->dc_state)
return 0;
new_crtc_state =
drm_atomic_get_new_crtc_state(state->state, state->crtc);
drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
if (!new_crtc_state)
return -EINVAL;
ret = dm_plane_helper_check_state(state, new_crtc_state);
ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
if (ret)
return ret;
ret = fill_dc_scaling_info(state, &scaling_info);
ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
if (ret)
return ret;
@@ -6551,7 +6553,7 @@ static int dm_plane_atomic_check(struct drm_plane *plane,
}
static int dm_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *new_plane_state)
struct drm_atomic_state *state)
{
/* Only support async updates on cursor planes. */
if (plane->type != DRM_PLANE_TYPE_CURSOR)
@@ -6561,10 +6563,12 @@ static int dm_plane_atomic_async_check(struct drm_plane *plane,
}
static void dm_plane_atomic_async_update(struct drm_plane *plane,
struct drm_plane_state *new_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_plane_state *old_state =
drm_atomic_get_old_plane_state(new_state->state, plane);
drm_atomic_get_old_plane_state(state, plane);
trace_amdgpu_dm_atomic_update_cursor(new_state);


@@ -1,10 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_ARCPGU
tristate "ARC PGU"
depends on DRM && OF
select DRM_KMS_CMA_HELPER
select DRM_KMS_HELPER
help
Choose this option if you have an ARC PGU controller.
If M is selected the module will be called arcpgu.


@@ -1,3 +0,0 @@
# SPDX-License-Identifier: GPL-2.0-only
arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_sim.o arcpgu_drv.o
obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o


@@ -1,37 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ARC PGU DRM driver.
*
* Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
*/
#ifndef _ARCPGU_H_
#define _ARCPGU_H_
struct arcpgu_drm_private {
void __iomem *regs;
struct clk *clk;
struct drm_framebuffer *fb;
struct drm_crtc crtc;
struct drm_plane *plane;
};
#define crtc_to_arcpgu_priv(x) container_of(x, struct arcpgu_drm_private, crtc)
static inline void arc_pgu_write(struct arcpgu_drm_private *arcpgu,
unsigned int reg, u32 value)
{
iowrite32(value, arcpgu->regs + reg);
}
static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
unsigned int reg)
{
return ioread32(arcpgu->regs + reg);
}
int arc_pgu_setup_crtc(struct drm_device *dev);
int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np);
int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np);
#endif


@@ -1,217 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARC PGU DRM driver.
*
* Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <linux/clk.h>
#include <linux/platform_data/simplefb.h>
#include "arcpgu.h"
#include "arcpgu_regs.h"
#define ENCODE_PGU_XY(x, y) ((((x) - 1) << 16) | ((y) - 1))
static const u32 arc_pgu_supported_formats[] = {
DRM_FORMAT_RGB565,
DRM_FORMAT_XRGB8888,
DRM_FORMAT_ARGB8888,
};
static void arc_pgu_set_pxl_fmt(struct drm_crtc *crtc)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
const struct drm_framebuffer *fb = crtc->primary->state->fb;
uint32_t pixel_format = fb->format->format;
u32 format = DRM_FORMAT_INVALID;
int i;
u32 reg_ctrl;
for (i = 0; i < ARRAY_SIZE(arc_pgu_supported_formats); i++) {
if (arc_pgu_supported_formats[i] == pixel_format)
format = arc_pgu_supported_formats[i];
}
if (WARN_ON(format == DRM_FORMAT_INVALID))
return;
reg_ctrl = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
if (format == DRM_FORMAT_RGB565)
reg_ctrl &= ~ARCPGU_MODE_XRGB8888;
else
reg_ctrl |= ARCPGU_MODE_XRGB8888;
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, reg_ctrl);
}
static const struct drm_crtc_funcs arc_pgu_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
static enum drm_mode_status arc_pgu_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
long rate, clk_rate = mode->clock * 1000;
long diff = clk_rate / 200; /* +-0.5% allowed by HDMI spec */
rate = clk_round_rate(arcpgu->clk, clk_rate);
if ((max(rate, clk_rate) - min(rate, clk_rate) < diff) && (rate > 0))
return MODE_OK;
return MODE_NOCLOCK;
}
static void arc_pgu_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
struct drm_display_mode *m = &crtc->state->adjusted_mode;
u32 val;
arc_pgu_write(arcpgu, ARCPGU_REG_FMT,
ENCODE_PGU_XY(m->crtc_htotal, m->crtc_vtotal));
arc_pgu_write(arcpgu, ARCPGU_REG_HSYNC,
ENCODE_PGU_XY(m->crtc_hsync_start - m->crtc_hdisplay,
m->crtc_hsync_end - m->crtc_hdisplay));
arc_pgu_write(arcpgu, ARCPGU_REG_VSYNC,
ENCODE_PGU_XY(m->crtc_vsync_start - m->crtc_vdisplay,
m->crtc_vsync_end - m->crtc_vdisplay));
arc_pgu_write(arcpgu, ARCPGU_REG_ACTIVE,
ENCODE_PGU_XY(m->crtc_hblank_end - m->crtc_hblank_start,
m->crtc_vblank_end - m->crtc_vblank_start));
val = arc_pgu_read(arcpgu, ARCPGU_REG_CTRL);
if (m->flags & DRM_MODE_FLAG_PVSYNC)
val |= ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST;
else
val &= ~(ARCPGU_CTRL_VS_POL_MASK << ARCPGU_CTRL_VS_POL_OFST);
if (m->flags & DRM_MODE_FLAG_PHSYNC)
val |= ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST;
else
val &= ~(ARCPGU_CTRL_HS_POL_MASK << ARCPGU_CTRL_HS_POL_OFST);
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL, val);
arc_pgu_write(arcpgu, ARCPGU_REG_STRIDE, 0);
arc_pgu_write(arcpgu, ARCPGU_REG_START_SET, 1);
arc_pgu_set_pxl_fmt(crtc);
clk_set_rate(arcpgu->clk, m->crtc_clock * 1000);
}
static void arc_pgu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
clk_prepare_enable(arcpgu->clk);
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) |
ARCPGU_CTRL_ENABLE_MASK);
}
static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_atomic_state *state)
{
struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc);
clk_disable_unprepare(arcpgu->clk);
arc_pgu_write(arcpgu, ARCPGU_REG_CTRL,
arc_pgu_read(arcpgu, ARCPGU_REG_CTRL) &
~ARCPGU_CTRL_ENABLE_MASK);
}
static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
.mode_valid = arc_pgu_crtc_mode_valid,
.mode_set_nofb = arc_pgu_crtc_mode_set_nofb,
.atomic_enable = arc_pgu_crtc_atomic_enable,
.atomic_disable = arc_pgu_crtc_atomic_disable,
};
static void arc_pgu_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct arcpgu_drm_private *arcpgu;
struct drm_gem_cma_object *gem;
if (!plane->state->crtc || !plane->state->fb)
return;
arcpgu = crtc_to_arcpgu_priv(plane->state->crtc);
gem = drm_fb_cma_get_gem_obj(plane->state->fb, 0);
arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->paddr);
}
static const struct drm_plane_helper_funcs arc_pgu_plane_helper_funcs = {
.atomic_update = arc_pgu_plane_atomic_update,
};
static const struct drm_plane_funcs arc_pgu_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static struct drm_plane *arc_pgu_plane_init(struct drm_device *drm)
{
struct arcpgu_drm_private *arcpgu = drm->dev_private;
struct drm_plane *plane = NULL;
int ret;
plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
if (!plane)
return ERR_PTR(-ENOMEM);
ret = drm_universal_plane_init(drm, plane, 0xff, &arc_pgu_plane_funcs,
arc_pgu_supported_formats,
ARRAY_SIZE(arc_pgu_supported_formats),
NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret)
return ERR_PTR(ret);
drm_plane_helper_add(plane, &arc_pgu_plane_helper_funcs);
arcpgu->plane = plane;
return plane;
}
int arc_pgu_setup_crtc(struct drm_device *drm)
{
struct arcpgu_drm_private *arcpgu = drm->dev_private;
struct drm_plane *primary;
int ret;
primary = arc_pgu_plane_init(drm);
if (IS_ERR(primary))
return PTR_ERR(primary);
ret = drm_crtc_init_with_planes(drm, &arcpgu->crtc, primary, NULL,
&arc_pgu_crtc_funcs, NULL);
if (ret) {
drm_plane_cleanup(primary);
return ret;
}
drm_crtc_helper_add(&arcpgu->crtc, &arc_pgu_crtc_helper_funcs);
return 0;
}


@@ -1,224 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARC PGU DRM driver.
*
* Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
*/
#include <linux/clk.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include "arcpgu.h"
#include "arcpgu_regs.h"
static const struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
static void arcpgu_setup_mode_config(struct drm_device *drm)
{
drm_mode_config_init(drm);
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = 1920;
drm->mode_config.max_height = 1080;
drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
}
DEFINE_DRM_GEM_CMA_FOPS(arcpgu_drm_ops);
static int arcpgu_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct arcpgu_drm_private *arcpgu;
struct device_node *encoder_node = NULL, *endpoint_node = NULL;
struct resource *res;
int ret;
arcpgu = devm_kzalloc(&pdev->dev, sizeof(*arcpgu), GFP_KERNEL);
if (arcpgu == NULL)
return -ENOMEM;
drm->dev_private = arcpgu;
arcpgu->clk = devm_clk_get(drm->dev, "pxlclk");
if (IS_ERR(arcpgu->clk))
return PTR_ERR(arcpgu->clk);
arcpgu_setup_mode_config(drm);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(arcpgu->regs))
return PTR_ERR(arcpgu->regs);
dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
arc_pgu_read(arcpgu, ARCPGU_REG_ID));
/* Get the optional framebuffer memory resource */
ret = of_reserved_mem_device_init(drm->dev);
if (ret && ret != -ENODEV)
return ret;
if (dma_set_mask_and_coherent(drm->dev, DMA_BIT_MASK(32)))
return -ENODEV;
if (arc_pgu_setup_crtc(drm) < 0)
return -ENODEV;
/*
* There is only one output port inside each device. It is linked with
* encoder endpoint.
*/
endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
if (endpoint_node) {
encoder_node = of_graph_get_remote_port_parent(endpoint_node);
of_node_put(endpoint_node);
}
if (encoder_node) {
ret = arcpgu_drm_hdmi_init(drm, encoder_node);
of_node_put(encoder_node);
if (ret < 0)
return ret;
} else {
dev_info(drm->dev, "no encoder found. Assumed virtual LCD on simulation platform\n");
ret = arcpgu_drm_sim_init(drm, NULL);
if (ret < 0)
return ret;
}
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
platform_set_drvdata(pdev, drm);
return 0;
}
static int arcpgu_unload(struct drm_device *drm)
{
drm_kms_helper_poll_fini(drm);
drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
return 0;
}
#ifdef CONFIG_DEBUG_FS
static int arcpgu_show_pxlclock(struct seq_file *m, void *arg)
{
struct drm_info_node *node = (struct drm_info_node *)m->private;
struct drm_device *drm = node->minor->dev;
struct arcpgu_drm_private *arcpgu = drm->dev_private;
unsigned long clkrate = clk_get_rate(arcpgu->clk);
unsigned long mode_clock = arcpgu->crtc.mode.crtc_clock * 1000;
seq_printf(m, "hw : %lu\n", clkrate);
seq_printf(m, "mode: %lu\n", mode_clock);
return 0;
}
static struct drm_info_list arcpgu_debugfs_list[] = {
{ "clocks", arcpgu_show_pxlclock, 0 },
};
static void arcpgu_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_create_files(arcpgu_debugfs_list,
ARRAY_SIZE(arcpgu_debugfs_list),
minor->debugfs_root, minor);
}
#endif
static const struct drm_driver arcpgu_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
.name = "arcpgu",
.desc = "ARC PGU Controller",
.date = "20160219",
.major = 1,
.minor = 0,
.patchlevel = 0,
.fops = &arcpgu_drm_ops,
DRM_GEM_CMA_DRIVER_OPS,
#ifdef CONFIG_DEBUG_FS
.debugfs_init = arcpgu_debugfs_init,
#endif
};
static int arcpgu_probe(struct platform_device *pdev)
{
struct drm_device *drm;
int ret;
drm = drm_dev_alloc(&arcpgu_drm_driver, &pdev->dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
ret = arcpgu_load(drm);
if (ret)
goto err_unref;
ret = drm_dev_register(drm, 0);
if (ret)
goto err_unload;
drm_fbdev_generic_setup(drm, 16);
return 0;
err_unload:
arcpgu_unload(drm);
err_unref:
drm_dev_put(drm);
return ret;
}
static int arcpgu_remove(struct platform_device *pdev)
{
struct drm_device *drm = platform_get_drvdata(pdev);
drm_dev_unregister(drm);
arcpgu_unload(drm);
drm_dev_put(drm);
return 0;
}
static const struct of_device_id arcpgu_of_table[] = {
{.compatible = "snps,arcpgu"},
{}
};
MODULE_DEVICE_TABLE(of, arcpgu_of_table);
static struct platform_driver arcpgu_platform_driver = {
.probe = arcpgu_probe,
.remove = arcpgu_remove,
.driver = {
.name = "arcpgu",
.of_match_table = arcpgu_of_table,
},
};
module_platform_driver(arcpgu_platform_driver);
MODULE_AUTHOR("Carlos Palminha <palminha@synopsys.com>");
MODULE_DESCRIPTION("ARC PGU DRM driver");
MODULE_LICENSE("GPL");


@@ -1,48 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARC PGU DRM driver.
*
* Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
*/
#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_device.h>
#include "arcpgu.h"
static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
{
struct drm_encoder *encoder;
struct drm_bridge *bridge;
int ret = 0;
encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
if (encoder == NULL)
return -ENOMEM;
/* Locate drm bridge from the hdmi encoder DT node */
bridge = of_drm_find_bridge(np);
if (!bridge)
return -EPROBE_DEFER;
encoder->possible_crtcs = 1;
encoder->possible_clones = 0;
ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret)
return ret;
/* Link drm_bridge to encoder */
ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
drm_encoder_cleanup(encoder);
return ret;
}


@@ -1,31 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* ARC PGU DRM driver.
*
* Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
*/
#ifndef _ARC_PGU_REGS_H_
#define _ARC_PGU_REGS_H_
#define ARCPGU_REG_CTRL 0x00
#define ARCPGU_REG_STAT 0x04
#define ARCPGU_REG_FMT 0x10
#define ARCPGU_REG_HSYNC 0x14
#define ARCPGU_REG_VSYNC 0x18
#define ARCPGU_REG_ACTIVE 0x1c
#define ARCPGU_REG_BUF0_ADDR 0x40
#define ARCPGU_REG_STRIDE 0x50
#define ARCPGU_REG_START_SET 0x84
#define ARCPGU_REG_ID 0x3FC
#define ARCPGU_CTRL_ENABLE_MASK 0x02
#define ARCPGU_CTRL_VS_POL_MASK 0x1
#define ARCPGU_CTRL_VS_POL_OFST 0x3
#define ARCPGU_CTRL_HS_POL_MASK 0x1
#define ARCPGU_CTRL_HS_POL_OFST 0x4
#define ARCPGU_MODE_XRGB8888 BIT(2)
#define ARCPGU_STAT_BUSY_MASK 0x02
#endif


@@ -1,108 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* ARC PGU DRM driver.
*
* Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
*/
#include <drm/drm_atomic_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_probe_helper.h>
#include "arcpgu.h"
#define XRES_DEF 640
#define YRES_DEF 480
#define XRES_MAX 8192
#define YRES_MAX 8192
struct arcpgu_drm_connector {
struct drm_connector connector;
};
static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
{
int count;
count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
return count;
}
static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
{
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
}
static const struct drm_connector_helper_funcs
arcpgu_drm_connector_helper_funcs = {
.get_modes = arcpgu_drm_connector_get_modes,
};
static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = arcpgu_drm_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
{
struct arcpgu_drm_connector *arcpgu_connector;
struct drm_encoder *encoder;
struct drm_connector *connector;
int ret;
encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
if (encoder == NULL)
return -ENOMEM;
encoder->possible_crtcs = 1;
encoder->possible_clones = 0;
ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs,
DRM_MODE_ENCODER_VIRTUAL, NULL);
if (ret)
return ret;
arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
GFP_KERNEL);
if (!arcpgu_connector) {
ret = -ENOMEM;
goto error_encoder_cleanup;
}
connector = &arcpgu_connector->connector;
drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
if (ret < 0) {
dev_err(drm->dev, "failed to initialize drm connector\n");
goto error_encoder_cleanup;
}
ret = drm_connector_attach_encoder(connector, encoder);
if (ret < 0) {
dev_err(drm->dev, "could not attach connector to encoder\n");
drm_connector_unregister(connector);
goto error_connector_cleanup;
}
return 0;
error_connector_cleanup:
drm_connector_cleanup(connector);
error_encoder_cleanup:
drm_encoder_cleanup(encoder);
return ret;
}


@@ -82,17 +82,6 @@ struct komeda_format_caps_table {
extern u64 komeda_supported_modifiers[];
static inline const char *komeda_get_format_name(u32 fourcc, u64 modifier)
{
struct drm_format_name_buf buf;
static char name[64];
snprintf(name, sizeof(name), "%s with modifier: 0x%llx.",
drm_get_format_name(fourcc, &buf), modifier);
return name;
}
const struct komeda_format_caps *
komeda_get_format_caps(struct komeda_format_caps_table *table,
u32 fourcc, u64 modifier);


@@ -276,8 +276,8 @@ bool komeda_fb_is_layer_supported(struct komeda_fb *kfb, u32 layer_type,
supported = komeda_format_mod_supported(&mdev->fmt_tbl, layer_type,
fourcc, modifier, rot);
if (!supported)
DRM_DEBUG_ATOMIC("Layer TYPE: %d doesn't support fb FMT: %s.\n",
layer_type, komeda_get_format_name(fourcc, modifier));
DRM_DEBUG_ATOMIC("Layer TYPE: %d doesn't support fb FMT: %p4cc with modifier: 0x%llx.\n",
layer_type, &fourcc, modifier);
return supported;
}


@@ -73,6 +73,7 @@ static const struct drm_driver komeda_kms_driver = {
static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
{
struct drm_device *dev = old_state->dev;
bool fence_cookie = dma_fence_begin_signalling();
drm_atomic_helper_commit_modeset_disables(dev, old_state);
@@ -85,6 +86,8 @@ static void komeda_kms_commit_tail(struct drm_atomic_state *old_state)
drm_atomic_helper_wait_for_flip_done(dev, old_state);
dma_fence_end_signalling(fence_cookie);
drm_atomic_helper_cleanup_planes(dev, old_state);
}


@@ -49,10 +49,8 @@
dflow->rot = drm_rotation_simplify(st->rotation, caps->supported_rots);
if (!has_bits(dflow->rot, caps->supported_rots)) {
DRM_DEBUG_ATOMIC("rotation(0x%x) isn't supported by %s.\n",
dflow->rot,
komeda_get_format_name(caps->fourcc,
fb->modifier));
DRM_DEBUG_ATOMIC("rotation(0x%x) isn't supported by %p4cc with modifier: 0x%llx.\n",
dflow->rot, &caps->fourcc, fb->modifier);
return -EINVAL;
}
@@ -71,20 +69,23 @@
*/
static int
komeda_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct komeda_plane *kplane = to_kplane(plane);
struct komeda_plane_state *kplane_st = to_kplane_st(state);
struct komeda_plane_state *kplane_st = to_kplane_st(new_plane_state);
struct komeda_layer *layer = kplane->layer;
struct drm_crtc_state *crtc_st;
struct komeda_crtc_state *kcrtc_st;
struct komeda_data_flow_cfg dflow;
int err;
if (!state->crtc || !state->fb)
if (!new_plane_state->crtc || !new_plane_state->fb)
return 0;
crtc_st = drm_atomic_get_crtc_state(state->state, state->crtc);
crtc_st = drm_atomic_get_crtc_state(state,
new_plane_state->crtc);
if (IS_ERR(crtc_st) || !crtc_st->enable) {
DRM_DEBUG_ATOMIC("Cannot update plane on a disabled CRTC.\n");
return -EINVAL;
@@ -96,7 +97,7 @@ komeda_plane_atomic_check(struct drm_plane *plane,
kcrtc_st = to_kcrtc_st(crtc_st);
err = komeda_plane_init_data_flow(state, kcrtc_st, &dflow);
err = komeda_plane_init_data_flow(new_plane_state, kcrtc_st, &dflow);
if (err)
return err;
@@ -115,7 +116,7 @@
*/
static void
komeda_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
}


@@ -229,12 +229,14 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
};
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
int i;
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
u32 src_h = state->src_h >> 16;
u32 src_h = new_plane_state->src_h >> 16;
/* only the HDLCD_REG_FB_LINE_COUNT register has a limit */
if (src_h >= HDLCD_MAX_YRES) {
@@ -242,23 +244,27 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
for_each_new_crtc_in_state(state->state, crtc, crtc_state, i) {
for_each_new_crtc_in_state(state, crtc, crtc_state,
i) {
/* we cannot disable the plane while the CRTC is active */
if (!state->fb && crtc_state->active)
if (!new_plane_state->fb && crtc_state->active)
return -EINVAL;
return drm_atomic_helper_check_plane_state(state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, true);
return drm_atomic_helper_check_plane_state(new_plane_state,
crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, true);
}
return 0;
}
static void hdlcd_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct drm_framebuffer *fb = plane->state->fb;
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = new_plane_state->fb;
struct hdlcd_drm_private *hdlcd;
u32 dest_h;
dma_addr_t scanout_start;
@ -266,8 +272,8 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
if (!fb)
return;
dest_h = drm_rect_height(&plane->state->dst);
scanout_start = drm_fb_cma_get_gem_addr(fb, plane->state, 0);
dest_h = drm_rect_height(&new_plane_state->dst);
scanout_start = drm_fb_cma_get_gem_addr(fb, new_plane_state, 0);
hdlcd = plane->dev->dev_private;
hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);


@@ -234,6 +234,7 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int i;
bool fence_cookie = dma_fence_begin_signalling();
pm_runtime_get_sync(drm->dev);
@@ -260,6 +261,8 @@ static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
malidp_atomic_commit_hw_done(state);
dma_fence_end_signalling(fence_cookie);
pm_runtime_put(drm->dev);
drm_atomic_helper_cleanup_planes(drm, state);


@@ -151,11 +151,8 @@
malidp_hw_get_format_id(&malidp->dev->hw->map, SE_MEMWRITE,
fb->format->format, !!fb->modifier);
if (mw_state->format == MALIDP_INVALID_FORMAT_ID) {
struct drm_format_name_buf format_name;
DRM_DEBUG_KMS("Invalid pixel format %s\n",
drm_get_format_name(fb->format->format,
&format_name));
DRM_DEBUG_KMS("Invalid pixel format %p4cc\n",
&fb->format->format);
return -EINVAL;
}


@@ -502,20 +502,22 @@ static void malidp_de_prefetch_settings(struct malidp_plane *mp,
}
static int malidp_de_plane_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct malidp_plane *mp = to_malidp_plane(plane);
struct malidp_plane_state *ms = to_malidp_plane_state(state);
bool rotated = state->rotation & MALIDP_ROTATED_MASK;
struct malidp_plane_state *ms = to_malidp_plane_state(new_plane_state);
bool rotated = new_plane_state->rotation & MALIDP_ROTATED_MASK;
struct drm_framebuffer *fb;
u16 pixel_alpha = state->pixel_blend_mode;
u16 pixel_alpha = new_plane_state->pixel_blend_mode;
int i, ret;
unsigned int block_w, block_h;
if (!state->crtc || WARN_ON(!state->fb))
if (!new_plane_state->crtc || WARN_ON(!new_plane_state->fb))
return 0;
fb = state->fb;
fb = new_plane_state->fb;
ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
mp->layer->id, fb->format->format,
@@ -541,15 +543,15 @@ static int malidp_de_plane_check(struct drm_plane *plane,
DRM_DEBUG_KMS("Buffer width/height needs to be a multiple of tile sizes");
return -EINVAL;
}
if ((state->src_x >> 16) % block_w || (state->src_y >> 16) % block_h) {
if ((new_plane_state->src_x >> 16) % block_w || (new_plane_state->src_y >> 16) % block_h) {
DRM_DEBUG_KMS("Plane src_x/src_y needs to be a multiple of tile sizes");
return -EINVAL;
}
if ((state->crtc_w > mp->hwdev->max_line_size) ||
(state->crtc_h > mp->hwdev->max_line_size) ||
(state->crtc_w < mp->hwdev->min_line_size) ||
(state->crtc_h < mp->hwdev->min_line_size))
if ((new_plane_state->crtc_w > mp->hwdev->max_line_size) ||
(new_plane_state->crtc_h > mp->hwdev->max_line_size) ||
(new_plane_state->crtc_w < mp->hwdev->min_line_size) ||
(new_plane_state->crtc_h < mp->hwdev->min_line_size))
return -EINVAL;
/*
@@ -559,15 +561,15 @@
*/
if (ms->n_planes == 3 &&
!(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
(state->fb->pitches[1] != state->fb->pitches[2]))
(new_plane_state->fb->pitches[1] != new_plane_state->fb->pitches[2]))
return -EINVAL;
ret = malidp_se_check_scaling(mp, state);
ret = malidp_se_check_scaling(mp, new_plane_state);
if (ret)
return ret;
/* validate the rotation constraints for each layer */
if (state->rotation != DRM_MODE_ROTATE_0) {
if (new_plane_state->rotation != DRM_MODE_ROTATE_0) {
if (mp->layer->rot == ROTATE_NONE)
return -EINVAL;
if ((mp->layer->rot == ROTATE_COMPRESSED) && !(fb->modifier))
@@ -588,11 +590,11 @@
}
ms->rotmem_size = 0;
if (state->rotation & MALIDP_ROTATED_MASK) {
if (new_plane_state->rotation & MALIDP_ROTATED_MASK) {
int val;
val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
state->crtc_h,
val = mp->hwdev->hw->rotmem_required(mp->hwdev, new_plane_state->crtc_w,
new_plane_state->crtc_h,
fb->format->format,
!!(fb->modifier));
if (val < 0)
@@ -602,7 +604,7 @@
}
/* HW can't support plane + pixel blending */
if ((state->alpha != DRM_BLEND_ALPHA_OPAQUE) &&
if ((new_plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE) &&
(pixel_alpha != DRM_MODE_BLEND_PIXEL_NONE) &&
fb->format->has_alpha)
return -EINVAL;
@@ -789,13 +791,16 @@
}
static void malidp_de_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct malidp_plane *mp;
struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
struct drm_plane_state *state = plane->state;
u16 pixel_alpha = state->pixel_blend_mode;
u8 plane_alpha = state->alpha >> 8;
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
u16 pixel_alpha = new_state->pixel_blend_mode;
u8 plane_alpha = new_state->alpha >> 8;
u32 src_w, src_h, dest_w, dest_h, val;
int i;
struct drm_framebuffer *fb = plane->state->fb;
@@ -811,12 +816,12 @@
src_h = fb->height;
} else {
/* convert src values from Q16 fixed point to integer */
src_w = state->src_w >> 16;
src_h = state->src_h >> 16;
src_w = new_state->src_w >> 16;
src_h = new_state->src_h >> 16;
}
dest_w = state->crtc_w;
dest_h = state->crtc_h;
dest_w = new_state->crtc_w;
dest_h = new_state->crtc_h;
val = malidp_hw_read(mp->hwdev, mp->layer->base);
val = (val & ~LAYER_FORMAT_MASK) | ms->format;
@@ -828,7 +833,7 @@
malidp_de_set_mmu_control(mp, ms);
malidp_de_set_plane_pitches(mp, ms->n_planes,
state->fb->pitches);
new_state->fb->pitches);
if ((plane->state->color_encoding != old_state->color_encoding) ||
(plane->state->color_range != old_state->color_range))
@ -841,8 +846,8 @@ static void malidp_de_plane_update(struct drm_plane *plane,
malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
mp->layer->base + MALIDP_LAYER_COMP_SIZE);
malidp_hw_write(mp->hwdev, LAYER_H_VAL(state->crtc_x) |
LAYER_V_VAL(state->crtc_y),
malidp_hw_write(mp->hwdev, LAYER_H_VAL(new_state->crtc_x) |
LAYER_V_VAL(new_state->crtc_y),
mp->layer->base + MALIDP_LAYER_OFFSET);
if (mp->layer->id == DE_SMART) {
@ -864,19 +869,19 @@ static void malidp_de_plane_update(struct drm_plane *plane,
val &= ~LAYER_ROT_MASK;
/* setup the rotation and axis flip bits */
if (state->rotation & DRM_MODE_ROTATE_MASK)
if (new_state->rotation & DRM_MODE_ROTATE_MASK)
val |= ilog2(plane->state->rotation & DRM_MODE_ROTATE_MASK) <<
LAYER_ROT_OFFSET;
if (state->rotation & DRM_MODE_REFLECT_X)
if (new_state->rotation & DRM_MODE_REFLECT_X)
val |= LAYER_H_FLIP;
if (state->rotation & DRM_MODE_REFLECT_Y)
if (new_state->rotation & DRM_MODE_REFLECT_Y)
val |= LAYER_V_FLIP;
val &= ~(LAYER_COMP_MASK | LAYER_PMUL_ENABLE | LAYER_ALPHA(0xff));
if (state->alpha != DRM_BLEND_ALPHA_OPAQUE) {
if (new_state->alpha != DRM_BLEND_ALPHA_OPAQUE) {
val |= LAYER_COMP_PLANE;
} else if (state->fb->format->has_alpha) {
} else if (new_state->fb->format->has_alpha) {
/* We only care about blend mode if the format has alpha */
switch (pixel_alpha) {
case DRM_MODE_BLEND_PREMULTI:
@ -890,9 +895,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
val |= LAYER_ALPHA(plane_alpha);
val &= ~LAYER_FLOWCFG(LAYER_FLOWCFG_MASK);
if (state->crtc) {
if (new_state->crtc) {
struct malidp_crtc_state *m =
to_malidp_crtc_state(state->crtc->state);
to_malidp_crtc_state(new_state->crtc->state);
if (m->scaler_config.scale_enable &&
m->scaler_config.plane_src_id == mp->layer->id)
@ -907,7 +912,7 @@ static void malidp_de_plane_update(struct drm_plane *plane,
}
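For reference, the pattern all of these drivers convert to is the same: atomic plane helpers now take the top-level struct drm_atomic_state and look up their own old/new plane state from it. A minimal sketch (the example_* names are placeholders, not part of this diff):

#include <drm/drm_atomic.h>
#include <drm/drm_plane.h>

static void example_plane_atomic_update(struct drm_plane *plane,
					struct drm_atomic_state *state)
{
	/* Both states are looked up from the global atomic state... */
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);

	/* ...instead of reading plane->state or an old_state argument. */
	if (!new_state->fb || !new_state->crtc)
		return;

	/* Program the hardware from new_state, diffing against old_state
	 * to skip registers whose values did not change. */
	if (old_state->fb != new_state->fb) {
		/* e.g. update scanout addresses */
	}
}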
static void malidp_de_plane_disable(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct malidp_plane *mp = to_malidp_plane(plane);

View File

@ -66,9 +66,12 @@ static inline u32 armada_csc(struct drm_plane_state *state)
/* === Plane support === */
static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *state = plane->state;
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct armada_crtc *dcrtc;
struct armada_regs *regs;
unsigned int idx;
@ -76,62 +79,64 @@ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
if (!state->fb || WARN_ON(!state->crtc))
if (!new_state->fb || WARN_ON(!new_state->crtc))
return;
DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
plane->base.id, plane->name,
state->crtc->base.id, state->crtc->name,
state->fb->base.id,
old_state->visible, state->visible);
new_state->crtc->base.id, new_state->crtc->name,
new_state->fb->base.id,
old_state->visible, new_state->visible);
dcrtc = drm_to_armada_crtc(state->crtc);
dcrtc = drm_to_armada_crtc(new_state->crtc);
regs = dcrtc->regs + dcrtc->regs_idx;
idx = 0;
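	/*
	 * armada_reg_queue_set/mod only append register updates to the
	 * regs[] array at position idx; nothing touches the hardware
	 * here. The queued list is applied by the CRTC code afterwards
	 * (outside this hunk), which is what keeps the update atomic.
	 */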
if (!old_state->visible && state->visible)
if (!old_state->visible && new_state->visible)
armada_reg_queue_mod(regs, idx,
0, CFG_PDWN16x66 | CFG_PDWN32x66,
LCD_SPU_SRAM_PARA1);
val = armada_src_hw(state);
val = armada_src_hw(new_state);
if (armada_src_hw(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_HPXL_VLN);
val = armada_dst_yx(state);
val = armada_dst_yx(new_state);
if (armada_dst_yx(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_OVSA_HPXL_VLN);
val = armada_dst_hw(state);
val = armada_dst_hw(new_state);
if (armada_dst_hw(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_DZM_HPXL_VLN);
/* FIXME: overlay on an interlaced display */
if (old_state->src.x1 != state->src.x1 ||
old_state->src.y1 != state->src.y1 ||
old_state->fb != state->fb ||
state->crtc->state->mode_changed) {
if (old_state->src.x1 != new_state->src.x1 ||
old_state->src.y1 != new_state->src.y1 ||
old_state->fb != new_state->fb ||
new_state->crtc->state->mode_changed) {
const struct drm_format_info *format;
u16 src_x;
armada_reg_queue_set(regs, idx, armada_addr(state, 0, 0),
armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 0),
LCD_SPU_DMA_START_ADDR_Y0);
armada_reg_queue_set(regs, idx, armada_addr(state, 0, 1),
armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 1),
LCD_SPU_DMA_START_ADDR_U0);
armada_reg_queue_set(regs, idx, armada_addr(state, 0, 2),
armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 2),
LCD_SPU_DMA_START_ADDR_V0);
armada_reg_queue_set(regs, idx, armada_addr(state, 1, 0),
armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 0),
LCD_SPU_DMA_START_ADDR_Y1);
armada_reg_queue_set(regs, idx, armada_addr(state, 1, 1),
armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 1),
LCD_SPU_DMA_START_ADDR_U1);
armada_reg_queue_set(regs, idx, armada_addr(state, 1, 2),
armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 2),
LCD_SPU_DMA_START_ADDR_V1);
val = armada_pitch(state, 0) << 16 | armada_pitch(state, 0);
val = armada_pitch(new_state, 0) << 16 | armada_pitch(new_state,
0);
armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_YC);
val = armada_pitch(state, 1) << 16 | armada_pitch(state, 2);
val = armada_pitch(new_state, 1) << 16 | armada_pitch(new_state,
2);
armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_UV);
cfg = CFG_DMA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
CFG_DMA_MOD(drm_fb_to_armada_fb(state->fb)->mod) |
cfg = CFG_DMA_FMT(drm_fb_to_armada_fb(new_state->fb)->fmt) |
CFG_DMA_MOD(drm_fb_to_armada_fb(new_state->fb)->mod) |
CFG_CBSH_ENA;
if (state->visible)
if (new_state->visible)
cfg |= CFG_DMA_ENA;
/*
@ -139,28 +144,28 @@ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
* U/V planes to swap. Compensate for it by also toggling
* the UV swap.
*/
format = state->fb->format;
src_x = state->src.x1 >> 16;
format = new_state->fb->format;
src_x = new_state->src.x1 >> 16;
if (format->num_planes == 1 && src_x & (format->hsub - 1))
cfg ^= CFG_DMA_MOD(CFG_SWAPUV);
if (to_armada_plane_state(state)->interlace)
if (to_armada_plane_state(new_state)->interlace)
cfg |= CFG_DMA_FTOGGLE;
cfg_mask = CFG_CBSH_ENA | CFG_DMAFORMAT |
CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV |
CFG_SWAPYU | CFG_YUV2RGB) |
CFG_DMA_FTOGGLE | CFG_DMA_TSTMODE |
CFG_DMA_ENA;
} else if (old_state->visible != state->visible) {
cfg = state->visible ? CFG_DMA_ENA : 0;
} else if (old_state->visible != new_state->visible) {
cfg = new_state->visible ? CFG_DMA_ENA : 0;
cfg_mask = CFG_DMA_ENA;
} else {
cfg = cfg_mask = 0;
}
if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
if (drm_rect_width(&old_state->src) != drm_rect_width(&new_state->src) ||
drm_rect_width(&old_state->dst) != drm_rect_width(&new_state->dst)) {
cfg_mask |= CFG_DMA_HSMOOTH;
if (drm_rect_width(&state->src) >> 16 !=
drm_rect_width(&state->dst))
if (drm_rect_width(&new_state->src) >> 16 !=
drm_rect_width(&new_state->dst))
cfg |= CFG_DMA_HSMOOTH;
}
@ -168,41 +173,41 @@ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
LCD_SPU_DMA_CTRL0);
val = armada_spu_contrast(state);
if ((!old_state->visible && state->visible) ||
val = armada_spu_contrast(new_state);
if ((!old_state->visible && new_state->visible) ||
armada_spu_contrast(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_CONTRAST);
val = armada_spu_saturation(state);
if ((!old_state->visible && state->visible) ||
val = armada_spu_saturation(new_state);
if ((!old_state->visible && new_state->visible) ||
armada_spu_saturation(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_SATURATION);
if (!old_state->visible && state->visible)
if (!old_state->visible && new_state->visible)
armada_reg_queue_set(regs, idx, 0x00002000, LCD_SPU_CBSH_HUE);
val = armada_csc(state);
if ((!old_state->visible && state->visible) ||
val = armada_csc(new_state);
if ((!old_state->visible && new_state->visible) ||
armada_csc(old_state) != val)
armada_reg_queue_mod(regs, idx, val, CFG_CSC_MASK,
LCD_SPU_IOPAD_CONTROL);
val = drm_to_overlay_state(state)->colorkey_yr;
if ((!old_state->visible && state->visible) ||
val = drm_to_overlay_state(new_state)->colorkey_yr;
if ((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_yr != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_Y);
val = drm_to_overlay_state(state)->colorkey_ug;
if ((!old_state->visible && state->visible) ||
val = drm_to_overlay_state(new_state)->colorkey_ug;
if ((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_ug != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_U);
val = drm_to_overlay_state(state)->colorkey_vb;
if ((!old_state->visible && state->visible) ||
val = drm_to_overlay_state(new_state)->colorkey_vb;
if ((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_vb != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_V);
val = drm_to_overlay_state(state)->colorkey_mode;
if ((!old_state->visible && state->visible) ||
val = drm_to_overlay_state(new_state)->colorkey_mode;
if ((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_mode != val)
armada_reg_queue_mod(regs, idx, val, CFG_CKMODE_MASK |
CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
LCD_SPU_DMA_CTRL1);
val = drm_to_overlay_state(state)->colorkey_enable;
if (((!old_state->visible && state->visible) ||
val = drm_to_overlay_state(new_state)->colorkey_enable;
if (((!old_state->visible && new_state->visible) ||
drm_to_overlay_state(old_state)->colorkey_enable != val) &&
dcrtc->variant->has_spu_adv_reg)
armada_reg_queue_mod(regs, idx, val, ADV_GRACOLORKEY |
@ -212,8 +217,10 @@ static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
}
static void armada_drm_overlay_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct armada_crtc *dcrtc;
struct armada_regs *regs;
unsigned int idx = 0;

View File

@ -106,59 +106,67 @@ void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
}
int armada_drm_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct armada_plane_state *st = to_armada_plane_state(state);
struct drm_crtc *crtc = state->crtc;
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct armada_plane_state *st = to_armada_plane_state(new_plane_state);
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
bool interlace;
int ret;
if (!state->fb || WARN_ON(!state->crtc)) {
state->visible = false;
if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc)) {
new_plane_state->visible = false;
return 0;
}
if (state->state)
crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
if (state)
crtc_state = drm_atomic_get_existing_crtc_state(state,
crtc);
else
crtc_state = crtc->state;
ret = drm_atomic_helper_check_plane_state(state, crtc_state, 0,
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
0,
INT_MAX, true, false);
if (ret)
return ret;
interlace = crtc_state->adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE;
if (interlace) {
if ((state->dst.y1 | state->dst.y2) & 1)
if ((new_plane_state->dst.y1 | new_plane_state->dst.y2) & 1)
return -EINVAL;
st->src_hw = drm_rect_height(&state->src) >> 17;
st->dst_yx = state->dst.y1 >> 1;
st->dst_hw = drm_rect_height(&state->dst) >> 1;
st->src_hw = drm_rect_height(&new_plane_state->src) >> 17;
st->dst_yx = new_plane_state->dst.y1 >> 1;
st->dst_hw = drm_rect_height(&new_plane_state->dst) >> 1;
} else {
st->src_hw = drm_rect_height(&state->src) >> 16;
st->dst_yx = state->dst.y1;
st->dst_hw = drm_rect_height(&state->dst);
st->src_hw = drm_rect_height(&new_plane_state->src) >> 16;
st->dst_yx = new_plane_state->dst.y1;
st->dst_hw = drm_rect_height(&new_plane_state->dst);
}
st->src_hw <<= 16;
st->src_hw |= drm_rect_width(&state->src) >> 16;
st->src_hw |= drm_rect_width(&new_plane_state->src) >> 16;
st->dst_yx <<= 16;
st->dst_yx |= state->dst.x1 & 0x0000ffff;
st->dst_yx |= new_plane_state->dst.x1 & 0x0000ffff;
st->dst_hw <<= 16;
st->dst_hw |= drm_rect_width(&state->dst) & 0x0000ffff;
st->dst_hw |= drm_rect_width(&new_plane_state->dst) & 0x0000ffff;
armada_drm_plane_calc(state, st->addrs, st->pitches, interlace);
armada_drm_plane_calc(new_plane_state, st->addrs, st->pitches,
interlace);
st->interlace = interlace;
return 0;
}
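The atomic_check side follows suit; the CRTC state now comes from the same atomic state object. A sketch under the same placeholder-name assumptions:

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>

static int example_plane_atomic_check(struct drm_plane *plane,
				      struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_plane_state->crtc) {
		new_plane_state->visible = false;
		return 0;
	}

	/* The CRTC state is fetched from the same top-level state. */
	crtc_state = drm_atomic_get_new_crtc_state(state,
						   new_plane_state->crtc);

	return drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						   DRM_PLANE_HELPER_NO_SCALING,
						   DRM_PLANE_HELPER_NO_SCALING,
						   false, true);
}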
static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *state = plane->state;
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct armada_crtc *dcrtc;
struct armada_regs *regs;
u32 cfg, cfg_mask, val;
@ -166,71 +174,72 @@ static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane,
DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
if (!state->fb || WARN_ON(!state->crtc))
if (!new_state->fb || WARN_ON(!new_state->crtc))
return;
DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
plane->base.id, plane->name,
state->crtc->base.id, state->crtc->name,
state->fb->base.id,
old_state->visible, state->visible);
new_state->crtc->base.id, new_state->crtc->name,
new_state->fb->base.id,
old_state->visible, new_state->visible);
dcrtc = drm_to_armada_crtc(state->crtc);
dcrtc = drm_to_armada_crtc(new_state->crtc);
regs = dcrtc->regs + dcrtc->regs_idx;
idx = 0;
if (!old_state->visible && state->visible) {
if (!old_state->visible && new_state->visible) {
val = CFG_PDWN64x66;
if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
if (drm_fb_to_armada_fb(new_state->fb)->fmt > CFG_420)
val |= CFG_PDWN256x24;
armada_reg_queue_mod(regs, idx, 0, val, LCD_SPU_SRAM_PARA1);
}
val = armada_src_hw(state);
val = armada_src_hw(new_state);
if (armada_src_hw(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_HPXL_VLN);
val = armada_dst_yx(state);
val = armada_dst_yx(new_state);
if (armada_dst_yx(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_OVSA_HPXL_VLN);
val = armada_dst_hw(state);
val = armada_dst_hw(new_state);
if (armada_dst_hw(old_state) != val)
armada_reg_queue_set(regs, idx, val, LCD_SPU_GZM_HPXL_VLN);
if (old_state->src.x1 != state->src.x1 ||
old_state->src.y1 != state->src.y1 ||
old_state->fb != state->fb ||
state->crtc->state->mode_changed) {
armada_reg_queue_set(regs, idx, armada_addr(state, 0, 0),
if (old_state->src.x1 != new_state->src.x1 ||
old_state->src.y1 != new_state->src.y1 ||
old_state->fb != new_state->fb ||
new_state->crtc->state->mode_changed) {
armada_reg_queue_set(regs, idx, armada_addr(new_state, 0, 0),
LCD_CFG_GRA_START_ADDR0);
armada_reg_queue_set(regs, idx, armada_addr(state, 1, 0),
armada_reg_queue_set(regs, idx, armada_addr(new_state, 1, 0),
LCD_CFG_GRA_START_ADDR1);
armada_reg_queue_mod(regs, idx, armada_pitch(state, 0), 0xffff,
armada_reg_queue_mod(regs, idx, armada_pitch(new_state, 0),
0xffff,
LCD_CFG_GRA_PITCH);
}
if (old_state->fb != state->fb ||
state->crtc->state->mode_changed) {
cfg = CFG_GRA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
CFG_GRA_MOD(drm_fb_to_armada_fb(state->fb)->mod);
if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
if (old_state->fb != new_state->fb ||
new_state->crtc->state->mode_changed) {
cfg = CFG_GRA_FMT(drm_fb_to_armada_fb(new_state->fb)->fmt) |
CFG_GRA_MOD(drm_fb_to_armada_fb(new_state->fb)->mod);
if (drm_fb_to_armada_fb(new_state->fb)->fmt > CFG_420)
cfg |= CFG_PALETTE_ENA;
if (state->visible)
if (new_state->visible)
cfg |= CFG_GRA_ENA;
if (to_armada_plane_state(state)->interlace)
if (to_armada_plane_state(new_state)->interlace)
cfg |= CFG_GRA_FTOGGLE;
cfg_mask = CFG_GRAFORMAT |
CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
CFG_SWAPYU | CFG_YUV2RGB) |
CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
CFG_GRA_ENA;
} else if (old_state->visible != state->visible) {
cfg = state->visible ? CFG_GRA_ENA : 0;
} else if (old_state->visible != new_state->visible) {
cfg = new_state->visible ? CFG_GRA_ENA : 0;
cfg_mask = CFG_GRA_ENA;
} else {
cfg = cfg_mask = 0;
}
if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
if (drm_rect_width(&old_state->src) != drm_rect_width(&new_state->src) ||
drm_rect_width(&old_state->dst) != drm_rect_width(&new_state->dst)) {
cfg_mask |= CFG_GRA_HSMOOTH;
if (drm_rect_width(&state->src) >> 16 !=
drm_rect_width(&state->dst))
if (drm_rect_width(&new_state->src) >> 16 !=
drm_rect_width(&new_state->dst))
cfg |= CFG_GRA_HSMOOTH;
}
@ -242,8 +251,10 @@ static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane,
}
static void armada_drm_primary_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct armada_crtc *dcrtc;
struct armada_regs *regs;
unsigned int idx = 0;

View File

@ -26,7 +26,7 @@ int armada_drm_plane_prepare_fb(struct drm_plane *plane,
void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
struct drm_plane_state *old_state);
int armada_drm_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state);
struct drm_atomic_state *state);
void armada_plane_reset(struct drm_plane *plane);
struct drm_plane_state *armada_plane_duplicate_state(struct drm_plane *plane);
void armada_plane_destroy_state(struct drm_plane *plane,

View File

@ -11,6 +11,11 @@ struct aspeed_gfx {
struct reset_control *rst;
struct regmap *scu;
u32 dac_reg;
u32 vga_scratch_reg;
u32 throd_val;
u32 scan_line_max;
struct drm_simple_display_pipe pipe;
struct drm_connector connector;
};
@ -100,6 +105,3 @@ int aspeed_gfx_create_output(struct drm_device *drm);
/* CRT_THROD */
#define CRT_THROD_LOW(x) (x)
#define CRT_THROD_HIGH(x) ((x) << 8)
/* Default Threshold Setting */
#define G5_CRT_THROD_VAL (CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3C))

View File

@ -9,8 +9,8 @@
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_panel.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_vblank.h>
@ -59,8 +59,8 @@ static void aspeed_gfx_enable_controller(struct aspeed_gfx *priv)
u32 ctrl1 = readl(priv->base + CRT_CTRL1);
u32 ctrl2 = readl(priv->base + CRT_CTRL2);
/* SCU2C: set DAC source for display output to Graphics CRT (GFX) */
regmap_update_bits(priv->scu, 0x2c, BIT(16), BIT(16));
/* Set DAC source for display output to Graphics CRT (GFX) */
regmap_update_bits(priv->scu, priv->dac_reg, BIT(16), BIT(16));
writel(ctrl1 | CRT_CTRL_EN, priv->base + CRT_CTRL1);
writel(ctrl2 | CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2);
@ -74,7 +74,7 @@ static void aspeed_gfx_disable_controller(struct aspeed_gfx *priv)
writel(ctrl1 & ~CRT_CTRL_EN, priv->base + CRT_CTRL1);
writel(ctrl2 & ~CRT_CTRL_DAC_EN, priv->base + CRT_CTRL2);
regmap_update_bits(priv->scu, 0x2c, BIT(16), 0);
regmap_update_bits(priv->scu, priv->dac_reg, BIT(16), 0);
}
static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv)
@ -127,7 +127,8 @@ static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv)
* Terminal Count: memory size of one scan line
*/
d_offset = m->hdisplay * bpp / 8;
t_count = (m->hdisplay * bpp + 127) / 128;
t_count = DIV_ROUND_UP(m->hdisplay * bpp, priv->scan_line_max);
writel(CRT_DISP_OFFSET(d_offset) | CRT_TERM_COUNT(t_count),
priv->base + CRT_OFFSET);
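/*
 * Worked example, assuming a hypothetical 1024-pixel-wide mode at
 * 32 bpp: on the AST2500, scan_line_max is 128, so
 * t_count = DIV_ROUND_UP(1024 * 32, 128) = 256, identical to the old
 * hard-coded (1024 * 32 + 127) / 128; on the AST2400 the divisor is
 * 64, giving 512 chunks per scan line.
 */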
@ -135,7 +136,7 @@ static void aspeed_gfx_crtc_mode_set_nofb(struct aspeed_gfx *priv)
* Threshold: FIFO thresholds of refill and stop (16 byte chunks
* per line, rounded up)
*/
writel(G5_CRT_THROD_VAL, priv->base + CRT_THROD);
writel(priv->throd_val, priv->base + CRT_THROD);
}
static void aspeed_gfx_pipe_enable(struct drm_simple_display_pipe *pipe,
@ -219,7 +220,7 @@ static const struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = {
.enable = aspeed_gfx_pipe_enable,
.disable = aspeed_gfx_pipe_disable,
.update = aspeed_gfx_pipe_update,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
.prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
.enable_vblank = aspeed_gfx_enable_vblank,
.disable_vblank = aspeed_gfx_disable_vblank,
};

View File

@ -7,6 +7,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@ -57,6 +58,34 @@
* which is available under NDA from ASPEED.
*/
struct aspeed_gfx_config {
u32 dac_reg; /* DAC register in SCU */
u32 vga_scratch_reg; /* VGA scratch register in SCU */
u32 throd_val; /* Default Threshold Setting */
u32 scan_line_max; /* Max memory size of one scan line */
};
static const struct aspeed_gfx_config ast2400_config = {
.dac_reg = 0x2c,
.vga_scratch_reg = 0x50,
.throd_val = CRT_THROD_LOW(0x1e) | CRT_THROD_HIGH(0x12),
.scan_line_max = 64,
};
static const struct aspeed_gfx_config ast2500_config = {
.dac_reg = 0x2c,
.vga_scratch_reg = 0x50,
.throd_val = CRT_THROD_LOW(0x24) | CRT_THROD_HIGH(0x3c),
.scan_line_max = 128,
};
static const struct of_device_id aspeed_gfx_match[] = {
{ .compatible = "aspeed,ast2400-gfx", .data = &ast2400_config },
{ .compatible = "aspeed,ast2500-gfx", .data = &ast2500_config },
{ },
};
MODULE_DEVICE_TABLE(of, aspeed_gfx_match);
static const struct drm_mode_config_funcs aspeed_gfx_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
@ -97,12 +126,13 @@ static irqreturn_t aspeed_gfx_irq_handler(int irq, void *data)
return IRQ_NONE;
}
static int aspeed_gfx_load(struct drm_device *drm)
{
struct platform_device *pdev = to_platform_device(drm->dev);
struct aspeed_gfx *priv = to_aspeed_gfx(drm);
struct device_node *np = pdev->dev.of_node;
const struct aspeed_gfx_config *config;
const struct of_device_id *match;
struct resource *res;
int ret;
@ -111,10 +141,23 @@ static int aspeed_gfx_load(struct drm_device *drm)
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
match = of_match_device(aspeed_gfx_match, &pdev->dev);
if (!match)
return -EINVAL;
config = match->data;
priv->dac_reg = config->dac_reg;
priv->vga_scratch_reg = config->vga_scratch_reg;
priv->throd_val = config->throd_val;
priv->scan_line_max = config->scan_line_max;
priv->scu = syscon_regmap_lookup_by_phandle(np, "syscon");
if (IS_ERR(priv->scu)) {
dev_err(&pdev->dev, "failed to find SCU regmap\n");
return PTR_ERR(priv->scu);
priv->scu = syscon_regmap_lookup_by_compatible("aspeed,ast2500-scu");
if (IS_ERR(priv->scu)) {
dev_err(&pdev->dev, "failed to find SCU regmap\n");
return PTR_ERR(priv->scu);
}
}
ret = of_reserved_mem_device_init(drm->dev);
@ -202,14 +245,6 @@ static const struct drm_driver aspeed_gfx_driver = {
.minor = 0,
};
static const struct of_device_id aspeed_gfx_match[] = {
{ .compatible = "aspeed,ast2500-gfx" },
{ }
};
#define ASPEED_SCU_VGA0 0x50
#define ASPEED_SCU_MISC_CTRL 0x2c
static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
@ -224,7 +259,7 @@ static ssize_t dac_mux_store(struct device *dev, struct device_attribute *attr,
if (val > 3)
return -EINVAL;
rc = regmap_update_bits(priv->scu, ASPEED_SCU_MISC_CTRL, 0x30000, val << 16);
rc = regmap_update_bits(priv->scu, priv->dac_reg, 0x30000, val << 16);
if (rc < 0)
return 0;
@ -237,7 +272,7 @@ static ssize_t dac_mux_show(struct device *dev, struct device_attribute *attr, c
u32 reg;
int rc;
rc = regmap_read(priv->scu, ASPEED_SCU_MISC_CTRL, &reg);
rc = regmap_read(priv->scu, priv->dac_reg, &reg);
if (rc)
return rc;
@ -252,7 +287,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
u32 reg;
int rc;
rc = regmap_read(priv->scu, ASPEED_SCU_VGA0, &reg);
rc = regmap_read(priv->scu, priv->vga_scratch_reg, &reg);
if (rc)
return rc;
@ -284,7 +319,7 @@ static int aspeed_gfx_probe(struct platform_device *pdev)
if (ret)
return ret;
dev_set_drvdata(&pdev->dev, priv);
platform_set_drvdata(pdev, priv);
ret = sysfs_create_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
if (ret)

View File

@ -3,7 +3,6 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ast-y := ast_cursor.o ast_drv.o ast_main.o ast_mm.o ast_mode.o ast_post.o \
ast_dp501.o
ast-y := ast_drv.o ast_main.o ast_mm.o ast_mode.o ast_post.o ast_dp501.o
obj-$(CONFIG_DRM_AST) := ast.o

View File

@ -1,286 +0,0 @@
/*
* Copyright 2012 Red Hat Inc.
* Parts based on xf86-video-ast
* Copyright (c) 2005 ASPEED Technology Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
#include "ast_drv.h"
static void ast_cursor_fini(struct ast_private *ast)
{
size_t i;
struct drm_gem_vram_object *gbo;
for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
gbo = ast->cursor.gbo[i];
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
}
static void ast_cursor_release(struct drm_device *dev, void *ptr)
{
struct ast_private *ast = to_ast_private(dev);
ast_cursor_fini(ast);
}
/*
* Allocate cursor BOs and pin them at the end of VRAM.
*/
int ast_cursor_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
size_t size, i;
struct drm_gem_vram_object *gbo;
int ret;
size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) {
gbo = drm_gem_vram_create(dev, size, 0);
if (IS_ERR(gbo)) {
ret = PTR_ERR(gbo);
goto err_drm_gem_vram_put;
}
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
if (ret) {
drm_gem_vram_put(gbo);
goto err_drm_gem_vram_put;
}
ast->cursor.gbo[i] = gbo;
}
return drmm_add_action_or_reset(dev, ast_cursor_release, NULL);
err_drm_gem_vram_put:
while (i) {
--i;
gbo = ast->cursor.gbo[i];
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
return ret;
}
static void update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int height)
{
union {
u32 ul;
u8 b[4];
} srcdata32[2], data32;
union {
u16 us;
u8 b[2];
} data16;
u32 csum = 0;
s32 alpha_dst_delta, last_alpha_dst_delta;
u8 __iomem *dstxor;
const u8 *srcxor;
int i, j;
u32 per_pixel_copy, two_pixel_copy;
alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
last_alpha_dst_delta = alpha_dst_delta - (width << 1);
srcxor = src;
dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
per_pixel_copy = width & 1;
two_pixel_copy = width >> 1;
for (j = 0; j < height; j++) {
for (i = 0; i < two_pixel_copy; i++) {
srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
writel(data32.ul, dstxor);
csum += data32.ul;
dstxor += 4;
srcxor += 8;
}
for (i = 0; i < per_pixel_copy; i++) {
srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
writew(data16.us, dstxor);
csum += (u32)data16.us;
dstxor += 2;
srcxor += 4;
}
dstxor += last_alpha_dst_delta;
}
/* write checksum + signature */
dst += AST_HWC_SIZE;
writel(csum, dst);
writel(width, dst + AST_HWC_SIGNATURE_SizeX);
writel(height, dst + AST_HWC_SIGNATURE_SizeY);
writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
}
int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb)
{
struct drm_device *dev = &ast->base;
struct drm_gem_vram_object *dst_gbo = ast->cursor.gbo[ast->cursor.next_index];
struct drm_gem_vram_object *src_gbo = drm_gem_vram_of_gem(fb->obj[0]);
struct dma_buf_map src_map, dst_map;
void __iomem *dst;
void *src;
int ret;
if (drm_WARN_ON_ONCE(dev, fb->width > AST_MAX_HWC_WIDTH) ||
drm_WARN_ON_ONCE(dev, fb->height > AST_MAX_HWC_HEIGHT))
return -EINVAL;
ret = drm_gem_vram_vmap(src_gbo, &src_map);
if (ret)
return ret;
src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
ret = drm_gem_vram_vmap(dst_gbo, &dst_map);
if (ret)
goto err_drm_gem_vram_vunmap;
dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
/* do data transfer to cursor BO */
update_cursor_image(dst, src, fb->width, fb->height);
drm_gem_vram_vunmap(dst_gbo, &dst_map);
drm_gem_vram_vunmap(src_gbo, &src_map);
return 0;
err_drm_gem_vram_vunmap:
drm_gem_vram_vunmap(src_gbo, &src_map);
return ret;
}
static void ast_cursor_set_base(struct ast_private *ast, u64 address)
{
u8 addr0 = (address >> 3) & 0xff;
u8 addr1 = (address >> 11) & 0xff;
u8 addr2 = (address >> 19) & 0xff;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
}
void ast_cursor_page_flip(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
struct drm_gem_vram_object *gbo;
s64 off;
gbo = ast->cursor.gbo[ast->cursor.next_index];
off = drm_gem_vram_offset(gbo);
if (drm_WARN_ON_ONCE(dev, off < 0))
return; /* Bug: we didn't pin the cursor HW BO to VRAM. */
ast_cursor_set_base(ast, off);
++ast->cursor.next_index;
ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo);
}
static void ast_cursor_set_location(struct ast_private *ast, u16 x, u16 y,
u8 x_offset, u8 y_offset)
{
u8 x0 = (x & 0x00ff);
u8 x1 = (x & 0x0f00) >> 8;
u8 y0 = (y & 0x00ff);
u8 y1 = (y & 0x0700) >> 8;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, x0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, x1);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, y0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1);
}
void ast_cursor_show(struct ast_private *ast, int x, int y,
unsigned int offset_x, unsigned int offset_y)
{
struct drm_device *dev = &ast->base;
struct drm_gem_vram_object *gbo = ast->cursor.gbo[ast->cursor.next_index];
struct dma_buf_map map;
u8 x_offset, y_offset;
u8 __iomem *dst;
u8 __iomem *sig;
u8 jreg;
int ret;
ret = drm_gem_vram_vmap(gbo, &map);
if (drm_WARN_ONCE(dev, ret, "drm_gem_vram_vmap() failed, ret=%d\n", ret))
return;
dst = map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
sig = dst + AST_HWC_SIZE;
writel(x, sig + AST_HWC_SIGNATURE_X);
writel(y, sig + AST_HWC_SIGNATURE_Y);
drm_gem_vram_vunmap(gbo, &map);
if (x < 0) {
x_offset = (-x) + offset_x;
x = 0;
} else {
x_offset = offset_x;
}
if (y < 0) {
y_offset = (-y) + offset_y;
y = 0;
} else {
y_offset = offset_y;
}
ast_cursor_set_location(ast, x, y, x_offset, y_offset);
/* dummy write to fire HWC */
jreg = 0x02 |
0x01; /* enable ARGB4444 cursor */
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg);
}
void ast_cursor_hide(struct ast_private *ast)
{
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
}

View File

@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
@ -138,6 +139,7 @@ static void ast_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
drm_atomic_helper_shutdown(dev);
}
static int ast_drm_freeze(struct drm_device *dev)

View File

@ -81,6 +81,9 @@ enum ast_tx_chip {
#define AST_DRAM_4Gx16 7
#define AST_DRAM_8Gx16 8
/*
* Cursor plane
*/
#define AST_MAX_HWC_WIDTH 64
#define AST_MAX_HWC_HEIGHT 64
@ -99,6 +102,28 @@ enum ast_tx_chip {
#define AST_HWC_SIGNATURE_HOTSPOTX 0x14
#define AST_HWC_SIGNATURE_HOTSPOTY 0x18
struct ast_cursor_plane {
struct drm_plane base;
struct {
struct drm_gem_vram_object *gbo;
struct dma_buf_map map;
u64 off;
} hwc[AST_DEFAULT_HWC_NUM];
unsigned int next_hwc_index;
};
static inline struct ast_cursor_plane *
to_ast_cursor_plane(struct drm_plane *plane)
{
return container_of(plane, struct ast_cursor_plane, base);
}
/*
* Connector with i2c channel
*/
struct ast_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
@ -116,6 +141,10 @@ to_ast_connector(struct drm_connector *connector)
return container_of(connector, struct ast_connector, base);
}
/*
* Device
*/
struct ast_private {
struct drm_device base;
@ -130,13 +159,8 @@ struct ast_private {
int fb_mtrr;
struct {
struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM];
unsigned int next_index;
} cursor;
struct drm_plane primary_plane;
struct drm_plane cursor_plane;
struct ast_cursor_plane cursor_plane;
struct drm_crtc crtc;
struct drm_encoder encoder;
struct ast_connector connector;
@ -179,6 +203,9 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
#define AST_IO_VGAIR1_VREFRESH BIT(3)
#define AST_IO_VGACRCB_HWC_ENABLED BIT(1)
#define AST_IO_VGACRCB_HWC_16BPP BIT(0) /* set: ARGB4444, cleared: 2bpp palette */
#define __ast_read(x) \
static inline u##x ast_read##x(struct ast_private *ast, u32 reg) { \
u##x val = 0;\
@ -314,12 +341,4 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata);
u8 ast_get_dp501_max_clk(struct drm_device *dev);
void ast_init_3rdtx(struct drm_device *dev);
/* ast_cursor.c */
int ast_cursor_init(struct ast_private *ast);
int ast_cursor_blit(struct ast_private *ast, struct drm_framebuffer *fb);
void ast_cursor_page_flip(struct ast_private *ast);
void ast_cursor_show(struct ast_private *ast, int x, int y,
unsigned int offset_x, unsigned int offset_y);
void ast_cursor_hide(struct ast_private *ast);
#endif

View File

@ -37,6 +37,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_plane_helper.h>
@ -535,48 +536,54 @@ static const uint32_t ast_primary_plane_formats[] = {
};
static int ast_primary_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_crtc_state *crtc_state;
struct ast_crtc_state *ast_crtc_state;
int ret;
if (!state->crtc)
if (!new_plane_state->crtc)
return 0;
crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
ret = drm_atomic_helper_check_plane_state(state, crtc_state,
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
false, true);
if (ret)
return ret;
if (!state->visible)
if (!new_plane_state->visible)
return 0;
ast_crtc_state = to_ast_crtc_state(crtc_state);
ast_crtc_state->format = state->fb->format;
ast_crtc_state->format = new_plane_state->fb->format;
return 0;
}
static void
ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_device *dev = plane->dev;
struct ast_private *ast = to_ast_private(dev);
struct drm_plane_state *state = plane->state;
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_gem_vram_object *gbo;
s64 gpu_addr;
struct drm_framebuffer *fb = state->fb;
struct drm_framebuffer *fb = new_state->fb;
struct drm_framebuffer *old_fb = old_state->fb;
if (!old_fb || (fb->format != old_fb->format)) {
struct drm_crtc_state *crtc_state = state->crtc->state;
struct drm_crtc_state *crtc_state = new_state->crtc->state;
struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc_state);
struct ast_vbios_mode_info *vbios_mode_info = &ast_crtc_state->vbios_mode_info;
@ -597,7 +604,7 @@ ast_primary_plane_helper_atomic_update(struct drm_plane *plane,
static void
ast_primary_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct ast_private *ast = to_ast_private(plane->dev);
@ -621,55 +628,161 @@ static const struct drm_plane_funcs ast_primary_plane_funcs = {
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
static int ast_primary_plane_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
struct drm_plane *primary_plane = &ast->primary_plane;
int ret;
ret = drm_universal_plane_init(dev, primary_plane, 0x01,
&ast_primary_plane_funcs,
ast_primary_plane_formats,
ARRAY_SIZE(ast_primary_plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
drm_err(dev, "drm_universal_plane_init() failed: %d\n", ret);
return ret;
}
drm_plane_helper_add(primary_plane, &ast_primary_plane_helper_funcs);
return 0;
}
/*
* Cursor plane
*/
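/*
 * Summary inferred from the code below: each ARGB8888 source channel is
 * truncated to its high nibble and two source pixels are packed per
 * 32-bit write, yielding the ARGB4444 layout the HWC expects; a
 * checksum plus the width/height/hotspot signature is then written
 * after the image data.
 */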
static void ast_update_cursor_image(u8 __iomem *dst, const u8 *src, int width, int height)
{
union {
u32 ul;
u8 b[4];
} srcdata32[2], data32;
union {
u16 us;
u8 b[2];
} data16;
u32 csum = 0;
s32 alpha_dst_delta, last_alpha_dst_delta;
u8 __iomem *dstxor;
const u8 *srcxor;
int i, j;
u32 per_pixel_copy, two_pixel_copy;
alpha_dst_delta = AST_MAX_HWC_WIDTH << 1;
last_alpha_dst_delta = alpha_dst_delta - (width << 1);
srcxor = src;
dstxor = (u8 *)dst + last_alpha_dst_delta + (AST_MAX_HWC_HEIGHT - height) * alpha_dst_delta;
per_pixel_copy = width & 1;
two_pixel_copy = width >> 1;
for (j = 0; j < height; j++) {
for (i = 0; i < two_pixel_copy; i++) {
srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
srcdata32[1].ul = *((u32 *)(srcxor + 4)) & 0xf0f0f0f0;
data32.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
data32.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
data32.b[2] = srcdata32[1].b[1] | (srcdata32[1].b[0] >> 4);
data32.b[3] = srcdata32[1].b[3] | (srcdata32[1].b[2] >> 4);
writel(data32.ul, dstxor);
csum += data32.ul;
dstxor += 4;
srcxor += 8;
}
for (i = 0; i < per_pixel_copy; i++) {
srcdata32[0].ul = *((u32 *)srcxor) & 0xf0f0f0f0;
data16.b[0] = srcdata32[0].b[1] | (srcdata32[0].b[0] >> 4);
data16.b[1] = srcdata32[0].b[3] | (srcdata32[0].b[2] >> 4);
writew(data16.us, dstxor);
csum += (u32)data16.us;
dstxor += 2;
srcxor += 4;
}
dstxor += last_alpha_dst_delta;
}
/* write checksum + signature */
dst += AST_HWC_SIZE;
writel(csum, dst);
writel(width, dst + AST_HWC_SIGNATURE_SizeX);
writel(height, dst + AST_HWC_SIGNATURE_SizeY);
writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX);
writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY);
}
static void ast_set_cursor_base(struct ast_private *ast, u64 address)
{
u8 addr0 = (address >> 3) & 0xff;
u8 addr1 = (address >> 11) & 0xff;
u8 addr2 = (address >> 19) & 0xff;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2);
}
static void ast_set_cursor_location(struct ast_private *ast, u16 x, u16 y,
u8 x_offset, u8 y_offset)
{
u8 x0 = (x & 0x00ff);
u8 x1 = (x & 0x0f00) >> 8;
u8 y0 = (y & 0x00ff);
u8 y1 = (y & 0x0700) >> 8;
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc2, x_offset);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc3, y_offset);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc4, x0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc5, x1);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc6, y0);
ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, y1);
}
static void ast_set_cursor_enabled(struct ast_private *ast, bool enabled)
{
static const u8 mask = (u8)~(AST_IO_VGACRCB_HWC_16BPP |
AST_IO_VGACRCB_HWC_ENABLED);
u8 vgacrcb = AST_IO_VGACRCB_HWC_16BPP;
if (enabled)
vgacrcb |= AST_IO_VGACRCB_HWC_ENABLED;
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, mask, vgacrcb);
}
static const uint32_t ast_cursor_plane_formats[] = {
DRM_FORMAT_ARGB8888,
};
static int
ast_cursor_plane_helper_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
struct drm_framebuffer *fb = new_state->fb;
struct drm_crtc *crtc = new_state->crtc;
struct ast_private *ast;
int ret;
if (!crtc || !fb)
return 0;
ast = to_ast_private(plane->dev);
ret = ast_cursor_blit(ast, fb);
if (ret)
return ret;
return 0;
}
static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc_state *crtc_state;
int ret;
if (!state->crtc)
if (!new_plane_state->crtc)
return 0;
crtc_state = drm_atomic_get_new_crtc_state(state->state, state->crtc);
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
ret = drm_atomic_helper_check_plane_state(state, crtc_state,
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
DRM_PLANE_HELPER_NO_SCALING,
DRM_PLANE_HELPER_NO_SCALING,
true, true);
if (ret)
return ret;
if (!state->visible)
if (!new_plane_state->visible)
return 0;
if (fb->width > AST_MAX_HWC_WIDTH || fb->height > AST_MAX_HWC_HEIGHT)
@ -680,51 +793,192 @@ static int ast_cursor_plane_helper_atomic_check(struct drm_plane *plane,
static void
ast_cursor_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane);
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
plane);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state);
struct drm_framebuffer *fb = new_state->fb;
struct ast_private *ast = to_ast_private(plane->dev);
struct dma_buf_map dst_map =
ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].map;
u64 dst_off =
ast_cursor_plane->hwc[ast_cursor_plane->next_hwc_index].off;
struct dma_buf_map src_map = shadow_plane_state->map[0];
unsigned int offset_x, offset_y;
u16 x, y;
u8 x_offset, y_offset;
u8 __iomem *dst;
u8 __iomem *sig;
const u8 *src;
offset_x = AST_MAX_HWC_WIDTH - fb->width;
offset_y = AST_MAX_HWC_WIDTH - fb->height;
src = src_map.vaddr; /* TODO: Use mapping abstraction properly */
dst = dst_map.vaddr_iomem; /* TODO: Use mapping abstraction properly */
sig = dst + AST_HWC_SIZE; /* TODO: Use mapping abstraction properly */
if (state->fb != old_state->fb) {
/* A new cursor image was installed. */
ast_cursor_page_flip(ast);
/*
 * Do the data transfer to the HW cursor BO. If a new cursor image was
 * installed, point the scanout engine at the buffer's offset (dst_off)
 * and page-flip to the next HWC buffer.
 */
ast_update_cursor_image(dst, src, fb->width, fb->height);
if (new_state->fb != old_state->fb) {
ast_set_cursor_base(ast, dst_off);
++ast_cursor_plane->next_hwc_index;
ast_cursor_plane->next_hwc_index %= ARRAY_SIZE(ast_cursor_plane->hwc);
}
ast_cursor_show(ast, state->crtc_x, state->crtc_y,
offset_x, offset_y);
/*
* Update location in HWC signature and registers.
*/
writel(new_state->crtc_x, sig + AST_HWC_SIGNATURE_X);
writel(new_state->crtc_y, sig + AST_HWC_SIGNATURE_Y);
offset_x = AST_MAX_HWC_WIDTH - fb->width;
offset_y = AST_MAX_HWC_HEIGHT - fb->height;
if (new_state->crtc_x < 0) {
x_offset = (-new_state->crtc_x) + offset_x;
x = 0;
} else {
x_offset = offset_x;
x = new_state->crtc_x;
}
if (new_state->crtc_y < 0) {
y_offset = (-new_state->crtc_y) + offset_y;
y = 0;
} else {
y_offset = offset_y;
y = new_state->crtc_y;
}
ast_set_cursor_location(ast, x, y, x_offset, y_offset);
/* Dummy write to enable the HWC and make the HW pick up the changes. */
ast_set_cursor_enabled(ast, true);
}
static void
ast_cursor_plane_helper_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct ast_private *ast = to_ast_private(plane->dev);
ast_cursor_hide(ast);
ast_set_cursor_enabled(ast, false);
}
static const struct drm_plane_helper_funcs ast_cursor_plane_helper_funcs = {
.prepare_fb = ast_cursor_plane_helper_prepare_fb,
.cleanup_fb = NULL, /* not required for cursor plane */
DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
.atomic_check = ast_cursor_plane_helper_atomic_check,
.atomic_update = ast_cursor_plane_helper_atomic_update,
.atomic_disable = ast_cursor_plane_helper_atomic_disable,
};
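The two shadow-plane macros are what make the driver's own prepare_fb obsolete: DRM_GEM_SHADOW_PLANE_HELPER_FUNCS plugs in prepare_fb/cleanup_fb implementations that vmap the framebuffer's GEM objects for the duration of the commit, and the mappings show up in the plane's drm_shadow_plane_state. A minimal consumer, sketched with placeholder names:

#include <drm/drm_atomic.h>
#include <drm/drm_gem_atomic_helper.h>

static void example_shadow_plane_atomic_update(struct drm_plane *plane,
					       struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_shadow_plane_state *shadow_plane_state =
		to_drm_shadow_plane_state(new_state);

	/* map[i] is the vmap'ed buffer of the framebuffer's color plane i. */
	const u8 *src = shadow_plane_state->map[0].vaddr;

	/* Blit from src into device memory, as the cursor code above does. */
	(void)src;
}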
static void ast_cursor_plane_destroy(struct drm_plane *plane)
{
struct ast_cursor_plane *ast_cursor_plane = to_ast_cursor_plane(plane);
size_t i;
struct drm_gem_vram_object *gbo;
struct dma_buf_map map;
for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) {
gbo = ast_cursor_plane->hwc[i].gbo;
map = ast_cursor_plane->hwc[i].map;
drm_gem_vram_vunmap(gbo, &map);
drm_gem_vram_unpin(gbo);
drm_gem_vram_put(gbo);
}
drm_plane_cleanup(plane);
}
static const struct drm_plane_funcs ast_cursor_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.destroy = ast_cursor_plane_destroy,
DRM_GEM_SHADOW_PLANE_FUNCS,
};
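/*
 * For reference, DRM_GEM_SHADOW_PLANE_FUNCS stands in for (as defined
 * in drm_gem_atomic_helper.h at this point in the series):
 *   .reset                  = drm_gem_reset_shadow_plane,
 *   .atomic_duplicate_state = drm_gem_duplicate_shadow_plane_state,
 *   .atomic_destroy_state   = drm_gem_destroy_shadow_plane_state,
 */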
static int ast_cursor_plane_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
struct ast_cursor_plane *ast_cursor_plane = &ast->cursor_plane;
struct drm_plane *cursor_plane = &ast_cursor_plane->base;
size_t size, i;
struct drm_gem_vram_object *gbo;
struct dma_buf_map map;
int ret;
s64 off;
/*
* Allocate backing storage for cursors. The BOs are permanently
* pinned to the top end of the VRAM.
*/
size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE);
for (i = 0; i < ARRAY_SIZE(ast_cursor_plane->hwc); ++i) {
gbo = drm_gem_vram_create(dev, size, 0);
if (IS_ERR(gbo)) {
ret = PTR_ERR(gbo);
goto err_hwc;
}
ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
if (ret)
goto err_drm_gem_vram_put;
ret = drm_gem_vram_vmap(gbo, &map);
if (ret)
goto err_drm_gem_vram_unpin;
off = drm_gem_vram_offset(gbo);
if (off < 0) {
ret = off;
goto err_drm_gem_vram_vunmap;
}
ast_cursor_plane->hwc[i].gbo = gbo;
ast_cursor_plane->hwc[i].map = map;
ast_cursor_plane->hwc[i].off = off;
}
/*
 * Create the cursor plane. The plane's destroy callback releases the
 * BOs of the backing storage.
 */
ret = drm_universal_plane_init(dev, cursor_plane, 0x01,
&ast_cursor_plane_funcs,
ast_cursor_plane_formats,
ARRAY_SIZE(ast_cursor_plane_formats),
NULL, DRM_PLANE_TYPE_CURSOR, NULL);
if (ret) {
drm_err(dev, "drm_universal_plane failed(): %d\n", ret);
goto err_hwc;
}
drm_plane_helper_add(cursor_plane, &ast_cursor_plane_helper_funcs);
return 0;
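	/*
	 * The error labels below sit inside the loop body: a failure
	 * first jumps in to release whatever the current iteration had
	 * already set up, then falls into the while (i) loop to unwind
	 * all previously initialized HWC buffers.
	 */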
err_hwc:
while (i) {
--i;
gbo = ast_cursor_plane->hwc[i].gbo;
map = ast_cursor_plane->hwc[i].map;
err_drm_gem_vram_vunmap:
drm_gem_vram_vunmap(gbo, &map);
err_drm_gem_vram_unpin:
drm_gem_vram_unpin(gbo);
err_drm_gem_vram_put:
drm_gem_vram_put(gbo);
}
return ret;
}
/*
* CRTC
*/
@ -917,7 +1171,7 @@ static int ast_crtc_init(struct drm_device *dev)
int ret;
ret = drm_crtc_init_with_planes(dev, crtc, &ast->primary_plane,
&ast->cursor_plane, &ast_crtc_funcs,
&ast->cursor_plane.base, &ast_crtc_funcs,
NULL);
if (ret)
return ret;
@ -1109,10 +1363,6 @@ int ast_mode_config_init(struct ast_private *ast)
struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
ret = ast_cursor_init(ast);
if (ret)
return ret;
ret = drmm_mode_config_init(dev);
if (ret)
return ret;
@ -1138,30 +1388,14 @@ int ast_mode_config_init(struct ast_private *ast)
dev->mode_config.helper_private = &ast_mode_config_helper_funcs;
memset(&ast->primary_plane, 0, sizeof(ast->primary_plane));
ret = drm_universal_plane_init(dev, &ast->primary_plane, 0x01,
&ast_primary_plane_funcs,
ast_primary_plane_formats,
ARRAY_SIZE(ast_primary_plane_formats),
NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
if (ret) {
drm_err(dev, "ast: drm_universal_plane_init() failed: %d\n", ret);
return ret;
}
drm_plane_helper_add(&ast->primary_plane,
&ast_primary_plane_helper_funcs);
ret = drm_universal_plane_init(dev, &ast->cursor_plane, 0x01,
&ast_cursor_plane_funcs,
ast_cursor_plane_formats,
ARRAY_SIZE(ast_cursor_plane_formats),
NULL, DRM_PLANE_TYPE_CURSOR, NULL);
if (ret) {
drm_err(dev, "drm_universal_plane_failed(): %d\n", ret);
ret = ast_primary_plane_init(ast);
if (ret)
return ret;
ret = ast_cursor_plane_init(ast);
if (ret)
return ret;
}
drm_plane_helper_add(&ast->cursor_plane,
&ast_cursor_plane_helper_funcs);
ast_crtc_init(dev);
ast_encoder_init(dev);

View File

@ -557,103 +557,10 @@ static irqreturn_t atmel_hlcdc_dc_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
struct atmel_hlcdc_dc_commit {
struct work_struct work;
struct drm_device *dev;
struct drm_atomic_state *state;
};
static void
atmel_hlcdc_dc_atomic_complete(struct atmel_hlcdc_dc_commit *commit)
{
struct drm_device *dev = commit->dev;
struct atmel_hlcdc_dc *dc = dev->dev_private;
struct drm_atomic_state *old_state = commit->state;
/* Apply the atomic update. */
drm_atomic_helper_commit_modeset_disables(dev, old_state);
drm_atomic_helper_commit_planes(dev, old_state, 0);
drm_atomic_helper_commit_modeset_enables(dev, old_state);
drm_atomic_helper_wait_for_vblanks(dev, old_state);
drm_atomic_helper_cleanup_planes(dev, old_state);
drm_atomic_state_put(old_state);
/* Complete the commit, wake up any waiter. */
spin_lock(&dc->commit.wait.lock);
dc->commit.pending = false;
wake_up_all_locked(&dc->commit.wait);
spin_unlock(&dc->commit.wait.lock);
kfree(commit);
}
static void atmel_hlcdc_dc_atomic_work(struct work_struct *work)
{
struct atmel_hlcdc_dc_commit *commit =
container_of(work, struct atmel_hlcdc_dc_commit, work);
atmel_hlcdc_dc_atomic_complete(commit);
}
static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool async)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
struct atmel_hlcdc_dc_commit *commit;
int ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
if (ret)
return ret;
/* Allocate the commit object. */
commit = kzalloc(sizeof(*commit), GFP_KERNEL);
if (!commit) {
ret = -ENOMEM;
goto error;
}
INIT_WORK(&commit->work, atmel_hlcdc_dc_atomic_work);
commit->dev = dev;
commit->state = state;
spin_lock(&dc->commit.wait.lock);
ret = wait_event_interruptible_locked(dc->commit.wait,
!dc->commit.pending);
if (ret == 0)
dc->commit.pending = true;
spin_unlock(&dc->commit.wait.lock);
if (ret)
goto err_free;
/* We have our own synchronization through the commit lock. */
BUG_ON(drm_atomic_helper_swap_state(state, false) < 0);
/* Swap state succeeded, this is the point of no return. */
drm_atomic_state_get(state);
if (async)
queue_work(dc->wq, &commit->work);
else
atmel_hlcdc_dc_atomic_complete(commit);
return 0;
err_free:
kfree(commit);
error:
drm_atomic_helper_cleanup_planes(dev, state);
return ret;
}
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = drm_gem_fb_create,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = atmel_hlcdc_dc_atomic_commit,
.atomic_commit = drm_atomic_helper_commit,
};
static int atmel_hlcdc_dc_modeset_init(struct drm_device *dev)
@ -712,11 +619,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
if (!dc)
return -ENOMEM;
dc->wq = alloc_ordered_workqueue("atmel-hlcdc-dc", 0);
if (!dc->wq)
return -ENOMEM;
init_waitqueue_head(&dc->commit.wait);
dc->desc = match->data;
dc->hlcdc = dev_get_drvdata(dev->dev->parent);
dev->dev_private = dc;
@ -724,7 +626,7 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
ret = clk_prepare_enable(dc->hlcdc->periph_clk);
if (ret) {
dev_err(dev->dev, "failed to enable periph_clk\n");
goto err_destroy_wq;
return ret;
}
pm_runtime_enable(dev->dev);
@ -761,9 +663,6 @@ err_periph_clk_disable:
pm_runtime_disable(dev->dev);
clk_disable_unprepare(dc->hlcdc->periph_clk);
err_destroy_wq:
destroy_workqueue(dc->wq);
return ret;
}
@ -771,7 +670,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
{
struct atmel_hlcdc_dc *dc = dev->dev_private;
flush_workqueue(dc->wq);
drm_kms_helper_poll_fini(dev);
drm_atomic_helper_shutdown(dev);
drm_mode_config_cleanup(dev);
@ -784,7 +682,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
pm_runtime_disable(dev->dev);
clk_disable_unprepare(dc->hlcdc->periph_clk);
destroy_workqueue(dc->wq);
}
static int atmel_hlcdc_dc_irq_postinstall(struct drm_device *dev)

View File

@ -331,9 +331,7 @@ struct atmel_hlcdc_dc_desc {
* @crtc: CRTC provided by the display controller
* @planes: instantiated planes
* @layers: active HLCDC layers
* @wq: display controller workqueue
* @suspend: used to store the HLCDC state when entering suspend
* @commit: used for async commit handling
*/
struct atmel_hlcdc_dc {
const struct atmel_hlcdc_dc_desc *desc;
@ -341,15 +339,10 @@ struct atmel_hlcdc_dc {
struct atmel_hlcdc *hlcdc;
struct drm_crtc *crtc;
struct atmel_hlcdc_layer *layers[ATMEL_HLCDC_MAX_LAYERS];
struct workqueue_struct *wq;
struct {
u32 imr;
struct drm_atomic_state *state;
} suspend;
struct {
wait_queue_head_t wait;
bool pending;
} commit;
};
extern struct atmel_hlcdc_formats atmel_hlcdc_plane_rgb_formats;

View File

@ -593,22 +593,23 @@ atmel_hlcdc_plane_update_disc_area(struct atmel_hlcdc_plane *plane,
}
static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
struct drm_plane_state *s)
struct drm_atomic_state *state)
{
struct drm_plane_state *s = drm_atomic_get_new_plane_state(state, p);
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
struct atmel_hlcdc_plane_state *state =
struct atmel_hlcdc_plane_state *hstate =
drm_plane_state_to_atmel_hlcdc_plane_state(s);
const struct atmel_hlcdc_layer_desc *desc = plane->layer.desc;
struct drm_framebuffer *fb = state->base.fb;
struct drm_framebuffer *fb = hstate->base.fb;
const struct drm_display_mode *mode;
struct drm_crtc_state *crtc_state;
int ret;
int i;
if (!state->base.crtc || WARN_ON(!fb))
if (!hstate->base.crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(s->state, s->crtc);
crtc_state = drm_atomic_get_existing_crtc_state(state, s->crtc);
mode = &crtc_state->adjusted_mode;
ret = drm_atomic_helper_check_plane_state(s, crtc_state,
@ -617,101 +618,101 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p,
if (ret || !s->visible)
return ret;
state->src_x = s->src.x1;
state->src_y = s->src.y1;
state->src_w = drm_rect_width(&s->src);
state->src_h = drm_rect_height(&s->src);
state->crtc_x = s->dst.x1;
state->crtc_y = s->dst.y1;
state->crtc_w = drm_rect_width(&s->dst);
state->crtc_h = drm_rect_height(&s->dst);
hstate->src_x = s->src.x1;
hstate->src_y = s->src.y1;
hstate->src_w = drm_rect_width(&s->src);
hstate->src_h = drm_rect_height(&s->src);
hstate->crtc_x = s->dst.x1;
hstate->crtc_y = s->dst.y1;
hstate->crtc_w = drm_rect_width(&s->dst);
hstate->crtc_h = drm_rect_height(&s->dst);
if ((state->src_x | state->src_y | state->src_w | state->src_h) &
if ((hstate->src_x | hstate->src_y | hstate->src_w | hstate->src_h) &
SUBPIXEL_MASK)
return -EINVAL;
state->src_x >>= 16;
state->src_y >>= 16;
state->src_w >>= 16;
state->src_h >>= 16;
hstate->src_x >>= 16;
hstate->src_y >>= 16;
hstate->src_w >>= 16;
hstate->src_h >>= 16;
state->nplanes = fb->format->num_planes;
if (state->nplanes > ATMEL_HLCDC_LAYER_MAX_PLANES)
hstate->nplanes = fb->format->num_planes;
if (hstate->nplanes > ATMEL_HLCDC_LAYER_MAX_PLANES)
return -EINVAL;
for (i = 0; i < state->nplanes; i++) {
for (i = 0; i < hstate->nplanes; i++) {
unsigned int offset = 0;
int xdiv = i ? fb->format->hsub : 1;
int ydiv = i ? fb->format->vsub : 1;
state->bpp[i] = fb->format->cpp[i];
if (!state->bpp[i])
hstate->bpp[i] = fb->format->cpp[i];
if (!hstate->bpp[i])
return -EINVAL;
switch (state->base.rotation & DRM_MODE_ROTATE_MASK) {
switch (hstate->base.rotation & DRM_MODE_ROTATE_MASK) {
case DRM_MODE_ROTATE_90:
offset = (state->src_y / ydiv) *
offset = (hstate->src_y / ydiv) *
fb->pitches[i];
offset += ((state->src_x + state->src_w - 1) /
xdiv) * state->bpp[i];
state->xstride[i] = -(((state->src_h - 1) / ydiv) *
offset += ((hstate->src_x + hstate->src_w - 1) /
xdiv) * hstate->bpp[i];
hstate->xstride[i] = -(((hstate->src_h - 1) / ydiv) *
fb->pitches[i]) -
(2 * state->bpp[i]);
state->pstride[i] = fb->pitches[i] - state->bpp[i];
(2 * hstate->bpp[i]);
hstate->pstride[i] = fb->pitches[i] - hstate->bpp[i];
break;
case DRM_MODE_ROTATE_180:
offset = ((state->src_y + state->src_h - 1) /
offset = ((hstate->src_y + hstate->src_h - 1) /
ydiv) * fb->pitches[i];
offset += ((state->src_x + state->src_w - 1) /
xdiv) * state->bpp[i];
state->xstride[i] = ((((state->src_w - 1) / xdiv) - 1) *
state->bpp[i]) - fb->pitches[i];
state->pstride[i] = -2 * state->bpp[i];
offset += ((hstate->src_x + hstate->src_w - 1) /
xdiv) * hstate->bpp[i];
hstate->xstride[i] = ((((hstate->src_w - 1) / xdiv) - 1) *
hstate->bpp[i]) - fb->pitches[i];
hstate->pstride[i] = -2 * hstate->bpp[i];
break;
case DRM_MODE_ROTATE_270:
offset = ((state->src_y + state->src_h - 1) /
offset = ((hstate->src_y + hstate->src_h - 1) /
ydiv) * fb->pitches[i];
offset += (state->src_x / xdiv) * state->bpp[i];
state->xstride[i] = ((state->src_h - 1) / ydiv) *
offset += (hstate->src_x / xdiv) * hstate->bpp[i];
hstate->xstride[i] = ((hstate->src_h - 1) / ydiv) *
fb->pitches[i];
state->pstride[i] = -fb->pitches[i] - state->bpp[i];
hstate->pstride[i] = -fb->pitches[i] - hstate->bpp[i];
break;
case DRM_MODE_ROTATE_0:
default:
offset = (state->src_y / ydiv) * fb->pitches[i];
offset += (state->src_x / xdiv) * state->bpp[i];
state->xstride[i] = fb->pitches[i] -
((state->src_w / xdiv) *
state->bpp[i]);
state->pstride[i] = 0;
offset = (hstate->src_y / ydiv) * fb->pitches[i];
offset += (hstate->src_x / xdiv) * hstate->bpp[i];
hstate->xstride[i] = fb->pitches[i] -
((hstate->src_w / xdiv) *
hstate->bpp[i]);
hstate->pstride[i] = 0;
break;
}
state->offsets[i] = offset + fb->offsets[i];
hstate->offsets[i] = offset + fb->offsets[i];
}
/*
* Swap width and height in case of 90 or 270 degrees rotation
*/
if (drm_rotation_90_or_270(state->base.rotation)) {
swap(state->src_w, state->src_h);
if (drm_rotation_90_or_270(hstate->base.rotation)) {
swap(hstate->src_w, hstate->src_h);
}
if (!desc->layout.size &&
(mode->hdisplay != state->crtc_w ||
mode->vdisplay != state->crtc_h))
(mode->hdisplay != hstate->crtc_w ||
mode->vdisplay != hstate->crtc_h))
return -EINVAL;
if ((state->crtc_h != state->src_h || state->crtc_w != state->src_w) &&
if ((hstate->crtc_h != hstate->src_h || hstate->crtc_w != hstate->src_w) &&
(!desc->layout.memsize ||
state->base.fb->format->has_alpha))
hstate->base.fb->format->has_alpha))
return -EINVAL;
return 0;
}
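This is the conversion pattern that repeats across the series: plane helper callbacks now receive the whole struct drm_atomic_state and look up their own old/new plane state from it, instead of being handed a lone struct drm_plane_state. A minimal sketch of the converted shape, assuming hypothetical foo_* names (needs <drm/drm_atomic.h>, <drm/drm_atomic_helper.h> and <drm/drm_plane_helper.h>):

static int foo_plane_atomic_check(struct drm_plane *plane,
                                  struct drm_atomic_state *state)
{
        /* Fetch this plane's new state from the global atomic state. */
        struct drm_plane_state *new_state =
                drm_atomic_get_new_plane_state(state, plane);
        struct drm_crtc_state *crtc_state;

        if (!new_state->crtc)
                return 0;

        /* The CRTC state comes from the same drm_atomic_state. */
        crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);

        return drm_atomic_helper_check_plane_state(new_state, crtc_state,
                                                   DRM_PLANE_HELPER_NO_SCALING,
                                                   DRM_PLANE_HELPER_NO_SCALING,
                                                   false, true);
}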
static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
@ -730,27 +731,29 @@ static void atmel_hlcdc_plane_atomic_disable(struct drm_plane *p,
}
static void atmel_hlcdc_plane_atomic_update(struct drm_plane *p,
struct drm_plane_state *old_s)
struct drm_atomic_state *state)
{
struct drm_plane_state *new_s = drm_atomic_get_new_plane_state(state,
p);
struct atmel_hlcdc_plane *plane = drm_plane_to_atmel_hlcdc_plane(p);
struct atmel_hlcdc_plane_state *state =
drm_plane_state_to_atmel_hlcdc_plane_state(p->state);
struct atmel_hlcdc_plane_state *hstate =
drm_plane_state_to_atmel_hlcdc_plane_state(new_s);
u32 sr;
if (!p->state->crtc || !p->state->fb)
if (!new_s->crtc || !new_s->fb)
return;
if (!state->base.visible) {
atmel_hlcdc_plane_atomic_disable(p, old_s);
if (!hstate->base.visible) {
atmel_hlcdc_plane_atomic_disable(p, state);
return;
}
atmel_hlcdc_plane_update_pos_and_size(plane, state);
atmel_hlcdc_plane_update_general_settings(plane, state);
atmel_hlcdc_plane_update_format(plane, state);
atmel_hlcdc_plane_update_clut(plane, state);
atmel_hlcdc_plane_update_buffers(plane, state);
atmel_hlcdc_plane_update_disc_area(plane, state);
atmel_hlcdc_plane_update_pos_and_size(plane, hstate);
atmel_hlcdc_plane_update_general_settings(plane, hstate);
atmel_hlcdc_plane_update_format(plane, hstate);
atmel_hlcdc_plane_update_clut(plane, hstate);
atmel_hlcdc_plane_update_buffers(plane, hstate);
atmel_hlcdc_plane_update_disc_area(plane, hstate);
/* Enable the overrun interrupts. */
atmel_hlcdc_layer_write_reg(&plane->layer, ATMEL_HLCDC_LAYER_IER,

View File

@ -2457,7 +2457,7 @@ clk_disable:
static int cdns_mhdp_remove(struct platform_device *pdev)
{
struct cdns_mhdp_device *mhdp = dev_get_drvdata(&pdev->dev);
struct cdns_mhdp_device *mhdp = platform_get_drvdata(pdev);
unsigned long timeout = msecs_to_jiffies(100);
bool stop_fw = false;
int ret;

View File

@ -52,6 +52,45 @@ void __drm_crtc_commit_free(struct kref *kref)
}
EXPORT_SYMBOL(__drm_crtc_commit_free);
/**
* drm_crtc_commit_wait - Waits for a commit to complete
* @commit: &drm_crtc_commit to wait for
*
* Waits for a given &drm_crtc_commit to be programmed into the
* hardware and flipped to.
*
* Returns:
*
* 0 on success, a negative error code otherwise.
*/
int drm_crtc_commit_wait(struct drm_crtc_commit *commit)
{
unsigned long timeout = 10 * HZ;
int ret;
if (!commit)
return 0;
ret = wait_for_completion_timeout(&commit->hw_done, timeout);
if (!ret) {
DRM_ERROR("hw_done timed out\n");
return -ETIMEDOUT;
}
/*
* Currently no support for overwriting flips, hence
* stall for previous one to execute completely.
*/
ret = wait_for_completion_timeout(&commit->flip_done, timeout);
if (!ret) {
DRM_ERROR("flip_done timed out\n");
return -ETIMEDOUT;
}
return 0;
}
EXPORT_SYMBOL(drm_crtc_commit_wait);
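The helper folds the two open-coded wait_for_completion_timeout() calls that used to live in drm_atomic_helper_wait_for_dependencies() (see the hunk further down) into one call. A hedged driver-side sketch, with a hypothetical my_* name:

/* Sketch: stall until the previous commit on this CRTC has finished. */
static void my_wait_for_previous_commit(struct drm_crtc *crtc,
                                        struct drm_crtc_state *old_crtc_state)
{
        int ret = drm_crtc_commit_wait(old_crtc_state->commit);

        if (ret)
                DRM_ERROR("[CRTC:%d:%s] commit wait timed out\n",
                          crtc->base.id, crtc->name);
}

Note that drm_crtc_commit_wait() accepts a NULL commit and returns 0, so callers no longer need an explicit !commit check.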
/**
* drm_atomic_state_default_release -
* release memory initialized by drm_atomic_state_init
@ -578,13 +617,9 @@ static int drm_atomic_plane_check(const struct drm_plane_state *old_plane_state,
ret = drm_plane_check_pixel_format(plane, fb->format->format,
fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %s, modifier 0x%llx\n",
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] invalid pixel format %p4cc, modifier 0x%llx\n",
plane->base.id, plane->name,
drm_get_format_name(fb->format->format,
&format_name),
fb->modifier);
&fb->format->format, fb->modifier);
return ret;
}

View File

@ -902,7 +902,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
if (!funcs || !funcs->atomic_check)
continue;
ret = funcs->atomic_check(plane, new_plane_state);
ret = funcs->atomic_check(plane, state);
if (ret) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
plane->base.id, plane->name);
@ -1742,7 +1742,7 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
return -EBUSY;
}
return funcs->atomic_async_check(plane, new_plane_state);
return funcs->atomic_async_check(plane, state);
}
EXPORT_SYMBOL(drm_atomic_helper_async_check);
@ -1772,7 +1772,7 @@ void drm_atomic_helper_async_commit(struct drm_device *dev,
struct drm_framebuffer *old_fb = plane->state->fb;
funcs = plane->helper_private;
funcs->atomic_async_update(plane, plane_state);
funcs->atomic_async_update(plane, state);
/*
* ->atomic_async_update() is supposed to update the
@ -2202,70 +2202,27 @@ void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *old_state)
struct drm_plane_state *old_plane_state;
struct drm_connector *conn;
struct drm_connector_state *old_conn_state;
struct drm_crtc_commit *commit;
int i;
long ret;
for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) {
commit = old_crtc_state->commit;
if (!commit)
continue;
ret = wait_for_completion_timeout(&commit->hw_done,
10*HZ);
if (ret == 0)
DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
crtc->base.id, crtc->name);
/* Currently no support for overwriting flips, hence
* stall for previous one to execute completely. */
ret = wait_for_completion_timeout(&commit->flip_done,
10*HZ);
if (ret == 0)
DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
ret = drm_crtc_commit_wait(old_crtc_state->commit);
if (ret)
DRM_ERROR("[CRTC:%d:%s] commit wait timed out\n",
crtc->base.id, crtc->name);
}
for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
commit = old_conn_state->commit;
if (!commit)
continue;
ret = wait_for_completion_timeout(&commit->hw_done,
10*HZ);
if (ret == 0)
DRM_ERROR("[CONNECTOR:%d:%s] hw_done timed out\n",
conn->base.id, conn->name);
/* Currently no support for overwriting flips, hence
* stall for previous one to execute completely. */
ret = wait_for_completion_timeout(&commit->flip_done,
10*HZ);
if (ret == 0)
DRM_ERROR("[CONNECTOR:%d:%s] flip_done timed out\n",
ret = drm_crtc_commit_wait(old_conn_state->commit);
if (ret)
DRM_ERROR("[CONNECTOR:%d:%s] commit wait timed out\n",
conn->base.id, conn->name);
}
for_each_old_plane_in_state(old_state, plane, old_plane_state, i) {
commit = old_plane_state->commit;
if (!commit)
continue;
ret = wait_for_completion_timeout(&commit->hw_done,
10*HZ);
if (ret == 0)
DRM_ERROR("[PLANE:%d:%s] hw_done timed out\n",
plane->base.id, plane->name);
/* Currently no support for overwriting flips, hence
* stall for previous one to execute completely. */
ret = wait_for_completion_timeout(&commit->flip_done,
10*HZ);
if (ret == 0)
DRM_ERROR("[PLANE:%d:%s] flip_done timed out\n",
ret = drm_crtc_commit_wait(old_plane_state->commit);
if (ret)
DRM_ERROR("[PLANE:%d:%s] commit wait timed out\n",
plane->base.id, plane->name);
}
}
@ -2571,9 +2528,9 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
no_disable)
continue;
funcs->atomic_disable(plane, old_plane_state);
funcs->atomic_disable(plane, old_state);
} else if (new_plane_state->crtc || disabling) {
funcs->atomic_update(plane, old_plane_state);
funcs->atomic_update(plane, old_state);
}
}
@ -2645,10 +2602,10 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
if (drm_atomic_plane_disabling(old_plane_state, new_plane_state) &&
plane_funcs->atomic_disable)
plane_funcs->atomic_disable(plane, old_plane_state);
plane_funcs->atomic_disable(plane, old_state);
else if (new_plane_state->crtc ||
drm_atomic_plane_disabling(old_plane_state, new_plane_state))
plane_funcs->atomic_update(plane, old_plane_state);
plane_funcs->atomic_update(plane, old_state);
}
if (crtc_funcs && crtc_funcs->atomic_flush)

View File

@ -735,11 +735,8 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
fb->format->format,
fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
drm_get_format_name(fb->format->format,
&format_name),
DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n",
&fb->format->format,
fb->modifier);
goto out;
}

View File

@ -1154,6 +1154,7 @@ static void build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE;
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
}
static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
@ -2303,11 +2304,9 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
if (port->pdt != DP_PEER_DEVICE_NONE &&
drm_dp_mst_is_end_device(port->pdt, port->mcs) &&
port->port_num >= DP_MST_LOGICAL_PORT_0) {
port->port_num >= DP_MST_LOGICAL_PORT_0)
port->cached_edid = drm_get_edid(port->connector,
&port->aux.ddc);
drm_connector_set_tile_property(port->connector);
}
drm_connector_register(port->connector);
return;
@ -2824,15 +2823,21 @@ static int set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr,
req_type = txmsg->msg[0] & 0x7f;
if (req_type == DP_CONNECTION_STATUS_NOTIFY ||
req_type == DP_RESOURCE_STATUS_NOTIFY)
req_type == DP_RESOURCE_STATUS_NOTIFY ||
req_type == DP_CLEAR_PAYLOAD_ID_TABLE)
hdr->broadcast = 1;
else
hdr->broadcast = 0;
hdr->path_msg = txmsg->path_msg;
hdr->lct = mstb->lct;
hdr->lcr = mstb->lct - 1;
if (mstb->lct > 1)
memcpy(hdr->rad, mstb->rad, mstb->lct / 2);
if (hdr->broadcast) {
hdr->lct = 1;
hdr->lcr = 6;
} else {
hdr->lct = mstb->lct;
hdr->lcr = mstb->lct - 1;
}
memcpy(hdr->rad, mstb->rad, hdr->lct / 2);
return 0;
}
@ -4234,9 +4239,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
case DP_PEER_DEVICE_SST_SINK:
ret = connector_status_connected;
/* for logical ports - cache the EDID */
if (port->port_num >= 8 && !port->cached_edid) {
if (port->port_num >= DP_MST_LOGICAL_PORT_0 && !port->cached_edid)
port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
}
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
@ -5121,11 +5125,16 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
if (!found)
return 0;
/* This should never happen, as it means we tried to
* set a mode before querying the full_pbn
/*
* This could happen if the sink deasserted its HPD line, but
* the branch device still reports it as attached (PDT != NONE).
*/
if (WARN_ON(!port->full_pbn))
if (!port->full_pbn) {
drm_dbg_atomic(port->mgr->dev,
"[MSTB:%p] [MST PORT:%p] no BW available for the port\n",
port->parent, port);
return -EINVAL;
}
pbn_used = vcpi->pbn;
} else {

View File

@ -61,7 +61,7 @@ static struct idr drm_minors_idr;
* prefer to embed struct drm_device into their own device
* structure and call drm_dev_init() themselves.
*/
static bool drm_core_init_complete = false;
static bool drm_core_init_complete;
static struct dentry *drm_debugfs_root;

View File

@ -177,11 +177,8 @@ static int framebuffer_check(struct drm_device *dev,
/* check if the format is supported at all */
if (!__drm_format_info(r->pixel_format)) {
struct drm_format_name_buf format_name;
DRM_DEBUG_KMS("bad framebuffer format %s\n",
drm_get_format_name(r->pixel_format,
&format_name));
DRM_DEBUG_KMS("bad framebuffer format %p4cc\n",
&r->pixel_format);
return -EINVAL;
}
@ -1160,14 +1157,12 @@ EXPORT_SYMBOL(drm_framebuffer_plane_height);
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb)
{
struct drm_format_name_buf format_name;
unsigned int i;
drm_printf_indent(p, indent, "allocated by = %s\n", fb->comm);
drm_printf_indent(p, indent, "refcount=%u\n",
drm_framebuffer_read_refcount(fb));
drm_printf_indent(p, indent, "format=%s\n",
drm_get_format_name(fb->format->format, &format_name));
drm_printf_indent(p, indent, "format=%p4cc\n", &fb->format->format);
drm_printf_indent(p, indent, "modifier=0x%llx\n", fb->modifier);
drm_printf_indent(p, indent, "size=%ux%u\n", fb->width, fb->height);
drm_printf_indent(p, indent, "layers:\n");

View File

@ -1212,6 +1212,7 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);
void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
@ -1224,6 +1225,7 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
/* Always set the mapping to NULL. Callers may rely on this. */
dma_buf_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);
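drm_gem_vmap() and drm_gem_vunmap() are exported here so the new shadow-plane helpers added below can map buffer objects from another module. The calling pattern, sketched under the assumption of an already-acquired struct drm_gem_object *obj:

struct dma_buf_map map;
int ret;

ret = drm_gem_vmap(obj, &map);
if (ret)
        return ret;

/* Access the buffer via map.vaddr (or map.vaddr_iomem if map.is_iomem). */

drm_gem_vunmap(obj, &map);      /* clears the mapping, see comment above */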
/**
* drm_gem_lock_reservations - Sets up the ww context and acquires

View File

@ -0,0 +1,432 @@
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/dma-resv.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_simple_kms_helper.h>
#include "drm_internal.h"
/**
* DOC: overview
*
* The GEM atomic helpers library implements generic atomic-commit
* functions for drivers that use GEM objects. Currently, it provides
* synchronization helpers, and plane state and framebuffer BO mappings
* for planes with shadow buffers.
*
* Before scanout, a plane's framebuffer needs to be synchronized with
* possible writers that draw into the framebuffer. All drivers should
* call drm_gem_plane_helper_prepare_fb() from their implementation of
* struct &drm_plane_helper_funcs.prepare_fb. It sets the plane's fence from
* the framebuffer so that the DRM core can synchronize access automatically.
*
* drm_gem_plane_helper_prepare_fb() can also be used directly as
* the implementation of prepare_fb. For drivers based on
* struct drm_simple_display_pipe, drm_gem_simple_display_pipe_prepare_fb()
* provides equivalent functionality.
*
* .. code-block:: c
*
* #include <drm/drm_gem_atomic_helper.h>
*
* struct drm_plane_helper_funcs driver_plane_helper_funcs = {
* ...,
* .prepare_fb = drm_gem_plane_helper_prepare_fb,
* };
*
* struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
* ...,
* .prepare_fb = drm_gem_simple_display_pipe_prepare_fb,
* };
*
* A driver using a shadow buffer copies the content of the shadow buffers
* into the HW's framebuffer memory during an atomic update. This requires
* a mapping of the shadow buffer into kernel address space. The mappings
* cannot be established by commit-tail functions, such as atomic_update,
* as this would violate locking rules around dma_buf_vmap().
*
* The helpers for shadow-buffered planes establish and release mappings,
* and provide struct drm_shadow_plane_state, which stores the plane's mapping
* for commit-tail functions.
*
* Shadow-buffered planes can easily be enabled by using the provided macros
* %DRM_GEM_SHADOW_PLANE_FUNCS and %DRM_GEM_SHADOW_PLANE_HELPER_FUNCS.
* These macros set up the plane and plane-helper callbacks to point to the
* shadow-buffer helpers.
*
* .. code-block:: c
*
* #include <drm/drm_gem_atomic_helper.h>
*
* struct drm_plane_funcs driver_plane_funcs = {
* ...,
* DRM_GEM_SHADOW_PLANE_FUNCS,
* };
*
* struct drm_plane_helper_funcs driver_plane_helper_funcs = {
* ...,
* DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,
* };
*
* In the driver's atomic-update function, shadow-buffer mappings are available
* from the plane state. Use to_drm_shadow_plane_state() to upcast from
* struct drm_plane_state.
*
* .. code-block:: c
*
* void driver_plane_atomic_update(struct drm_plane *plane,
* struct drm_plane_state *old_plane_state)
* {
* struct drm_plane_state *plane_state = plane->state;
* struct drm_shadow_plane_state *shadow_plane_state =
* to_drm_shadow_plane_state(plane_state);
*
* // access shadow buffer via shadow_plane_state->map
* }
*
* A mapping address for each of the framebuffer's buffer objects is stored in
* struct &drm_shadow_plane_state.map. The mappings are valid while the state
* is being used.
*
* Drivers that use struct drm_simple_display_pipe can use
* %DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS to initialize the respective
* callbacks. Access to shadow-buffer mappings is similar to regular
* atomic_update.
*
* .. code-block:: c
*
* struct drm_simple_display_pipe_funcs driver_pipe_funcs = {
* ...,
* DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
* };
*
* void driver_pipe_enable(struct drm_simple_display_pipe *pipe,
* struct drm_crtc_state *crtc_state,
* struct drm_plane_state *plane_state)
* {
* struct drm_shadow_plane_state *shadow_plane_state =
* to_drm_shadow_plane_state(plane_state);
*
* // access shadow buffer via shadow_plane_state->map
* }
*/
/*
* Plane Helpers
*/
/**
* drm_gem_plane_helper_prepare_fb() - Prepare a GEM backed framebuffer
* @plane: Plane
* @state: Plane state the fence will be attached to
*
* This function extracts the exclusive fence from &drm_gem_object.resv and
* attaches it to plane state for the atomic helper to wait on. This is
* necessary to correctly implement implicit synchronization for any buffers
* shared as a struct &dma_buf. This function can be used as the
* &drm_plane_helper_funcs.prepare_fb callback.
*
* There is no need for a &drm_plane_helper_funcs.cleanup_fb hook for simple
* GEM based framebuffer drivers which have their buffers always pinned in
* memory.
*
* See drm_atomic_set_fence_for_plane() for a discussion of implicit and
* explicit fencing in atomic modeset updates.
*/
int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
{
struct drm_gem_object *obj;
struct dma_fence *fence;
if (!state->fb)
return 0;
obj = drm_gem_fb_get_obj(state->fb, 0);
fence = dma_resv_get_excl_rcu(obj->resv);
drm_atomic_set_fence_for_plane(state, fence);
return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_plane_helper_prepare_fb);
/**
* drm_gem_simple_display_pipe_prepare_fb - prepare_fb helper for &drm_simple_display_pipe
* @pipe: Simple display pipe
* @plane_state: Plane state
*
* This function uses drm_gem_plane_helper_prepare_fb() to extract the exclusive fence
* from &drm_gem_object.resv and attaches it to plane state for the atomic
* helper to wait on. This is necessary to correctly implement implicit
* synchronization for any buffers shared as a struct &dma_buf. Drivers can use
* this as their &drm_simple_display_pipe_funcs.prepare_fb callback.
*
* See drm_atomic_set_fence_for_plane() for a discussion of implicit and
* explicit fencing in atomic modeset updates.
*/
int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
return drm_gem_plane_helper_prepare_fb(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_display_pipe_prepare_fb);
/*
* Shadow-buffered Planes
*/
/**
* drm_gem_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
* @plane: the plane
*
* This function implements struct &drm_plane_funcs.atomic_duplicate_state for
* shadow-buffered planes. It assumes the existing state to be of type
* struct drm_shadow_plane_state and it allocates the new state to be of this
* type.
*
* The function does not duplicate existing mappings of the shadow buffers.
* Mappings are maintained during the atomic commit by the plane's prepare_fb
* and cleanup_fb helpers. See drm_gem_prepare_shadow_fb() and drm_gem_cleanup_shadow_fb()
* for corresponding helpers.
*
* Returns:
* A pointer to a new plane state on success, or NULL otherwise.
*/
struct drm_plane_state *
drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane)
{
struct drm_plane_state *plane_state = plane->state;
struct drm_shadow_plane_state *new_shadow_plane_state;
if (!plane_state)
return NULL;
new_shadow_plane_state = kzalloc(sizeof(*new_shadow_plane_state), GFP_KERNEL);
if (!new_shadow_plane_state)
return NULL;
__drm_atomic_helper_plane_duplicate_state(plane, &new_shadow_plane_state->base);
return &new_shadow_plane_state->base;
}
EXPORT_SYMBOL(drm_gem_duplicate_shadow_plane_state);
/**
* drm_gem_destroy_shadow_plane_state - deletes shadow-buffered plane state
* @plane: the plane
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
* This function implements struct &drm_plane_funcs.atomic_destroy_state
* for shadow-buffered planes. It expects that mappings of shadow buffers
* have been released already.
*/
void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
struct drm_plane_state *plane_state)
{
struct drm_shadow_plane_state *shadow_plane_state =
to_drm_shadow_plane_state(plane_state);
__drm_atomic_helper_plane_destroy_state(&shadow_plane_state->base);
kfree(shadow_plane_state);
}
EXPORT_SYMBOL(drm_gem_destroy_shadow_plane_state);
/**
* drm_gem_reset_shadow_plane - resets a shadow-buffered plane
* @plane: the plane
*
* This function implements struct &drm_plane_funcs.reset for
* shadow-buffered planes. It assumes the current plane state to be
* of type struct drm_shadow_plane_state and it allocates the new state of
* this type.
*/
void drm_gem_reset_shadow_plane(struct drm_plane *plane)
{
struct drm_shadow_plane_state *shadow_plane_state;
if (plane->state) {
drm_gem_destroy_shadow_plane_state(plane, plane->state);
plane->state = NULL; /* must be set to NULL here */
}
shadow_plane_state = kzalloc(sizeof(*shadow_plane_state), GFP_KERNEL);
if (!shadow_plane_state)
return;
__drm_atomic_helper_plane_reset(plane, &shadow_plane_state->base);
}
EXPORT_SYMBOL(drm_gem_reset_shadow_plane);
/**
* drm_gem_prepare_shadow_fb - prepares shadow framebuffers
* @plane: the plane
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
* This function implements struct &drm_plane_helper_funcs.prepare_fb. It
* maps all buffer objects of the plane's framebuffer into kernel address
* space and stores them in &struct drm_shadow_plane_state.map. The
* framebuffer will be synchronized as part of the atomic commit.
*
* See drm_gem_cleanup_shadow_fb() for cleanup.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
struct drm_gem_object *obj;
struct dma_buf_map map;
int ret;
size_t i;
if (!fb)
return 0;
ret = drm_gem_plane_helper_prepare_fb(plane, plane_state);
if (ret)
return ret;
for (i = 0; i < ARRAY_SIZE(shadow_plane_state->map); ++i) {
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
ret = drm_gem_vmap(obj, &map);
if (ret)
goto err_drm_gem_vunmap;
shadow_plane_state->map[i] = map;
}
return 0;
err_drm_gem_vunmap:
while (i) {
--i;
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
drm_gem_vunmap(obj, &shadow_plane_state->map[i]);
}
return ret;
}
EXPORT_SYMBOL(drm_gem_prepare_shadow_fb);
/**
* drm_gem_cleanup_shadow_fb - releases shadow framebuffers
* @plane: the plane
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
* This function implements struct &drm_plane_helper_funcs.cleanup_fb.
* This function unmaps all buffer objects of the plane's framebuffer.
*
* See drm_gem_prepare_shadow_fb() for more information.
*/
void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
struct drm_framebuffer *fb = plane_state->fb;
size_t i = ARRAY_SIZE(shadow_plane_state->map);
struct drm_gem_object *obj;
if (!fb)
return;
while (i) {
--i;
obj = drm_gem_fb_get_obj(fb, i);
if (!obj)
continue;
drm_gem_vunmap(obj, &shadow_plane_state->map[i]);
}
}
EXPORT_SYMBOL(drm_gem_cleanup_shadow_fb);
/**
* drm_gem_simple_kms_prepare_shadow_fb - prepares shadow framebuffers
* @pipe: the simple display pipe
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
* This function implements struct drm_simple_display_pipe_funcs.prepare_fb. It
* maps all buffer objects of the plane's framebuffer into kernel address
* space and stores them in struct drm_shadow_plane_state.map. The
* framebuffer will be synchronized as part of the atomic commit.
*
* See drm_gem_simple_kms_cleanup_shadow_fb() for cleanup.
*
* Returns:
* 0 on success, or a negative errno code otherwise.
*/
int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
return drm_gem_prepare_shadow_fb(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_prepare_shadow_fb);
/**
* drm_gem_simple_kms_cleanup_shadow_fb - releases shadow framebuffers
* @pipe: the simple display pipe
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
* This function implements struct drm_simple_display_pipe_funcs.cleanup_fb.
* This function unmaps all buffer objects of the plane's framebuffer.
*
* See drm_gem_simple_kms_prepare_shadow_fb().
*/
void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
drm_gem_cleanup_shadow_fb(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_cleanup_shadow_fb);
/**
* drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane
* @pipe: the simple display pipe
*
* This function implements struct drm_simple_display_pipe_funcs.reset_plane
* for shadow-buffered planes.
*/
void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe)
{
drm_gem_reset_shadow_plane(&pipe->plane);
}
EXPORT_SYMBOL(drm_gem_simple_kms_reset_shadow_plane);
/**
* drm_gem_simple_kms_duplicate_shadow_plane_state - duplicates shadow-buffered plane state
* @pipe: the simple display pipe
*
* This function implements struct drm_simple_display_pipe_funcs.duplicate_plane_state
* for shadow-buffered planes. It does not duplicate existing mappings of the shadow
* buffers. Mappings are maintained during the atomic commit by the plane's prepare_fb
* and cleanup_fb helpers.
*
* Returns:
* A pointer to a new plane state on success, or NULL otherwise.
*/
struct drm_plane_state *
drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe)
{
return drm_gem_duplicate_shadow_plane_state(&pipe->plane);
}
EXPORT_SYMBOL(drm_gem_simple_kms_duplicate_shadow_plane_state);
/**
* drm_gem_simple_kms_destroy_shadow_plane_state - destroys shadow-buffered plane state
* @pipe: the simple display pipe
* @plane_state: the plane state of type struct drm_shadow_plane_state
*
* This function implements struct drm_simple_display_pipe_funcs.destroy_plane_state
* for shadow-buffered planes. It expects that mappings of shadow buffers
* have been released already.
*/
void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
drm_gem_destroy_shadow_plane_state(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_destroy_shadow_plane_state);

View File

@ -5,13 +5,8 @@
* Copyright (C) 2017 Noralf Trønnes
*/
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
@ -19,7 +14,6 @@
#include <drm/drm_gem.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_simple_kms_helper.h>
#define AFBC_HEADER_SIZE 16
#define AFBC_TH_LAYOUT_ALIGNMENT 8
@ -432,60 +426,3 @@ int drm_gem_fb_afbc_init(struct drm_device *dev,
return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_fb_afbc_init);
/**
* drm_gem_fb_prepare_fb() - Prepare a GEM backed framebuffer
* @plane: Plane
* @state: Plane state the fence will be attached to
*
* This function extracts the exclusive fence from &drm_gem_object.resv and
* attaches it to plane state for the atomic helper to wait on. This is
* necessary to correctly implement implicit synchronization for any buffers
* shared as a struct &dma_buf. This function can be used as the
* &drm_plane_helper_funcs.prepare_fb callback.
*
* There is no need for &drm_plane_helper_funcs.cleanup_fb hook for simple
* gem based framebuffer drivers which have their buffers always pinned in
* memory.
*
* See drm_atomic_set_fence_for_plane() for a discussion of implicit and
* explicit fencing in atomic modeset updates.
*/
int drm_gem_fb_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_gem_object *obj;
struct dma_fence *fence;
if (!state->fb)
return 0;
obj = drm_gem_fb_get_obj(state->fb, 0);
fence = dma_resv_get_excl_rcu(obj->resv);
drm_atomic_set_fence_for_plane(state, fence);
return 0;
}
EXPORT_SYMBOL_GPL(drm_gem_fb_prepare_fb);
/**
* drm_gem_fb_simple_display_pipe_prepare_fb - prepare_fb helper for
* &drm_simple_display_pipe
* @pipe: Simple display pipe
* @plane_state: Plane state
*
* This function uses drm_gem_fb_prepare_fb() to extract the exclusive fence
* from &drm_gem_object.resv and attaches it to plane state for the atomic
* helper to wait on. This is necessary to correctly implement implicit
* synchronization for any buffers shared as a struct &dma_buf. Drivers can use
* this as their &drm_simple_display_pipe_funcs.prepare_fb callback.
*
* See drm_atomic_set_fence_for_plane() for a discussion of implicit and
* explicit fencing in atomic modeset updates.
*/
int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state)
{
return drm_gem_fb_prepare_fb(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_fb_simple_display_pipe_prepare_fb);

View File

@ -8,7 +8,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>
@ -187,9 +187,8 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
struct drm_gem_vram_object *gbo;
struct drm_gem_object *gem;
struct drm_vram_mm *vmm = dev->vram_mm;
struct ttm_bo_device *bdev;
struct ttm_device *bdev;
int ret;
size_t acc_size;
if (WARN_ONCE(!vmm, "VRAM MM not initialized"))
return ERR_PTR(-EINVAL);
@ -216,7 +215,6 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
}
bdev = &vmm->bdev;
acc_size = ttm_bo_dma_acc_size(bdev, size, sizeof(*gbo));
gbo->bo.bdev = bdev;
drm_gem_vram_placement(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM);
@ -226,8 +224,8 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
* to release gbo->bo.base and kfree gbo.
*/
ret = ttm_bo_init(bdev, &gbo->bo, size, ttm_bo_type_device,
&gbo->placement, pg_align, false, acc_size,
NULL, NULL, ttm_buffer_object_destroy);
&gbo->placement, pg_align, false, NULL, NULL,
ttm_buffer_object_destroy);
if (ret)
return ERR_PTR(ret);
@ -558,7 +556,7 @@ err_drm_gem_object_put:
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
/*
* Helpers for struct ttm_bo_driver
* Helpers for struct ttm_device_funcs
*/
static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
@ -573,9 +571,7 @@ static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo,
*pl = gbo->placement;
}
static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo,
bool evict,
struct ttm_resource *new_mem)
static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo)
{
struct ttm_buffer_object *bo = &gbo->bo;
struct drm_device *dev = bo->base.dev;
@ -592,16 +588,8 @@ static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo,
struct ttm_operation_ctx *ctx,
struct ttm_resource *new_mem)
{
int ret;
drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem);
ret = ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
if (ret) {
swap(*new_mem, gbo->bo.mem);
drm_gem_vram_bo_driver_move_notify(gbo, false, new_mem);
swap(*new_mem, gbo->bo.mem);
}
return ret;
drm_gem_vram_bo_driver_move_notify(gbo);
return ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem);
}
/*
@ -720,7 +708,7 @@ drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
goto err_drm_gem_vram_unpin;
}
ret = drm_gem_fb_prepare_fb(plane, new_state);
ret = drm_gem_plane_helper_prepare_fb(plane, new_state);
if (ret)
goto err_drm_gem_vram_unpin;
@ -901,7 +889,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
* TTM TT
*/
static void bo_driver_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
ttm_tt_destroy_common(bdev, tt);
ttm_tt_fini(tt);
@ -957,7 +945,7 @@ static void bo_driver_delete_mem_notify(struct ttm_buffer_object *bo)
gbo = drm_gem_vram_of_bo(bo);
drm_gem_vram_bo_driver_move_notify(gbo, false, NULL);
drm_gem_vram_bo_driver_move_notify(gbo);
}
static int bo_driver_move(struct ttm_buffer_object *bo,
@ -973,7 +961,7 @@ static int bo_driver_move(struct ttm_buffer_object *bo,
return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
}
static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
static int bo_driver_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
@ -993,7 +981,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
return 0;
}
static struct ttm_bo_driver bo_driver = {
static struct ttm_device_funcs bo_driver = {
.ttm_tt_create = bo_driver_ttm_tt_create,
.ttm_tt_destroy = bo_driver_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
@ -1044,7 +1032,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
vmm->vram_base = vram_base;
vmm->vram_size = vram_size;
ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev,
ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev,
dev->anon_inode->i_mapping,
dev->vma_offset_manager,
false, true);
@ -1062,7 +1050,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM);
ttm_bo_device_release(&vmm->bdev);
ttm_device_fini(&vmm->bdev);
}
/*

View File

@ -302,12 +302,8 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_stats32_t __user *argp = (void __user *)arg;
int err;
err = drm_ioctl_kernel(file, drm_noop, NULL, 0);
if (err)
return err;
/* getstats is defunct, just clear */
if (clear_user(argp, sizeof(drm_stats32_t)))
return -EFAULT;
return 0;
@ -820,13 +816,8 @@ typedef struct drm_update_draw32 {
static int compat_drm_update_draw(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_update_draw32_t update32;
if (copy_from_user(&update32, (void __user *)arg, sizeof(update32)))
return -EFAULT;
return drm_ioctl_kernel(file, drm_noop, NULL,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
/* update_draw is defunct */
return 0;
}
#endif

View File

@ -203,7 +203,6 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(gem);
struct dma_buf_attachment *import_attach = gem->import_attach;
struct drm_format_name_buf format_name;
void *src = cma_obj->vaddr;
int ret = 0;
@ -225,8 +224,8 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
drm_fb_xrgb8888_to_rgb565(dst, src, fb, clip, swap);
break;
default:
drm_err_once(fb->dev, "Format is not supported: %s\n",
drm_get_format_name(fb->format->format, &format_name));
drm_err_once(fb->dev, "Format is not supported: %p4cc\n",
&fb->format->format);
return -EINVAL;
}

View File

@ -84,6 +84,13 @@ static const struct drm_dmi_panel_orientation_data itworks_tw891 = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data onegx1_pro = {
.width = 1200,
.height = 1920,
.bios_dates = (const char * const []){ "12/17/2020", NULL },
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
static const struct drm_dmi_panel_orientation_data lcd720x1280_rightside_up = {
.width = 720,
.height = 1280,
@ -211,6 +218,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* OneGX1 Pro */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"),
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SYSTEM_PRODUCT_NAME"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
},
.driver_data = (void *)&onegx1_pro,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),

View File

@ -50,10 +50,8 @@
* &struct drm_plane (possibly as part of a larger structure) and registers it
* with a call to drm_universal_plane_init().
*
* The type of a plane is exposed in the immutable "type" enumeration property,
* which has one of the following values: "Overlay", "Primary", "Cursor" (see
* enum drm_plane_type). A plane can be compatible with multiple CRTCs, see
* &drm_plane.possible_crtcs.
* Each plane has a type, see enum drm_plane_type. A plane can be compatible
* with multiple CRTCs, see &drm_plane.possible_crtcs.
*
* Each CRTC must have a unique primary plane userspace can attach to enable
* the CRTC. In other words, userspace must be able to attach a different
@ -73,6 +71,58 @@
*
* DRM planes have a few standardized properties:
*
* type:
* Immutable property describing the type of the plane.
*
* For user-space which has enabled the &DRM_CLIENT_CAP_ATOMIC capability,
* the plane type is just a hint and is mostly superseded by atomic
* test-only commits. The type hint can still be used to more easily
* arrive at a plane configuration the driver accepts.
*
* The value of this property can be one of the following:
*
* "Primary":
* To light up a CRTC, attaching a primary plane that covers the whole
* CRTC, with no scaling or cropping set up, is most likely to work.
*
* Drivers may support more features for the primary plane; user-space
* can find out with test-only atomic commits.
*
* Some primary planes are implicitly used by the kernel in the legacy
* IOCTLs &DRM_IOCTL_MODE_SETCRTC and &DRM_IOCTL_MODE_PAGE_FLIP.
* Therefore user-space must not mix explicit usage of any primary
* plane (e.g. through an atomic commit) with these legacy IOCTLs.
*
* "Cursor":
* To enable this plane, using a framebuffer configured without scaling
* or cropping and with the following properties is most likely to
* work:
*
* - If the driver provides the capabilities &DRM_CAP_CURSOR_WIDTH and
* &DRM_CAP_CURSOR_HEIGHT, create the framebuffer with this size.
* Otherwise, create a framebuffer with the size 64x64.
* - If the driver doesn't support modifiers, create a framebuffer with
* a linear layout. Otherwise, use the IN_FORMATS plane property.
*
* Drivers may support more features for the cursor plane; user-space
* can find out with test-only atomic commits.
*
* Some cursor planes are implicitly used by the kernel in the legacy
* IOCTLs &DRM_IOCTL_MODE_CURSOR and &DRM_IOCTL_MODE_CURSOR2.
* Therefore user-space must not mix explicit usage of any cursor
* plane (e.g. through an atomic commit) with these legacy IOCTLs.
*
* Some drivers may support cursors even if no cursor plane is exposed.
* In this case, the legacy cursor IOCTLs can be used to configure the
* cursor.
*
* "Overlay":
* Neither primary nor cursor.
*
* Overlay planes are the only planes exposed when the
* &DRM_CLIENT_CAP_UNIVERSAL_PLANES capability is disabled.
*
* IN_FORMATS:
* Blob property which contains the set of buffer format and modifier
* pairs supported by this plane. The blob is a struct
@ -719,12 +769,8 @@ static int __setplane_check(struct drm_plane *plane,
ret = drm_plane_check_pixel_format(plane, fb->format->format,
fb->modifier);
if (ret) {
struct drm_format_name_buf format_name;
DRM_DEBUG_KMS("Invalid pixel format %s, modifier 0x%llx\n",
drm_get_format_name(fb->format->format,
&format_name),
fb->modifier);
DRM_DEBUG_KMS("Invalid pixel format %p4cc, modifier 0x%llx\n",
&fb->format->format, fb->modifier);
return ret;
}

View File

@ -177,14 +177,16 @@ static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
};
static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *plane_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_simple_display_pipe *pipe;
struct drm_crtc_state *crtc_state;
int ret;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
crtc_state = drm_atomic_get_new_crtc_state(plane_state->state,
crtc_state = drm_atomic_get_new_crtc_state(state,
&pipe->crtc);
ret = drm_atomic_helper_check_plane_state(plane_state, crtc_state,
@ -204,8 +206,10 @@ static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
}
static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_pstate)
struct drm_atomic_state *state)
{
struct drm_plane_state *old_pstate = drm_atomic_get_old_plane_state(state,
plane);
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
@ -253,13 +257,47 @@ static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
.atomic_update = drm_simple_kms_plane_atomic_update,
};
static void drm_simple_kms_plane_reset(struct drm_plane *plane)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
if (!pipe->funcs || !pipe->funcs->reset_plane)
return drm_atomic_helper_plane_reset(plane);
return pipe->funcs->reset_plane(pipe);
}
static struct drm_plane_state *drm_simple_kms_plane_duplicate_state(struct drm_plane *plane)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
if (!pipe->funcs || !pipe->funcs->duplicate_plane_state)
return drm_atomic_helper_plane_duplicate_state(plane);
return pipe->funcs->duplicate_plane_state(pipe);
}
static void drm_simple_kms_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_simple_display_pipe *pipe;
pipe = container_of(plane, struct drm_simple_display_pipe, plane);
if (!pipe->funcs || !pipe->funcs->destroy_plane_state)
drm_atomic_helper_plane_destroy_state(plane, state);
else
pipe->funcs->destroy_plane_state(pipe, state);
}
static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
.reset = drm_simple_kms_plane_reset,
.atomic_duplicate_state = drm_simple_kms_plane_duplicate_state,
.atomic_destroy_state = drm_simple_kms_plane_destroy_state,
.format_mod_supported = drm_simple_kms_format_mod_supported,
};
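With these wrappers in place, a simple-pipe driver can override plane-state handling through struct drm_simple_display_pipe_funcs; in practice the shadow-plane macro fills in the new hooks. A sketch with hypothetical my_* callbacks:

static const struct drm_simple_display_pipe_funcs my_pipe_funcs = {
        .enable = my_pipe_enable,       /* hypothetical */
        .disable = my_pipe_disable,     /* hypothetical */
        .update = my_pipe_update,       /* hypothetical */
        /* wires up prepare_fb/cleanup_fb and the reset_plane,
         * duplicate_plane_state and destroy_plane_state hooks */
        DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};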

View File

@ -387,6 +387,15 @@ int drm_syncobj_find_fence(struct drm_file *file_private,
if (!syncobj)
return -ENOENT;
/* Waiting for userspace with locks held is illegal because it can
* trivially deadlock with page faults, for example. Make lockdep complain
* about it early on.
*/
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
might_sleep();
lockdep_assert_none_held_once();
}
*fence = drm_syncobj_fence_get(syncobj);
if (*fence) {
@ -942,6 +951,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint64_t *points;
uint32_t signaled_count, i;
if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
lockdep_assert_none_held_once();
points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
if (points == NULL)
return -ENOMEM;

View File

@ -1470,20 +1470,7 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
}
EXPORT_SYMBOL(drm_crtc_vblank_on);
/**
* drm_vblank_restore - estimate missed vblanks and update vblank count.
* @dev: DRM device
* @pipe: CRTC index
*
* Power management features can cause frame counter resets between vblank
* disable and enable. Drivers can use this function in their
* &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
* the last &drm_crtc_funcs.disable_vblank using timestamps and update the
* vblank counter.
*
* This function is the legacy version of drm_crtc_vblank_restore().
*/
void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
static void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
{
ktime_t t_vblank;
struct drm_vblank_crtc *vblank;
@ -1519,7 +1506,6 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
diff, diff_ns, framedur_ns, cur_vblank - vblank->last);
store_vblank(dev, pipe, diff, t_vblank, cur_vblank);
}
EXPORT_SYMBOL(drm_vblank_restore);
/**
* drm_crtc_vblank_restore - estimate missed vblanks and update vblank count.
@ -1530,9 +1516,18 @@ EXPORT_SYMBOL(drm_vblank_restore);
* &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
* the last &drm_crtc_funcs.disable_vblank using timestamps and update the
* vblank counter.
*
* Note that drivers must have race-free high-precision timestamping support,
* i.e. &drm_crtc_funcs.get_vblank_timestamp must be hooked up and
* &drm_driver.vblank_disable_immediate must be set to indicate the
* time-stamping functions are race-free against vblank hardware counter
* increments.
*/
void drm_crtc_vblank_restore(struct drm_crtc *crtc)
{
WARN_ON_ONCE(!crtc->funcs->get_vblank_timestamp);
WARN_ON_ONCE(!crtc->dev->vblank_disable_immediate);
drm_vblank_restore(crtc->dev, drm_crtc_index(crtc));
}
EXPORT_SYMBOL(drm_crtc_vblank_restore);
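With drm_vblank_restore() now static, drivers call the CRTC variant from their &drm_crtc_funcs.enable_vblank hook, subject to the requirements spelled out above. A sketch with hypothetical my_* names:

static int my_enable_vblank(struct drm_crtc *crtc)
{
        /* Estimate the frames missed while the vblank IRQ was disabled. */
        drm_crtc_vblank_restore(crtc);

        my_hw_unmask_vblank_irq(crtc);  /* hypothetical register write */

        return 0;
}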

View File

@ -82,7 +82,8 @@ static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
return fence;
}
static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
*sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct etnaviv_gpu *gpu = submit->gpu;
@ -120,9 +121,13 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
drm_sched_resubmit_jobs(&gpu->sched);
drm_sched_start(&gpu->sched, true);
return DRM_GPU_SCHED_STAT_NOMINAL;
out_no_timeout:
/* restart scheduler after GPU is usable again */
drm_sched_start(&gpu->sched, true);
return DRM_GPU_SCHED_STAT_NOMINAL;
}
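The &drm_sched_backend_ops.timedout_job hook now reports a status instead of returning void. A minimal converted handler might look like this, with my_* names hypothetical and the recovery details elided:

static enum drm_gpu_sched_stat my_timedout_job(struct drm_sched_job *sched_job)
{
        struct drm_gpu_scheduler *sched = sched_job->sched;

        drm_sched_stop(sched, sched_job);
        my_reset_hw();                  /* hypothetical hardware reset */
        drm_sched_resubmit_jobs(sched);
        drm_sched_start(sched, true);   /* true: full recovery */

        return DRM_GPU_SCHED_STAT_NOMINAL;
}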
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
@ -185,7 +190,7 @@ int etnaviv_sched_init(struct etnaviv_gpu *gpu)
ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
msecs_to_jiffies(500), dev_name(gpu->dev));
msecs_to_jiffies(500), NULL, dev_name(gpu->dev));
if (ret)
return ret;

View File

@ -228,14 +228,16 @@ exynos_drm_plane_check_size(const struct exynos_drm_plane_config *config,
}
static int exynos_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
struct exynos_drm_plane_state *exynos_state =
to_exynos_plane_state(state);
to_exynos_plane_state(new_plane_state);
int ret = 0;
if (!state->crtc || !state->fb)
if (!new_plane_state->crtc || !new_plane_state->fb)
return 0;
/* translate state into exynos_state */
@ -250,13 +252,14 @@ static int exynos_plane_atomic_check(struct drm_plane *plane,
}
static void exynos_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *state = plane->state;
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(state->crtc);
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(new_state->crtc);
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
if (!state->crtc)
if (!new_state->crtc)
return;
if (exynos_crtc->ops->update_plane)
@ -264,8 +267,9 @@ static void exynos_plane_atomic_update(struct drm_plane *plane,
}
static void exynos_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane);
struct exynos_drm_plane *exynos_plane = to_exynos_plane(plane);
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(old_state->crtc);

View File

@ -7,6 +7,7 @@
#include <linux/regmap.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_cma_helper.h>
@ -33,11 +34,13 @@ static int fsl_dcu_drm_plane_index(struct drm_plane *plane)
}
static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = new_plane_state->fb;
if (!state->fb || !state->crtc)
if (!new_plane_state->fb || !new_plane_state->crtc)
return 0;
switch (fb->format->format) {
@ -57,7 +60,7 @@ static int fsl_dcu_drm_plane_atomic_check(struct drm_plane *plane,
}
static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
unsigned int value;
@ -73,11 +76,12 @@ static void fsl_dcu_drm_plane_atomic_disable(struct drm_plane *plane,
}
static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct fsl_dcu_drm_device *fsl_dev = plane->dev->dev_private;
struct drm_plane_state *state = plane->state;
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = plane->state->fb;
struct drm_gem_cma_object *gem;
unsigned int alpha = DCU_LAYER_AB_NONE, bpp;
@ -125,11 +129,11 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
}
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 1),
DCU_LAYER_HEIGHT(state->crtc_h) |
DCU_LAYER_WIDTH(state->crtc_w));
DCU_LAYER_HEIGHT(new_state->crtc_h) |
DCU_LAYER_WIDTH(new_state->crtc_w));
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 2),
DCU_LAYER_POSY(state->crtc_y) |
DCU_LAYER_POSX(state->crtc_x));
DCU_LAYER_POSY(new_state->crtc_y) |
DCU_LAYER_POSX(new_state->crtc_x));
regmap_write(fsl_dev->regmap,
DCU_CTRLDESCLN(index, 3), gem->paddr);
regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),

View File

@ -9,12 +9,5 @@ config DRM_GMA500
select INPUT if ACPI
help
Say yes for an experimental 2D KMS framebuffer driver for the
Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
devices.
config DRM_GMA600
bool "Intel GMA600 support (Experimental)"
depends on DRM_GMA500
help
Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
platforms with LVDS ports. MIPI is not currently supported.
Intel GMA500 (Poulsbo), Intel GMA600 (Moorestown/Oak Trail) and
Intel GMA3600/3650 (Cedar Trail).

View File

@ -4,9 +4,7 @@
#
gma500_gfx-y += \
accel_2d.o \
backlight.o \
blitter.o \
cdv_device.o \
cdv_intel_crt.o \
cdv_intel_display.o \
@ -23,6 +21,12 @@ gma500_gfx-y += \
intel_i2c.o \
mid_bios.o \
mmu.o \
oaktrail_device.o \
oaktrail_crtc.o \
oaktrail_hdmi.o \
oaktrail_hdmi_i2c.o \
oaktrail_lvds.o \
oaktrail_lvds_i2c.o \
power.o \
psb_device.o \
psb_drv.o \
@ -33,13 +37,6 @@ gma500_gfx-y += \
psb_lid.o \
psb_irq.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o \
gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
oaktrail_crtc.o \
oaktrail_lvds.o \
oaktrail_lvds_i2c.o \
oaktrail_hdmi.o \
oaktrail_hdmi_i2c.o
gma500_gfx-$(CONFIG_ACPI) += opregion.o
obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o

View File

@ -1,60 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/**************************************************************************
* Copyright (c) 2007-2011, Intel Corporation.
* All Rights Reserved.
*
* Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
* develop this driver.
*
**************************************************************************/
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tty.h>
#include <drm/drm.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include "psb_drv.h"
#include "psb_reg.h"
/**
* psb_spank - reset the 2D engine
* @dev_priv: our PSB DRM device
*
* Soft reset the graphics engine and then reload the necessary registers.
* We use this at initialisation time but it will become relevant for
* accelerated X later
*/
void psb_spank(struct drm_psb_private *dev_priv)
{
PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
_PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
_PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
_PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
PSB_RSGX32(PSB_CR_SOFT_RESET);
msleep(1);
PSB_WSGX32(0, PSB_CR_SOFT_RESET);
wmb();
PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
PSB_CR_BIF_CTRL);
wmb();
(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
msleep(1);
PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
PSB_CR_BIF_CTRL);
(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
}

View File

@ -1,43 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014, Patrik Jakobsson
* All Rights Reserved.
*
* Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
*/
#include "psb_drv.h"
#include "blitter.h"
#include "psb_reg.h"
/* Wait for the blitter to be completely idle */
int gma_blt_wait_idle(struct drm_psb_private *dev_priv)
{
unsigned long stop = jiffies + HZ;
int busy = 1;
/* NOP for Cedarview */
if (IS_CDV(dev_priv->dev))
return 0;
/* First do a quick check */
if ((PSB_RSGX32(PSB_CR_2D_SOCIF) == _PSB_C2_SOCIF_EMPTY) &&
((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) & _PSB_C2B_STATUS_BUSY) == 0))
return 0;
do {
busy = (PSB_RSGX32(PSB_CR_2D_SOCIF) != _PSB_C2_SOCIF_EMPTY);
} while (busy && !time_after_eq(jiffies, stop));
if (busy)
return -EBUSY;
do {
busy = ((PSB_RSGX32(PSB_CR_2D_BLIT_STATUS) &
_PSB_C2B_STATUS_BUSY) != 0);
} while (busy && !time_after_eq(jiffies, stop));
/* If still busy, we probably have a hang */
return (busy) ? -EBUSY : 0;
}


@@ -1,16 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2014, Patrik Jakobsson
* All Rights Reserved.
*
* Authors: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
*/
#ifndef __BLITTER_H
#define __BLITTER_H
struct drm_psb_private;
extern int gma_blt_wait_idle(struct drm_psb_private *dev_priv);
#endif


@@ -603,7 +603,7 @@ const struct psb_ops cdv_chip_ops = {
.errata = cdv_errata,
.crtc_helper = &cdv_intel_helper_funcs,
.crtc_funcs = &cdv_intel_crtc_funcs,
.crtc_funcs = &gma_intel_crtc_funcs,
.clock_funcs = &cdv_clock_funcs,
.output_init = cdv_output_init,


@@ -8,7 +8,6 @@ struct drm_device;
struct psb_intel_mode_device;
extern const struct drm_crtc_helper_funcs cdv_intel_helper_funcs;
extern const struct drm_crtc_funcs cdv_intel_crtc_funcs;
extern const struct gma_clock_funcs cdv_clock_funcs;
extern void cdv_intel_crt_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);


@@ -248,8 +248,6 @@ void cdv_intel_crt_init(struct drm_device *dev,
struct drm_connector *connector;
struct drm_encoder *encoder;
u32 i2c_reg;
gma_encoder = kzalloc(sizeof(struct gma_encoder), GFP_KERNEL);
if (!gma_encoder)
return;
@@ -269,24 +267,13 @@ void cdv_intel_crt_init(struct drm_device *dev,
gma_connector_attach_encoder(gma_connector, gma_encoder);
/* Set up the DDC bus. */
i2c_reg = GPIOA;
/* Remove the following code for CDV */
/*
if (dev_priv->crt_ddc_bus != 0)
i2c_reg = dev_priv->crt_ddc_bus;
}*/
gma_encoder->ddc_bus = psb_intel_i2c_create(dev,
i2c_reg, "CRTDDC_A");
gma_encoder->ddc_bus = psb_intel_i2c_create(dev, GPIOA, "CRTDDC_A");
if (!gma_encoder->ddc_bus) {
dev_printk(KERN_ERR, dev->dev, "DDC bus registration failed.\n");
goto failed_ddc;
}
gma_encoder->type = INTEL_OUTPUT_ANALOG;
/*
psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
*/
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;


@@ -582,7 +582,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct gma_clock_t clock;
u32 dpll = 0, dspcntr, pipeconf;
bool ok;
bool is_lvds = false, is_tv = false;
bool is_lvds = false;
bool is_dp = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
@@ -603,9 +603,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
case INTEL_OUTPUT_TVOUT:
is_tv = true;
break;
case INTEL_OUTPUT_ANALOG:
case INTEL_OUTPUT_HDMI:
break;
@@ -660,12 +657,6 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
}
dpll = DPLL_VGA_MODE_DIS;
if (is_tv) {
/* XXX: just matching BIOS for now */
/* dpll |= PLL_REF_INPUT_TVCLKINBC; */
dpll |= 3;
}
/* dpll |= PLL_REF_INPUT_DREFCLK; */
if (is_dp || is_edp) {
cdv_intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -970,18 +961,6 @@ const struct drm_crtc_helper_funcs cdv_intel_helper_funcs = {
.disable = gma_crtc_disable,
};
const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
.cursor_set = gma_crtc_cursor_set,
.cursor_move = gma_crtc_cursor_move,
.gamma_set = gma_crtc_gamma_set,
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
.enable_vblank = psb_enable_vblank,
.disable_vblank = psb_disable_vblank,
.get_vblank_counter = psb_get_vblank_counter,
};
const struct gma_clock_funcs cdv_clock_funcs = {
.clock = cdv_intel_clock,
.limit = cdv_intel_limit,


@@ -11,7 +11,6 @@
#include <asm/set_memory.h>
#include "blitter.h"
#include "psb_drv.h"
@@ -229,18 +228,9 @@ void psb_gtt_unpin(struct gtt_range *gt)
struct drm_device *dev = gt->gem.dev;
struct drm_psb_private *dev_priv = dev->dev_private;
u32 gpu_base = dev_priv->gtt.gatt_start;
int ret;
/* While holding the gtt_mutex no new blits can be initiated */
mutex_lock(&dev_priv->gtt_mutex);
/* Wait for any possible usage of the memory to be finished */
ret = gma_blt_wait_idle(dev_priv);
if (ret) {
DRM_ERROR("Failed to idle the blitter, unpin failed!");
goto out;
}
WARN_ON(!gt->in_gart);
gt->in_gart--;
@@ -251,7 +241,6 @@ void psb_gtt_unpin(struct gtt_range *gt)
psb_gtt_detach_pages(gt);
}
out:
mutex_unlock(&dev_priv->gtt_mutex);
}


@@ -44,13 +44,13 @@
ret__ = -ETIMEDOUT; \
break; \
} \
if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
if (W && !(in_dbg_master())) \
msleep(W); \
} \
ret__; \
})
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
#define GMBUS_REG_READ(reg) ioread32(dev_priv->gmbus_reg + (reg))
#define GMBUS_REG_WRITE(reg, val) iowrite32((val), dev_priv->gmbus_reg + (reg))
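
The hunk above shows only the tail of the GMBUS code's _wait_for() polling macro; the change drops the in_atomic() test, so the helper now always sleeps between polls unless the kgdb debug master is active. For orientation, here is a minimal sketch of the whole helper after this change, reconstructed from context rather than quoted from the tree; the call site and register names are illustrative only.

#include <linux/delay.h>	/* msleep() */
#include <linux/jiffies.h>	/* jiffies, msecs_to_jiffies(), time_after() */
#include <linux/kgdb.h>		/* in_dbg_master() */

/* Poll COND for up to MS milliseconds, sleeping W ms between reads
 * unless the kgdb debug master is running (sketch, not verbatim). */
#define _wait_for(COND, MS, W) ({					\
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
	while (!(COND)) {						\
		if (time_after(jiffies, timeout__)) {			\
			ret__ = -ETIMEDOUT;				\
			break;						\
		}							\
		if (W && !(in_dbg_master()))				\
			msleep(W);					\
	}								\
	ret__;								\
})
#define wait_for(COND, MS) _wait_for(COND, MS, 1)

/* Typical call site: wait up to 50 ms for the GMBUS engine to go idle,
 * polling once per millisecond (register names are illustrative):
 *
 *	ret = wait_for((GMBUS_REG_READ(GMBUS2) & GMBUS_ACTIVE) == 0, 50);
 */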


@@ -545,7 +545,7 @@ const struct psb_ops oaktrail_chip_ops = {
.chip_setup = oaktrail_chip_setup,
.chip_teardown = oaktrail_teardown,
.crtc_helper = &oaktrail_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
.crtc_funcs = &gma_intel_crtc_funcs,
.output_init = oaktrail_output_init,


@@ -329,7 +329,7 @@ const struct psb_ops psb_chip_ops = {
.chip_teardown = psb_chip_teardown,
.crtc_helper = &psb_intel_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
.crtc_funcs = &gma_intel_crtc_funcs,
.clock_funcs = &psb_clock_funcs,
.output_init = psb_output_init,


@@ -12,6 +12,7 @@
#include <linux/notifier.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <asm/set_memory.h>
@@ -54,7 +55,7 @@ static const struct pci_device_id pciidlist[] = {
/* Poulsbo */
{ 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
{ 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
#if defined(CONFIG_DRM_GMA600)
/* Oak Trail */
{ 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
@@ -64,8 +65,7 @@ static const struct pci_device_id pciidlist[] = {
{ 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
{ 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops },
#endif
/* Cedartrail */
/* Cedar Trail */
{ 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
{ 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops },
@@ -92,6 +92,36 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static const struct drm_ioctl_desc psb_ioctls[] = {
};
/**
* psb_spank - reset the 2D engine
* @dev_priv: our PSB DRM device
*
* Soft reset the graphics engine and then reload the necessary registers.
*/
void psb_spank(struct drm_psb_private *dev_priv)
{
PSB_WSGX32(_PSB_CS_RESET_BIF_RESET | _PSB_CS_RESET_DPM_RESET |
_PSB_CS_RESET_TA_RESET | _PSB_CS_RESET_USE_RESET |
_PSB_CS_RESET_ISP_RESET | _PSB_CS_RESET_TSP_RESET |
_PSB_CS_RESET_TWOD_RESET, PSB_CR_SOFT_RESET);
PSB_RSGX32(PSB_CR_SOFT_RESET);
msleep(1);
PSB_WSGX32(0, PSB_CR_SOFT_RESET);
wmb();
PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_CB_CTRL_CLEAR_FAULT,
PSB_CR_BIF_CTRL);
wmb();
(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
msleep(1);
PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) & ~_PSB_CB_CTRL_CLEAR_FAULT,
PSB_CR_BIF_CTRL);
(void) PSB_RSGX32(PSB_CR_BIF_CTRL);
PSB_WSGX32(dev_priv->gtt.gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
}
static int psb_do_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;


@@ -625,13 +625,9 @@ static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
/* psb_irq.c */
extern irqreturn_t psb_irq_handler(int irq, void *arg);
extern int psb_irq_enable_dpst(struct drm_device *dev);
extern int psb_irq_disable_dpst(struct drm_device *dev);
extern void psb_irq_preinstall(struct drm_device *dev);
extern int psb_irq_postinstall(struct drm_device *dev);
extern void psb_irq_uninstall(struct drm_device *dev);
extern void psb_irq_turn_on_dpst(struct drm_device *dev);
extern void psb_irq_turn_off_dpst(struct drm_device *dev);
extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
@@ -679,7 +675,7 @@ extern void oaktrail_lvds_init(struct drm_device *dev,
/* psb_intel_display.c */
extern const struct drm_crtc_helper_funcs psb_intel_helper_funcs;
extern const struct drm_crtc_funcs psb_intel_crtc_funcs;
extern const struct drm_crtc_funcs gma_intel_crtc_funcs;
/* psb_intel_lvds.c */
extern const struct drm_connector_helper_funcs


@@ -426,7 +426,7 @@ const struct drm_crtc_helper_funcs psb_intel_helper_funcs = {
.disable = gma_crtc_disable,
};
const struct drm_crtc_funcs psb_intel_crtc_funcs = {
const struct drm_crtc_funcs gma_intel_crtc_funcs = {
.cursor_set = gma_crtc_cursor_set,
.cursor_move = gma_crtc_cursor_move,
.gamma_set = gma_crtc_gamma_set,


@@ -550,38 +550,6 @@
#define HISTOGRAM_INT_CTRL_CLEAR (1UL << 30)
#define DPST_YUV_LUMA_MODE 0
struct dpst_ie_histogram_control {
union {
uint32_t data;
struct {
uint32_t bin_reg_index:7;
uint32_t reserved:4;
uint32_t bin_reg_func_select:1;
uint32_t sync_to_phase_in:1;
uint32_t alt_enhancement_mode:2;
uint32_t reserved1:1;
uint32_t sync_to_phase_in_count:8;
uint32_t histogram_mode_select:1;
uint32_t reserved2:4;
uint32_t ie_pipe_assignment:1;
uint32_t ie_mode_table_enabled:1;
uint32_t ie_histogram_enable:1;
};
};
};
struct dpst_guardband {
union {
uint32_t data;
struct {
uint32_t guardband:22;
uint32_t guardband_interrupt_delay:8;
uint32_t interrupt_status:1;
uint32_t interrupt_enable:1;
};
};
};
#define PIPEAFRAMEHIGH 0x70040
#define PIPEAFRAMEPIXEL 0x70044
#define PIPEBFRAMEHIGH 0x71040


@@ -101,30 +101,6 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
}
}
static void mid_enable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
{
if (gma_power_begin(dev_priv->dev, false)) {
u32 pipe_event = mid_pipe_event(pipe);
dev_priv->vdc_irq_mask |= pipe_event;
PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
gma_power_end(dev_priv->dev);
}
}
static void mid_disable_pipe_event(struct drm_psb_private *dev_priv, int pipe)
{
if (dev_priv->pipestat[pipe] == 0) {
if (gma_power_begin(dev_priv->dev, false)) {
u32 pipe_event = mid_pipe_event(pipe);
dev_priv->vdc_irq_mask &= ~pipe_event;
PSB_WVDC32(~dev_priv->vdc_irq_mask, PSB_INT_MASK_R);
PSB_WVDC32(dev_priv->vdc_irq_mask, PSB_INT_ENABLE_R);
gma_power_end(dev_priv->dev);
}
}
}
/*
* Display controller interrupt handler for pipe event.
*/
@@ -392,92 +368,6 @@ void psb_irq_uninstall(struct drm_device *dev)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
void psb_irq_turn_on_dpst(struct drm_device *dev)
{
struct drm_psb_private *dev_priv =
(struct drm_psb_private *) dev->dev_private;
u32 hist_reg;
u32 pwm_reg;
if (gma_power_begin(dev, false)) {
PSB_WVDC32(1 << 31, HISTOGRAM_LOGIC_CONTROL);
hist_reg = PSB_RVDC32(HISTOGRAM_LOGIC_CONTROL);
PSB_WVDC32(1 << 31, HISTOGRAM_INT_CONTROL);
hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
PSB_WVDC32(0x80010100, PWM_CONTROL_LOGIC);
pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
PSB_WVDC32(pwm_reg | PWM_PHASEIN_ENABLE
| PWM_PHASEIN_INT_ENABLE,
PWM_CONTROL_LOGIC);
pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
psb_enable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
hist_reg = PSB_RVDC32(HISTOGRAM_INT_CONTROL);
PSB_WVDC32(hist_reg | HISTOGRAM_INT_CTRL_CLEAR,
HISTOGRAM_INT_CONTROL);
pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
PSB_WVDC32(pwm_reg | 0x80010100 | PWM_PHASEIN_ENABLE,
PWM_CONTROL_LOGIC);
gma_power_end(dev);
}
}
int psb_irq_enable_dpst(struct drm_device *dev)
{
struct drm_psb_private *dev_priv =
(struct drm_psb_private *) dev->dev_private;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
/* enable DPST */
mid_enable_pipe_event(dev_priv, 0);
psb_irq_turn_on_dpst(dev);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
return 0;
}
void psb_irq_turn_off_dpst(struct drm_device *dev)
{
struct drm_psb_private *dev_priv =
(struct drm_psb_private *) dev->dev_private;
u32 pwm_reg;
if (gma_power_begin(dev, false)) {
PSB_WVDC32(0x00000000, HISTOGRAM_INT_CONTROL);
PSB_RVDC32(HISTOGRAM_INT_CONTROL);
psb_disable_pipestat(dev_priv, 0, PIPE_DPST_EVENT_ENABLE);
pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
PSB_WVDC32(pwm_reg & ~PWM_PHASEIN_INT_ENABLE,
PWM_CONTROL_LOGIC);
pwm_reg = PSB_RVDC32(PWM_CONTROL_LOGIC);
gma_power_end(dev);
}
}
int psb_irq_disable_dpst(struct drm_device *dev)
{
struct drm_psb_private *dev_priv =
(struct drm_psb_private *) dev->dev_private;
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
mid_disable_pipe_event(dev_priv, 0);
psb_irq_turn_off_dpst(dev);
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
return 0;
}
/*
* It is used to enable VBLANK interrupt
*/


@@ -23,10 +23,6 @@ int psb_irq_postinstall(struct drm_device *dev);
void psb_irq_uninstall(struct drm_device *dev);
irqreturn_t psb_irq_handler(int irq, void *arg);
int psb_irq_enable_dpst(struct drm_device *dev);
int psb_irq_disable_dpst(struct drm_device *dev);
void psb_irq_turn_on_dpst(struct drm_device *dev);
void psb_irq_turn_off_dpst(struct drm_device *dev);
int psb_enable_vblank(struct drm_crtc *crtc);
void psb_disable_vblank(struct drm_crtc *crtc);
u32 psb_get_vblank_counter(struct drm_crtc *crtc);


@@ -53,27 +53,29 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
};
static int hibmc_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_crtc *crtc = state->crtc;
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
u32 src_w = state->src_w >> 16;
u32 src_h = state->src_h >> 16;
u32 src_w = new_plane_state->src_w >> 16;
u32 src_h = new_plane_state->src_h >> 16;
if (!crtc || !fb)
return 0;
crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
if (src_w != state->crtc_w || src_h != state->crtc_h) {
if (src_w != new_plane_state->crtc_w || src_h != new_plane_state->crtc_h) {
drm_dbg_atomic(plane->dev, "scale not support\n");
return -EINVAL;
}
if (state->crtc_x < 0 || state->crtc_y < 0) {
if (new_plane_state->crtc_x < 0 || new_plane_state->crtc_y < 0) {
drm_dbg_atomic(plane->dev, "crtc_x/y of drm_plane state is invalid\n");
return -EINVAL;
}
@@ -81,15 +83,15 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
if (!crtc_state->enable)
return 0;
if (state->crtc_x + state->crtc_w >
if (new_plane_state->crtc_x + new_plane_state->crtc_w >
crtc_state->adjusted_mode.hdisplay ||
state->crtc_y + state->crtc_h >
new_plane_state->crtc_y + new_plane_state->crtc_h >
crtc_state->adjusted_mode.vdisplay) {
drm_dbg_atomic(plane->dev, "visible portion of plane is invalid\n");
return -EINVAL;
}
if (state->fb->pitches[0] % 128 != 0) {
if (new_plane_state->fb->pitches[0] % 128 != 0) {
drm_dbg_atomic(plane->dev, "wrong stride with 128-byte aligned\n");
return -EINVAL;
}
@@ -97,19 +99,20 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
}
static void hibmc_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *state = plane->state;
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
u32 reg;
s64 gpu_addr = 0;
u32 line_l;
struct hibmc_drm_private *priv = to_hibmc_drm_private(plane->dev);
struct drm_gem_vram_object *gbo;
if (!state->fb)
if (!new_state->fb)
return;
gbo = drm_gem_vram_of_gem(state->fb->obj[0]);
gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]);
gpu_addr = drm_gem_vram_offset(gbo);
if (WARN_ON_ONCE(gpu_addr < 0))
@@ -117,9 +120,9 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS);
reg = state->fb->width * (state->fb->format->cpp[0]);
reg = new_state->fb->width * (new_state->fb->format->cpp[0]);
line_l = state->fb->pitches[0];
line_l = new_state->fb->pitches[0];
writel(HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_WIDTH, reg) |
HIBMC_FIELD(HIBMC_CRT_FB_WIDTH_OFFS, line_l),
priv->mmio + HIBMC_CRT_FB_WIDTH);
@@ -128,7 +131,7 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane,
reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
reg &= ~HIBMC_CRT_DISP_CTL_FORMAT_MASK;
reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_FORMAT,
state->fb->format->cpp[0] * 8 / 16);
new_state->fb->format->cpp[0] * 8 / 16);
writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
}
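
The two hibmc hunks above, like the kirin/ade hunks that follow, are instances of the tree-wide rework named in the tag: atomic plane helpers now receive the full struct drm_atomic_state and look up their own state objects, instead of being handed a struct drm_plane_state directly. A minimal sketch of the new-style atomic_check for a hypothetical driver "foo" (placeholder names, not code from this series):

#include <linux/err.h>

#include <drm/drm_atomic.h>
#include <drm/drm_plane.h>

static int foo_plane_atomic_check(struct drm_plane *plane,
				  struct drm_atomic_state *state)
{
	/* Pull this plane's new state out of the top-level atomic state
	 * instead of receiving it as the second argument. */
	struct drm_plane_state *new_plane_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_crtc_state *crtc_state;

	if (!new_plane_state->crtc || !new_plane_state->fb)
		return 0;

	/* The CRTC state comes from the same drm_atomic_state, so
	 * helpers no longer chase the old state->state back-pointer. */
	crtc_state = drm_atomic_get_crtc_state(state, new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	return 0;
}

The same lookup works for the old state via drm_atomic_get_old_plane_state(), which is what atomic_update and atomic_disable paths use when they need the previous framebuffer.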


@@ -549,16 +549,15 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
u32 ch, u32 y, u32 in_h, u32 fmt)
{
struct drm_gem_cma_object *obj = drm_fb_cma_get_gem_obj(fb, 0);
struct drm_format_name_buf format_name;
u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
u32 stride = fb->pitches[0];
u32 addr = (u32)obj->paddr + y * stride;
DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
ch + 1, y, in_h, stride, (u32)obj->paddr);
DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%s)\n",
DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%p4cc)\n",
addr, fb->width, fb->height, fmt,
drm_get_format_name(fb->format->format, &format_name));
&fb->format->format);
/* get reg offset */
reg_ctrl = RD_CH_CTRL(ch);
@@ -758,19 +757,21 @@ static void ade_disable_channel(struct kirin_plane *kplane)
}
static int ade_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
struct drm_atomic_state *state)
{
struct drm_framebuffer *fb = state->fb;
struct drm_crtc *crtc = state->crtc;
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
struct drm_framebuffer *fb = new_plane_state->fb;
struct drm_crtc *crtc = new_plane_state->crtc;
struct drm_crtc_state *crtc_state;
u32 src_x = state->src_x >> 16;
u32 src_y = state->src_y >> 16;
u32 src_w = state->src_w >> 16;
u32 src_h = state->src_h >> 16;
int crtc_x = state->crtc_x;
int crtc_y = state->crtc_y;
u32 crtc_w = state->crtc_w;
u32 crtc_h = state->crtc_h;
u32 src_x = new_plane_state->src_x >> 16;
u32 src_y = new_plane_state->src_y >> 16;
u32 src_w = new_plane_state->src_w >> 16;
u32 src_h = new_plane_state->src_h >> 16;
int crtc_x = new_plane_state->crtc_x;
int crtc_y = new_plane_state->crtc_y;
u32 crtc_w = new_plane_state->crtc_w;
u32 crtc_h = new_plane_state->crtc_h;
u32 fmt;
if (!crtc || !fb)
@@ -780,7 +781,7 @@ static int ade_plane_atomic_check(struct drm_plane *plane,
if (fmt == ADE_FORMAT_UNSUPPORT)
return -EINVAL;
crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -803,19 +804,21 @@ static void ade_plane_atomic_update(struct drm_plane *plane,
}
static void ade_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct drm_plane_state *state = plane->state;
struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
plane);
struct kirin_plane *kplane = to_kirin_plane(plane);
ade_update_channel(kplane, state->fb, state->crtc_x, state->crtc_y,
state->crtc_w, state->crtc_h,
state->src_x >> 16, state->src_y >> 16,
state->src_w >> 16, state->src_h >> 16);
ade_update_channel(kplane, new_state->fb, new_state->crtc_x,
new_state->crtc_y,
new_state->crtc_w, new_state->crtc_h,
new_state->src_x >> 16, new_state->src_y >> 16,
new_state->src_w >> 16, new_state->src_h >> 16);
}
static void ade_plane_atomic_disable(struct drm_plane *plane,
struct drm_plane_state *old_state)
struct drm_atomic_state *state)
{
struct kirin_plane *kplane = to_kirin_plane(plane);


@@ -10228,7 +10228,6 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_format_name_buf format_name;
if (!fb) {
drm_dbg_kms(&i915->drm,
@@ -10239,10 +10238,9 @@ static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
}
drm_dbg_kms(&i915->drm,
"[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s modifier = 0x%llx, visible: %s\n",
"[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %p4cc modifier = 0x%llx, visible: %s\n",
plane->base.base.id, plane->base.name,
fb->base.id, fb->width, fb->height,
drm_get_format_name(fb->format->format, &format_name),
fb->base.id, fb->width, fb->height, &fb->format->format,
fb->modifier, yesno(plane_state->uapi.visible));
drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
plane_state->hw.rotation, plane_state->scaler_id);
@@ -14236,13 +14234,9 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
if (!drm_any_plane_has_format(&dev_priv->drm,
mode_cmd->pixel_format,
mode_cmd->modifier[0])) {
struct drm_format_name_buf format_name;
drm_dbg_kms(&dev_priv->drm,
"unsupported pixel format %s / modifier 0x%llx\n",
drm_get_format_name(mode_cmd->pixel_format,
&format_name),
mode_cmd->modifier[0]);
"unsupported pixel format %p4cc / modifier 0x%llx\n",
&mode_cmd->pixel_format, mode_cmd->modifier[0]);
goto err;
}
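
As in the kirin hunk earlier, these i915 hunks drop drm_get_format_name() and its on-stack struct drm_format_name_buf in favour of the %p4cc specifier added at the top of this series; note that the u32 fourcc is passed by reference. A minimal usage sketch (the helper and its name are illustrative, not part of this diff):

#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

/* Log a framebuffer's pixel format via %p4cc; the fourcc is passed
 * by reference so the printk core can decode it. */
static void log_fb_format(const struct drm_framebuffer *fb)
{
	drm_dbg_kms(fb->dev, "[FB:%d] format = %p4cc\n",
		    fb->base.id, &fb->format->format);
}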
