Merge tag 'drm-intel-next-2022-05-20' of git://anongit.freedesktop.org/drm/drm-intel into drm-intel-gt-next
drm/i915 drm-intel-next -> drm-intel-gt-next cross-merge sync

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

# Conflicts:
#	drivers/gpu/drm/i915/gt/intel_rps.c
#	drivers/gpu/drm/i915/i915_vma.c

From: Jani Nikula <jani.nikula@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/87y1ywbh5y.fsf@intel.com
commit 8ec5c0006c
@@ -41,10 +41,26 @@ properties:

  properties:
    port@0:
      $ref: /schemas/graph.yaml#/properties/port
      $ref: /schemas/graph.yaml#/$defs/port-base
      unevaluatedProperties: false
      description:
        Video port for MIPI DSI input

      properties:
        endpoint:
          $ref: /schemas/media/video-interfaces.yaml#
          unevaluatedProperties: false

          properties:
            data-lanes:
              description: array of physical DSI data lane indexes.
              minItems: 1
              items:
                - const: 1
                - const: 2
                - const: 3
                - const: 4

    port@1:
      $ref: /schemas/graph.yaml#/properties/port
      description:
@@ -0,0 +1,117 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/bridge/lontium,lt9211.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Lontium LT9211 DSI/LVDS/DPI to DSI/LVDS/DPI bridge.

maintainers:
  - Marek Vasut <marex@denx.de>

description: |
  The LT9211 are bridge devices which convert Single/Dual-Link DSI/LVDS
  or Single DPI to Single/Dual-Link DSI/LVDS or Single DPI.

properties:
  compatible:
    enum:
      - lontium,lt9211

  reg:
    maxItems: 1

  interrupts:
    maxItems: 1

  reset-gpios:
    maxItems: 1
    description: GPIO connected to active high RESET pin.

  vccio-supply:
    description: Regulator for 1.8V IO power.

  ports:
    $ref: /schemas/graph.yaml#/properties/ports

    properties:
      port@0:
        $ref: /schemas/graph.yaml#/properties/port
        description:
          Primary MIPI DSI port-1 for MIPI input or
          LVDS port-1 for LVDS input or DPI input.

      port@1:
        $ref: /schemas/graph.yaml#/properties/port
        description:
          Additional MIPI port-2 for MIPI input or LVDS port-2
          for LVDS input. Used in combination with primary
          port-1 to drive higher resolution displays.

      port@2:
        $ref: /schemas/graph.yaml#/properties/port
        description:
          Primary MIPI DSI port-1 for MIPI output or
          LVDS port-1 for LVDS output or DPI output.

      port@3:
        $ref: /schemas/graph.yaml#/properties/port
        description:
          Additional MIPI port-2 for MIPI output or LVDS port-2
          for LVDS output. Used in combination with primary
          port-1 to drive higher resolution displays.

    required:
      - port@0
      - port@2

required:
  - compatible
  - reg
  - vccio-supply
  - ports

additionalProperties: false

examples:
  - |
    #include <dt-bindings/gpio/gpio.h>
    #include <dt-bindings/interrupt-controller/irq.h>

    i2c {
        #address-cells = <1>;
        #size-cells = <0>;

        hdmi-bridge@3b {
            compatible = "lontium,lt9211";
            reg = <0x3b>;

            reset-gpios = <&tlmm 128 GPIO_ACTIVE_HIGH>;
            interrupts-extended = <&tlmm 84 IRQ_TYPE_EDGE_FALLING>;

            vccio-supply = <&lt9211_1v8>;

            ports {
                #address-cells = <1>;
                #size-cells = <0>;

                port@0 {
                    reg = <0>;

                    endpoint {
                        remote-endpoint = <&dsi0_out>;
                    };
                };

                port@2 {
                    reg = <2>;

                    endpoint {
                        remote-endpoint = <&panel_in_lvds>;
                    };
                };
            };
        };
    };

...
@@ -12,11 +12,22 @@ maintainers:

properties:
  compatible:
    enum:
      - solomon,ssd1305fb-i2c
      - solomon,ssd1306fb-i2c
      - solomon,ssd1307fb-i2c
      - solomon,ssd1309fb-i2c
    oneOf:
      # Deprecated compatible strings
      - items:
          - enum:
              - solomon,ssd1305fb-i2c
              - solomon,ssd1306fb-i2c
              - solomon,ssd1307fb-i2c
              - solomon,ssd1309fb-i2c
        deprecated: true
      - items:
          - enum:
              - sinowealth,sh1106
              - solomon,ssd1305
              - solomon,ssd1306
              - solomon,ssd1307
              - solomon,ssd1309

  reg:
    maxItems: 1

@@ -27,9 +38,20 @@ properties:
  reset-gpios:
    maxItems: 1

  # Only required for SPI
  dc-gpios:
    description:
      GPIO connected to the controller's D/C# (Data/Command) pin,
      that is needed for 4-wire SPI to tell the controller if the
      data sent is for a command register or the display data RAM
    maxItems: 1

  vbat-supply:
    description: The supply for VBAT

  # Only required for SPI
  spi-max-frequency: true

  solomon,height:
    $ref: /schemas/types.yaml#/definitions/uint32
    default: 16

@@ -135,7 +157,21 @@ allOf:
      properties:
        compatible:
          contains:
            const: solomon,ssd1305fb-i2c
            const: sinowealth,sh1106
    then:
      properties:
        solomon,dclk-div:
          default: 1
        solomon,dclk-frq:
          default: 5

  - if:
      properties:
        compatible:
          contains:
            enum:
              - solomon,ssd1305-i2c
              - solomon,ssd1305
    then:
      properties:
        solomon,dclk-div:

@@ -147,7 +183,9 @@ allOf:
      properties:
        compatible:
          contains:
            const: solomon,ssd1306fb-i2c
            enum:
              - solomon,ssd1306-i2c
              - solomon,ssd1306
    then:
      properties:
        solomon,dclk-div:

@@ -159,7 +197,9 @@ allOf:
      properties:
        compatible:
          contains:
            const: solomon,ssd1307fb-i2c
            enum:
              - solomon,ssd1307-i2c
              - solomon,ssd1307
    then:
      properties:
        solomon,dclk-div:

@@ -173,7 +213,9 @@ allOf:
      properties:
        compatible:
          contains:
            const: solomon,ssd1309fb-i2c
            enum:
              - solomon,ssd1309-i2c
              - solomon,ssd1309
    then:
      properties:
        solomon,dclk-div:

@@ -189,15 +231,15 @@ examples:
        #address-cells = <1>;
        #size-cells = <0>;

        ssd1307: oled@3c {
            compatible = "solomon,ssd1307fb-i2c";
        ssd1307_i2c: oled@3c {
            compatible = "solomon,ssd1307";
            reg = <0x3c>;
            pwms = <&pwm 4 3000>;
            reset-gpios = <&gpio2 7>;
        };

        ssd1306: oled@3d {
            compatible = "solomon,ssd1306fb-i2c";
        ssd1306_i2c: oled@3d {
            compatible = "solomon,ssd1306";
            reg = <0x3c>;
            pwms = <&pwm 4 3000>;
            reset-gpios = <&gpio2 7>;

@@ -207,3 +249,30 @@ examples:
            solomon,lookup-table = /bits/ 8 <0x3f 0x3f 0x3f 0x3f>;
        };
    };
  - |
    spi {
        #address-cells = <1>;
        #size-cells = <0>;

        ssd1307_spi: oled@0 {
            compatible = "solomon,ssd1307";
            reg = <0x0>;
            pwms = <&pwm 4 3000>;
            reset-gpios = <&gpio2 7>;
            dc-gpios = <&gpio2 8>;
            spi-max-frequency = <10000000>;
        };

        ssd1306_spi: oled@1 {
            compatible = "solomon,ssd1306";
            reg = <0x1>;
            pwms = <&pwm 4 3000>;
            reset-gpios = <&gpio2 7>;
            dc-gpios = <&gpio2 8>;
            spi-max-frequency = <10000000>;
            solomon,com-lrremap;
            solomon,com-invdir;
            solomon,com-offset = <32>;
            solomon,lookup-table = /bits/ 8 <0x3f 0x3f 0x3f 0x3f>;
        };
    };
@@ -1130,6 +1130,8 @@ patternProperties:
    description: Sinlinx Electronics Technology Co., LTD
  "^sinovoip,.*":
    description: SinoVoip Co., Ltd
  "^sinowealth,.*":
    description: SINO WEALTH Electronic Ltd.
  "^sipeed,.*":
    description: Shenzhen Sipeed Technology Co., Ltd.
  "^sirf,.*":
@@ -105,6 +105,7 @@ structure to represent a mediated device's driver::

     struct mdev_driver {
             int  (*probe)  (struct mdev_device *dev);
             void (*remove) (struct mdev_device *dev);
             struct attribute_group **supported_type_groups;
             struct device_driver driver;
     };

@@ -119,33 +120,15 @@ to register and unregister itself with the core driver:

     extern void mdev_unregister_driver(struct mdev_driver *drv);

The mediated bus driver is responsible for adding mediated devices to the VFIO
group when devices are bound to the driver and removing mediated devices from
the VFIO group when devices are unbound from the driver.


Physical Device Driver Interface
--------------------------------

The physical device driver interface provides the mdev_parent_ops[3] structure
to define the APIs to manage work in the mediated core driver that is related
to the physical device.

The structures in the mdev_parent_ops structure are as follows:

* dev_attr_groups: attributes of the parent device
* mdev_attr_groups: attributes of the mediated device
* supported_config: attributes to define supported configurations
* device_driver: device driver to bind for mediated device instances

The mdev_parent_ops also still has various function pointers. These exist
for historical reasons only and shall not be used for new drivers.
The mediated bus driver's probe function should create a vfio_device on top of
the mdev_device and connect it to an appropriate implementation of
vfio_device_ops.

When a driver wants to add the GUID creation sysfs to an existing device it has
probed, it should call::

     extern int mdev_register_device(struct device *dev,
                                     const struct mdev_parent_ops *ops);
                                     struct mdev_driver *mdev_driver);

This will provide the 'mdev_supported_types/XX/create' files which can then be
used to trigger the creation of a mdev_device. The created mdev_device will be
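For orientation, a minimal sketch of a driver written against this reworked
interface might look as follows (hypothetical example only, not part of this
commit; my_mdev_probe, my_vfio_dev_ops and my_type_groups are stand-in names,
and error unwinding is omitted for brevity)::

     static int my_mdev_probe(struct mdev_device *mdev)
     {
             struct vfio_device *vdev;

             vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
             if (!vdev)
                     return -ENOMEM;

             /* Create the vfio_device on top of the mdev_device and
              * connect it to the driver's vfio_device_ops. */
             vfio_init_group_dev(vdev, &mdev->dev, &my_vfio_dev_ops);
             return vfio_register_group_dev(vdev);
     }

     static struct mdev_driver my_mdev_driver = {
             .driver = {
                     .name  = "my_mdev",
                     .owner = THIS_MODULE,
             },
             .probe                 = my_mdev_probe,
             .supported_type_groups = my_type_groups,
     };

     /* On the physical device's probe path: */
     ret = mdev_register_device(dev, &my_mdev_driver);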
@@ -558,6 +558,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = {
	INTEL_ADLP_IDS(&gen11_early_ops),
	INTEL_ADLN_IDS(&gen11_early_ops),
	INTEL_RPLS_IDS(&gen11_early_ops),
	INTEL_RPLP_IDS(&gen11_early_ops),
};

struct resource intel_graphics_stolen_res __ro_after_init = DEFINE_RES_MEM(0, 0);
@@ -216,7 +216,8 @@ static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, write, fence) {
	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)

@@ -660,12 +661,24 @@ static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
				       enum dma_data_direction direction)
{
	struct sg_table *sg_table;
	signed long ret;

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (IS_ERR_OR_NULL(sg_table))
		return sg_table;

	if (!IS_ERR_OR_NULL(sg_table))
		mangle_sg_table(sg_table);
	if (!dma_buf_attachment_is_dynamic(attach)) {
		ret = dma_resv_wait_timeout(attach->dmabuf->resv,
					    DMA_RESV_USAGE_KERNEL, true,
					    MAX_SCHEDULE_TIMEOUT);
		if (ret < 0) {
			attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
							   direction);
			return ERR_PTR(ret);
		}
	}

	mangle_sg_table(sg_table);
	return sg_table;
}

@@ -1124,7 +1137,8 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
	long ret;

	/* Wait on any implicit rendering fences */
	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
				    true, MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

@@ -44,12 +44,12 @@

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer.  A reservation object
 * can have attached one exclusive fence (normally associated with
 * write operations) or N shared fences (read operations).  The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 * The reservation object provides a mechanism to manage a container of
 * dma_fence objects associated with a resource. A reservation object
 * can have any number of fences attached to it. Each fence carries a usage
 * parameter determining how the operation represented by the fence is using
 * the resource. The RCU mechanism is used to protect read access to fences
 * from locked write-side updates.
 *
 * See struct dma_resv for more details.
 */
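As an illustrative sketch of the reworked interface described above (driver
glue and error handling elided; "resv" and "fence" stand for objects the
caller already owns):

	int ret;

	dma_resv_lock(resv, NULL);

	/* Make room first, then attach the fence with an explicit usage. */
	ret = dma_resv_reserve_fences(resv, 1);
	if (!ret)
		dma_resv_add_fence(resv, fence, DMA_RESV_USAGE_WRITE);

	dma_resv_unlock(resv);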
@@ -57,39 +57,59 @@
DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);

/* Mask for the lower fence pointer bits */
#define DMA_RESV_LIST_MASK	0x3

struct dma_resv_list {
	struct rcu_head rcu;
	u32 shared_count, shared_max;
	struct dma_fence __rcu *shared[];
	u32 num_fences, max_fences;
	struct dma_fence __rcu *table[];
};

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
/* Extract the fence and usage flags from an RCU protected entry in the list. */
static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
				struct dma_resv *resv, struct dma_fence **fence,
				enum dma_resv_usage *usage)
{
	long tmp;

	tmp = (long)rcu_dereference_check(list->table[index],
					  resv ? dma_resv_held(resv) : true);
	*fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
	if (usage)
		*usage = tmp & DMA_RESV_LIST_MASK;
}

/* Set the fence and usage flags at the specific index in the list. */
static void dma_resv_list_set(struct dma_resv_list *list,
			      unsigned int index,
			      struct dma_fence *fence,
			      enum dma_resv_usage usage)
{
	long tmp = ((long)fence) | usage;

	RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
}
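The two helpers above rely on pointer tagging: dma_fence objects are allocated
with at least 4-byte alignment, so the two low bits of each table entry are
free to carry the enum dma_resv_usage value (covered by DMA_RESV_LIST_MASK).
Schematically, mirroring dma_resv_list_set()/dma_resv_list_entry() above:

	/* Illustration only. Pack the usage into the spare low bits ... */
	long tmp = ((long)fence) | DMA_RESV_USAGE_READ;

	/* ... and later recover both pieces from the tagged word. */
	struct dma_fence *f = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
	enum dma_resv_usage usage = tmp & DMA_RESV_LIST_MASK;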
/*
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 * max_fences.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);
	list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) /
		sizeof(*list->table);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
/* Free a dma_resv_list and make sure to drop all references. */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

@@ -97,9 +117,12 @@ static void dma_resv_list_free(struct dma_resv_list *list)
	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));
	for (i = 0; i < list->num_fences; ++i) {
		struct dma_fence *fence;

		dma_resv_list_entry(list, i, NULL, &fence, NULL);
		dma_fence_put(fence);
	}
	kfree_rcu(list, rcu);
}

@@ -110,10 +133,8 @@ static void dma_resv_list_free(struct dma_resv_list *list)
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
	RCU_INIT_POINTER(obj->fences, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

@@ -123,46 +144,32 @@ EXPORT_SYMBOL(dma_resv_init);
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references must have
	 * been released to it, so no need to be protected with rcu.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);

static inline struct dma_fence *
dma_resv_excl_fence(struct dma_resv *obj)
/* Dereference the fences while ensuring RCU rules */
static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fence_excl, dma_resv_held(obj));
}

static inline struct dma_resv_list *dma_resv_shared_list(struct dma_resv *obj)
{
	return rcu_dereference_check(obj->fence, dma_resv_held(obj));
	return rcu_dereference_check(obj->fences, dma_resv_held(obj));
}

/**
 * dma_resv_reserve_fences - Reserve space to add shared fences to
 * a dma_resv.
 * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence().  Must
 * be called with @obj locked through dma_resv_lock().
 * Should be called before dma_resv_add_fence(). Must be called with @obj
 * locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_shared_fence(). This is validated
 * when CONFIG_DEBUG_MUTEXES is enabled.
 * at any time before calling dma_resv_add_fence(). This is validated when
 * CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno

@@ -174,11 +181,11 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
	old = dma_resv_fences_list(obj);
	if (old && old->max_fences) {
		if ((old->num_fences + num_fences) <= old->max_fences)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
		max = max(old->num_fences + num_fences, old->max_fences * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

@@ -193,27 +200,27 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
	for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
		enum dma_resv_usage usage;
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		dma_resv_list_entry(old, i, obj, &fence, &usage);
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
			RCU_INIT_POINTER(new->table[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
			dma_resv_list_set(new, j++, fence, usage);
	}
	new->shared_count = j;
	new->num_fences = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * active (unsignaled) fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);
	rcu_assign_pointer(obj->fences, new);

	if (!old)
		return 0;

@@ -222,7 +229,7 @@ int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
		fence = rcu_dereference_protected(new->table[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}

@@ -234,37 +241,39 @@ EXPORT_SYMBOL(dma_resv_reserve_fences);

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * dma_resv_reset_max_fences - reset fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * Reset the number of pre-reserved fence slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_fences(). See also
 * &dma_resv_list.shared_max.
 * &dma_resv_list.max_fences.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
void dma_resv_reset_max_fences(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);
	struct dma_resv_list *fences = dma_resv_fences_list(obj);

	dma_resv_assert_held(obj);

	/* Test shared fence slot reservation */
	/* Test fence slot reservation */
	if (fences)
		fences->shared_max = fences->shared_count;
		fences->max_fences = fences->num_fences;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
EXPORT_SYMBOL(dma_resv_reset_max_fences);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * dma_resv_add_fence - Add a fence to the dma_resv obj
 * @obj: the reservation object
 * @fence: the shared fence to add
 * @fence: the fence to add
 * @usage: how the fence is used, see enum dma_resv_usage
 *
 * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
 * Add a fence to a slot, @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_fences() has been called.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
			enum dma_resv_usage usage)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;

@@ -279,39 +288,36 @@ void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
	 */
	WARN_ON(dma_fence_is_container(fence));

	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);
	fobj = dma_resv_fences_list(obj);
	count = fobj->num_fences;

	for (i = 0; i < count; ++i) {
		enum dma_resv_usage old_usage;

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
		dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
		if ((old->context == fence->context && old_usage >= usage) ||
		    dma_fence_is_signaled(old)) {
			dma_resv_list_set(fobj, i, fence, usage);
			dma_fence_put(old);
			return;
		}
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	BUG_ON(fobj->num_fences >= fobj->max_fences);
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
	dma_resv_list_set(fobj, i, fence, usage);
	/* pointer update must be visible before we extend the num_fences */
	smp_store_mb(fobj->num_fences, count);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);
EXPORT_SYMBOL(dma_resv_add_fence);

/**
 * dma_resv_replace_fences - replace fences in the dma_resv obj
 * @obj: the reservation object
 * @context: the context of the fences to replace
 * @replacement: the new fence to use instead
 * @usage: how the new fence is used, see enum dma_resv_usage
 *
 * Replace fences with a specified context with a new fence. Only valid if the
 * operation represented by the original fence no longer has access to the

@@ -321,107 +327,66 @@
 * update fence which makes the resource inaccessible.
 */
void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
			     struct dma_fence *replacement)
			     struct dma_fence *replacement,
			     enum dma_resv_usage usage)
{
	struct dma_resv_list *list;
	struct dma_fence *old;
	unsigned int i;

	dma_resv_assert_held(obj);

	write_seqcount_begin(&obj->seq);
	list = dma_resv_fences_list(obj);
	for (i = 0; list && i < list->num_fences; ++i) {
		struct dma_fence *old;

	old = dma_resv_excl_fence(obj);
	if (old->context == context) {
		RCU_INIT_POINTER(obj->fence_excl, dma_fence_get(replacement));
		dma_fence_put(old);
	}

	list = dma_resv_shared_list(obj);
	for (i = 0; list && i < list->shared_count; ++i) {
		old = rcu_dereference_protected(list->shared[i],
						dma_resv_held(obj));
		dma_resv_list_entry(list, i, obj, &old, NULL);
		if (old->context != context)
			continue;

		rcu_assign_pointer(list->shared[i], dma_fence_get(replacement));
		dma_resv_list_set(list, i, replacement, usage);
		dma_fence_put(old);
	}

	write_seqcount_end(&obj->seq);
}
EXPORT_SYMBOL(dma_resv_replace_fences);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
 * See also &dma_resv.fence_excl for a discussion of the semantics.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);

	dma_resv_assert_held(obj);

	dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	write_seqcount_end(&obj->seq);

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);

/* Restart the iterator by initializing all the necessary fields, but not the
 * relation to the dma_resv object. */
/* Restart the unlocked iteration by initializing the cursor object. */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->seq = read_seqcount_begin(&cursor->obj->seq);
	cursor->index = -1;
	cursor->shared_count = 0;
	if (cursor->all_fences) {
		cursor->fences = dma_resv_shared_list(cursor->obj);
		if (cursor->fences)
			cursor->shared_count = cursor->fences->shared_count;
	} else {
		cursor->fences = NULL;
	}
	cursor->index = 0;
	cursor->num_fences = 0;
	cursor->fences = dma_resv_fences_list(cursor->obj);
	if (cursor->fences)
		cursor->num_fences = cursor->fences->num_fences;
	cursor->is_restarted = true;
}

/* Walk to the next not signaled fence and grab a reference to it */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	struct dma_resv *obj = cursor->obj;
	if (!cursor->fences)
		return;

	do {
		/* Drop the reference from the previous round */
		dma_fence_put(cursor->fence);

		if (cursor->index == -1) {
			cursor->fence = dma_resv_excl_fence(obj);
			cursor->index++;
			if (!cursor->fence)
				continue;

		} else if (!cursor->fences ||
			   cursor->index >= cursor->shared_count) {
		if (cursor->index >= cursor->num_fences) {
			cursor->fence = NULL;
			break;

		} else {
			struct dma_resv_list *fences = cursor->fences;
			unsigned int idx = cursor->index++;

			cursor->fence = rcu_dereference(fences->shared[idx]);
		}

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &cursor->fence,
				    &cursor->fence_usage);
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
		if (!cursor->fence) {
			dma_resv_iter_restart_unlocked(cursor);
			continue;
		}

		if (!dma_fence_is_signaled(cursor->fence) &&
		    cursor->usage >= cursor->fence_usage)
			break;
	} while (true);
}

@@ -444,7 +409,7 @@ struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	} while (dma_resv_fences_list(cursor->obj) != cursor->fences);
	rcu_read_unlock();

	return cursor->fence;

@@ -467,13 +432,13 @@ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
	restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	} while (dma_resv_fences_list(cursor->obj) != cursor->fences);
	rcu_read_unlock();

	return cursor->fence;

@@ -496,15 +461,9 @@ struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
	dma_resv_assert_held(cursor->obj);

	cursor->index = 0;
	if (cursor->all_fences)
		cursor->fences = dma_resv_shared_list(cursor->obj);
	else
		cursor->fences = NULL;

	fence = dma_resv_excl_fence(cursor->obj);
	if (!fence)
		fence = dma_resv_iter_next(cursor);
	cursor->fences = dma_resv_fences_list(cursor->obj);

	fence = dma_resv_iter_next(cursor);
	cursor->is_restarted = true;
	return fence;
}

@@ -519,17 +478,22 @@ EXPORT_SYMBOL_GPL(dma_resv_iter_first);
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
	unsigned int idx;
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->is_restarted = false;
	if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
		return NULL;

	idx = cursor->index++;
	return rcu_dereference_protected(cursor->fences->shared[idx],
					 dma_resv_held(cursor->obj));
	do {
		if (!cursor->fences ||
		    cursor->index >= cursor->fences->num_fences)
			return NULL;

		dma_resv_list_entry(cursor->fences, cursor->index++,
				    cursor->obj, &fence, &cursor->fence_usage);
	} while (cursor->fence_usage > cursor->usage);

	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);

@@ -544,60 +508,43 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f, *excl;
	struct dma_fence *f;

	dma_resv_assert_held(dst);

	list = NULL;
	excl = NULL;

	dma_resv_iter_begin(&cursor, src, true);
	dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, f) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);
			dma_fence_put(excl);

			if (cursor.shared_count) {
				list = dma_resv_list_alloc(cursor.shared_count);
				if (!list) {
					dma_resv_iter_end(&cursor);
					return -ENOMEM;
				}

				list->shared_count = 0;

			} else {
				list = NULL;
			list = dma_resv_list_alloc(cursor.num_fences);
			if (!list) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
			excl = NULL;
			list->num_fences = 0;
		}

		dma_fence_get(f);
		if (dma_resv_iter_is_exclusive(&cursor))
			excl = f;
		else
			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
		dma_resv_list_set(list, list->num_fences++, f,
				  dma_resv_iter_usage(&cursor));
	}
	dma_resv_iter_end(&cursor);

	write_seqcount_begin(&dst->seq);
	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
	write_seqcount_end(&dst->seq);

	list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
	dma_resv_list_free(list);
	dma_fence_put(excl);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * dma_resv_get_fences - Get an object's fences
 * fences without update side lock held
 * @obj: the reservation object
 * @write: true if we should return all fences
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @num_fences: the number of fences returned
 * @fences: the array of fence ptrs returned (array is krealloc'd to the
 * required size, and must be freed by caller)

@@ -605,7 +552,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
 * Retrieve all fences from the reservation object.
 * Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, bool write,
int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
			unsigned int *num_fences, struct dma_fence ***fences)
{
	struct dma_resv_iter cursor;

@@ -614,7 +561,7 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
	*num_fences = 0;
	*fences = NULL;

	dma_resv_iter_begin(&cursor, obj, write);
	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		if (dma_resv_iter_is_restarted(&cursor)) {

@@ -623,7 +570,7 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
			while (*num_fences)
				dma_fence_put((*fences)[--(*num_fences)]);

			count = cursor.shared_count + 1;
			count = cursor.num_fences + 1;

			/* Eventually re-allocate the array */
			*fences = krealloc_array(*fences, count,

@@ -646,7 +593,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences);
/**
 * dma_resv_get_singleton - Get a single fence for all the fences
 * @obj: the reservation object
 * @write: true if we should return all fences
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @fence: the resulting fence
 *
 * Get a single fence representing all the fences inside the resv object.

@@ -658,7 +605,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences);
 *
 * Returns 0 on success and negative error values on failure.
 */
int dma_resv_get_singleton(struct dma_resv *obj, bool write,
int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
			   struct dma_fence **fence)
{
	struct dma_fence_array *array;

@@ -666,7 +613,7 @@ int dma_resv_get_singleton(struct dma_resv *obj, bool write,
	unsigned count;
	int r;

	r = dma_resv_get_fences(obj, write, &count, &fences);
	r = dma_resv_get_fences(obj, usage, &count, &fences);
	if (r)
		return r;

@@ -697,10 +644,9 @@ int dma_resv_get_singleton(struct dma_resv *obj, bool write,
EXPORT_SYMBOL_GPL(dma_resv_get_singleton);

/**
 * dma_resv_wait_timeout - Wait on reservation's objects
 * shared and/or exclusive fences.
 * dma_resv_wait_timeout - Wait on reservation's objects fences
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @usage: controls which fences to include, see enum dma_resv_usage.
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *

@@ -710,14 +656,14 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
			   bool intr, unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, wait_all);
	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		ret = dma_fence_wait_timeout(fence, intr, ret);

@@ -737,8 +683,7 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 * @usage: controls which fences to include, see enum dma_resv_usage.
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.

@@ -747,12 +692,12 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
 *
 * True if all fences signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, test_all);
	dma_resv_iter_begin(&cursor, obj, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_resv_iter_end(&cursor);
		return false;

@@ -772,13 +717,13 @@ EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
 */
void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
{
	static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, obj, true, fence) {
	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
		seq_printf(seq, "\t%s fence:",
			   dma_resv_iter_is_exclusive(&cursor) ?
			   "Exclusive" : "Shared");
			   usage[dma_resv_iter_usage(&cursor)]);
		dma_fence_describe(fence, seq);
	}
}
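A note on the filtering semantics visible in the iterators above: the usage
values are ordered (KERNEL < WRITE < READ < BOOKKEEP) and a cursor returns
every fence whose usage is at or below the requested level (see the
"cursor->fence_usage > cursor->usage" check), so the enum doubles as an
inclusive filter. A caller might use that as follows (sketch only, with "resv"
standing for some dma_resv object the caller holds a reference on):

	/* Wait only for fences kernel memory management depends on. */
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL, false,
			      MAX_SCHEDULE_TIMEOUT);

	/* Wait for everything, including bookkeeping fences. */
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP, false,
			      MAX_SCHEDULE_TIMEOUT);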
@@ -58,8 +58,9 @@ static int sanitycheck(void *arg)
	return r;
}

static int test_signaling(void *arg, bool shared)
static int test_signaling(void *arg)
{
	enum dma_resv_usage usage = (unsigned long)arg;
	struct dma_resv resv;
	struct dma_fence *f;
	int r;

@@ -81,18 +82,14 @@ static int test_signaling(void *arg, bool shared)
		goto err_unlock;
	}

	if (shared)
		dma_resv_add_shared_fence(&resv, f);
	else
		dma_resv_add_excl_fence(&resv, f);

	if (dma_resv_test_signaled(&resv, shared)) {
	dma_resv_add_fence(&resv, f, usage);
	if (dma_resv_test_signaled(&resv, usage)) {
		pr_err("Resv unexpectedly signaled\n");
		r = -EINVAL;
		goto err_unlock;
	}
	dma_fence_signal(f);
	if (!dma_resv_test_signaled(&resv, shared)) {
	if (!dma_resv_test_signaled(&resv, usage)) {
		pr_err("Resv not reporting signaled\n");
		r = -EINVAL;
		goto err_unlock;

@@ -105,18 +102,9 @@ err_free:
	return r;
}

static int test_excl_signaling(void *arg)
{
	return test_signaling(arg, false);
}

static int test_shared_signaling(void *arg)
{
	return test_signaling(arg, true);
}

static int test_for_each(void *arg, bool shared)
static int test_for_each(void *arg)
{
	enum dma_resv_usage usage = (unsigned long)arg;
	struct dma_resv_iter cursor;
	struct dma_fence *f, *fence;
	struct dma_resv resv;

@@ -139,13 +127,10 @@ static int test_for_each(void *arg, bool shared)
		goto err_unlock;
	}

	if (shared)
		dma_resv_add_shared_fence(&resv, f);
	else
		dma_resv_add_excl_fence(&resv, f);
	dma_resv_add_fence(&resv, f, usage);

	r = -ENOENT;
	dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
	dma_resv_for_each_fence(&cursor, &resv, usage, fence) {
		if (!r) {
			pr_err("More than one fence found\n");
			r = -EINVAL;

@@ -156,7 +141,7 @@ static int test_for_each(void *arg, bool shared)
			r = -EINVAL;
			goto err_unlock;
		}
		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
		if (dma_resv_iter_usage(&cursor) != usage) {
			pr_err("Unexpected fence usage\n");
			r = -EINVAL;
			goto err_unlock;

@@ -176,18 +161,9 @@ err_free:
	return r;
}

static int test_excl_for_each(void *arg)
{
	return test_for_each(arg, false);
}

static int test_shared_for_each(void *arg)
{
	return test_for_each(arg, true);
}

static int test_for_each_unlocked(void *arg, bool shared)
static int test_for_each_unlocked(void *arg)
{
	enum dma_resv_usage usage = (unsigned long)arg;
	struct dma_resv_iter cursor;
	struct dma_fence *f, *fence;
	struct dma_resv resv;

@@ -211,14 +187,11 @@ static int test_for_each_unlocked(void *arg, bool shared)
		goto err_free;
	}

	if (shared)
		dma_resv_add_shared_fence(&resv, f);
	else
		dma_resv_add_excl_fence(&resv, f);
	dma_resv_add_fence(&resv, f, usage);
	dma_resv_unlock(&resv);

	r = -ENOENT;
	dma_resv_iter_begin(&cursor, &resv, shared);
	dma_resv_iter_begin(&cursor, &resv, usage);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (!r) {
			pr_err("More than one fence found\n");

@@ -234,7 +207,7 @@ static int test_for_each_unlocked(void *arg, bool shared)
			r = -EINVAL;
			goto err_iter_end;
		}
		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
		if (dma_resv_iter_usage(&cursor) != usage) {
			pr_err("Unexpected fence usage\n");
			r = -EINVAL;
			goto err_iter_end;

@@ -244,7 +217,7 @@ static int test_for_each_unlocked(void *arg, bool shared)
		if (r == -ENOENT) {
			r = -EINVAL;
			/* That should trigger a restart */
			cursor.seq--;
			cursor.fences = (void*)~0;
		} else if (r == -EINVAL) {
			r = 0;
		}

@@ -260,18 +233,9 @@ err_free:
	return r;
}

static int test_excl_for_each_unlocked(void *arg)
{
	return test_for_each_unlocked(arg, false);
}

static int test_shared_for_each_unlocked(void *arg)
{
	return test_for_each_unlocked(arg, true);
}

static int test_get_fences(void *arg, bool shared)
static int test_get_fences(void *arg)
{
	enum dma_resv_usage usage = (unsigned long)arg;
	struct dma_fence *f, **fences = NULL;
	struct dma_resv resv;
	int r, i;

@@ -294,13 +258,10 @@ static int test_get_fences(void *arg, bool shared)
		goto err_resv;
	}

	if (shared)
		dma_resv_add_shared_fence(&resv, f);
	else
		dma_resv_add_excl_fence(&resv, f);
	dma_resv_add_fence(&resv, f, usage);
	dma_resv_unlock(&resv);

	r = dma_resv_get_fences(&resv, shared, &i, &fences);
	r = dma_resv_get_fences(&resv, usage, &i, &fences);
	if (r) {
		pr_err("get_fences failed\n");
		goto err_free;

@@ -322,30 +283,24 @@ err_resv:
	return r;
}

static int test_excl_get_fences(void *arg)
{
	return test_get_fences(arg, false);
}

static int test_shared_get_fences(void *arg)
{
	return test_get_fences(arg, true);
}

int dma_resv(void)
{
	static const struct subtest tests[] = {
		SUBTEST(sanitycheck),
		SUBTEST(test_excl_signaling),
		SUBTEST(test_shared_signaling),
		SUBTEST(test_excl_for_each),
		SUBTEST(test_shared_for_each),
		SUBTEST(test_excl_for_each_unlocked),
		SUBTEST(test_shared_for_each_unlocked),
		SUBTEST(test_excl_get_fences),
		SUBTEST(test_shared_get_fences),
		SUBTEST(test_signaling),
		SUBTEST(test_for_each),
		SUBTEST(test_for_each_unlocked),
		SUBTEST(test_get_fences),
	};
	enum dma_resv_usage usage;
	int r;

	spin_lock_init(&fence_lock);
	return subtests(tests, NULL);
	for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_BOOKKEEP;
	     ++usage) {
		r = subtests(tests, (void *)(unsigned long)usage);
		if (r)
			return r;
	}
	return 0;
}
@@ -280,6 +280,7 @@ config DRM_AMDGPU
	select HWMON
	select BACKLIGHT_CLASS_DEVICE
	select INTERVAL_TREE
	select DRM_BUDDY
	help
	  Choose this option if you have a recent AMD Radeon graphics card.

@@ -263,7 +263,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
	 */
	replacement = dma_fence_get_stub();
	dma_resv_replace_fences(bo->tbo.base.resv, ef->base.context,
				replacement);
				replacement, DMA_RESV_USAGE_READ);
	dma_fence_put(replacement);
	return 0;
}

@@ -2447,6 +2447,8 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
		struct amdgpu_bo *bo = mem->bo;
		uint32_t domain = mem->domain;
		struct kfd_mem_attachment *attachment;
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		total_size += amdgpu_bo_size(bo);

@@ -2461,10 +2463,13 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
				goto validate_map_fail;
			}
		}
		ret = amdgpu_sync_fence(&sync_obj, bo->tbo.moving);
		if (ret) {
			pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
			goto validate_map_fail;
		dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
					DMA_RESV_USAGE_KERNEL, fence) {
			ret = amdgpu_sync_fence(&sync_obj, fence);
			if (ret) {
				pr_debug("Memory eviction: Sync BO fence failed. Try again\n");
				goto validate_map_fail;
			}
		}
		list_for_each_entry(attachment, &mem->attachments, list) {
			if (!attachment->is_mapped)
@@ -34,7 +34,6 @@ struct amdgpu_fpriv;
struct amdgpu_bo_list_entry {
	struct ttm_validate_buffer	tv;
	struct amdgpu_bo_va		*bo_va;
	struct dma_fence_chain		*chain;
	uint32_t			priority;
	struct page			**user_pages;
	bool				user_invalidated;
@@ -55,8 +55,8 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One for TTM and one for the CS job */
	p->uf_entry.tv.num_shared = 2;
	/* One for TTM and two for the CS job */
	p->uf_entry.tv.num_shared = 3;

	drm_gem_object_put(gobj);

@@ -574,14 +574,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		e->bo_va = amdgpu_vm_bo_find(vm, bo);

		if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
			e->chain = dma_fence_chain_alloc();
			if (!e->chain) {
				r = -ENOMEM;
				goto error_validate;
			}
		}
	}

	/* Move fence waiting after getting reservation lock of

@@ -642,13 +634,8 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
	}

error_validate:
	if (r) {
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			dma_fence_chain_free(e->chain);
			e->chain = NULL;
		}
	if (r)
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
	}
out:
	return r;
}

@@ -688,17 +675,9 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
	unsigned i;

	if (error && backoff) {
		struct amdgpu_bo_list_entry *e;

		amdgpu_bo_list_for_each_entry(e, parser->bo_list) {
			dma_fence_chain_free(e->chain);
			e->chain = NULL;
		}

	if (error && backoff)
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);

@@ -1272,29 +1251,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct dma_resv *resv = e->tv.bo->base.resv;
		struct dma_fence_chain *chain = e->chain;
		struct dma_resv_iter cursor;
		struct dma_fence *fence;

		if (!chain)
			continue;

		/*
		 * Temporary workaround for dma_resv shortcomings by wrapping
		 * up the submission in a dma_fence_chain and adding it as the
		 * exclusive fence.
		 *
		 * TODO: Remove together with dma_resv rework.
		 */
		dma_resv_for_each_fence(&cursor, resv, false, fence) {
			break;
		}
		dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
		rcu_assign_pointer(resv->fence_excl, &chain->base);
		e->chain = NULL;
	}
	/* Make sure all BOs are remembered as writers */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 0;

	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
	mutex_unlock(&p->adev->notifier_lock);
@@ -200,8 +200,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
		goto unpin;
	}

	/* TODO: Unify this with other drivers */
	r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
	r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
				&work->shared_count,
				&work->shared);
	if (unlikely(r != 0)) {
@@ -102,21 +102,9 @@ static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int r;

	/* pin buffer into GTT */
	r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
	if (r)
		return r;

	if (bo->tbo.moving) {
		r = dma_fence_wait(bo->tbo.moving, true);
		if (r) {
			amdgpu_bo_unpin(bo);
			return r;
		}
	}
	return 0;
	return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
}

/**
@@ -526,7 +526,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
				    true, timeout);

	/* ret == 0 means not signaled,
	 * ret > 0 means signaled
@@ -111,7 +111,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
	struct dma_fence *fence;
	int r;

	r = dma_resv_get_singleton(resv, true, &fence);
	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto fallback;

@@ -139,7 +139,8 @@ fallback:
	/* Not enough memory for the delayed delete, as last resort
	 * block for all the fences to complete.
	 */
	dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
			      false, MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}

@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,

	mmu_interval_set_seq(mni, cur_seq);

	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
				  MAX_SCHEDULE_TIMEOUT);
	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP,
				  false, MAX_SCHEDULE_TIMEOUT);
	mutex_unlock(&adev->notifier_lock);
	if (r <= 0)
		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
@@ -612,9 +612,8 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
		if (unlikely(r))
			goto fail_unreserve;

		amdgpu_bo_fence(bo, fence, false);
		dma_fence_put(bo->tbo.moving);
		bo->tbo.moving = dma_fence_get(fence);
		dma_resv_add_fence(bo->tbo.base.resv, fence,
				   DMA_RESV_USAGE_KERNEL);
		dma_fence_put(fence);
	}
	if (!bp->resv)

@@ -761,6 +760,11 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
	if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
		return -EPERM;

	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  false, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	kptr = amdgpu_bo_kptr(bo);
	if (kptr) {
		if (ptr)

@@ -768,11 +772,6 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
		return 0;
	}

	r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
				  MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
	if (r)
		return r;

@@ -1399,10 +1398,8 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
		return;
	}

	if (shared)
		dma_resv_add_shared_fence(resv, fence);
	else
		dma_resv_add_excl_fence(resv, fence);
	dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
			   DMA_RESV_USAGE_WRITE);
}

/**
@@ -30,12 +30,15 @@
#include <drm/ttm/ttm_resource.h>
#include <drm/ttm/ttm_range_manager.h>

#include "amdgpu_vram_mgr.h"

/* state back for walking over vram_mgr and gtt_mgr allocations */
struct amdgpu_res_cursor {
	uint64_t		start;
	uint64_t		size;
	uint64_t		remaining;
	struct drm_mm_node	*node;
	void			*node;
	uint32_t		mem_type;
};

/**
@@ -52,27 +55,63 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
				    uint64_t start, uint64_t size,
				    struct amdgpu_res_cursor *cur)
{
	struct drm_buddy_block *block;
	struct list_head *head, *next;
	struct drm_mm_node *node;

	if (!res || res->mem_type == TTM_PL_SYSTEM) {
		cur->start = start;
		cur->size = size;
		cur->remaining = size;
		cur->node = NULL;
		WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
		return;
	}
	if (!res)
		goto fallback;

	BUG_ON(start + size > res->num_pages << PAGE_SHIFT);

	node = to_ttm_range_mgr_node(res)->mm_nodes;
	while (start >= node->size << PAGE_SHIFT)
		start -= node++->size << PAGE_SHIFT;
	cur->mem_type = res->mem_type;

	cur->start = (node->start << PAGE_SHIFT) + start;
	cur->size = min((node->size << PAGE_SHIFT) - start, size);
	switch (cur->mem_type) {
	case TTM_PL_VRAM:
		head = &to_amdgpu_vram_mgr_resource(res)->blocks;

		block = list_first_entry_or_null(head,
						 struct drm_buddy_block,
						 link);
		if (!block)
			goto fallback;

		while (start >= amdgpu_vram_mgr_block_size(block)) {
			start -= amdgpu_vram_mgr_block_size(block);

			next = block->link.next;
			if (next != head)
				block = list_entry(next, struct drm_buddy_block, link);
		}

		cur->start = amdgpu_vram_mgr_block_start(block) + start;
		cur->size = min(amdgpu_vram_mgr_block_size(block) - start, size);
		cur->remaining = size;
		cur->node = block;
		break;
	case TTM_PL_TT:
		node = to_ttm_range_mgr_node(res)->mm_nodes;
		while (start >= node->size << PAGE_SHIFT)
			start -= node++->size << PAGE_SHIFT;

		cur->start = (node->start << PAGE_SHIFT) + start;
		cur->size = min((node->size << PAGE_SHIFT) - start, size);
		cur->remaining = size;
		cur->node = node;
		break;
	default:
		goto fallback;
	}

	return;

fallback:
	cur->start = start;
	cur->size = size;
	cur->remaining = size;
	cur->node = node;
	cur->node = NULL;
	WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT);
	return;
}

/**
@@ -85,7 +124,9 @@ static inline void amdgpu_res_first(struct ttm_resource *res,
 */
static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
{
	struct drm_mm_node *node = cur->node;
	struct drm_buddy_block *block;
	struct drm_mm_node *node;
	struct list_head *next;

	BUG_ON(size > cur->remaining);

@@ -99,9 +140,27 @@ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t size)
		return;
	}

	cur->node = ++node;
	cur->start = node->start << PAGE_SHIFT;
	cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
	switch (cur->mem_type) {
	case TTM_PL_VRAM:
		block = cur->node;

		next = block->link.next;
		block = list_entry(next, struct drm_buddy_block, link);

		cur->node = block;
		cur->start = amdgpu_vram_mgr_block_start(block);
		cur->size = min(amdgpu_vram_mgr_block_size(block), cur->remaining);
		break;
	case TTM_PL_TT:
		node = cur->node;

		cur->node = ++node;
		cur->start = node->start << PAGE_SHIFT;
		cur->size = min(node->size << PAGE_SHIFT, cur->remaining);
		break;
	default:
		return;
	}
}

#endif
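With the cursor now dispatching on mem_type, callers keep the same two-call walking pattern whether the resource is backed by buddy blocks (VRAM) or range-manager nodes (GTT). A minimal sketch of that pattern, mirroring how amdgpu_vram_mgr_alloc_sgt() further below drives it:

	static void example_walk_resource(struct ttm_resource *res,
					  u64 offset, u64 length)
	{
		struct amdgpu_res_cursor cursor;

		amdgpu_res_first(res, offset, length, &cursor);
		while (cursor.remaining) {
			/* cursor.start: physical start of this contiguous chunk,
			 * cursor.size:  number of contiguous bytes in it.
			 */
			amdgpu_res_next(&cursor, cursor.size);
		}
	}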
@@ -259,7 +259,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
	if (resv == NULL)
		return -EINVAL;

	dma_resv_for_each_fence(&cursor, resv, true, f) {
	/* TODO: Use DMA_RESV_USAGE_READ here */
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, f) {
		dma_fence_chain_for_each(f, f) {
			struct dma_fence *tmp = dma_fence_chain_contained(f);

@@ -1344,7 +1344,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
	 * If true, then return false as any KFD process needs all its BOs to
	 * be resident to run successfully
	 */
	dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) {
	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
				DMA_RESV_USAGE_BOOKKEEP, f) {
		if (amdkfd_fence_check_mm(f, current->mm))
			return false;
	}
@@ -2160,17 +2161,6 @@ int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
							    TTM_PL_VRAM);
	struct drm_printer p = drm_seq_file_printer(m);

	ttm_resource_manager_debug(man, &p);
	return 0;
}

static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
@@ -2178,55 +2168,6 @@ static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
	return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
}

static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
							    TTM_PL_TT);
	struct drm_printer p = drm_seq_file_printer(m);

	ttm_resource_manager_debug(man, &p);
	return 0;
}

static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
							    AMDGPU_PL_GDS);
	struct drm_printer p = drm_seq_file_printer(m);

	ttm_resource_manager_debug(man, &p);
	return 0;
}

static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
							    AMDGPU_PL_GWS);
	struct drm_printer p = drm_seq_file_printer(m);

	ttm_resource_manager_debug(man, &p);
	return 0;
}

static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
	struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
							    AMDGPU_PL_OA);
	struct drm_printer p = drm_seq_file_printer(m);

	ttm_resource_manager_debug(man, &p);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table);
DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table);
DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table);
DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table);
DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table);
DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);

/*

@@ -2436,17 +2377,23 @@ void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
			    &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
	debugfs_create_file("amdgpu_iomem", 0444, root, adev,
			    &amdgpu_ttm_iomem_fops);
	debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
			    &amdgpu_mm_vram_table_fops);
	debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
			    &amdgpu_mm_tt_table_fops);
	debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
			    &amdgpu_mm_gds_table_fops);
	debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
			    &amdgpu_mm_gws_table_fops);
	debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
			    &amdgpu_mm_oa_table_fops);
	debugfs_create_file("ttm_page_pool", 0444, root, adev,
			    &amdgpu_ttm_page_pool_fops);
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     TTM_PL_VRAM),
					    root, "amdgpu_vram_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     TTM_PL_TT),
					    root, "amdgpu_gtt_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     AMDGPU_PL_GDS),
					    root, "amdgpu_gds_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     AMDGPU_PL_GWS),
					    root, "amdgpu_gws_mm");
	ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev,
							     AMDGPU_PL_OA),
					    root, "amdgpu_oa_mm");

#endif
}
@@ -26,6 +26,7 @@

#include <linux/dma-direction.h>
#include <drm/gpu_scheduler.h>
#include "amdgpu_vram_mgr.h"
#include "amdgpu.h"

#define AMDGPU_PL_GDS		(TTM_PL_PRIV + 0)
@@ -38,15 +39,6 @@

#define AMDGPU_POISON	0xd0bed0be

struct amdgpu_vram_mgr {
	struct ttm_resource_manager manager;
	struct drm_mm mm;
	spinlock_t lock;
	struct list_head reservations_pending;
	struct list_head reserved_pages;
	atomic64_t vis_usage;
};

struct amdgpu_gtt_mgr {
	struct ttm_resource_manager manager;
	struct drm_mm mm;

@@ -1163,7 +1163,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
	ib->length_dw = 16;

	if (direct) {
		r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
		r = dma_resv_wait_timeout(bo->tbo.base.resv,
					  DMA_RESV_USAGE_KERNEL, false,
					  msecs_to_jiffies(10));
		if (r == 0)
			r = -ETIMEDOUT;
@@ -2059,7 +2059,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_for_each_fence(&cursor, resv, true, fence) {
	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence) {
		/* Add a callback for each fence in the reservation object */
		amdgpu_vm_prt_get(adev);
		amdgpu_vm_add_prt_cb(adev, fence);
@@ -2665,7 +2665,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
		return true;

	/* Don't evict VM page tables while they are busy */
	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP))
		return false;

	/* Try to block ongoing updates */
@@ -2845,7 +2845,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
 */
long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
{
	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true,
	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
					DMA_RESV_USAGE_BOOKKEEP,
					true, timeout);
	if (timeout <= 0)
		return timeout;

@@ -74,13 +74,12 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
{
	unsigned int i;
	uint64_t value;
	int r;
	long r;

	if (vmbo->bo.tbo.moving) {
		r = dma_fence_wait(vmbo->bo.tbo.moving, true);
		if (r)
			return r;
	}
	r = dma_resv_wait_timeout(vmbo->bo.tbo.base.resv, DMA_RESV_USAGE_KERNEL,
				  true, MAX_SCHEDULE_TIMEOUT);
	if (r < 0)
		return r;

	pe += (unsigned long)amdgpu_bo_kptr(&vmbo->bo);

@@ -204,14 +204,19 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
	struct amdgpu_bo *bo = &vmbo->bo;
	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
		: AMDGPU_IB_POOL_DELAYED;
	struct dma_resv_iter cursor;
	unsigned int i, ndw, nptes;
	struct dma_fence *fence;
	uint64_t *pte;
	int r;

	/* Wait for PD/PT moves to be completed */
	r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving);
	if (r)
		return r;
	dma_resv_for_each_fence(&cursor, bo->tbo.base.resv,
				DMA_RESV_USAGE_KERNEL, fence) {
		r = amdgpu_sync_fence(&p->job->sync, fence);
		if (r)
			return r;
	}

	do {
		ndw = p->num_dw_left;

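With bo->tbo.moving gone, kernel-internal moves live in the reservation object as DMA_RESV_USAGE_KERNEL fences, so "sync to outstanding moves" becomes a fence iteration. A self-contained sketch of that pattern; the sync callback is a stand-in for amdgpu_sync_fence(), not the driver's actual helper:

	#include <linux/dma-resv.h>
	#include <drm/ttm/ttm_bo_api.h>

	static int example_sync_kernel_fences(struct ttm_buffer_object *bo,
					      int (*sync_one)(struct dma_fence *f))
	{
		struct dma_resv_iter cursor;
		struct dma_fence *fence;
		int r;

		/* Visit only fences tagged as kernel-internal (e.g. moves). */
		dma_resv_for_each_fence(&cursor, bo->base.resv,
					DMA_RESV_USAGE_KERNEL, fence) {
			r = sync_one(fence);
			if (r)
				return r;
		}
		return 0;
	}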
@@ -32,8 +32,10 @@
#include "atom.h"

struct amdgpu_vram_reservation {
	struct list_head node;
	struct drm_mm_node mm_node;
	u64 start;
	u64 size;
	struct list_head allocated;
	struct list_head blocks;
};

static inline struct amdgpu_vram_mgr *
@@ -186,18 +188,18 @@ const struct attribute_group amdgpu_vram_mgr_attr_group = {
};

/**
 * amdgpu_vram_mgr_vis_size - Calculate visible node size
 * amdgpu_vram_mgr_vis_size - Calculate visible block size
 *
 * @adev: amdgpu_device pointer
 * @node: MM node structure
 * @block: DRM BUDDY block structure
 *
 * Calculate how many bytes of the MM node are inside visible VRAM
 * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM
 */
static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
				    struct drm_mm_node *node)
				    struct drm_buddy_block *block)
{
	uint64_t start = node->start << PAGE_SHIFT;
	uint64_t end = (node->size + node->start) << PAGE_SHIFT;
	u64 start = amdgpu_vram_mgr_block_start(block);
	u64 end = start + amdgpu_vram_mgr_block_size(block);

	if (start >= adev->gmc.visible_vram_size)
		return 0;
@@ -218,9 +220,9 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_resource *res = bo->tbo.resource;
	unsigned pages = res->num_pages;
	struct drm_mm_node *mm;
	u64 usage;
	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
	struct drm_buddy_block *block;
	u64 usage = 0;

	if (amdgpu_gmc_vram_full_visible(&adev->gmc))
		return amdgpu_bo_size(bo);
@@ -228,9 +230,8 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
	if (res->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
		return 0;

	mm = &container_of(res, struct ttm_range_mgr_node, base)->mm_nodes[0];
	for (usage = 0; pages; pages -= mm->size, mm++)
		usage += amdgpu_vram_mgr_vis_size(adev, mm);
	list_for_each_entry(block, &vres->blocks, link)
		usage += amdgpu_vram_mgr_vis_size(adev, block);

	return usage;
}
@@ -240,23 +241,30 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_resource_manager *man)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_mm *mm = &mgr->mm;
	struct drm_buddy *mm = &mgr->mm;
	struct amdgpu_vram_reservation *rsv, *temp;
	struct drm_buddy_block *block;
	uint64_t vis_usage;

	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node) {
		if (drm_mm_reserve_node(mm, &rsv->mm_node))
	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) {
		if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size,
					   rsv->size, mm->chunk_size, &rsv->allocated,
					   DRM_BUDDY_RANGE_ALLOCATION))
			continue;

		block = amdgpu_vram_mgr_first_block(&rsv->allocated);
		if (!block)
			continue;

		dev_dbg(adev->dev, "Reservation 0x%llx - %lld, Succeeded\n",
			rsv->mm_node.start, rsv->mm_node.size);
			rsv->start, rsv->size);

		vis_usage = amdgpu_vram_mgr_vis_size(adev, &rsv->mm_node);
		vis_usage = amdgpu_vram_mgr_vis_size(adev, block);
		atomic64_add(vis_usage, &mgr->vis_usage);
		spin_lock(&man->bdev->lru_lock);
		man->usage += rsv->mm_node.size << PAGE_SHIFT;
		man->usage += rsv->size;
		spin_unlock(&man->bdev->lru_lock);
		list_move(&rsv->node, &mgr->reserved_pages);
		list_move(&rsv->blocks, &mgr->reserved_pages);
	}
}

@@ -278,14 +286,16 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr,
	if (!rsv)
		return -ENOMEM;

	INIT_LIST_HEAD(&rsv->node);
	rsv->mm_node.start = start >> PAGE_SHIFT;
	rsv->mm_node.size = size >> PAGE_SHIFT;
	INIT_LIST_HEAD(&rsv->allocated);
	INIT_LIST_HEAD(&rsv->blocks);

	spin_lock(&mgr->lock);
	list_add_tail(&rsv->node, &mgr->reservations_pending);
	rsv->start = start;
	rsv->size = size;

	mutex_lock(&mgr->lock);
	list_add_tail(&rsv->blocks, &mgr->reservations_pending);
	amdgpu_vram_mgr_do_reserve(&mgr->manager);
	spin_unlock(&mgr->lock);
	mutex_unlock(&mgr->lock);

	return 0;
}
@@ -307,19 +317,19 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,
	struct amdgpu_vram_reservation *rsv;
	int ret;

	spin_lock(&mgr->lock);
	mutex_lock(&mgr->lock);

	list_for_each_entry(rsv, &mgr->reservations_pending, node) {
		if ((rsv->mm_node.start <= start) &&
		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
	list_for_each_entry(rsv, &mgr->reservations_pending, blocks) {
		if (rsv->start <= start &&
		    (start < (rsv->start + rsv->size))) {
			ret = -EBUSY;
			goto out;
		}
	}

	list_for_each_entry(rsv, &mgr->reserved_pages, node) {
		if ((rsv->mm_node.start <= start) &&
		    (start < (rsv->mm_node.start + rsv->mm_node.size))) {
	list_for_each_entry(rsv, &mgr->reserved_pages, blocks) {
		if (rsv->start <= start &&
		    (start < (rsv->start + rsv->size))) {
			ret = 0;
			goto out;
		}
@@ -327,32 +337,10 @@ int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr,

	ret = -ENOENT;
out:
	spin_unlock(&mgr->lock);
	mutex_unlock(&mgr->lock);
	return ret;
}

/**
 * amdgpu_vram_mgr_virt_start - update virtual start address
 *
 * @mem: ttm_resource to update
 * @node: just allocated node
 *
 * Calculate a virtual BO start address to easily check if everything is CPU
 * accessible.
 */
static void amdgpu_vram_mgr_virt_start(struct ttm_resource *mem,
				       struct drm_mm_node *node)
{
	unsigned long start;

	start = node->start + node->size;
	if (start > mem->num_pages)
		start -= mem->num_pages;
	else
		start = 0;
	mem->start = max(mem->start, start);
}

/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
@@ -368,46 +356,44 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
			       const struct ttm_place *place,
			       struct ttm_resource **res)
{
	unsigned long lpfn, num_nodes, pages_per_node, pages_left, pages;
	u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	uint64_t vis_usage = 0, mem_bytes, max_bytes;
	struct ttm_range_mgr_node *node;
	struct drm_mm *mm = &mgr->mm;
	enum drm_mm_insert_mode mode;
	unsigned i;
	struct amdgpu_vram_mgr_resource *vres;
	u64 size, remaining_size, lpfn, fpfn;
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;
	unsigned long pages_per_block;
	int r;

	lpfn = place->lpfn;
	lpfn = place->lpfn << PAGE_SHIFT;
	if (!lpfn)
		lpfn = man->size >> PAGE_SHIFT;
		lpfn = man->size;

	fpfn = place->fpfn << PAGE_SHIFT;

	max_bytes = adev->gmc.mc_vram_size;
	if (tbo->type != ttm_bo_type_kernel)
		max_bytes -= AMDGPU_VM_RESERVED_VRAM;

	mem_bytes = tbo->base.size;
	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
		pages_per_node = ~0ul;
		num_nodes = 1;
		pages_per_block = ~0ul;
	} else {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		pages_per_node = HPAGE_PMD_NR;
		pages_per_block = HPAGE_PMD_NR;
#else
		/* default to 2MB */
		pages_per_node = 2UL << (20UL - PAGE_SHIFT);
		pages_per_block = 2UL << (20UL - PAGE_SHIFT);
#endif
		pages_per_node = max_t(uint32_t, pages_per_node,
				       tbo->page_alignment);
		num_nodes = DIV_ROUND_UP_ULL(PFN_UP(mem_bytes), pages_per_node);
		pages_per_block = max_t(uint32_t, pages_per_block,
					tbo->page_alignment);
	}

	node = kvmalloc(struct_size(node, mm_nodes, num_nodes),
			GFP_KERNEL | __GFP_ZERO);
	if (!node)
	vres = kzalloc(sizeof(*vres), GFP_KERNEL);
	if (!vres)
		return -ENOMEM;

	ttm_resource_init(tbo, place, &node->base);
	ttm_resource_init(tbo, place, &vres->base);

	/* bail out quickly if there's likely not enough VRAM for this BO */
	if (ttm_resource_manager_usage(man) > max_bytes) {
@@ -415,66 +401,130 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
		goto error_fini;
	}

	mode = DRM_MM_INSERT_BEST;
	INIT_LIST_HEAD(&vres->blocks);

	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;
		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;

	pages_left = node->base.num_pages;
	if (fpfn || lpfn != man->size)
		/* Allocate blocks in desired range */
		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;

	/* Limit maximum size to 2GB due to SG table limitations */
	pages = min(pages_left, 2UL << (30 - PAGE_SHIFT));
	remaining_size = vres->base.num_pages << PAGE_SHIFT;

	i = 0;
	spin_lock(&mgr->lock);
	while (pages_left) {
		uint32_t alignment = tbo->page_alignment;
	mutex_lock(&mgr->lock);
	while (remaining_size) {
		if (tbo->page_alignment)
			min_block_size = tbo->page_alignment << PAGE_SHIFT;
		else
			min_block_size = mgr->default_page_size;

		if (pages >= pages_per_node)
			alignment = pages_per_node;
		BUG_ON(min_block_size < mm->chunk_size);

		r = drm_mm_insert_node_in_range(mm, &node->mm_nodes[i], pages,
						alignment, 0, place->fpfn,
						lpfn, mode);
		if (unlikely(r)) {
			if (pages > pages_per_node) {
				if (is_power_of_2(pages))
					pages = pages / 2;
				else
					pages = rounddown_pow_of_two(pages);
				continue;
		/* Limit maximum size to 2GiB due to SG table limitations */
		size = min(remaining_size, 2ULL << 30);

		if (size >= pages_per_block << PAGE_SHIFT)
			min_block_size = pages_per_block << PAGE_SHIFT;

		cur_size = size;

		if (fpfn + size != place->lpfn << PAGE_SHIFT) {
			/*
			 * Except for actual range allocation, modify the size and
			 * min_block_size conforming to continuous flag enablement
			 */
			if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
				size = roundup_pow_of_two(size);
				min_block_size = size;
			/*
			 * Modify the size value if size is not
			 * aligned with min_block_size
			 */
			} else if (!IS_ALIGNED(size, min_block_size)) {
				size = round_up(size, min_block_size);
			}
			goto error_free;
		}

		vis_usage += amdgpu_vram_mgr_vis_size(adev, &node->mm_nodes[i]);
		amdgpu_vram_mgr_virt_start(&node->base, &node->mm_nodes[i]);
		pages_left -= pages;
		++i;
		r = drm_buddy_alloc_blocks(mm, fpfn,
					   lpfn,
					   size,
					   min_block_size,
					   &vres->blocks,
					   vres->flags);
		if (unlikely(r))
			goto error_free_blocks;

		if (pages > pages_left)
			pages = pages_left;
		if (size > remaining_size)
			remaining_size = 0;
		else
			remaining_size -= size;
	}
	spin_unlock(&mgr->lock);
	mutex_unlock(&mgr->lock);

	if (i == 1)
		node->base.placement |= TTM_PL_FLAG_CONTIGUOUS;
	if (cur_size != size) {
		struct drm_buddy_block *block;
		struct list_head *trim_list;
		u64 original_size;
		LIST_HEAD(temp);

		trim_list = &vres->blocks;
		original_size = vres->base.num_pages << PAGE_SHIFT;

		/*
		 * If size value is rounded up to min_block_size, trim the last
		 * block to the required size
		 */
		if (!list_is_singular(&vres->blocks)) {
			block = list_last_entry(&vres->blocks, typeof(*block), link);
			list_move_tail(&block->link, &temp);
			trim_list = &temp;
			/*
			 * Compute the original_size value by subtracting the
			 * last block size with (aligned size - original size)
			 */
			original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
		}

		mutex_lock(&mgr->lock);
		drm_buddy_block_trim(mm,
				     original_size,
				     trim_list);
		mutex_unlock(&mgr->lock);

		if (!list_empty(&temp))
			list_splice_tail(trim_list, &vres->blocks);
	}

	list_for_each_entry(block, &vres->blocks, link)
		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);

	block = amdgpu_vram_mgr_first_block(&vres->blocks);
	if (!block) {
		r = -EINVAL;
		goto error_fini;
	}

	vres->base.start = amdgpu_vram_mgr_block_start(block) >> PAGE_SHIFT;

	if (amdgpu_is_vram_mgr_blocks_contiguous(&vres->blocks))
		vres->base.placement |= TTM_PL_FLAG_CONTIGUOUS;

	if (adev->gmc.xgmi.connected_to_cpu)
		node->base.bus.caching = ttm_cached;
		vres->base.bus.caching = ttm_cached;
	else
		node->base.bus.caching = ttm_write_combined;
		vres->base.bus.caching = ttm_write_combined;

	atomic64_add(vis_usage, &mgr->vis_usage);
	*res = &node->base;
	*res = &vres->base;
	return 0;

error_free:
	while (i--)
		drm_mm_remove_node(&node->mm_nodes[i]);
	spin_unlock(&mgr->lock);
error_free_blocks:
	drm_buddy_free_list(mm, &vres->blocks);
	mutex_unlock(&mgr->lock);
error_fini:
	ttm_resource_fini(man, &node->base);
	kvfree(node);
	ttm_resource_fini(man, &vres->base);
	kfree(vres);

	return r;
}
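amdgpu_vram_mgr_new() now drives drm_buddy directly: byte-based sizes, a minimum block size, optional range/top-down flags, and drm_buddy_block_trim() to shave the tail block back down when the size was rounded up. A reduced sketch of that allocator lifecycle under the same assumptions (PAGE_SIZE chunks, error handling trimmed); illustrative only:

	#include <drm/drm_buddy.h>

	static int example_buddy_lifecycle(u64 vram_size, u64 start, u64 end, u64 size)
	{
		struct drm_buddy mm;
		LIST_HEAD(blocks);
		int err;

		err = drm_buddy_init(&mm, vram_size, PAGE_SIZE);
		if (err)
			return err;

		/* Allocate "size" bytes somewhere inside [start, end). */
		err = drm_buddy_alloc_blocks(&mm, start, end, size, PAGE_SIZE,
					     &blocks, DRM_BUDDY_RANGE_ALLOCATION);
		if (!err)
			drm_buddy_free_list(&mm, &blocks);

		drm_buddy_fini(&mm);
		return err;
	}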
@@ -490,27 +540,26 @@ error_fini:
static void amdgpu_vram_mgr_del(struct ttm_resource_manager *man,
				struct ttm_resource *res)
{
	struct ttm_range_mgr_node *node = to_ttm_range_mgr_node(res);
	struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct amdgpu_device *adev = to_amdgpu_device(mgr);
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;
	uint64_t vis_usage = 0;
	unsigned i, pages;

	spin_lock(&mgr->lock);
	for (i = 0, pages = res->num_pages; pages;
	     pages -= node->mm_nodes[i].size, ++i) {
		struct drm_mm_node *mm = &node->mm_nodes[i];
	mutex_lock(&mgr->lock);
	list_for_each_entry(block, &vres->blocks, link)
		vis_usage += amdgpu_vram_mgr_vis_size(adev, block);

		drm_mm_remove_node(mm);
		vis_usage += amdgpu_vram_mgr_vis_size(adev, mm);
	}
	amdgpu_vram_mgr_do_reserve(man);
	spin_unlock(&mgr->lock);

	drm_buddy_free_list(mm, &vres->blocks);
	mutex_unlock(&mgr->lock);

	atomic64_sub(vis_usage, &mgr->vis_usage);

	ttm_resource_fini(man, res);
	kvfree(node);
	kfree(vres);
}

/**
@@ -542,7 +591,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
	if (!*sgt)
		return -ENOMEM;

	/* Determine the number of DRM_MM nodes to export */
	/* Determine the number of DRM_BUDDY blocks to export */
	amdgpu_res_first(res, offset, length, &cursor);
	while (cursor.remaining) {
		num_entries++;
@@ -558,10 +607,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *adev,
		sg->length = 0;

	/*
	 * Walk down DRM_MM nodes to populate scatterlist nodes
	 * @note: Use iterator api to get first the DRM_MM node
	 * Walk down DRM_BUDDY blocks to populate scatterlist nodes
	 * @note: Use iterator api to get first the DRM_BUDDY block
	 * and the number of bytes from it. Access the following
	 * DRM_MM node(s) if more buffer needs to exported
	 * DRM_BUDDY block(s) if more buffer needs to exported
	 */
	amdgpu_res_first(res, offset, length, &cursor);
	for_each_sgtable_sg((*sgt), sg, i) {
@@ -648,13 +697,22 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_manager *man,
				  struct drm_printer *printer)
{
	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
	struct drm_buddy *mm = &mgr->mm;
	struct drm_buddy_block *block;

	drm_printf(printer, "  vis usage:%llu\n",
		   amdgpu_vram_mgr_vis_usage(mgr));

	spin_lock(&mgr->lock);
	drm_mm_print(&mgr->mm, printer);
	spin_unlock(&mgr->lock);
	mutex_lock(&mgr->lock);
	drm_printf(printer, "default_page_size: %lluKiB\n",
		   mgr->default_page_size >> 10);

	drm_buddy_print(mm, printer);

	drm_printf(printer, "reserved:\n");
	list_for_each_entry(block, &mgr->reserved_pages, link)
		drm_buddy_block_print(mm, block, printer);
	mutex_unlock(&mgr->lock);
}

static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = {
@@ -674,16 +732,21 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev)
{
	struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr;
	struct ttm_resource_manager *man = &mgr->manager;
	int err;

	ttm_resource_manager_init(man, &adev->mman.bdev,
				  adev->gmc.real_vram_size);

	man->func = &amdgpu_vram_mgr_func;

	drm_mm_init(&mgr->mm, 0, man->size >> PAGE_SHIFT);
	spin_lock_init(&mgr->lock);
	err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE);
	if (err)
		return err;

	mutex_init(&mgr->lock);
	INIT_LIST_HEAD(&mgr->reservations_pending);
	INIT_LIST_HEAD(&mgr->reserved_pages);
	mgr->default_page_size = PAGE_SIZE;

	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager);
	ttm_resource_manager_set_used(man, true);
@@ -711,16 +774,16 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev)
	if (ret)
		return;

	spin_lock(&mgr->lock);
	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, node)
	mutex_lock(&mgr->lock);
	list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks)
		kfree(rsv);

	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, node) {
		drm_mm_remove_node(&rsv->mm_node);
	list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) {
		drm_buddy_free_list(&mgr->mm, &rsv->blocks);
		kfree(rsv);
	}
	drm_mm_takedown(&mgr->mm);
	spin_unlock(&mgr->lock);
	drm_buddy_fini(&mgr->mm);
	mutex_unlock(&mgr->lock);

	ttm_resource_manager_cleanup(man);
	ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, NULL);

drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h (new file, 89 lines)
@@ -0,0 +1,89 @@
/* SPDX-License-Identifier: MIT
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_VRAM_MGR_H__
#define __AMDGPU_VRAM_MGR_H__

#include <drm/drm_buddy.h>

struct amdgpu_vram_mgr {
	struct ttm_resource_manager manager;
	struct drm_buddy mm;
	/* protects access to buffer objects */
	struct mutex lock;
	struct list_head reservations_pending;
	struct list_head reserved_pages;
	atomic64_t vis_usage;
	u64 default_page_size;
};

struct amdgpu_vram_mgr_resource {
	struct ttm_resource base;
	struct list_head blocks;
	unsigned long flags;
};

static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *block)
{
	return drm_buddy_block_offset(block);
}

static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block)
{
	return PAGE_SIZE << drm_buddy_block_order(block);
}

static inline struct drm_buddy_block *
amdgpu_vram_mgr_first_block(struct list_head *list)
{
	return list_first_entry_or_null(list, struct drm_buddy_block, link);
}

static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *head)
{
	struct drm_buddy_block *block;
	u64 start, size;

	block = amdgpu_vram_mgr_first_block(head);
	if (!block)
		return false;

	while (head != block->link.next) {
		start = amdgpu_vram_mgr_block_start(block);
		size = amdgpu_vram_mgr_block_size(block);

		block = list_entry(block->link.next, struct drm_buddy_block, link);
		if (start + size != amdgpu_vram_mgr_block_start(block))
			return false;
	}

	return true;
}

static inline struct amdgpu_vram_mgr_resource *
to_amdgpu_vram_mgr_resource(struct ttm_resource *res)
{
	return container_of(res, struct amdgpu_vram_mgr_resource, base);
}

#endif
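These inline helpers are what the rest of the driver leans on; for instance, an allocation's byte footprint falls out of a simple block-list walk. A trivial usage sketch built only from the header above:

	static u64 example_resource_size(struct ttm_resource *res)
	{
		struct amdgpu_vram_mgr_resource *vres = to_amdgpu_vram_mgr_resource(res);
		struct drm_buddy_block *block;
		u64 size = 0;

		/* Sum the size of every buddy block backing this resource. */
		list_for_each_entry(block, &vres->blocks, link)
			size += amdgpu_vram_mgr_block_size(block);

		return size;
	}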
@@ -9238,7 +9238,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
		 * deadlock during GPU reset when this fence will not signal
		 * but we hold reservation lock for the BO.
		 */
		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
		r = dma_resv_wait_timeout(abo->tbo.base.resv,
					  DMA_RESV_USAGE_WRITE, false,
					  msecs_to_jiffies(5000));
		if (unlikely(r <= 0))
			DRM_ERROR("Waiting for fences timed out!");
@@ -256,6 +256,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,

	formats = komeda_get_layer_fourcc_list(&mdev->fmt_tbl,
					       layer->layer_type, &n_formats);
	if (!formats) {
		kfree(kplane);
		return -ENOMEM;
	}

	err = drm_universal_plane_init(&kms->base, plane,
				       get_possible_crtcs(kms, c->pipeline),
@@ -266,8 +270,10 @@ static int komeda_plane_add(struct komeda_kms_dev *kms,

	komeda_put_fourcc_list(formats);

	if (err)
		goto cleanup;
	if (err) {
		kfree(kplane);
		return err;
	}

	drm_plane_helper_add(plane, &komeda_plane_helper_funcs);

@@ -487,7 +487,10 @@ static void malidp_crtc_reset(struct drm_crtc *crtc)
	if (crtc->state)
		malidp_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &state->base);
	if (state)
		__drm_atomic_helper_crtc_reset(crtc, &state->base);
	else
		__drm_atomic_helper_crtc_reset(crtc, NULL);
}

static int malidp_crtc_enable_vblank(struct drm_crtc *crtc)
@@ -310,17 +310,13 @@ static int malidp_se_check_scaling(struct malidp_plane *mp,

static u32 malidp_get_pgsize_bitmap(struct malidp_plane *mp)
{
	u32 pgsize_bitmap = 0;
	struct iommu_domain *mmu_dom;

	if (iommu_present(&platform_bus_type)) {
		struct iommu_domain *mmu_dom =
			iommu_get_domain_for_dev(mp->base.dev->dev);
	mmu_dom = iommu_get_domain_for_dev(mp->base.dev->dev);
	if (mmu_dom)
		return mmu_dom->pgsize_bitmap;

		if (mmu_dom)
			pgsize_bitmap = mmu_dom->pgsize_bitmap;
	}

	return pgsize_bitmap;
	return 0;
}

/*

@@ -32,6 +32,7 @@ config DRM_CHIPONE_ICN6211
	select DRM_KMS_HELPER
	select DRM_MIPI_DSI
	select DRM_PANEL_BRIDGE
	select REGMAP_I2C
	help
	  ICN6211 is MIPI-DSI/RGB Converter bridge from chipone.

@@ -99,6 +100,19 @@ config DRM_LONTIUM_LT8912B
	  Say M here if you want to support this hardware as a module.
	  The module will be named "lontium-lt8912b".

config DRM_LONTIUM_LT9211
	tristate "Lontium LT9211 DSI/LVDS/DPI bridge"
	depends on OF
	select DRM_PANEL_BRIDGE
	select DRM_KMS_HELPER
	select DRM_MIPI_DSI
	select REGMAP_I2C
	help
	  Driver for Lontium LT9211 Single/Dual-Link DSI/LVDS or Single DPI
	  input to Single-link/Dual-Link DSI/LVDS or Single DPI output bridge
	  chip.
	  Please say Y if you have such hardware.

config DRM_LONTIUM_LT9611
	tristate "Lontium LT9611 DSI/HDMI bridge"
	select SND_SOC_HDMI_CODEC if SND_SOC

@@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o
obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o
obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o
obj-$(CONFIG_DRM_LONTIUM_LT9211) += lontium-lt9211.o
obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o
obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
@@ -1292,8 +1292,10 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
		goto err_unregister_cec;

	adv7511->bridge.funcs = &adv7511_bridge_funcs;
	adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID
			    | DRM_BRIDGE_OP_HPD;
	adv7511->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID;
	if (adv7511->i2c_main->irq)
		adv7511->bridge.ops |= DRM_BRIDGE_OP_HPD;

	adv7511->bridge.of_node = dev->of_node;
	adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;

@@ -1486,12 +1486,12 @@ static void anx7625_dp_adjust_swing(struct anx7625_data *ctx)
	for (i = 0; i < ctx->pdata.dp_lane0_swing_reg_cnt; i++)
		anx7625_reg_write(ctx, ctx->i2c.tx_p1_client,
				  DP_TX_LANE0_SWING_REG0 + i,
				  ctx->pdata.lane0_reg_data[i] & 0xFF);
				  ctx->pdata.lane0_reg_data[i]);

	for (i = 0; i < ctx->pdata.dp_lane1_swing_reg_cnt; i++)
		anx7625_reg_write(ctx, ctx->i2c.tx_p1_client,
				  DP_TX_LANE1_SWING_REG0 + i,
				  ctx->pdata.lane1_reg_data[i] & 0xFF);
				  ctx->pdata.lane1_reg_data[i]);
}

static void dp_hpd_change_handler(struct anx7625_data *ctx, bool on)
@@ -1598,8 +1598,8 @@ static int anx7625_get_swing_setting(struct device *dev,
			num_regs = DP_TX_SWING_REG_CNT;

		pdata->dp_lane0_swing_reg_cnt = num_regs;
		of_property_read_u32_array(dev->of_node, "analogix,lane0-swing",
					   pdata->lane0_reg_data, num_regs);
		of_property_read_u8_array(dev->of_node, "analogix,lane0-swing",
					  pdata->lane0_reg_data, num_regs);
	}

	if (of_get_property(dev->of_node,
@@ -1608,8 +1608,8 @@ static int anx7625_get_swing_setting(struct device *dev,
			num_regs = DP_TX_SWING_REG_CNT;

		pdata->dp_lane1_swing_reg_cnt = num_regs;
		of_property_read_u32_array(dev->of_node, "analogix,lane1-swing",
					   pdata->lane1_reg_data, num_regs);
		of_property_read_u8_array(dev->of_node, "analogix,lane1-swing",
					  pdata->lane1_reg_data, num_regs);
	}

	return 0;
@@ -1932,14 +1932,14 @@ static int anx7625_audio_get_eld(struct device *dev, void *data,
	struct anx7625_data *ctx = dev_get_drvdata(dev);

	if (!ctx->connector) {
		dev_err(dev, "connector not initial\n");
		return -EINVAL;
		/* Pass en empty ELD if connector not available */
		memset(buf, 0, len);
	} else {
		dev_dbg(dev, "audio copy eld\n");
		memcpy(buf, ctx->connector->eld,
		       min(sizeof(ctx->connector->eld), len));
	}

	dev_dbg(dev, "audio copy eld\n");
	memcpy(buf, ctx->connector->eld,
	       min(sizeof(ctx->connector->eld), len));

	return 0;
}

@@ -426,9 +426,9 @@ struct anx7625_platform_data {
	int mipi_lanes;
	int audio_en;
	int dp_lane0_swing_reg_cnt;
	int lane0_reg_data[DP_TX_SWING_REG_CNT];
	u8 lane0_reg_data[DP_TX_SWING_REG_CNT];
	int dp_lane1_swing_reg_cnt;
	int lane1_reg_data[DP_TX_SWING_REG_CNT];
	u8 lane1_reg_data[DP_TX_SWING_REG_CNT];
	u32 low_power_mode;
	struct device_node *mipi_host_node;
};
@@ -14,6 +14,7 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>

#define VENDOR_ID	0x00
@@ -134,6 +135,7 @@

struct chipone {
	struct device *dev;
	struct regmap *regmap;
	struct i2c_client *client;
	struct drm_bridge bridge;
	struct drm_display_mode mode;
@@ -146,6 +148,77 @@ struct chipone {
	bool interface_i2c;
};

static const struct regmap_range chipone_dsi_readable_ranges[] = {
	regmap_reg_range(VENDOR_ID, VERSION_ID),
	regmap_reg_range(FIRMWARE_VERSION, PLL_SSC_OFFSET(3)),
	regmap_reg_range(GPIO_OEN, MIPI_ULPS_CTRL),
	regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE),
	regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H),
	regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE),
	regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY_(5)),
	regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM),
	regmap_reg_range(MIPI_DBG_SET_(0), MIPI_DBG_SET_(9)),
	regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS_(1)),
};

static const struct regmap_access_table chipone_dsi_readable_table = {
	.yes_ranges = chipone_dsi_readable_ranges,
	.n_yes_ranges = ARRAY_SIZE(chipone_dsi_readable_ranges),
};

static const struct regmap_range chipone_dsi_writeable_ranges[] = {
	regmap_reg_range(CONFIG_FINISH, PLL_SSC_OFFSET(3)),
	regmap_reg_range(GPIO_OEN, MIPI_ULPS_CTRL),
	regmap_reg_range(MIPI_CLK_CHK_VAR, MIPI_T_TA_SURE_PRE),
	regmap_reg_range(MIPI_T_LPX_SET, MIPI_INIT_TIME_H),
	regmap_reg_range(MIPI_T_CLK_TERM_EN, MIPI_T_CLK_SETTLE),
	regmap_reg_range(MIPI_TO_HS_RX_L, MIPI_PHY_(5)),
	regmap_reg_range(MIPI_PD_RX, MIPI_RST_NUM),
	regmap_reg_range(MIPI_DBG_SET_(0), MIPI_DBG_SET_(9)),
	regmap_reg_range(MIPI_DBG_SEL, MIPI_ATE_STATUS_(1)),
};

static const struct regmap_access_table chipone_dsi_writeable_table = {
	.yes_ranges = chipone_dsi_writeable_ranges,
	.n_yes_ranges = ARRAY_SIZE(chipone_dsi_writeable_ranges),
};

static const struct regmap_config chipone_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.rd_table = &chipone_dsi_readable_table,
	.wr_table = &chipone_dsi_writeable_table,
	.cache_type = REGCACHE_RBTREE,
	.max_register = MIPI_ATE_STATUS_(1),
};

static int chipone_dsi_read(void *context,
			    const void *reg, size_t reg_size,
			    void *val, size_t val_size)
{
	struct mipi_dsi_device *dsi = context;
	const u16 reg16 = (val_size << 8) | *(u8 *)reg;
	int ret;

	ret = mipi_dsi_generic_read(dsi, &reg16, 2, val, val_size);

	return ret == val_size ? 0 : -EINVAL;
}

static int chipone_dsi_write(void *context, const void *data, size_t count)
{
	struct mipi_dsi_device *dsi = context;

	return mipi_dsi_generic_write(dsi, data, 2);
}

static const struct regmap_bus chipone_dsi_regmap_bus = {
	.read = chipone_dsi_read,
	.write = chipone_dsi_write,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
};

static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge)
{
	return container_of(bridge, struct chipone, bridge);
@@ -153,18 +226,16 @@ static inline struct chipone *bridge_to_chipone(struct drm_bridge *bridge)

static void chipone_readb(struct chipone *icn, u8 reg, u8 *val)
{
	if (icn->interface_i2c)
		*val = i2c_smbus_read_byte_data(icn->client, reg);
	else
		mipi_dsi_generic_read(icn->dsi, (u8[]){reg, 1}, 2, val, 1);
	int ret, pval;

	ret = regmap_read(icn->regmap, reg, &pval);

	*val = ret ? 0 : pval & 0xff;
}

static int chipone_writeb(struct chipone *icn, u8 reg, u8 val)
{
	if (icn->interface_i2c)
		return i2c_smbus_write_byte_data(icn->client, reg, val);
	else
		return mipi_dsi_generic_write(icn->dsi, (u8[]){reg, val}, 2);
	return regmap_write(icn->regmap, reg, val);
}

static void chipone_configure_pll(struct chipone *icn,
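Both probe paths below end up handing the driver a regmap (I2C-backed, or DSI-backed through the custom regmap_bus above), so register access becomes transport-independent from here on. A small hedged sketch of what that buys, reusing register names from this driver; this is illustrative, not the driver's actual code:

	static int example_regmap_access(struct regmap *map)
	{
		unsigned int val;
		int ret;

		/* Same call whether the regmap sits on I2C or on DSI. */
		ret = regmap_read(map, VENDOR_ID, &val);
		if (ret)
			return ret;

		return regmap_write(map, SYNC_EVENT_DLY, 0x80);
	}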
@@ -324,6 +395,11 @@ static void chipone_atomic_enable(struct drm_bridge *bridge,
	/* dsi specific sequence */
	chipone_writeb(icn, SYNC_EVENT_DLY, 0x80);
	chipone_writeb(icn, HFP_MIN, hfp & 0xff);

	/* DSI data lane count */
	chipone_writeb(icn, DSI_CTRL,
		       DSI_CTRL_UNKNOWN | DSI_CTRL_DSI_LANES(icn->dsi->lanes - 1));

	chipone_writeb(icn, MIPI_PD_CK_LANE, 0xa0);
	chipone_writeb(icn, PLL_CTRL(12), 0xff);
	chipone_writeb(icn, MIPI_PN_SWAP, 0x00);
@@ -409,9 +485,23 @@ static void chipone_mode_set(struct drm_bridge *bridge,
static int chipone_dsi_attach(struct chipone *icn)
{
	struct mipi_dsi_device *dsi = icn->dsi;
	int ret;
	struct device *dev = icn->dev;
	struct device_node *endpoint;
	int dsi_lanes, ret;

	endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
	dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
	of_node_put(endpoint);

	/*
	 * If the 'data-lanes' property does not exist in DT or is invalid,
	 * default to previously hard-coded behavior, which was 4 data lanes.
	 */
	if (dsi_lanes >= 1 && dsi_lanes <= 4)
		icn->dsi->lanes = dsi_lanes;
	else
		icn->dsi->lanes = 4;

	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_NO_EOT_PACKET;
@@ -591,6 +681,11 @@ static int chipone_dsi_probe(struct mipi_dsi_device *dsi)
	if (ret)
		return ret;

	icn->regmap = devm_regmap_init(dev, &chipone_dsi_regmap_bus,
				       dsi, &chipone_regmap_config);
	if (IS_ERR(icn->regmap))
		return PTR_ERR(icn->regmap);

	icn->interface_i2c = false;
	icn->dsi = dsi;

@@ -616,6 +711,10 @@ static int chipone_i2c_probe(struct i2c_client *client,
	if (ret)
		return ret;

	icn->regmap = devm_regmap_init_i2c(client, &chipone_regmap_config);
	if (IS_ERR(icn->regmap))
		return PTR_ERR(icn->regmap);

	icn->interface_i2c = true;
	icn->client = client;
	dev_set_drvdata(dev, icn);

@@ -24,6 +24,7 @@ struct display_connector {
	int hpd_irq;

	struct regulator *dp_pwr;
	struct gpio_desc *ddc_en;
};

static inline struct display_connector *
@@ -345,6 +346,17 @@ static int display_connector_probe(struct platform_device *pdev)
		}
	}

	/* enable DDC */
	if (type == DRM_MODE_CONNECTOR_HDMIA) {
		conn->ddc_en = devm_gpiod_get_optional(&pdev->dev, "ddc-en",
						       GPIOD_OUT_HIGH);

		if (IS_ERR(conn->ddc_en)) {
			dev_err(&pdev->dev, "Couldn't get ddc-en gpio\n");
			return PTR_ERR(conn->ddc_en);
		}
	}

	conn->bridge.funcs = &display_connector_bridge_funcs;
	conn->bridge.of_node = pdev->dev.of_node;

@@ -373,6 +385,9 @@ static int display_connector_remove(struct platform_device *pdev)
{
	struct display_connector *conn = platform_get_drvdata(pdev);

	if (conn->ddc_en)
		gpiod_set_value(conn->ddc_en, 0);

	if (conn->dp_pwr)
		regulator_disable(conn->dp_pwr);

drivers/gpu/drm/bridge/lontium-lt9211.c (new file, 802 lines)
@@ -0,0 +1,802 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Lontium LT9211 bridge driver
 *
 * LT9211 is capable of converting:
 *   2xDSI/2xLVDS/1xDPI -> 2xDSI/2xLVDS/1xDPI
 * Currently supported is:
 *   1xDSI -> 1xLVDS
 *
 * Copyright (C) 2022 Marek Vasut <marex@denx.de>
 */

#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>

#define REG_PAGE_CONTROL		0xff
#define REG_CHIPID0			0x8100
#define REG_CHIPID0_VALUE		0x18
#define REG_CHIPID1			0x8101
#define REG_CHIPID1_VALUE		0x01
#define REG_CHIPID2			0x8102
#define REG_CHIPID2_VALUE		0xe3

#define REG_DSI_LANE			0xd000
/* DSI lane count - 0 means 4 lanes ; 1, 2, 3 means 1, 2, 3 lanes. */
#define REG_DSI_LANE_COUNT(n)		((n) & 3)

struct lt9211 {
	struct drm_bridge		bridge;
	struct device			*dev;
	struct regmap			*regmap;
	struct mipi_dsi_device		*dsi;
	struct drm_bridge		*panel_bridge;
	struct gpio_desc		*reset_gpio;
	struct regulator		*vccio;
	bool				lvds_dual_link;
	bool				lvds_dual_link_even_odd_swap;
};

static const struct regmap_range lt9211_rw_ranges[] = {
	regmap_reg_range(0xff, 0xff),
	regmap_reg_range(0x8100, 0x816b),
	regmap_reg_range(0x8200, 0x82aa),
	regmap_reg_range(0x8500, 0x85ff),
	regmap_reg_range(0x8600, 0x86a0),
	regmap_reg_range(0x8700, 0x8746),
	regmap_reg_range(0xd000, 0xd0a7),
	regmap_reg_range(0xd400, 0xd42c),
	regmap_reg_range(0xd800, 0xd838),
	regmap_reg_range(0xd9c0, 0xd9d5),
};

static const struct regmap_access_table lt9211_rw_table = {
	.yes_ranges = lt9211_rw_ranges,
	.n_yes_ranges = ARRAY_SIZE(lt9211_rw_ranges),
};

static const struct regmap_range_cfg lt9211_range = {
	.name = "lt9211",
	.range_min = 0x0000,
	.range_max = 0xda00,
	.selector_reg = REG_PAGE_CONTROL,
	.selector_mask = 0xff,
	.selector_shift = 0,
	.window_start = 0,
	.window_len = 0x100,
};

static const struct regmap_config lt9211_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.rd_table = &lt9211_rw_table,
	.wr_table = &lt9211_rw_table,
	.volatile_table = &lt9211_rw_table,
	.ranges = &lt9211_range,
	.num_ranges = 1,
	.cache_type = REGCACHE_RBTREE,
	.max_register = 0xda00,
};

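The LT9211 exposes 256-register pages behind a page-select register (0xff); the regmap_range_cfg above flattens that into one virtual address space where the high byte of a 16-bit address picks the page and the low byte the offset, so the rest of the driver uses plain flat addresses. A sketch of the effect for a single write (the expansion described in the comment is the assumed regmap behavior for ranges, not code from this driver):

	static int example_paged_write(struct regmap *map)
	{
		/*
		 * Virtual address 0x8201 = page 0x82, offset 0x01. regmap
		 * selects the page by writing 0x82 to the selector register
		 * REG_PAGE_CONTROL (0xff), then performs the 8-bit access at
		 * offset 0x01 inside the 0x100-byte window.
		 */
		return regmap_write(map, 0x8201, 0x18);
	}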
||||
static struct lt9211 *bridge_to_lt9211(struct drm_bridge *bridge)
|
||||
{
|
||||
return container_of(bridge, struct lt9211, bridge);
|
||||
}
|
||||
|
||||
static int lt9211_attach(struct drm_bridge *bridge,
|
||||
enum drm_bridge_attach_flags flags)
|
||||
{
|
||||
struct lt9211 *ctx = bridge_to_lt9211(bridge);
|
||||
|
||||
return drm_bridge_attach(bridge->encoder, ctx->panel_bridge,
|
||||
&ctx->bridge, flags);
|
||||
}
|
||||
|
||||
static int lt9211_read_chipid(struct lt9211 *ctx)
|
||||
{
|
||||
u8 chipid[3];
|
||||
int ret;
|
||||
|
||||
/* Read Chip ID registers and verify the chip can communicate. */
|
||||
ret = regmap_bulk_read(ctx->regmap, REG_CHIPID0, chipid, 3);
|
||||
if (ret < 0) {
|
||||
dev_err(ctx->dev, "Failed to read Chip ID: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Test for known Chip ID. */
|
||||
if (chipid[0] != REG_CHIPID0_VALUE || chipid[1] != REG_CHIPID1_VALUE ||
|
||||
chipid[2] != REG_CHIPID2_VALUE) {
|
||||
dev_err(ctx->dev, "Unknown Chip ID: 0x%02x 0x%02x 0x%02x\n",
|
||||
chipid[0], chipid[1], chipid[2]);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int lt9211_system_init(struct lt9211 *ctx)
|
||||
{
|
||||
const struct reg_sequence lt9211_system_init_seq[] = {
|
||||
{ 0x8201, 0x18 },
|
||||
{ 0x8606, 0x61 },
|
||||
{ 0x8607, 0xa8 },
|
||||
{ 0x8714, 0x08 },
|
||||
{ 0x8715, 0x00 },
|
||||
{ 0x8718, 0x0f },
|
||||
{ 0x8722, 0x08 },
|
||||
{ 0x8723, 0x00 },
|
||||
{ 0x8726, 0x0f },
|
||||
{ 0x810b, 0xfe },
|
||||
};
|
||||
|
||||
return regmap_multi_reg_write(ctx->regmap, lt9211_system_init_seq,
|
||||
ARRAY_SIZE(lt9211_system_init_seq));
|
||||
}

static int lt9211_configure_rx(struct lt9211 *ctx)
{
	const struct reg_sequence lt9211_rx_phy_seq[] = {
		{ 0x8202, 0x44 },
		{ 0x8204, 0xa0 },
		{ 0x8205, 0x22 },
		{ 0x8207, 0x9f },
		{ 0x8208, 0xfc },
		/* ORR with 0xf8 here to enable DSI DN/DP swap. */
		{ 0x8209, 0x01 },
		{ 0x8217, 0x0c },
		{ 0x8633, 0x1b },
	};

	const struct reg_sequence lt9211_rx_cal_reset_seq[] = {
		{ 0x8120, 0x7f },
		{ 0x8120, 0xff },
	};

	const struct reg_sequence lt9211_rx_dig_seq[] = {
		{ 0x8630, 0x85 },
		/* 0x8588: BIT 6 set = MIPI-RX, BIT 4 unset = LVDS-TX */
		{ 0x8588, 0x40 },
		{ 0x85ff, 0xd0 },
		{ REG_DSI_LANE, REG_DSI_LANE_COUNT(ctx->dsi->lanes) },
		{ 0xd002, 0x05 },
	};

	const struct reg_sequence lt9211_rx_div_reset_seq[] = {
		{ 0x810a, 0xc0 },
		{ 0x8120, 0xbf },
	};

	const struct reg_sequence lt9211_rx_div_clear_seq[] = {
		{ 0x810a, 0xc1 },
		{ 0x8120, 0xff },
	};

	int ret;

	ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_phy_seq,
				     ARRAY_SIZE(lt9211_rx_phy_seq));
	if (ret)
		return ret;

	ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_cal_reset_seq,
				     ARRAY_SIZE(lt9211_rx_cal_reset_seq));
	if (ret)
		return ret;

	ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_dig_seq,
				     ARRAY_SIZE(lt9211_rx_dig_seq));
	if (ret)
		return ret;

	ret = regmap_multi_reg_write(ctx->regmap, lt9211_rx_div_reset_seq,
				     ARRAY_SIZE(lt9211_rx_div_reset_seq));
	if (ret)
		return ret;

	usleep_range(10000, 15000);

	return regmap_multi_reg_write(ctx->regmap, lt9211_rx_div_clear_seq,
				      ARRAY_SIZE(lt9211_rx_div_clear_seq));
}
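
/*
 * The byte clock counter read below spans three registers and yields a
 * 20-bit value in kHz (bits 19:16 sit in the low nibble of the first
 * byte). The detected width is reported in DSI bytes per line, hence
 * the division by the bytes-per-pixel of the detected format.
 */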

static int lt9211_autodetect_rx(struct lt9211 *ctx,
				const struct drm_display_mode *mode)
{
	u16 width, height;
	u32 byteclk;
	u8 buf[5];
	u8 format;
	u8 bc[3];
	int ret;

	/* Measure ByteClock frequency. */
	ret = regmap_write(ctx->regmap, 0x8600, 0x01);
	if (ret)
		return ret;

	/* Give the chip time to lock onto RX stream. */
	msleep(100);

	/* Read the ByteClock frequency from the chip. */
	ret = regmap_bulk_read(ctx->regmap, 0x8608, bc, sizeof(bc));
	if (ret)
		return ret;

	/* RX ByteClock in kHz */
	byteclk = ((bc[0] & 0xf) << 16) | (bc[1] << 8) | bc[2];

	/* Width/Height/Format Auto-detection */
	ret = regmap_bulk_read(ctx->regmap, 0xd082, buf, sizeof(buf));
	if (ret)
		return ret;

	width = (buf[0] << 8) | buf[1];
	height = (buf[3] << 8) | buf[4];
	format = buf[2] & 0xf;

	if (format == 0x3) {		/* YUV422 16bit */
		width /= 2;
	} else if (format == 0xa) {	/* RGB888 24bit */
		width /= 3;
	} else {
		dev_err(ctx->dev, "Unsupported DSI pixel format 0x%01x\n",
			format);
		return -EINVAL;
	}

	if (width != mode->hdisplay) {
		dev_err(ctx->dev,
			"RX: Detected DSI width (%d) does not match mode hdisplay (%d)\n",
			width, mode->hdisplay);
		return -EINVAL;
	}

	if (height != mode->vdisplay) {
		dev_err(ctx->dev,
			"RX: Detected DSI height (%d) does not match mode vdisplay (%d)\n",
			height, mode->vdisplay);
		return -EINVAL;
	}

	dev_dbg(ctx->dev, "RX: %dx%d format=0x%01x byteclock=%d kHz\n",
		width, height, format, byteclk);

	return 0;
}
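
/*
 * The timing registers below take vtotal/vdisplay/htotal/hdisplay and
 * the front porches as 16-bit big-endian values, while the sync pulse
 * widths are written as single bytes.
 */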

static int lt9211_configure_timing(struct lt9211 *ctx,
				   const struct drm_display_mode *mode)
{
	const struct reg_sequence lt9211_timing[] = {
		{ 0xd00d, (mode->vtotal >> 8) & 0xff },
		{ 0xd00e, mode->vtotal & 0xff },
		{ 0xd00f, (mode->vdisplay >> 8) & 0xff },
		{ 0xd010, mode->vdisplay & 0xff },
		{ 0xd011, (mode->htotal >> 8) & 0xff },
		{ 0xd012, mode->htotal & 0xff },
		{ 0xd013, (mode->hdisplay >> 8) & 0xff },
		{ 0xd014, mode->hdisplay & 0xff },
		{ 0xd015, (mode->vsync_end - mode->vsync_start) & 0xff },
		{ 0xd016, (mode->hsync_end - mode->hsync_start) & 0xff },
		{ 0xd017, ((mode->vsync_start - mode->vdisplay) >> 8) & 0xff },
		{ 0xd018, (mode->vsync_start - mode->vdisplay) & 0xff },
		{ 0xd019, ((mode->hsync_start - mode->hdisplay) >> 8) & 0xff },
		{ 0xd01a, (mode->hsync_start - mode->hdisplay) & 0xff },
	};

	return regmap_multi_reg_write(ctx->regmap, lt9211_timing,
				      ARRAY_SIZE(lt9211_timing));
}
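
/*
 * Register 0x8235 appears to select the DeSSC PLL post-divider by pixel
 * clock octave (0x83/0x82/0x81 for <44/<88/<176 MHz); the exact bit
 * layout is not publicly documented.
 */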

static int lt9211_configure_plls(struct lt9211 *ctx,
				 const struct drm_display_mode *mode)
{
	const struct reg_sequence lt9211_pcr_seq[] = {
		{ 0xd026, 0x17 },
		{ 0xd027, 0xc3 },
		{ 0xd02d, 0x30 },
		{ 0xd031, 0x10 },
		{ 0xd023, 0x20 },
		{ 0xd038, 0x02 },
		{ 0xd039, 0x10 },
		{ 0xd03a, 0x20 },
		{ 0xd03b, 0x60 },
		{ 0xd03f, 0x04 },
		{ 0xd040, 0x08 },
		{ 0xd041, 0x10 },
		{ 0x810b, 0xee },
		{ 0x810b, 0xfe },
	};

	unsigned int pval;
	int ret;

	/* DeSSC PLL reference clock is 25 MHz XTal. */
	ret = regmap_write(ctx->regmap, 0x822d, 0x48);
	if (ret)
		return ret;

	if (mode->clock < 44000) {
		ret = regmap_write(ctx->regmap, 0x8235, 0x83);
	} else if (mode->clock < 88000) {
		ret = regmap_write(ctx->regmap, 0x8235, 0x82);
	} else if (mode->clock < 176000) {
		ret = regmap_write(ctx->regmap, 0x8235, 0x81);
	} else {
		dev_err(ctx->dev,
			"Unsupported mode clock (%d kHz) above 176 MHz.\n",
			mode->clock);
		return -EINVAL;
	}

	if (ret)
		return ret;

	/* Wait for the DeSSC PLL to stabilize. */
	msleep(100);

	ret = regmap_multi_reg_write(ctx->regmap, lt9211_pcr_seq,
				     ARRAY_SIZE(lt9211_pcr_seq));
	if (ret)
		return ret;

	/* PCR stability test takes seconds. */
	ret = regmap_read_poll_timeout(ctx->regmap, 0xd087, pval, pval & 0x8,
				       20000, 10000000);
	if (ret)
		dev_err(ctx->dev, "PCR unstable, ret=%i\n", ret);

	return ret;
}
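
/*
 * In the TX digital sequence below, register 0x8559 packs the LVDS
 * output options: BIT(7) selects JEIDA (instead of SPWG) bit mapping,
 * BIT(5) enables DE mode, and BIT(4) selects 24bpp instead of 18bpp.
 */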

static int lt9211_configure_tx(struct lt9211 *ctx, bool jeida,
			       bool bpp24, bool de)
{
	const struct reg_sequence system_lt9211_tx_phy_seq[] = {
		/* DPI output disable */
		{ 0x8262, 0x00 },
		/* BIT(7) is LVDS dual-port */
		{ 0x823b, 0x38 | (ctx->lvds_dual_link ? BIT(7) : 0) },
		{ 0x823e, 0x92 },
		{ 0x823f, 0x48 },
		{ 0x8240, 0x31 },
		{ 0x8243, 0x80 },
		{ 0x8244, 0x00 },
		{ 0x8245, 0x00 },
		{ 0x8249, 0x00 },
		{ 0x824a, 0x01 },
		{ 0x824e, 0x00 },
		{ 0x824f, 0x00 },
		{ 0x8250, 0x00 },
		{ 0x8253, 0x00 },
		{ 0x8254, 0x01 },
		/* LVDS channel order, Odd:Even 0x10..A:B, 0x40..B:A */
		{ 0x8646, ctx->lvds_dual_link_even_odd_swap ? 0x40 : 0x10 },
		{ 0x8120, 0x7b },
		{ 0x816b, 0xff },
	};

	const struct reg_sequence system_lt9211_tx_dig_seq[] = {
		{ 0x8559, 0x40 | (jeida ? BIT(7) : 0) |
			  (de ? BIT(5) : 0) | (bpp24 ? BIT(4) : 0) },
		{ 0x855a, 0xaa },
		{ 0x855b, 0xaa },
		{ 0x855c, ctx->lvds_dual_link ? BIT(0) : 0 },
		{ 0x85a1, 0x77 },
		{ 0x8640, 0x40 },
		{ 0x8641, 0x34 },
		{ 0x8642, 0x10 },
		{ 0x8643, 0x23 },
		{ 0x8644, 0x41 },
		{ 0x8645, 0x02 },
	};

	const struct reg_sequence system_lt9211_tx_pll_seq[] = {
		/* TX PLL power down */
		{ 0x8236, 0x01 },
		{ 0x8237, ctx->lvds_dual_link ? 0x2a : 0x29 },
		{ 0x8238, 0x06 },
		{ 0x8239, 0x30 },
		{ 0x823a, 0x8e },
		{ 0x8737, 0x14 },
		{ 0x8713, 0x00 },
		{ 0x8713, 0x80 },
	};

	unsigned int pval;
	int ret;

	ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_phy_seq,
				     ARRAY_SIZE(system_lt9211_tx_phy_seq));
	if (ret)
		return ret;

	ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_dig_seq,
				     ARRAY_SIZE(system_lt9211_tx_dig_seq));
	if (ret)
		return ret;

	ret = regmap_multi_reg_write(ctx->regmap, system_lt9211_tx_pll_seq,
				     ARRAY_SIZE(system_lt9211_tx_pll_seq));
	if (ret)
		return ret;

	ret = regmap_read_poll_timeout(ctx->regmap, 0x871f, pval, pval & 0x80,
				       10000, 1000000);
	if (ret) {
		dev_err(ctx->dev, "TX PLL unstable, ret=%i\n", ret);
		return ret;
	}

	ret = regmap_read_poll_timeout(ctx->regmap, 0x8720, pval, pval & 0x80,
				       10000, 1000000);
	if (ret) {
		dev_err(ctx->dev, "TX PLL unstable, ret=%i\n", ret);
		return ret;
	}

	return 0;
}
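
/*
 * Enable path: power up the supply and release reset, then program the
 * chip in RX -> timing -> PLL -> TX order. atomic_enable cannot return
 * an error, so any failure below simply leaves the bridge unconfigured.
 */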

static void lt9211_atomic_enable(struct drm_bridge *bridge,
				 struct drm_bridge_state *old_bridge_state)
{
	struct lt9211 *ctx = bridge_to_lt9211(bridge);
	struct drm_atomic_state *state = old_bridge_state->base.state;
	const struct drm_bridge_state *bridge_state;
	const struct drm_crtc_state *crtc_state;
	const struct drm_display_mode *mode;
	struct drm_connector *connector;
	struct drm_crtc *crtc;
	bool lvds_format_24bpp;
	bool lvds_format_jeida;
	u32 bus_flags;
	int ret;

	ret = regulator_enable(ctx->vccio);
	if (ret) {
		dev_err(ctx->dev, "Failed to enable vccio: %d\n", ret);
		return;
	}

	/* Deassert reset */
	gpiod_set_value(ctx->reset_gpio, 1);
	usleep_range(20000, 21000);	/* Very long post-reset delay. */

	/* Get the LVDS format from the bridge state. */
	bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
	bus_flags = bridge_state->output_bus_cfg.flags;

	switch (bridge_state->output_bus_cfg.format) {
	case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
		lvds_format_24bpp = false;
		lvds_format_jeida = true;
		break;
	case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
		lvds_format_24bpp = true;
		lvds_format_jeida = true;
		break;
	case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
		lvds_format_24bpp = true;
		lvds_format_jeida = false;
		break;
	default:
		/*
		 * Some bridges still don't set the correct
		 * LVDS bus pixel format, use SPWG24 default
		 * format until those are fixed.
		 */
		lvds_format_24bpp = true;
		lvds_format_jeida = false;
		dev_warn(ctx->dev,
			 "Unsupported LVDS bus format 0x%04x, please check output bridge driver. Falling back to SPWG24.\n",
			 bridge_state->output_bus_cfg.format);
		break;
	}

	/*
	 * Retrieve the CRTC adjusted mode. This requires a little dance to go
	 * from the bridge to the encoder, to the connector and to the CRTC.
	 */
	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	crtc = drm_atomic_get_new_connector_state(state, connector)->crtc;
	crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
	mode = &crtc_state->adjusted_mode;

	ret = lt9211_read_chipid(ctx);
	if (ret)
		return;

	ret = lt9211_system_init(ctx);
	if (ret)
		return;

	ret = lt9211_configure_rx(ctx);
	if (ret)
		return;

	ret = lt9211_autodetect_rx(ctx, mode);
	if (ret)
		return;

	ret = lt9211_configure_timing(ctx, mode);
	if (ret)
		return;

	ret = lt9211_configure_plls(ctx, mode);
	if (ret)
		return;

	ret = lt9211_configure_tx(ctx, lvds_format_jeida, lvds_format_24bpp,
				  bus_flags & DRM_BUS_FLAG_DE_HIGH);
	if (ret)
		return;

	dev_dbg(ctx->dev, "LT9211 enabled.\n");
}

static void lt9211_atomic_disable(struct drm_bridge *bridge,
				  struct drm_bridge_state *old_bridge_state)
{
	struct lt9211 *ctx = bridge_to_lt9211(bridge);
	int ret;

	/*
	 * Put the chip in reset, pull nRST line low,
	 * and assure lengthy 10ms reset low timing.
	 */
	gpiod_set_value(ctx->reset_gpio, 0);
	usleep_range(10000, 11000);	/* Very long reset duration. */

	ret = regulator_disable(ctx->vccio);
	if (ret)
		dev_err(ctx->dev, "Failed to disable vccio: %d\n", ret);

	regcache_mark_dirty(ctx->regmap);
}

static enum drm_mode_status
lt9211_mode_valid(struct drm_bridge *bridge,
		  const struct drm_display_info *info,
		  const struct drm_display_mode *mode)
{
	/* LVDS output clock range 25..176 MHz */
	if (mode->clock < 25000)
		return MODE_CLOCK_LOW;
	if (mode->clock > 176000)
		return MODE_CLOCK_HIGH;

	return MODE_OK;
}

#define MAX_INPUT_SEL_FORMATS	1

static u32 *
lt9211_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
				 struct drm_bridge_state *bridge_state,
				 struct drm_crtc_state *crtc_state,
				 struct drm_connector_state *conn_state,
				 u32 output_fmt,
				 unsigned int *num_input_fmts)
{
	u32 *input_fmts;

	*num_input_fmts = 0;

	input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
			     GFP_KERNEL);
	if (!input_fmts)
		return NULL;

	/* This is the DSI-end bus format */
	input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
	*num_input_fmts = 1;

	return input_fmts;
}

static const struct drm_bridge_funcs lt9211_funcs = {
	.attach = lt9211_attach,
	.mode_valid = lt9211_mode_valid,
	.atomic_enable = lt9211_atomic_enable,
	.atomic_disable = lt9211_atomic_disable,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_get_input_bus_fmts = lt9211_atomic_get_input_bus_fmts,
	.atomic_reset = drm_atomic_helper_bridge_reset,
};
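
/*
 * Dual-link LVDS is inferred from the DT graph: ports 2 and 3 are the
 * two LVDS output channels, and drm_of_lvds_get_dual_link_pixel_order()
 * checks the dual-lvds-odd/even-pixels markings on the remote (panel)
 * ports to pick the channel order.
 */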

static int lt9211_parse_dt(struct lt9211 *ctx)
{
	struct device_node *port2, *port3;
	struct drm_bridge *panel_bridge;
	struct device *dev = ctx->dev;
	struct drm_panel *panel;
	int dual_link;
	int ret;

	ctx->vccio = devm_regulator_get(dev, "vccio");
	if (IS_ERR(ctx->vccio))
		return dev_err_probe(dev, PTR_ERR(ctx->vccio),
				     "Failed to get supply 'vccio'\n");

	ctx->lvds_dual_link = false;
	ctx->lvds_dual_link_even_odd_swap = false;

	port2 = of_graph_get_port_by_id(dev->of_node, 2);
	port3 = of_graph_get_port_by_id(dev->of_node, 3);
	dual_link = drm_of_lvds_get_dual_link_pixel_order(port2, port3);
	of_node_put(port2);
	of_node_put(port3);

	if (dual_link == DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS) {
		ctx->lvds_dual_link = true;
		/* Odd pixels to LVDS Channel A, even pixels to B */
		ctx->lvds_dual_link_even_odd_swap = false;
	} else if (dual_link == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS) {
		ctx->lvds_dual_link = true;
		/* Even pixels to LVDS Channel A, odd pixels to B */
		ctx->lvds_dual_link_even_odd_swap = true;
	}

	ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &panel, &panel_bridge);
	if (ret < 0)
		return ret;
	if (panel) {
		panel_bridge = devm_drm_panel_bridge_add(dev, panel);
		if (IS_ERR(panel_bridge))
			return PTR_ERR(panel_bridge);
	}

	ctx->panel_bridge = panel_bridge;

	return 0;
}
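
/*
 * The bridge registers itself as a DSI peripheral on the host found
 * through the port 0 endpoint; the lane count is taken from that
 * endpoint's "data-lanes" property.
 */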

static int lt9211_host_attach(struct lt9211 *ctx)
{
	const struct mipi_dsi_device_info info = {
		.type = "lt9211",
		.channel = 0,
		.node = NULL,
	};
	struct device *dev = ctx->dev;
	struct device_node *host_node;
	struct device_node *endpoint;
	struct mipi_dsi_device *dsi;
	struct mipi_dsi_host *host;
	int dsi_lanes;
	int ret;

	endpoint = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
	dsi_lanes = of_property_count_u32_elems(endpoint, "data-lanes");
	host_node = of_graph_get_remote_port_parent(endpoint);
	host = of_find_mipi_dsi_host_by_node(host_node);
	of_node_put(host_node);
	of_node_put(endpoint);

	if (!host)
		return -EPROBE_DEFER;

	if (dsi_lanes < 0 || dsi_lanes > 4)
		return -EINVAL;

	dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
	if (IS_ERR(dsi))
		return dev_err_probe(dev, PTR_ERR(dsi),
				     "failed to create dsi device\n");

	ctx->dsi = dsi;

	dsi->lanes = dsi_lanes;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
			  MIPI_DSI_MODE_VIDEO_HSE;

	ret = devm_mipi_dsi_attach(dev, dsi);
	if (ret < 0) {
		dev_err(dev, "failed to attach dsi to host: %d\n", ret);
		return ret;
	}

	return 0;
}

static int lt9211_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct device *dev = &client->dev;
	struct lt9211 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;

	/*
	 * Put the chip in reset, pull nRST line low,
	 * and assure lengthy 10ms reset low timing.
	 */
	ctx->reset_gpio = devm_gpiod_get_optional(ctx->dev, "reset",
						  GPIOD_OUT_LOW);
	if (IS_ERR(ctx->reset_gpio))
		return PTR_ERR(ctx->reset_gpio);

	usleep_range(10000, 11000);	/* Very long reset duration. */

	ret = lt9211_parse_dt(ctx);
	if (ret)
		return ret;

	ctx->regmap = devm_regmap_init_i2c(client, &lt9211_regmap_config);
	if (IS_ERR(ctx->regmap))
		return PTR_ERR(ctx->regmap);

	dev_set_drvdata(dev, ctx);
	i2c_set_clientdata(client, ctx);

	ctx->bridge.funcs = &lt9211_funcs;
	ctx->bridge.of_node = dev->of_node;
	drm_bridge_add(&ctx->bridge);

	ret = lt9211_host_attach(ctx);
	if (ret)
		drm_bridge_remove(&ctx->bridge);

	return ret;
}

static int lt9211_remove(struct i2c_client *client)
{
	struct lt9211 *ctx = i2c_get_clientdata(client);

	drm_bridge_remove(&ctx->bridge);

	return 0;
}

static struct i2c_device_id lt9211_id[] = {
	{ "lontium,lt9211" },
	{},
};
MODULE_DEVICE_TABLE(i2c, lt9211_id);

static const struct of_device_id lt9211_match_table[] = {
	{ .compatible = "lontium,lt9211" },
	{},
};
MODULE_DEVICE_TABLE(of, lt9211_match_table);

static struct i2c_driver lt9211_driver = {
	.probe = lt9211_probe,
	.remove = lt9211_remove,
	.id_table = lt9211_id,
	.driver = {
		.name = "lt9211",
		.of_match_table = lt9211_match_table,
	},
};
module_i2c_driver(lt9211_driver);

MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
MODULE_DESCRIPTION("Lontium LT9211 DSI/LVDS/DPI bridge driver");
MODULE_LICENSE("GPL");

@ -83,8 +83,11 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
	drm_connector_attach_encoder(&panel_bridge->connector,
				     bridge->encoder);

	if (connector->funcs->reset)
		connector->funcs->reset(connector);
	if (bridge->dev->registered) {
		if (connector->funcs->reset)
			connector->funcs->reset(connector);
		drm_connector_register(connector);
	}

	return 0;
}
@ -25,6 +25,16 @@ config DRM_DW_HDMI_I2S_AUDIO
	  Support the I2S Audio interface which is part of the Synopsys
	  Designware HDMI block.

config DRM_DW_HDMI_GP_AUDIO
	tristate "Synopsys Designware GP Audio interface"
	depends on DRM_DW_HDMI && SND
	select SND_PCM
	select SND_PCM_ELD
	select SND_PCM_IEC958
	help
	  Support the GP Audio interface which is part of the Synopsys
	  Designware HDMI block.

config DRM_DW_HDMI_CEC
	tristate "Synopsis Designware CEC interface"
	depends on DRM_DW_HDMI
@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o
obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o
obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o

drivers/gpu/drm/bridge/synopsys/dw-hdmi-gp-audio.c (new file, 199 lines)
@ -0,0 +1,199 @@
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * dw-hdmi-gp-audio.c
 *
 * Copyright 2020-2022 NXP
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_edid.h>
#include <drm/drm_connector.h>

#include <sound/hdmi-codec.h>
#include <sound/asoundef.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_drm_eld.h>
#include <sound/pcm_iec958.h>
#include <sound/dmaengine_pcm.h>

#include "dw-hdmi-audio.h"

#define DRIVER_NAME "dw-hdmi-gp-audio"
#define DRV_NAME "hdmi-gp-audio"

struct snd_dw_hdmi {
	struct dw_hdmi_audio_data data;
	struct platform_device *audio_pdev;
	unsigned int pos;
};

struct dw_hdmi_channel_conf {
	u8 conf1;
	u8 ca;
};

/*
 * The default mapping of ALSA channels to HDMI channels and speaker
 * allocation bits. Note that we can't do channel remapping here -
 * channels must be in the same order.
 *
 * Mappings for alsa-lib pcm/surround*.conf files:
 *
 *		Front	Sur4.0	Sur4.1	Sur5.0	Sur5.1	Sur7.1
 * Channels	2	4	6	6	6	8
 *
 * Our mapping from ALSA channel to CEA686D speaker name and HDMI channel:
 *
 *			Number of ALSA channels
 * ALSA Channel	2	3	4	5	6	7	8
 * 0		FL:0	=	=	=	=	=	=
 * 1		FR:1	=	=	=	=	=	=
 * 2			FC:3	RL:4	LFE:2	=	=	=
 * 3				RR:5	RL:4	FC:3	=	=
 * 4					RR:5	RL:4	=	=
 * 5						RR:5	=	=
 * 6							RC:6	=
 * 7							RLC/FRC	RLC/FRC
 */
static struct dw_hdmi_channel_conf default_hdmi_channel_config[7] = {
	{ 0x03, 0x00 },	/* FL,FR */
	{ 0x0b, 0x02 },	/* FL,FR,FC */
	{ 0x33, 0x08 },	/* FL,FR,RL,RR */
	{ 0x37, 0x09 },	/* FL,FR,LFE,RL,RR */
	{ 0x3f, 0x0b },	/* FL,FR,LFE,FC,RL,RR */
	{ 0x7f, 0x0f },	/* FL,FR,LFE,FC,RL,RR,RC */
	{ 0xff, 0x13 },	/* FL,FR,LFE,FC,RL,RR,[FR]RC,[FR]LC */
};

static int audio_hw_params(struct device *dev, void *data,
			   struct hdmi_codec_daifmt *daifmt,
			   struct hdmi_codec_params *params)
{
	struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
	int ret = 0;
	u8 ca;

	dw_hdmi_set_sample_rate(dw->data.hdmi, params->sample_rate);

	ca = default_hdmi_channel_config[params->channels - 2].ca;

	dw_hdmi_set_channel_count(dw->data.hdmi, params->channels);
	dw_hdmi_set_channel_allocation(dw->data.hdmi, ca);

	dw_hdmi_set_sample_non_pcm(dw->data.hdmi,
				   params->iec.status[0] & IEC958_AES0_NONAUDIO);
	dw_hdmi_set_sample_width(dw->data.hdmi, params->sample_width);

	return ret;
}

static void audio_shutdown(struct device *dev, void *data)
{
}

static int audio_mute_stream(struct device *dev, void *data,
			     bool enable, int direction)
{
	struct snd_dw_hdmi *dw = dev_get_drvdata(dev);
	int ret = 0;

	if (!enable)
		dw_hdmi_audio_enable(dw->data.hdmi);
	else
		dw_hdmi_audio_disable(dw->data.hdmi);

	return ret;
}

static int audio_get_eld(struct device *dev, void *data,
			 u8 *buf, size_t len)
{
	struct dw_hdmi_audio_data *audio = data;
	u8 *eld;

	eld = audio->get_eld(audio->hdmi);
	if (eld)
		memcpy(buf, eld, min_t(size_t, MAX_ELD_BYTES, len));
	else
		/* Pass an empty ELD if connector not available */
		memset(buf, 0, len);

	return 0;
}

static int audio_hook_plugged_cb(struct device *dev, void *data,
				 hdmi_codec_plugged_cb fn,
				 struct device *codec_dev)
{
	struct snd_dw_hdmi *dw = dev_get_drvdata(dev);

	return dw_hdmi_set_plugged_cb(dw->data.hdmi, fn, codec_dev);
}

static const struct hdmi_codec_ops audio_codec_ops = {
	.hw_params = audio_hw_params,
	.audio_shutdown = audio_shutdown,
	.mute_stream = audio_mute_stream,
	.get_eld = audio_get_eld,
	.hook_plugged_cb = audio_hook_plugged_cb,
};
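
/*
 * The probe below does not touch the hardware itself: it wraps the
 * dw-hdmi glue data into an "hdmi-audio-codec" platform device so the
 * generic hdmi-codec driver exposes the PCM to ALSA.
 */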

static int snd_dw_hdmi_probe(struct platform_device *pdev)
{
	struct dw_hdmi_audio_data *data = pdev->dev.platform_data;
	struct snd_dw_hdmi *dw;

	const struct hdmi_codec_pdata codec_data = {
		.i2s = 1,
		.spdif = 0,
		.ops = &audio_codec_ops,
		.max_i2s_channels = 8,
		.data = data,
	};

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	dw->data = *data;

	platform_set_drvdata(pdev, dw);

	dw->audio_pdev = platform_device_register_data(&pdev->dev,
						       HDMI_CODEC_DRV_NAME, 1,
						       &codec_data,
						       sizeof(codec_data));

	return PTR_ERR_OR_ZERO(dw->audio_pdev);
}

static int snd_dw_hdmi_remove(struct platform_device *pdev)
{
	struct snd_dw_hdmi *dw = platform_get_drvdata(pdev);

	platform_device_unregister(dw->audio_pdev);

	return 0;
}

static struct platform_driver snd_dw_hdmi_driver = {
	.probe = snd_dw_hdmi_probe,
	.remove = snd_dw_hdmi_remove,
	.driver = {
		.name = DRIVER_NAME,
	},
};

module_platform_driver(snd_dw_hdmi_driver);

MODULE_AUTHOR("Shengjiu Wang <shengjiu.wang@nxp.com>");
MODULE_DESCRIPTION("Synopsys Designware HDMI GPA ALSA interface");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);

@ -191,7 +191,10 @@ struct dw_hdmi {

	spinlock_t audio_lock;
	struct mutex audio_mutex;
	unsigned int sample_non_pcm;
	unsigned int sample_width;
	unsigned int sample_rate;
	unsigned int channels;
	unsigned int audio_cts;
	unsigned int audio_n;
	bool audio_enable;
@ -589,6 +592,8 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
		n = 4096;
	else if (pixel_clk == 74176000 || pixel_clk == 148352000)
		n = 11648;
	else if (pixel_clk == 297000000)
		n = 3072;
	else
		n = 4096;
	n *= mult;
@ -601,6 +606,8 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
		n = 17836;
	else if (pixel_clk == 148352000)
		n = 8918;
	else if (pixel_clk == 297000000)
		n = 4704;
	else
		n = 6272;
	n *= mult;
@ -615,6 +622,8 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk)
		n = 11648;
	else if (pixel_clk == 148352000)
		n = 5824;
	else if (pixel_clk == 297000000)
		n = 5120;
	else
		n = 6144;
	n *= mult;
@ -659,8 +668,8 @@ static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi,

	config3 = hdmi_readb(hdmi, HDMI_CONFIG3_ID);

	/* Only compute CTS when using internal AHB audio */
	if (config3 & HDMI_CONFIG3_AHBAUDDMA) {
	/* Compute CTS when using internal AHB audio or General Parallel audio */
	if ((config3 & HDMI_CONFIG3_AHBAUDDMA) || (config3 & HDMI_CONFIG3_GPAUD)) {
		/*
		 * Compute the CTS value from the N value. Note that CTS and N
		 * can be up to 20 bits in total, so we need 64-bit math. Also
@ -702,6 +711,22 @@ static void hdmi_clk_regenerator_update_pixel_clock(struct dw_hdmi *hdmi)
	mutex_unlock(&hdmi->audio_mutex);
}

void dw_hdmi_set_sample_width(struct dw_hdmi *hdmi, unsigned int width)
{
	mutex_lock(&hdmi->audio_mutex);
	hdmi->sample_width = width;
	mutex_unlock(&hdmi->audio_mutex);
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_width);

void dw_hdmi_set_sample_non_pcm(struct dw_hdmi *hdmi, unsigned int non_pcm)
{
	mutex_lock(&hdmi->audio_mutex);
	hdmi->sample_non_pcm = non_pcm;
	mutex_unlock(&hdmi->audio_mutex);
}
EXPORT_SYMBOL_GPL(dw_hdmi_set_sample_non_pcm);

void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate)
{
	mutex_lock(&hdmi->audio_mutex);
@ -717,6 +742,7 @@ void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt)
	u8 layout;

	mutex_lock(&hdmi->audio_mutex);
	hdmi->channels = cnt;

	/*
	 * For >2 channel PCM audio, we need to select layout 1
@ -765,6 +791,89 @@ static u8 *hdmi_audio_get_eld(struct dw_hdmi *hdmi)
	return hdmi->curr_conn->eld;
}
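
/*
 * The sample_freq/org_sample_freq codes written below correspond to the
 * IEC 60958 channel status sampling-frequency and original-sampling-
 * frequency fields carried in the FC_AUDSCHNLS registers.
 */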

static void dw_hdmi_gp_audio_enable(struct dw_hdmi *hdmi)
{
	const struct dw_hdmi_plat_data *pdata = hdmi->plat_data;
	int sample_freq = 0x2, org_sample_freq = 0xD;
	int ch_mask = BIT(hdmi->channels) - 1;

	switch (hdmi->sample_rate) {
	case 32000:
		sample_freq = 0x03;
		org_sample_freq = 0x0C;
		break;
	case 44100:
		sample_freq = 0x00;
		org_sample_freq = 0x0F;
		break;
	case 48000:
		sample_freq = 0x02;
		org_sample_freq = 0x0D;
		break;
	case 88200:
		sample_freq = 0x08;
		org_sample_freq = 0x07;
		break;
	case 96000:
		sample_freq = 0x0A;
		org_sample_freq = 0x05;
		break;
	case 176400:
		sample_freq = 0x0C;
		org_sample_freq = 0x03;
		break;
	case 192000:
		sample_freq = 0x0E;
		org_sample_freq = 0x01;
		break;
	default:
		break;
	}

	hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n);
	hdmi_enable_audio_clk(hdmi, true);

	hdmi_writeb(hdmi, 0x1, HDMI_FC_AUDSCHNLS0);
	hdmi_writeb(hdmi, hdmi->channels, HDMI_FC_AUDSCHNLS2);
	hdmi_writeb(hdmi, 0x22, HDMI_FC_AUDSCHNLS3);
	hdmi_writeb(hdmi, 0x22, HDMI_FC_AUDSCHNLS4);
	hdmi_writeb(hdmi, 0x11, HDMI_FC_AUDSCHNLS5);
	hdmi_writeb(hdmi, 0x11, HDMI_FC_AUDSCHNLS6);
	hdmi_writeb(hdmi, (0x3 << 4) | sample_freq, HDMI_FC_AUDSCHNLS7);
	hdmi_writeb(hdmi, (org_sample_freq << 4) | 0xb, HDMI_FC_AUDSCHNLS8);

	hdmi_writeb(hdmi, ch_mask, HDMI_GP_CONF1);
	hdmi_writeb(hdmi, 0x02, HDMI_GP_CONF2);
	hdmi_writeb(hdmi, 0x01, HDMI_GP_CONF0);

	hdmi_modb(hdmi, 0x3, 0x3, HDMI_FC_DATAUTO3);

	/* hbr */
	if (hdmi->sample_rate == 192000 && hdmi->channels == 8 &&
	    hdmi->sample_width == 32 && hdmi->sample_non_pcm)
		hdmi_modb(hdmi, 0x01, 0x01, HDMI_GP_CONF2);

	if (pdata->enable_audio)
		pdata->enable_audio(hdmi,
				    hdmi->channels,
				    hdmi->sample_width,
				    hdmi->sample_rate,
				    hdmi->sample_non_pcm);
}

static void dw_hdmi_gp_audio_disable(struct dw_hdmi *hdmi)
{
	const struct dw_hdmi_plat_data *pdata = hdmi->plat_data;

	hdmi_set_cts_n(hdmi, hdmi->audio_cts, 0);

	hdmi_modb(hdmi, 0, 0x3, HDMI_FC_DATAUTO3);
	if (pdata->disable_audio)
		pdata->disable_audio(hdmi);

	hdmi_enable_audio_clk(hdmi, false);
}

static void dw_hdmi_ahb_audio_enable(struct dw_hdmi *hdmi)
{
	hdmi_set_cts_n(hdmi, hdmi->audio_cts, hdmi->audio_n);
@ -1108,6 +1217,8 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
	unsigned int output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_PP;
	struct hdmi_data_info *hdmi_data = &hdmi->hdmi_data;
	u8 val, vp_conf;
	u8 clear_gcp_auto = 0;


	if (hdmi_bus_fmt_is_rgb(hdmi->hdmi_data.enc_out_bus_format) ||
	    hdmi_bus_fmt_is_yuv444(hdmi->hdmi_data.enc_out_bus_format) ||
@ -1117,6 +1228,7 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
		case 8:
			color_depth = 4;
			output_select = HDMI_VP_CONF_OUTPUT_SELECTOR_BYPASS;
			clear_gcp_auto = 1;
			break;
		case 10:
			color_depth = 5;
@ -1136,6 +1248,7 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
		case 0:
		case 8:
			remap_size = HDMI_VP_REMAP_YCC422_16bit;
			clear_gcp_auto = 1;
			break;
		case 10:
			remap_size = HDMI_VP_REMAP_YCC422_20bit;
@ -1160,6 +1273,19 @@ static void hdmi_video_packetize(struct dw_hdmi *hdmi)
		  HDMI_VP_PR_CD_DESIRED_PR_FACTOR_MASK);
	hdmi_writeb(hdmi, val, HDMI_VP_PR_CD);

	/* HDMI1.4b specification section 6.5.3:
	 * Source shall only send GCPs with non-zero CD to sinks
	 * that indicate support for Deep Color.
	 * GCP only transmits CD and does not handle AVMUTE, PP nor Default_Phase (yet).
	 * Disable Auto GCP when 24-bit color for sinks that do not support Deep Color.
	 */
	val = hdmi_readb(hdmi, HDMI_FC_DATAUTO3);
	if (clear_gcp_auto == 1)
		val &= ~HDMI_FC_DATAUTO3_GCP_AUTO;
	else
		val |= HDMI_FC_DATAUTO3_GCP_AUTO;
	hdmi_writeb(hdmi, val, HDMI_FC_DATAUTO3);

	hdmi_modb(hdmi, HDMI_VP_STUFF_PR_STUFFING_STUFFING_MODE,
		  HDMI_VP_STUFF_PR_STUFFING_MASK, HDMI_VP_STUFF);
@ -1357,13 +1483,21 @@ static void dw_hdmi_phy_sel_interface_control(struct dw_hdmi *hdmi, u8 enable)
			 HDMI_PHY_CONF0_SELDIPIF_MASK);
}

void dw_hdmi_phy_reset(struct dw_hdmi *hdmi)
void dw_hdmi_phy_gen1_reset(struct dw_hdmi *hdmi)
{
	/* PHY reset. The reset signal is active low on Gen1 PHYs. */
	hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ);
	hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ);
}
EXPORT_SYMBOL_GPL(dw_hdmi_phy_gen1_reset);

void dw_hdmi_phy_gen2_reset(struct dw_hdmi *hdmi)
{
	/* PHY reset. The reset signal is active high on Gen2 PHYs. */
	hdmi_writeb(hdmi, HDMI_MC_PHYRSTZ_PHYRSTZ, HDMI_MC_PHYRSTZ);
	hdmi_writeb(hdmi, 0, HDMI_MC_PHYRSTZ);
}
EXPORT_SYMBOL_GPL(dw_hdmi_phy_reset);
EXPORT_SYMBOL_GPL(dw_hdmi_phy_gen2_reset);

void dw_hdmi_phy_i2c_set_addr(struct dw_hdmi *hdmi, u8 address)
{
@ -1517,7 +1651,7 @@ static int hdmi_phy_configure(struct dw_hdmi *hdmi,
	if (phy->has_svsret)
		dw_hdmi_phy_enable_svsret(hdmi, 1);

	dw_hdmi_phy_reset(hdmi);
	dw_hdmi_phy_gen2_reset(hdmi);

	hdmi_writeb(hdmi, HDMI_MC_HEACPHY_RST_ASSERT, HDMI_MC_HEACPHY_RST);

@ -2086,30 +2220,21 @@ static void dw_hdmi_clear_overflow(struct dw_hdmi *hdmi)
	 * then write one of the FC registers several times.
	 *
	 * The number of iterations matters and depends on the HDMI TX revision
	 * (and possibly on the platform). So far i.MX6Q (v1.30a), i.MX6DL
	 * (v1.31a) and multiple Allwinner SoCs (v1.32a) have been identified
	 * as needing the workaround, with 4 iterations for v1.30a and 1
	 * iteration for others.
	 * The Amlogic Meson GX SoCs (v2.01a) have been identified as needing
	 * the workaround with a single iteration.
	 * The Rockchip RK3288 SoC (v2.00a) and RK3328/RK3399 SoCs (v2.11a) have
	 * been identified as needing the workaround with a single iteration.
	 * (and possibly on the platform).
	 * 4 iterations for i.MX6Q(v1.30a) and 1 iteration for others.
	 * i.MX6DL (v1.31a), Allwinner SoCs (v1.32a), Rockchip RK3288 SoC (v2.00a),
	 * Amlogic Meson GX SoCs (v2.01a), RK3328/RK3399 SoCs (v2.11a)
	 * and i.MX8MPlus (v2.13a) have been identified as needing the workaround
	 * with a single iteration.
	 */

	switch (hdmi->version) {
	case 0x130a:
		count = 4;
		break;
	case 0x131a:
	case 0x132a:
	case 0x200a:
	case 0x201a:
	case 0x211a:
	case 0x212a:
	default:
		count = 1;
		break;
	default:
		return;
	}

	/* TMDS software reset */
@ -3242,6 +3367,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
	hdmi->plat_data = plat_data;
	hdmi->dev = dev;
	hdmi->sample_rate = 48000;
	hdmi->channels = 2;
	hdmi->disabled = true;
	hdmi->rxsense = true;
	hdmi->phy_mask = (u8)~(HDMI_PHY_HPD | HDMI_PHY_RX_SENSE);
@ -3465,6 +3591,24 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
		pdevinfo.size_data = sizeof(audio);
		pdevinfo.dma_mask = DMA_BIT_MASK(32);
		hdmi->audio = platform_device_register_full(&pdevinfo);
	} else if (iores && config3 & HDMI_CONFIG3_GPAUD) {
		struct dw_hdmi_audio_data audio;

		audio.phys = iores->start;
		audio.base = hdmi->regs;
		audio.irq = irq;
		audio.hdmi = hdmi;
		audio.get_eld = hdmi_audio_get_eld;

		hdmi->enable_audio = dw_hdmi_gp_audio_enable;
		hdmi->disable_audio = dw_hdmi_gp_audio_disable;

		pdevinfo.name = "dw-hdmi-gp-audio";
		pdevinfo.id = PLATFORM_DEVID_NONE;
		pdevinfo.data = &audio;
		pdevinfo.size_data = sizeof(audio);
		pdevinfo.dma_mask = DMA_BIT_MASK(32);
		hdmi->audio = platform_device_register_full(&pdevinfo);
	}

	if (!plat_data->disable_cec && (config0 & HDMI_CONFIG0_CEC)) {

@ -158,8 +158,17 @@
#define HDMI_FC_SPDDEVICEINF 0x1062
#define HDMI_FC_AUDSCONF 0x1063
#define HDMI_FC_AUDSSTAT 0x1064
#define HDMI_FC_AUDSCHNLS7 0x106e
#define HDMI_FC_AUDSCHNLS8 0x106f
#define HDMI_FC_AUDSV 0x1065
#define HDMI_FC_AUDSU 0x1066
#define HDMI_FC_AUDSCHNLS0 0x1067
#define HDMI_FC_AUDSCHNLS1 0x1068
#define HDMI_FC_AUDSCHNLS2 0x1069
#define HDMI_FC_AUDSCHNLS3 0x106A
#define HDMI_FC_AUDSCHNLS4 0x106B
#define HDMI_FC_AUDSCHNLS5 0x106C
#define HDMI_FC_AUDSCHNLS6 0x106D
#define HDMI_FC_AUDSCHNLS7 0x106E
#define HDMI_FC_AUDSCHNLS8 0x106F
#define HDMI_FC_DATACH0FILL 0x1070
#define HDMI_FC_DATACH1FILL 0x1071
#define HDMI_FC_DATACH2FILL 0x1072
@ -850,6 +859,9 @@ enum {
	HDMI_FC_DATAUTO0_VSD_MASK = 0x08,
	HDMI_FC_DATAUTO0_VSD_OFFSET = 3,

	/* FC_DATAUTO3 field values */
	HDMI_FC_DATAUTO3_GCP_AUTO = 0x04,

	/* PHY_CONF0 field values */
	HDMI_PHY_CONF0_PDZ_MASK = 0x80,
	HDMI_PHY_CONF0_PDZ_OFFSET = 7,

@ -527,6 +527,31 @@ unlock:
	return ret;
}

/**
 * drm_dp_dpcd_probe() - probe a given DPCD address with a 1-byte read access
 * @aux: DisplayPort AUX channel (SST)
 * @offset: address of the register to probe
 *
 * Probe the provided DPCD address by reading 1 byte from it. The function can
 * be used to trigger some side-effect the read access has, like waking up the
 * sink, without the need for the read-out value.
 *
 * Returns 0 if the read access succeeded, or a negative error code on failure.
 */
int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset)
{
	u8 buffer;
	int ret;

	ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset, &buffer, 1);
	WARN_ON(ret == 0);

	drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, &buffer, ret);

	return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL(drm_dp_dpcd_probe);

/**
 * drm_dp_dpcd_read() - read a series of bytes from the DPCD
 * @aux: DisplayPort AUX channel (SST or MST)
@ -559,10 +584,9 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
	 * monitor doesn't power down exactly after the throw away read.
	 */
	if (!aux->is_remote) {
		ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, DP_DPCD_REV,
					 buffer, 1);
		if (ret != 1)
			goto out;
		ret = drm_dp_dpcd_probe(aux, DP_DPCD_REV);
		if (ret < 0)
			return ret;
	}

	if (aux->is_remote)
@ -571,7 +595,6 @@ ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
		ret = drm_dp_dpcd_access(aux, DP_AUX_NATIVE_READ, offset,
					 buffer, size);

out:
	drm_dp_dump_access(aux, DP_AUX_NATIVE_READ, offset, buffer, ret);
	return ret;
}

@ -665,6 +665,9 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
	if (start + size == end)
		return __drm_buddy_alloc_range(mm, start, size, blocks);

	if (!IS_ALIGNED(size, min_page_size))
		return -EINVAL;

	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);

@ -297,15 +297,15 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
		return false;
	}

	saved_mode = crtc->mode;
	saved_hwmode = crtc->hwmode;
	drm_mode_init(&saved_mode, &crtc->mode);
	drm_mode_init(&saved_hwmode, &crtc->hwmode);
	saved_x = crtc->x;
	saved_y = crtc->y;

	/* Update crtc values up front so the driver can rely on them for mode
	 * setting.
	 */
	crtc->mode = *mode;
	drm_mode_copy(&crtc->mode, mode);
	crtc->x = x;
	crtc->y = y;

@ -341,7 +341,7 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
	}
	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	crtc->hwmode = *adjusted_mode;
	drm_mode_copy(&crtc->hwmode, adjusted_mode);

	/* Prepare the encoders and CRTCs before setting the mode. */
	drm_for_each_encoder(encoder, dev) {
@ -411,8 +411,8 @@ done:
	drm_mode_destroy(dev, adjusted_mode);
	if (!ret) {
		crtc->enabled = saved_enabled;
		crtc->mode = saved_mode;
		crtc->hwmode = saved_hwmode;
		drm_mode_copy(&crtc->mode, &saved_mode);
		drm_mode_copy(&crtc->hwmode, &saved_hwmode);
		crtc->x = saved_x;
		crtc->y = saved_y;
	}

@ -1568,6 +1568,38 @@ static const struct drm_display_mode edid_4k_modes[] = {

/*** DDC fetch and block validation ***/

static int edid_extension_block_count(const struct edid *edid)
{
	return edid->extensions;
}

static int edid_block_count(const struct edid *edid)
{
	return edid_extension_block_count(edid) + 1;
}

static int edid_size_by_blocks(int num_blocks)
{
	return num_blocks * EDID_LENGTH;
}

static int edid_size(const struct edid *edid)
{
	return edid_size_by_blocks(edid_block_count(edid));
}

static const void *edid_block_data(const struct edid *edid, int index)
{
	BUILD_BUG_ON(sizeof(*edid) != EDID_LENGTH);

	return edid + index;
}

static const void *edid_extension_block_data(const struct edid *edid, int index)
{
	return edid_block_data(edid, index + 1);
}

static const u8 edid_header[] = {
	0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
@ -1632,9 +1664,9 @@ static int edid_block_tag(const void *_block)
	return block[0];
}

static bool edid_is_zero(const void *edid, int length)
static bool edid_block_is_zero(const void *edid)
{
	return !memchr_inv(edid, 0, length);
	return !memchr_inv(edid, 0, EDID_LENGTH);
}

/**
@ -1654,8 +1686,8 @@ bool drm_edid_are_equal(const struct edid *edid1, const struct edid *edid2)
		return false;

	if (edid1) {
		edid1_len = EDID_LENGTH * (1 + edid1->extensions);
		edid2_len = EDID_LENGTH * (1 + edid2->extensions);
		edid1_len = edid_size(edid1);
		edid2_len = edid_size(edid2);

		if (edid1_len != edid2_len)
			return false;
@ -1670,7 +1702,9 @@ EXPORT_SYMBOL(drm_edid_are_equal);

enum edid_block_status {
	EDID_BLOCK_OK = 0,
	EDID_BLOCK_READ_FAIL,
	EDID_BLOCK_NULL,
	EDID_BLOCK_ZERO,
	EDID_BLOCK_HEADER_CORRUPT,
	EDID_BLOCK_HEADER_REPAIR,
	EDID_BLOCK_HEADER_FIXED,
@ -1689,15 +1723,23 @@ static enum edid_block_status edid_block_check(const void *_block,
	if (is_base_block) {
		int score = drm_edid_header_is_valid(block);

		if (score < clamp(edid_fixup, 0, 8))
			return EDID_BLOCK_HEADER_CORRUPT;
		if (score < clamp(edid_fixup, 0, 8)) {
			if (edid_block_is_zero(block))
				return EDID_BLOCK_ZERO;
			else
				return EDID_BLOCK_HEADER_CORRUPT;
		}

		if (score < 8)
			return EDID_BLOCK_HEADER_REPAIR;
	}

	if (edid_block_compute_checksum(block) != edid_block_get_checksum(block))
		return EDID_BLOCK_CHECKSUM;
	if (edid_block_compute_checksum(block) != edid_block_get_checksum(block)) {
		if (edid_block_is_zero(block))
			return EDID_BLOCK_ZERO;
		else
			return EDID_BLOCK_CHECKSUM;
	}

	if (is_base_block) {
		if (block->version != 1)
@ -1720,6 +1762,70 @@ static bool edid_block_valid(const void *block, bool base)
		edid_block_tag(block));
}

static void edid_block_status_print(enum edid_block_status status,
				    const struct edid *block,
				    int block_num)
{
	switch (status) {
	case EDID_BLOCK_OK:
		break;
	case EDID_BLOCK_READ_FAIL:
		pr_debug("EDID block %d read failed\n", block_num);
		break;
	case EDID_BLOCK_NULL:
		pr_debug("EDID block %d pointer is NULL\n", block_num);
		break;
	case EDID_BLOCK_ZERO:
		pr_notice("EDID block %d is all zeroes\n", block_num);
		break;
	case EDID_BLOCK_HEADER_CORRUPT:
		pr_notice("EDID has corrupt header\n");
		break;
	case EDID_BLOCK_HEADER_REPAIR:
		pr_debug("EDID corrupt header needs repair\n");
		break;
	case EDID_BLOCK_HEADER_FIXED:
		pr_debug("EDID corrupt header fixed\n");
		break;
	case EDID_BLOCK_CHECKSUM:
		if (edid_block_status_valid(status, edid_block_tag(block))) {
			pr_debug("EDID block %d (tag 0x%02x) checksum is invalid, remainder is %d, ignoring\n",
				 block_num, edid_block_tag(block),
				 edid_block_compute_checksum(block));
		} else {
			pr_notice("EDID block %d (tag 0x%02x) checksum is invalid, remainder is %d\n",
				  block_num, edid_block_tag(block),
				  edid_block_compute_checksum(block));
		}
		break;
	case EDID_BLOCK_VERSION:
		pr_notice("EDID has major version %d, instead of 1\n",
			  block->version);
		break;
	default:
		WARN(1, "EDID block %d unknown edid block status code %d\n",
		     block_num, status);
		break;
	}
}

static void edid_block_dump(const char *level, const void *block, int block_num)
{
	enum edid_block_status status;
	char prefix[20];

	status = edid_block_check(block, block_num == 0);
	if (status == EDID_BLOCK_ZERO)
		sprintf(prefix, "\t[%02x] ZERO ", block_num);
	else if (!edid_block_status_valid(status, edid_block_tag(block)))
		sprintf(prefix, "\t[%02x] BAD ", block_num);
	else
		sprintf(prefix, "\t[%02x] GOOD ", block_num);

	print_hex_dump(level, prefix, DUMP_PREFIX_NONE, 16, 1,
		       block, EDID_LENGTH, false);
}

/**
 * drm_edid_block_valid - Sanity check the EDID block (base or extension)
 * @raw_edid: pointer to raw EDID block
@ -1766,33 +1872,14 @@ bool drm_edid_block_valid(u8 *_block, int block_num, bool print_bad_edid,
		*edid_corrupt = true;
	}

	edid_block_status_print(status, block, block_num);

	/* Determine whether we can use this block with this status. */
	valid = edid_block_status_valid(status, edid_block_tag(block));

	/* Some fairly random status printouts. */
	if (status == EDID_BLOCK_CHECKSUM) {
		if (valid) {
			DRM_DEBUG("EDID block checksum is invalid, remainder is %d\n",
				  edid_block_compute_checksum(block));
			DRM_DEBUG("Assuming a KVM switch modified the block but left the original checksum\n");
		} else if (print_bad_edid) {
			DRM_NOTE("EDID block checksum is invalid, remainder is %d\n",
				 edid_block_compute_checksum(block));
		}
	} else if (status == EDID_BLOCK_VERSION) {
		DRM_NOTE("EDID has major version %d, instead of 1\n",
			 block->version);
	}

	if (!valid && print_bad_edid) {
		if (edid_is_zero(block, EDID_LENGTH)) {
			pr_notice("EDID block is all zeroes\n");
		} else {
			pr_notice("Raw EDID:\n");
			print_hex_dump(KERN_NOTICE,
				       " \t", DUMP_PREFIX_NONE, 16, 1,
				       block, EDID_LENGTH, false);
		}
	if (!valid && print_bad_edid && status != EDID_BLOCK_ZERO) {
		pr_notice("Raw EDID:\n");
		edid_block_dump(KERN_NOTICE, block, block_num);
	}

	return valid;
@ -1810,14 +1897,16 @@ EXPORT_SYMBOL(drm_edid_block_valid);
bool drm_edid_is_valid(struct edid *edid)
{
	int i;
	u8 *raw = (u8 *)edid;

	if (!edid)
		return false;

	for (i = 0; i <= edid->extensions; i++)
		if (!drm_edid_block_valid(raw + i * EDID_LENGTH, i, true, NULL))
	for (i = 0; i < edid_block_count(edid); i++) {
		void *block = (void *)edid_block_data(edid, i);

		if (!drm_edid_block_valid(block, i, true, NULL))
			return false;
	}

	return true;
}
@ -1830,13 +1919,13 @@ static struct edid *edid_filter_invalid_blocks(const struct edid *edid,
	int valid_extensions = edid->extensions - invalid_blocks;
	int i;

	new = kmalloc_array(valid_extensions + 1, EDID_LENGTH, GFP_KERNEL);
	new = kmalloc(edid_size_by_blocks(valid_extensions + 1), GFP_KERNEL);
	if (!new)
		goto out;

	dest_block = new;
	for (i = 0; i <= edid->extensions; i++) {
		const void *block = edid + i;
	for (i = 0; i < edid_block_count(edid); i++) {
		const void *block = edid_block_data(edid, i);

		if (edid_block_valid(block, i == 0))
			memcpy(dest_block++, block, EDID_LENGTH);
|
||||
}
|
||||
|
||||
static void connector_bad_edid(struct drm_connector *connector,
|
||||
u8 *edid, int num_blocks)
|
||||
const struct edid *edid, int num_blocks)
|
||||
{
|
||||
int i;
|
||||
u8 last_block;
|
||||
@ -1927,32 +2016,19 @@ static void connector_bad_edid(struct drm_connector *connector,
|
||||
* of 0x7e in the EDID of the _index_ of the last block in the
|
||||
* combined chunk of memory.
|
||||
*/
|
||||
last_block = edid[0x7e];
|
||||
last_block = edid->extensions;
|
||||
|
||||
/* Calculate real checksum for the last edid extension block data */
|
||||
if (last_block < num_blocks)
|
||||
connector->real_edid_checksum =
|
||||
edid_block_compute_checksum(edid + last_block * EDID_LENGTH);
|
||||
edid_block_compute_checksum(edid + last_block);
|
||||
|
||||
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
|
||||
return;
|
||||
|
||||
drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name);
|
||||
for (i = 0; i < num_blocks; i++) {
|
||||
u8 *block = edid + i * EDID_LENGTH;
|
||||
char prefix[20];
|
||||
|
||||
if (edid_is_zero(block, EDID_LENGTH))
|
||||
sprintf(prefix, "\t[%02x] ZERO ", i);
|
||||
else if (!drm_edid_block_valid(block, i, false, NULL))
|
||||
sprintf(prefix, "\t[%02x] BAD ", i);
|
||||
else
|
||||
sprintf(prefix, "\t[%02x] GOOD ", i);
|
||||
|
||||
print_hex_dump(KERN_DEBUG,
|
||||
prefix, DUMP_PREFIX_NONE, 16, 1,
|
||||
block, EDID_LENGTH, false);
|
||||
}
|
||||
for (i = 0; i < num_blocks; i++)
|
||||
edid_block_dump(KERN_DEBUG, edid + i, i);
|
||||
}
|
||||
|
||||
/* Get override or firmware EDID */
|
||||
@ -1999,43 +2075,39 @@ int drm_add_override_edid_modes(struct drm_connector *connector)
|
||||
}
|
||||
EXPORT_SYMBOL(drm_add_override_edid_modes);
|
||||
|
||||
static struct edid *drm_do_get_edid_base_block(struct drm_connector *connector,
|
||||
int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
|
||||
size_t len),
|
||||
void *data)
|
||||
typedef int read_block_fn(void *context, u8 *buf, unsigned int block, size_t len);
|
||||
|
||||
static enum edid_block_status edid_block_read(void *block, unsigned int block_num,
|
||||
read_block_fn read_block,
|
||||
void *context)
|
||||
{
|
||||
int *null_edid_counter = connector ? &connector->null_edid_counter : NULL;
|
||||
bool *edid_corrupt = connector ? &connector->edid_corrupt : NULL;
|
||||
void *edid;
|
||||
enum edid_block_status status;
|
||||
bool is_base_block = block_num == 0;
|
||||
int try;
|
||||
|
||||
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
|
||||
if (edid == NULL)
|
||||
return NULL;
|
||||
|
||||
/* base block fetch */
|
||||
for (try = 0; try < 4; try++) {
|
||||
if (get_edid_block(data, edid, 0, EDID_LENGTH))
|
||||
goto out;
|
||||
if (drm_edid_block_valid(edid, 0, false, edid_corrupt))
|
||||
break;
|
||||
if (try == 0 && edid_is_zero(edid, EDID_LENGTH)) {
|
||||
if (null_edid_counter)
|
||||
(*null_edid_counter)++;
|
||||
goto carp;
|
||||
if (read_block(context, block, block_num, EDID_LENGTH))
|
||||
return EDID_BLOCK_READ_FAIL;
|
||||
|
||||
status = edid_block_check(block, is_base_block);
|
||||
if (status == EDID_BLOCK_HEADER_REPAIR) {
|
||||
edid_header_fix(block);
|
||||
|
||||
/* Retry with fixed header, update status if that worked. */
|
||||
status = edid_block_check(block, is_base_block);
|
||||
if (status == EDID_BLOCK_OK)
|
||||
status = EDID_BLOCK_HEADER_FIXED;
|
||||
}
|
||||
|
||||
if (edid_block_status_valid(status, edid_block_tag(block)))
|
||||
break;
|
||||
|
||||
/* Fail early for unrepairable base block all zeros. */
|
||||
if (try == 0 && is_base_block && status == EDID_BLOCK_ZERO)
|
||||
break;
|
||||
}
|
||||
if (try == 4)
|
||||
goto carp;
|
||||
|
||||
return edid;
|
||||
|
||||
carp:
|
||||
if (connector)
|
||||
connector_bad_edid(connector, edid, 1);
|
||||
out:
|
||||
kfree(edid);
|
||||
return NULL;
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -2059,53 +2131,74 @@ out:
|
||||
* Return: Pointer to valid EDID or NULL if we couldn't find any.
|
||||
*/
|
||||
struct edid *drm_do_get_edid(struct drm_connector *connector,
|
||||
int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
|
||||
size_t len),
|
||||
void *data)
|
||||
read_block_fn read_block,
|
||||
void *context)
|
||||
{
|
||||
int j, invalid_blocks = 0;
|
||||
struct edid *edid, *new, *override;
|
||||
enum edid_block_status status;
|
||||
int i, invalid_blocks = 0;
|
||||
struct edid *edid, *new;
|
||||
|
||||
override = drm_get_override_edid(connector);
|
||||
if (override)
|
||||
return override;
|
||||
edid = drm_get_override_edid(connector);
|
||||
if (edid)
|
||||
goto ok;
|
||||
|
||||
edid = drm_do_get_edid_base_block(connector, get_edid_block, data);
|
||||
edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
|
||||
if (!edid)
|
||||
return NULL;
|
||||
|
||||
if (edid->extensions == 0)
|
||||
return edid;
|
||||
status = edid_block_read(edid, 0, read_block, context);
|
||||
|
||||
new = krealloc(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
|
||||
edid_block_status_print(status, edid, 0);
|
||||
|
||||
if (status == EDID_BLOCK_READ_FAIL)
|
||||
goto fail;
|
||||
|
||||
/* FIXME: Clarify what a corrupt EDID actually means. */
|
||||
if (status == EDID_BLOCK_OK || status == EDID_BLOCK_VERSION)
|
||||
connector->edid_corrupt = false;
|
||||
else
|
||||
connector->edid_corrupt = true;
|
||||
|
||||
if (!edid_block_status_valid(status, edid_block_tag(edid))) {
|
||||
if (status == EDID_BLOCK_ZERO)
|
||||
connector->null_edid_counter++;
|
||||
|
||||
connector_bad_edid(connector, edid, 1);
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (!edid_extension_block_count(edid))
|
||||
goto ok;
|
||||
|
||||
new = krealloc(edid, edid_size(edid), GFP_KERNEL);
|
||||
if (!new)
|
||||
goto out;
|
||||
goto fail;
|
||||
edid = new;
|
||||
|
||||
for (j = 1; j <= edid->extensions; j++) {
|
||||
void *block = edid + j;
|
||||
int try;
|
||||
for (i = 1; i < edid_block_count(edid); i++) {
|
||||
void *block = (void *)edid_block_data(edid, i);
|
||||
|
||||
for (try = 0; try < 4; try++) {
|
||||
if (get_edid_block(data, block, j, EDID_LENGTH))
|
||||
goto out;
|
||||
if (drm_edid_block_valid(block, j, false, NULL))
|
||||
break;
|
||||
}
|
||||
status = edid_block_read(block, i, read_block, context);
|
||||
|
||||
if (try == 4)
|
||||
edid_block_status_print(status, block, i);
|
||||
|
||||
if (!edid_block_status_valid(status, edid_block_tag(block))) {
|
||||
if (status == EDID_BLOCK_READ_FAIL)
|
||||
goto fail;
|
||||
invalid_blocks++;
|
||||
}
|
||||
}
|
||||
|
||||
if (invalid_blocks) {
|
||||
connector_bad_edid(connector, (u8 *)edid, edid->extensions + 1);
|
||||
connector_bad_edid(connector, edid, edid_block_count(edid));
|
||||
|
||||
edid = edid_filter_invalid_blocks(edid, invalid_blocks);
|
||||
}
|
||||
|
||||
ok:
|
||||
return edid;
|
||||
|
||||
out:
|
||||
fail:
|
||||
kfree(edid);
|
||||
return NULL;
|
||||
}
|
||||
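The hunk above swaps the open-coded get_edid_block function pointer for a named
callback type. A minimal sketch of that typedef, inferred from the call
read_block(context, block, block_num, EDID_LENGTH) above (the exact spelling is
an assumption, not quoted from this commit):

    /* Assumed shape of the EDID block-read callback used above;
     * returns 0 on success, non-zero on a failed transfer. */
    typedef int read_block_fn(void *context, u8 *buf,
                              unsigned int block, size_t len);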
@ -2199,20 +2292,27 @@ static u32 edid_extract_panel_id(const struct edid *edid)

u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
{
	const struct edid *edid;
	u32 panel_id;

	edid = drm_do_get_edid_base_block(NULL, drm_do_probe_ddc_edid, adapter);
	enum edid_block_status status;
	void *base_block;
	u32 panel_id = 0;

	/*
	 * There are no manufacturer IDs of 0, so if there is a problem reading
	 * the EDID then we'll just return 0.
	 */
	if (!edid)

	base_block = kmalloc(EDID_LENGTH, GFP_KERNEL);
	if (!base_block)
		return 0;

	panel_id = edid_extract_panel_id(edid);
	kfree(edid);
	status = edid_block_read(base_block, 0, drm_do_probe_ddc_edid, adapter);

	edid_block_status_print(status, base_block, 0);

	if (edid_block_status_valid(status, edid_block_tag(base_block)))
		panel_id = edid_extract_panel_id(base_block);

	kfree(base_block);

	return panel_id;
}
@ -2255,7 +2355,7 @@ EXPORT_SYMBOL(drm_get_edid_switcheroo);
 */
struct edid *drm_edid_duplicate(const struct edid *edid)
{
	return kmemdup(edid, (edid->extensions + 1) * EDID_LENGTH, GFP_KERNEL);
	return kmemdup(edid, edid_size(edid), GFP_KERNEL);
}
EXPORT_SYMBOL(drm_edid_duplicate);

@ -2439,8 +2539,8 @@ drm_for_each_detailed_block(const struct edid *edid, detailed_cb *cb, void *clos
	for (i = 0; i < EDID_DETAILED_TIMINGS; i++)
		cb(&(edid->detailed_timings[i]), closure);

	for (i = 1; i <= edid->extensions; i++) {
		const u8 *ext = (const u8 *)edid + (i * EDID_LENGTH);
	for (i = 0; i < edid_extension_block_count(edid); i++) {
		const u8 *ext = edid_extension_block_data(edid, i);

		switch (*ext) {
		case CEA_EXT:
@ -3410,17 +3510,17 @@ const u8 *drm_find_edid_extension(const struct edid *edid,
	int i;

	/* No EDID or EDID extensions */
	if (edid == NULL || edid->extensions == 0)
	if (!edid || !edid_extension_block_count(edid))
		return NULL;

	/* Find CEA extension */
	for (i = *ext_index; i < edid->extensions; i++) {
		edid_ext = (const u8 *)edid + EDID_LENGTH * (i + 1);
	for (i = *ext_index; i < edid_extension_block_count(edid); i++) {
		edid_ext = edid_extension_block_data(edid, i);
		if (edid_block_tag(edid_ext) == ext_id)
			break;
	}

	if (i >= edid->extensions)
	if (i >= edid_extension_block_count(edid))
		return NULL;

	*ext_index = i + 1;
@ -3551,9 +3651,11 @@ static u8 drm_match_cea_mode_clock_tolerance(const struct drm_display_mode *to_m
	match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;

	for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
		struct drm_display_mode cea_mode = *cea_mode_for_vic(vic);
		struct drm_display_mode cea_mode;
		unsigned int clock1, clock2;

		drm_mode_init(&cea_mode, cea_mode_for_vic(vic));

		/* Check both 60Hz and 59.94Hz */
		clock1 = cea_mode.clock;
		clock2 = cea_mode_alternate_clock(&cea_mode);
@ -3590,9 +3692,11 @@ u8 drm_match_cea_mode(const struct drm_display_mode *to_match)
	match_flags |= DRM_MODE_MATCH_ASPECT_RATIO;

	for (vic = 1; vic < cea_num_vics(); vic = cea_next_vic(vic)) {
		struct drm_display_mode cea_mode = *cea_mode_for_vic(vic);
		struct drm_display_mode cea_mode;
		unsigned int clock1, clock2;

		drm_mode_init(&cea_mode, cea_mode_for_vic(vic));

		/* Check both 60Hz and 59.94Hz */
		clock1 = cea_mode.clock;
		clock2 = cea_mode_alternate_clock(&cea_mode);
@ -771,7 +771,8 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
				    true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)

@ -151,7 +151,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
		return 0;

	obj = drm_gem_fb_get_obj(state->fb, 0);
	ret = dma_resv_get_singleton(obj->resv, false, &fence);
	ret = dma_resv_get_singleton(obj->resv, DMA_RESV_USAGE_WRITE, &fence);
	if (ret)
		return ret;

@ -837,7 +837,9 @@ EXPORT_SYMBOL(drm_mode_vrefresh);
void drm_mode_get_hv_timing(const struct drm_display_mode *mode,
			    int *hdisplay, int *vdisplay)
{
	struct drm_display_mode adjusted = *mode;
	struct drm_display_mode adjusted;

	drm_mode_init(&adjusted, mode);

	drm_mode_set_crtcinfo(&adjusted, CRTC_STEREO_DOUBLE_ONLY);
	*hdisplay = adjusted.crtc_hdisplay;

@ -644,7 +644,7 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,

	vblank->linedur_ns = linedur_ns;
	vblank->framedur_ns = framedur_ns;
	vblank->hwmode = *mode;
	drm_mode_copy(&vblank->hwmode, mode);

	drm_dbg_core(dev,
		     "crtc %u: hwmode: htotal %d, vtotal %d, vdisplay %d\n",

@ -380,12 +380,14 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
	}

	if (op & ETNA_PREP_NOSYNC) {
		if (!dma_resv_test_signaled(obj->resv, write))
		if (!dma_resv_test_signaled(obj->resv,
					    dma_resv_usage_rw(write)))
			return -EBUSY;
	} else {
		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);

		ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
					    true, remain);
		if (ret <= 0)
			return ret == 0 ? -ETIMEDOUT : ret;
	}

@ -202,14 +202,10 @@ static void submit_attach_object_fences(struct etnaviv_gem_submit *submit)

	for (i = 0; i < submit->nr_bos; i++) {
		struct drm_gem_object *obj = &submit->bos[i].obj->base;
		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;

		if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
			dma_resv_add_excl_fence(obj->resv,
						submit->out_fence);
		else
			dma_resv_add_shared_fence(obj->resv,
						  submit->out_fence);

		dma_resv_add_fence(obj->resv, submit->out_fence, write ?
				   DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
		submit_unlock_object(submit, i);
	}
}
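Several hunks above replace a raw "write" boolean with dma_resv_usage_rw(). A
short sketch of that helper as it exists in include/linux/dma-resv.h of this
era, reproduced here for context (it is not part of this diff):

    static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
    {
        /* Deliberately inverted: a new writer must wait for all existing
         * readers and writers, so it waits on READ usage (which includes
         * writes); a new reader only has to wait for existing writers. */
        return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
    }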
@ -396,9 +396,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
	drm_for_each_connector_iter(connector, &conn_iter) {
		gma_encoder = gma_attached_encoder(connector);

		switch (gma_encoder->type) {
		case INTEL_OUTPUT_LVDS:
		case INTEL_OUTPUT_MIPI:
		if (gma_encoder->type == INTEL_OUTPUT_LVDS ||
		    gma_encoder->type == INTEL_OUTPUT_MIPI) {
			ret = gma_backlight_init(dev);
			break;
		}

@ -103,40 +103,30 @@ config DRM_I915_USERPTR
	  If in doubt, say "Y".

config DRM_I915_GVT
	bool "Enable Intel GVT-g graphics virtualization host support"
	bool

config DRM_I915_GVT_KVMGT
	tristate "Enable KVM host support Intel GVT-g graphics virtualization"
	depends on DRM_I915
	depends on X86
	depends on 64BIT
	default n
	depends on KVM
	depends on VFIO_MDEV
	select DRM_I915_GVT
	select KVM_EXTERNAL_WRITE_TRACKING

	help
	  Choose this option if you want to enable Intel GVT-g graphics
	  virtualization technology host support with integrated graphics.
	  With GVT-g, it's possible to have one integrated graphics
	  device shared by multiple VMs under different hypervisors.
	  device shared by multiple VMs under KVM.

	  Note that at least one hypervisor like Xen or KVM is required for
	  this driver to work, and it only supports newer device from
	  Broadwell+. For further information and setup guide, you can
	  visit: http://01.org/igvt-g.

	  Now it's just a stub to support the modifications of i915 for
	  GVT device model. It requires at least one MPT modules for Xen/KVM
	  and other components of GVT device model to work. Use it under
	  you own risk.
	  Note that this driver only supports newer device from Broadwell on.
	  For further information and setup guide, you can visit:
	  http://01.org/igvt-g.

	  If in doubt, say "N".

config DRM_I915_GVT_KVMGT
	tristate "Enable KVM/VFIO support for Intel GVT-g"
	depends on DRM_I915_GVT
	depends on KVM
	depends on VFIO_MDEV
	select KVM_EXTERNAL_WRITE_TRACKING
	default n
	help
	  Choose this option if you want to enable KVMGT support for
	  Intel GVT-g.

config DRM_I915_PXP
	bool "Enable Intel PXP support"
	depends on DRM_I915
@ -223,6 +223,7 @@ i915-y += \
	display/intel_cursor.o \
	display/intel_display.o \
	display/intel_display_power.o \
	display/intel_display_power_map.o \
	display/intel_display_power_well.o \
	display/intel_dmc.o \
	display/intel_dpio_phy.o \
@ -331,13 +332,13 @@ i915-$(CONFIG_DRM_I915_SELFTEST) += \
# virtual gpu code
i915-y += i915_vgpu.o

ifeq ($(CONFIG_DRM_I915_GVT),y)
i915-y += intel_gvt.o
i915-$(CONFIG_DRM_I915_GVT) += \
	intel_gvt.o \
	intel_gvt_mmio_table.o
include $(src)/gvt/Makefile
endif

obj-$(CONFIG_DRM_I915) += i915.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += kvmgt.o

# header test

@ -13,6 +13,7 @@
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@ -1375,7 +1376,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
	dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);

@ -10,6 +10,7 @@
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_fifo_underrun.h"
@ -574,7 +575,7 @@ void g4x_hdmi_init(struct drm_i915_private *dev_priv,
	intel_encoder->shutdown = intel_hdmi_encoder_shutdown;

	intel_encoder->type = INTEL_OUTPUT_HDMI;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
	intel_encoder->port = port;
	if (IS_CHERRYVIEW(dev_priv)) {
		if (port == PORT_D)

@ -28,7 +28,7 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)

	if (IS_BROADWELL(i915)) {
		drm_WARN_ON(&i915->drm,
			    snb_pcode_write(i915, DISPLAY_IPS_CONTROL,
			    snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL,
					    IPS_ENABLE | IPS_PCODE_CONTROL));
		/*
		 * Quoting Art Runyan: "its not safe to expect any particular
@ -62,7 +62,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)

	if (IS_BROADWELL(i915)) {
		drm_WARN_ON(&i915->drm,
			    snb_pcode_write(i915, DISPLAY_IPS_CONTROL, 0));
			    snb_pcode_write(&i915->uncore, DISPLAY_IPS_CONTROL, 0));
		/*
		 * Wait for PCODE to finish disabling IPS. The BSpec specified
		 * 42ms timeout value leads to occasional timeouts so use 100ms

@ -399,8 +399,8 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
		intel_dsi->io_wakeref[port] =
			intel_display_power_get(dev_priv,
						port == PORT_A ?
						POWER_DOMAIN_PORT_DDI_A_IO :
						POWER_DOMAIN_PORT_DDI_B_IO);
						POWER_DOMAIN_PORT_DDI_IO_A :
						POWER_DOMAIN_PORT_DDI_IO_B);
	}
}

@ -1425,8 +1425,8 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
		wakeref = fetch_and_zero(&intel_dsi->io_wakeref[port]);
		intel_display_power_put(dev_priv,
					port == PORT_A ?
					POWER_DOMAIN_PORT_DDI_A_IO :
					POWER_DOMAIN_PORT_DDI_B_IO,
					POWER_DOMAIN_PORT_DDI_IO_A :
					POWER_DOMAIN_PORT_DDI_IO_B,
					wakeref);
	}

@ -1047,7 +1047,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
	if (ret < 0)
		goto unpin_fb;

	dma_resv_iter_begin(&cursor, obj->base.resv, false);
	dma_resv_iter_begin(&cursor, obj->base.resv,
			    DMA_RESV_USAGE_WRITE);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
					   fence);

@ -827,7 +827,7 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Enable audio codec on pipe %c, %u bytes ELD\n",
		    connector->base.id, connector->name,
		    encoder->base.base.id, encoder->base.name,
		    pipe, drm_eld_size(connector->eld));
		    pipe_name(pipe), drm_eld_size(connector->eld));

	/* FIXME precompute the ELD in .compute_config() */
	if (!connector->eld[0])
@ -888,7 +888,7 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s][ENCODER:%d:%s] Disable audio codec on pipe %c\n",
		    connector->base.id, connector->name,
		    encoder->base.base.id, encoder->base.name, pipe);
		    encoder->base.base.id, encoder->base.name, pipe_name(pipe));

	if (dev_priv->audio.funcs)
		dev_priv->audio.funcs->audio_codec_disable(encoder,
@ -185,10 +185,14 @@ static const struct {
	  .min_size = sizeof(struct bdb_edp), },
	{ .section_id = BDB_LVDS_OPTIONS,
	  .min_size = sizeof(struct bdb_lvds_options), },
	/*
	 * BDB_LVDS_LFP_DATA depends on BDB_LVDS_LFP_DATA_PTRS,
	 * so keep the two ordered.
	 */
	{ .section_id = BDB_LVDS_LFP_DATA_PTRS,
	  .min_size = sizeof(struct bdb_lvds_lfp_data_ptrs), },
	{ .section_id = BDB_LVDS_LFP_DATA,
	  .min_size = sizeof(struct bdb_lvds_lfp_data), },
	  .min_size = 0, /* special case */ },
	{ .section_id = BDB_LVDS_BACKLIGHT,
	  .min_size = sizeof(struct bdb_lfp_backlight_data), },
	{ .section_id = BDB_LFP_POWER,
@ -203,6 +207,23 @@ static const struct {
	  .min_size = sizeof(struct bdb_generic_dtd), },
};

static size_t lfp_data_min_size(struct drm_i915_private *i915)
{
	const struct bdb_lvds_lfp_data_ptrs *ptrs;
	size_t size;

	ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
	if (!ptrs)
		return 0;

	size = sizeof(struct bdb_lvds_lfp_data);
	if (ptrs->panel_name.table_size)
		size = max(size, ptrs->panel_name.offset +
			   sizeof(struct bdb_lvds_lfp_data_tail));

	return size;
}

static bool validate_lfp_data_ptrs(const void *bdb,
				   const struct bdb_lvds_lfp_data_ptrs *ptrs)
{
@ -310,16 +331,144 @@ static bool fixup_lfp_data_ptrs(const void *bdb, void *ptrs_block)
	return validate_lfp_data_ptrs(bdb, ptrs);
}

static const void *find_fp_timing_terminator(const u8 *data, int size)
{
	int i;

	for (i = 0; i < size - 1; i++) {
		if (data[i] == 0xff && data[i+1] == 0xff)
			return &data[i];
	}

	return NULL;
}

static int make_lfp_data_ptr(struct lvds_lfp_data_ptr_table *table,
			     int table_size, int total_size)
{
	if (total_size < table_size)
		return total_size;

	table->table_size = table_size;
	table->offset = total_size - table_size;

	return total_size - table_size;
}

static void next_lfp_data_ptr(struct lvds_lfp_data_ptr_table *next,
			      const struct lvds_lfp_data_ptr_table *prev,
			      int size)
{
	next->table_size = prev->table_size;
	next->offset = prev->offset + size;
}

static void *generate_lfp_data_ptrs(struct drm_i915_private *i915,
				    const void *bdb)
{
	int i, size, table_size, block_size, offset;
	const void *t0, *t1, *block;
	struct bdb_lvds_lfp_data_ptrs *ptrs;
	void *ptrs_block;

	block = find_raw_section(bdb, BDB_LVDS_LFP_DATA);
	if (!block)
		return NULL;

	drm_dbg_kms(&i915->drm, "Generating LFP data table pointers\n");

	block_size = get_blocksize(block);

	size = block_size;
	t0 = find_fp_timing_terminator(block, size);
	if (!t0)
		return NULL;

	size -= t0 - block - 2;
	t1 = find_fp_timing_terminator(t0 + 2, size);
	if (!t1)
		return NULL;

	size = t1 - t0;
	if (size * 16 > block_size)
		return NULL;

	ptrs_block = kzalloc(sizeof(*ptrs) + 3, GFP_KERNEL);
	if (!ptrs_block)
		return NULL;

	*(u8 *)(ptrs_block + 0) = BDB_LVDS_LFP_DATA_PTRS;
	*(u16 *)(ptrs_block + 1) = sizeof(*ptrs);
	ptrs = ptrs_block + 3;

	table_size = sizeof(struct lvds_pnp_id);
	size = make_lfp_data_ptr(&ptrs->ptr[0].panel_pnp_id, table_size, size);

	table_size = sizeof(struct lvds_dvo_timing);
	size = make_lfp_data_ptr(&ptrs->ptr[0].dvo_timing, table_size, size);

	table_size = t0 - block + 2;
	size = make_lfp_data_ptr(&ptrs->ptr[0].fp_timing, table_size, size);

	if (ptrs->ptr[0].fp_timing.table_size)
		ptrs->lvds_entries++;
	if (ptrs->ptr[0].dvo_timing.table_size)
		ptrs->lvds_entries++;
	if (ptrs->ptr[0].panel_pnp_id.table_size)
		ptrs->lvds_entries++;

	if (size != 0 || ptrs->lvds_entries != 3) {
		kfree(ptrs);
		return NULL;
	}

	size = t1 - t0;
	for (i = 1; i < 16; i++) {
		next_lfp_data_ptr(&ptrs->ptr[i].fp_timing, &ptrs->ptr[i-1].fp_timing, size);
		next_lfp_data_ptr(&ptrs->ptr[i].dvo_timing, &ptrs->ptr[i-1].dvo_timing, size);
		next_lfp_data_ptr(&ptrs->ptr[i].panel_pnp_id, &ptrs->ptr[i-1].panel_pnp_id, size);
	}

	size = t1 - t0;
	table_size = sizeof(struct lvds_lfp_panel_name);

	if (16 * (size + table_size) <= block_size) {
		ptrs->panel_name.table_size = table_size;
		ptrs->panel_name.offset = size * 16;
	}

	offset = block - bdb;

	for (i = 0; i < 16; i++) {
		ptrs->ptr[i].fp_timing.offset += offset;
		ptrs->ptr[i].dvo_timing.offset += offset;
		ptrs->ptr[i].panel_pnp_id.offset += offset;
	}

	if (ptrs->panel_name.table_size)
		ptrs->panel_name.offset += offset;

	return ptrs_block;
}

static void
init_bdb_block(struct drm_i915_private *i915,
	       const void *bdb, enum bdb_block_id section_id,
	       size_t min_size)
{
	struct bdb_block_entry *entry;
	void *temp_block = NULL;
	const void *block;
	size_t block_size;

	block = find_raw_section(bdb, section_id);

	/* Modern VBTs lack the LFP data table pointers block, make one up */
	if (!block && section_id == BDB_LVDS_LFP_DATA_PTRS) {
		temp_block = generate_lfp_data_ptrs(i915, bdb);
		if (temp_block)
			block = temp_block + 3;
	}
	if (!block)
		return;

@ -330,12 +479,16 @@ init_bdb_block(struct drm_i915_private *i915,

	entry = kzalloc(struct_size(entry, data, max(min_size, block_size) + 3),
			GFP_KERNEL);
	if (!entry)
	if (!entry) {
		kfree(temp_block);
		return;
	}

	entry->section_id = section_id;
	memcpy(entry->data, block - 3, block_size + 3);

	kfree(temp_block);

	drm_dbg_kms(&i915->drm, "Found BDB block %d (size %zu, min size %zu)\n",
		    section_id, block_size, min_size);

@ -358,6 +511,9 @@ static void init_bdb_blocks(struct drm_i915_private *i915,
		enum bdb_block_id section_id = bdb_blocks[i].section_id;
		size_t min_size = bdb_blocks[i].min_size;

		if (section_id == BDB_LVDS_LFP_DATA)
			min_size = lfp_data_min_size(i915);

		init_bdb_block(i915, bdb, section_id, min_size);
	}
}
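The "+ 3" offsets in generate_lfp_data_ptrs() and init_bdb_block() above
account for the 3-byte raw header in front of every BDB block. A sketch of
that layout (illustrative struct only; the driver writes the fields with raw
pointer stores as shown above):

    struct bdb_raw_block_header {
        u8 id;     /* block id, e.g. BDB_LVDS_LFP_DATA_PTRS */
        u16 size;  /* little-endian payload size, header excluded */
    } __packed;    /* payload starts at offset 3 */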
@ -428,6 +584,94 @@ get_lvds_fp_timing(const struct bdb_lvds_lfp_data *data,
	return (const void *)data + ptrs->ptr[index].fp_timing.offset;
}

static const struct bdb_lvds_lfp_data_tail *
get_lfp_data_tail(const struct bdb_lvds_lfp_data *data,
		  const struct bdb_lvds_lfp_data_ptrs *ptrs)
{
	if (ptrs->panel_name.table_size)
		return (const void *)data + ptrs->panel_name.offset;
	else
		return NULL;
}

static int opregion_get_panel_type(struct drm_i915_private *i915)
{
	return intel_opregion_get_panel_type(i915);
}

static int vbt_get_panel_type(struct drm_i915_private *i915)
{
	const struct bdb_lvds_options *lvds_options;

	lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
	if (!lvds_options)
		return -1;

	if (lvds_options->panel_type > 0xf) {
		drm_dbg_kms(&i915->drm, "Invalid VBT panel type 0x%x\n",
			    lvds_options->panel_type);
		return -1;
	}

	return lvds_options->panel_type;
}

static int fallback_get_panel_type(struct drm_i915_private *i915)
{
	return 0;
}

enum panel_type {
	PANEL_TYPE_OPREGION,
	PANEL_TYPE_VBT,
	PANEL_TYPE_FALLBACK,
};

static int get_panel_type(struct drm_i915_private *i915)
{
	struct {
		const char *name;
		int (*get_panel_type)(struct drm_i915_private *i915);
		int panel_type;
	} panel_types[] = {
		[PANEL_TYPE_OPREGION] = {
			.name = "OpRegion",
			.get_panel_type = opregion_get_panel_type,
		},
		[PANEL_TYPE_VBT] = {
			.name = "VBT",
			.get_panel_type = vbt_get_panel_type,
		},
		[PANEL_TYPE_FALLBACK] = {
			.name = "fallback",
			.get_panel_type = fallback_get_panel_type,
		},
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(panel_types); i++) {
		panel_types[i].panel_type = panel_types[i].get_panel_type(i915);

		drm_WARN_ON(&i915->drm, panel_types[i].panel_type > 0xf);

		if (panel_types[i].panel_type >= 0)
			drm_dbg_kms(&i915->drm, "Panel type (%s): %d\n",
				    panel_types[i].name, panel_types[i].panel_type);
	}

	if (panel_types[PANEL_TYPE_OPREGION].panel_type >= 0)
		i = PANEL_TYPE_OPREGION;
	else if (panel_types[PANEL_TYPE_VBT].panel_type >= 0)
		i = PANEL_TYPE_VBT;
	else
		i = PANEL_TYPE_FALLBACK;

	drm_dbg_kms(&i915->drm, "Selected panel type (%s): %d\n",
		    panel_types[i].name, panel_types[i].panel_type);

	return panel_types[i].panel_type;
}

/* Parse general panel options */
static void
parse_panel_options(struct drm_i915_private *i915)
@ -435,7 +679,6 @@ parse_panel_options(struct drm_i915_private *i915)
	const struct bdb_lvds_options *lvds_options;
	int panel_type;
	int drrs_mode;
	int ret;

	lvds_options = find_section(i915, BDB_LVDS_OPTIONS);
	if (!lvds_options)
@ -443,23 +686,7 @@ parse_panel_options(struct drm_i915_private *i915)

	i915->vbt.lvds_dither = lvds_options->pixel_dither;

	ret = intel_opregion_get_panel_type(i915);
	if (ret >= 0) {
		drm_WARN_ON(&i915->drm, ret > 0xf);
		panel_type = ret;
		drm_dbg_kms(&i915->drm, "Panel type: %d (OpRegion)\n",
			    panel_type);
	} else {
		if (lvds_options->panel_type > 0xf) {
			drm_dbg_kms(&i915->drm,
				    "Invalid VBT panel type 0x%x\n",
				    lvds_options->panel_type);
			return;
		}
		panel_type = lvds_options->panel_type;
		drm_dbg_kms(&i915->drm, "Panel type: %d (VBT)\n",
			    panel_type);
	}
	panel_type = get_panel_type(i915);

	i915->vbt.panel_type = panel_type;

@ -488,25 +715,16 @@ parse_panel_options(struct drm_i915_private *i915)
	}
}

/* Try to find integrated panel timing data */
static void
parse_lfp_panel_dtd(struct drm_i915_private *i915)
parse_lfp_panel_dtd(struct drm_i915_private *i915,
		    const struct bdb_lvds_lfp_data *lvds_lfp_data,
		    const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs)
{
	const struct bdb_lvds_lfp_data *lvds_lfp_data;
	const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs;
	const struct lvds_dvo_timing *panel_dvo_timing;
	const struct lvds_fp_timing *fp_timing;
	struct drm_display_mode *panel_fixed_mode;
	int panel_type = i915->vbt.panel_type;

	lvds_lfp_data = find_section(i915, BDB_LVDS_LFP_DATA);
	if (!lvds_lfp_data)
		return;

	lvds_lfp_data_ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
	if (!lvds_lfp_data_ptrs)
		return;

	panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
					       lvds_lfp_data_ptrs,
					       panel_type);
@ -537,6 +755,38 @@ parse_lfp_panel_dtd(struct drm_i915_private *i915)
	}
}

static void
parse_lfp_data(struct drm_i915_private *i915)
{
	const struct bdb_lvds_lfp_data *data;
	const struct bdb_lvds_lfp_data_tail *tail;
	const struct bdb_lvds_lfp_data_ptrs *ptrs;
	int panel_type = i915->vbt.panel_type;

	ptrs = find_section(i915, BDB_LVDS_LFP_DATA_PTRS);
	if (!ptrs)
		return;

	data = find_section(i915, BDB_LVDS_LFP_DATA);
	if (!data)
		return;

	if (!i915->vbt.lfp_lvds_vbt_mode)
		parse_lfp_panel_dtd(i915, data, ptrs);

	tail = get_lfp_data_tail(data, ptrs);
	if (!tail)
		return;

	if (i915->vbt.version >= 188) {
		i915->vbt.seamless_drrs_min_refresh_rate =
			tail->seamless_drrs_min_refresh_rate[panel_type];
		drm_dbg_kms(&i915->drm,
			    "Seamless DRRS min refresh rate: %d Hz\n",
			    i915->vbt.seamless_drrs_min_refresh_rate);
	}
}

static void
parse_generic_dtd(struct drm_i915_private *i915)
{
@ -545,6 +795,17 @@ parse_generic_dtd(struct drm_i915_private *i915)
	struct drm_display_mode *panel_fixed_mode;
	int num_dtd;

	/*
	 * Older VBTs provided DTD information for internal displays through
	 * the "LFP panel tables" block (42). As of VBT revision 229 the
	 * DTD information should be provided via a newer "generic DTD"
	 * block (58). Just to be safe, we'll try the new generic DTD block
	 * first on VBT >= 229, but still fall back to trying the old LFP
	 * block if that fails.
	 */
	if (i915->vbt.version < 229)
		return;

	generic_dtd = find_section(i915, BDB_GENERIC_DTD);
	if (!generic_dtd)
		return;
@ -615,23 +876,6 @@ parse_generic_dtd(struct drm_i915_private *i915)
	i915->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
}

static void
parse_panel_dtd(struct drm_i915_private *i915)
{
	/*
	 * Older VBTs provided provided DTD information for internal displays
	 * through the "LFP panel DTD" block (42). As of VBT revision 229,
	 * that block is now deprecated and DTD information should be provided
	 * via a newer "generic DTD" block (58). Just to be safe, we'll
	 * try the new generic DTD block first on VBT >= 229, but still fall
	 * back to trying the old LFP block if that fails.
	 */
	if (i915->vbt.version >= 229)
		parse_generic_dtd(i915);
	if (!i915->vbt.lfp_lvds_vbt_mode)
		parse_lfp_panel_dtd(i915);
}

static void
parse_lfp_backlight(struct drm_i915_private *i915)
{
@ -2708,7 +2952,8 @@ void intel_bios_init(struct drm_i915_private *i915)
	parse_general_features(i915);
	parse_general_definitions(i915);
	parse_panel_options(i915);
	parse_panel_dtd(i915);
	parse_generic_dtd(i915);
	parse_lfp_data(i915);
	parse_lfp_backlight(i915);
	parse_sdvo_panel_data(i915);
	parse_driver_features(i915);
@ -78,7 +78,7 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv,
	u16 dclk;
	int ret;

	ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
	ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			     ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
			     &val, &val2);
	if (ret)
@ -104,7 +104,7 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv,
	int ret;
	int i;

	ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
	ret = snb_pcode_read(&dev_priv->uncore, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
			     ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
	if (ret)
		return ret;
@ -123,7 +123,7 @@ int icl_pcode_restrict_qgv_points(struct drm_i915_private *dev_priv,
	int ret;

	/* bspec says to keep retrying for at least 1 ms */
	ret = skl_pcode_request(dev_priv, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
	ret = skl_pcode_request(&dev_priv->uncore, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
				points_mask,
				ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
				ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,

@ -800,7 +800,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	ret = snb_pcode_write(dev_priv, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	ret = snb_pcode_write(&dev_priv->uncore, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	if (ret) {
		drm_err(&dev_priv->drm,
			"failed to inform pcode about cdclk change\n");
@ -828,7 +828,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");

	snb_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
	snb_pcode_write(&dev_priv->uncore, HSW_PCODE_DE_WRITE_FREQ_REQ,
			cdclk_config->voltage_level);

	intel_de_write(dev_priv, CDCLK_FREQ,
@ -1086,7 +1086,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
	drm_WARN_ON_ONCE(&dev_priv->drm,
			 IS_SKYLAKE(dev_priv) && vco == 8640000);

	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
	ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE,
				SKL_CDCLK_READY_FOR_CHANGE, 3);
@ -1132,7 +1132,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
	intel_de_posting_read(dev_priv, CDCLK_CTL);

	/* inform PCU of the change */
	snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
	snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
			cdclk_config->voltage_level);

	intel_update_cdclk(dev_priv);
@ -1702,7 +1702,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,

	/* Inform power controller of upcoming frequency change. */
	if (DISPLAY_VER(dev_priv) >= 11)
		ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
		ret = skl_pcode_request(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
					SKL_CDCLK_PREPARE_FOR_CHANGE,
					SKL_CDCLK_READY_FOR_CHANGE,
					SKL_CDCLK_READY_FOR_CHANGE, 3);
@ -1711,7 +1711,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
		 * BSpec requires us to wait up to 150usec, but that leads to
		 * timeouts; the 2ms used here is based on experiment.
		 */
		ret = snb_pcode_write_timeout(dev_priv,
		ret = snb_pcode_write_timeout(&dev_priv->uncore,
					      HSW_PCODE_DE_WRITE_FREQ_REQ,
					      0x80000000, 150, 2);
	if (ret) {
@ -1774,7 +1774,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
	intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe));

	if (DISPLAY_VER(dev_priv) >= 11) {
		ret = snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
		ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_CDCLK_CONTROL,
				      cdclk_config->voltage_level);
	} else {
		/*
@ -1783,7 +1783,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
		 * FIXME: Waiting for the request completion could be delayed
		 * until the next PCODE request based on BSpec.
		 */
		ret = snb_pcode_write_timeout(dev_priv,
		ret = snb_pcode_write_timeout(&dev_priv->uncore,
					      HSW_PCODE_DE_WRITE_FREQ_REQ,
					      cdclk_config->voltage_level,
					      150, 2);
@ -40,6 +40,7 @@
#include "intel_ddi.h"
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@ -4364,7 +4365,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
	encoder->get_power_domains = intel_ddi_get_power_domains;

	encoder->type = INTEL_OUTPUT_DDI;
	encoder->power_domain = intel_port_to_power_domain(port);
	encoder->power_domain = intel_display_power_ddi_lanes_domain(dev_priv, port);
	encoder->port = port;
	encoder->cloneable = 0;
	encoder->pipe_mask = ~0;
@ -4492,8 +4493,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
	}

	drm_WARN_ON(&dev_priv->drm, port > PORT_I);
	dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
					port - PORT_A;
	dig_port->ddi_io_power_domain = intel_display_power_ddi_io_domain(dev_priv, port);

	if (init_dp) {
		if (!intel_ddi_init_dp_connector(dig_port))

@ -1673,7 +1673,9 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
		encoder->get_buf_trans = skl_get_buf_trans;
	} else if (IS_BROADWELL(i915)) {
		encoder->get_buf_trans = bdw_get_buf_trans;
	} else {
	} else if (IS_HASWELL(i915)) {
		encoder->get_buf_trans = hsw_get_buf_trans;
	} else {
		MISSING_CASE(INTEL_INFO(i915)->platform);
	}
}

@ -51,6 +51,7 @@
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
#include "display/intel_display_debugfs.h"
#include "display/intel_display_power.h"
#include "display/intel_dp.h"
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
@ -2157,153 +2158,82 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
	return TC_PORT_1 + port - PORT_C;
}

enum intel_display_power_domain intel_port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	case PORT_F:
		return POWER_DOMAIN_PORT_DDI_F_LANES;
	case PORT_G:
		return POWER_DOMAIN_PORT_DDI_G_LANES;
	case PORT_H:
		return POWER_DOMAIN_PORT_DDI_H_LANES;
	case PORT_I:
		return POWER_DOMAIN_PORT_DDI_I_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}

enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
	if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
		switch (dig_port->aux_ch) {
		case AUX_CH_C:
			return POWER_DOMAIN_AUX_C_TBT;
		case AUX_CH_D:
			return POWER_DOMAIN_AUX_D_TBT;
		case AUX_CH_E:
			return POWER_DOMAIN_AUX_E_TBT;
		case AUX_CH_F:
			return POWER_DOMAIN_AUX_F_TBT;
		case AUX_CH_G:
			return POWER_DOMAIN_AUX_G_TBT;
		case AUX_CH_H:
			return POWER_DOMAIN_AUX_H_TBT;
		case AUX_CH_I:
			return POWER_DOMAIN_AUX_I_TBT;
		default:
			MISSING_CASE(dig_port->aux_ch);
			return POWER_DOMAIN_AUX_C_TBT;
		}
	}
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);

	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
	if (intel_tc_port_in_tbt_alt_mode(dig_port))
		return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch);

	return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
}

/*
 * Converts aux_ch to power_domain without caring about TBT ports for that use
 * intel_aux_power_domain()
 */
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch)
{
	switch (aux_ch) {
	case AUX_CH_A:
		return POWER_DOMAIN_AUX_A;
	case AUX_CH_B:
		return POWER_DOMAIN_AUX_B;
	case AUX_CH_C:
		return POWER_DOMAIN_AUX_C;
	case AUX_CH_D:
		return POWER_DOMAIN_AUX_D;
	case AUX_CH_E:
		return POWER_DOMAIN_AUX_E;
	case AUX_CH_F:
		return POWER_DOMAIN_AUX_F;
	case AUX_CH_G:
		return POWER_DOMAIN_AUX_G;
	case AUX_CH_H:
		return POWER_DOMAIN_AUX_H;
	case AUX_CH_I:
		return POWER_DOMAIN_AUX_I;
	default:
		MISSING_CASE(aux_ch);
		return POWER_DOMAIN_AUX_A;
	}
}

static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
				   struct intel_power_domain_mask *mask)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;
	u64 mask;

	bitmap_zero(mask->bits, POWER_DOMAIN_NUM);

	if (!crtc_state->hw.active)
		return 0;
		return;

	mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
	set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
	set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
		set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);

	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT_ULL(intel_encoder->power_domain);
		set_bit(intel_encoder->power_domain, mask->bits);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		mask |= BIT_ULL(POWER_DOMAIN_AUDIO_MMIO);
		set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);

	if (crtc_state->shared_dpll)
		mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
		set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);

	if (crtc_state->dsc.compression_enable)
		mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));

	return mask;
		set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}

static u64
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state)
static void
modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
			       struct intel_power_domain_mask *old_domains)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	u64 domains, new_domains, old_domains;
	struct intel_power_domain_mask domains, new_domains;

	domains = get_crtc_power_domains(crtc_state);
	get_crtc_power_domains(crtc_state, &domains);

	new_domains = domains & ~crtc->enabled_power_domains.mask;
	old_domains = crtc->enabled_power_domains.mask & ~domains;
	bitmap_andnot(new_domains.bits,
		      domains.bits,
		      crtc->enabled_power_domains.mask.bits,
		      POWER_DOMAIN_NUM);
	bitmap_andnot(old_domains->bits,
		      crtc->enabled_power_domains.mask.bits,
		      domains.bits,
		      POWER_DOMAIN_NUM);

	for_each_power_domain(domain, new_domains)
	for_each_power_domain(domain, &new_domains)
		intel_display_power_get_in_set(dev_priv,
					       &crtc->enabled_power_domains,
					       domain);

	return old_domains;
}

static void modeset_put_crtc_power_domains(struct intel_crtc *crtc,
					   u64 domains)
					   struct intel_power_domain_mask *domains)
{
	intel_display_power_put_mask_in_set(to_i915(crtc->base.dev),
					    &crtc->enabled_power_domains,
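The conversion above moves the CRTC power-domain bookkeeping from a u64
bitmask to struct intel_power_domain_mask (declared later in this series as
DECLARE_BITMAP(bits, POWER_DOMAIN_NUM)), presumably because the domain enum is
outgrowing 64 entries. The set algebra translates mechanically; an illustrative
sketch, assuming two masks a and b:

    /* new = a & ~b on a u64 becomes: */
    bitmap_andnot(new.bits, a.bits, b.bits, POWER_DOMAIN_NUM);
    /* mask |= BIT_ULL(domain) becomes: */
    set_bit(domain, mask.bits);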
@ -4974,9 +4904,12 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
	    mode_changed && !crtc_state->hw.active)
		crtc_state->update_wm_post = true;

	if (mode_changed && crtc_state->hw.enable &&
	    !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
		ret = intel_dpll_crtc_compute_clock(crtc_state);
	if (mode_changed) {
		ret = intel_dpll_crtc_compute_clock(state, crtc);
		if (ret)
			return ret;

		ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
		if (ret)
			return ret;
	}
@ -6969,8 +6902,9 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct drm_display_mode adjusted_mode =
		crtc_state->hw.adjusted_mode;
	struct drm_display_mode adjusted_mode;

	drm_mode_init(&adjusted_mode, &crtc_state->hw.adjusted_mode);

	if (crtc_state->vrr.enable) {
		adjusted_mode.crtc_vtotal = crtc_state->vrr.vmax;
@ -7028,14 +6962,10 @@ intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state)

static void intel_modeset_clear_plls(struct intel_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc_state *new_crtc_state;
	struct intel_crtc *crtc;
	int i;

	if (!dev_priv->dpll_funcs)
		return;

	for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
		if (!intel_crtc_needs_modeset(new_crtc_state))
			continue;
@ -8505,7 +8435,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *new_crtc_state, *old_crtc_state;
	struct intel_crtc *crtc;
	u64 put_domains[I915_MAX_PIPES] = {};
	struct intel_power_domain_mask put_domains[I915_MAX_PIPES] = {};
	intel_wakeref_t wakeref = 0;
	int i;

@ -8522,9 +8452,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
					    new_crtc_state, i) {
		if (intel_crtc_needs_modeset(new_crtc_state) ||
		    new_crtc_state->update_pipe) {

			put_domains[crtc->pipe] =
				modeset_get_crtc_power_domains(new_crtc_state);
			modeset_get_crtc_power_domains(new_crtc_state, &put_domains[crtc->pipe]);
		}
	}

@ -8624,7 +8552,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
	for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
		intel_post_plane_update(state, crtc);

		modeset_put_crtc_power_domains(crtc, put_domains[crtc->pipe]);
		modeset_put_crtc_power_domains(crtc, &put_domains[crtc->pipe]);

		intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);

@ -9737,7 +9665,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
	}

	intel_plane_possible_crtcs_init(i915);
	intel_shared_dpll_init(dev);
	intel_shared_dpll_init(i915);
	intel_fdi_pll_freq_update(i915);

	intel_update_czclk(i915);
@ -9844,9 +9772,6 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
		PLL_REF_INPUT_DREFCLK |
		DPLL_VCO_ENABLE;

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
	intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
@ -9855,6 +9780,9 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
	intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
	intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));

	intel_de_write(dev_priv, FP0(pipe), fp);
	intel_de_write(dev_priv, FP1(pipe), fp);

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
@ -10465,11 +10393,11 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		u64 put_domains;
		struct intel_power_domain_mask put_domains;

		put_domains = modeset_get_crtc_power_domains(crtc_state);
		if (drm_WARN_ON(dev, put_domains))
			modeset_put_crtc_power_domains(crtc, put_domains);
		modeset_get_crtc_power_domains(crtc_state, &put_domains);
		if (drm_WARN_ON(dev, !bitmap_empty(put_domains.bits, POWER_DOMAIN_NUM)))
			modeset_put_crtc_power_domains(crtc, &put_domains);
	}

	intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);

@ -635,11 +635,9 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
void i9xx_crtc_clock_get(struct intel_crtc *crtc,
			 struct intel_crtc_state *pipe_config);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
enum intel_display_power_domain intel_port_to_power_domain(enum port port);
enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
enum intel_display_power_domain
intel_legacy_aux_to_power_domain(enum aux_ch aux_ch);
void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
				  struct intel_crtc_state *crtc_state);
void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
File diff suppressed because it is too large.
@ -8,8 +8,10 @@
|
||||
|
||||
#include "intel_runtime_pm.h"
|
||||
|
||||
enum aux_ch;
|
||||
enum dpio_channel;
|
||||
enum dpio_phy;
|
||||
enum port;
|
||||
struct drm_i915_private;
|
||||
struct i915_power_well;
|
||||
struct intel_encoder;
|
||||
@ -25,10 +27,10 @@ enum intel_display_power_domain {
|
||||
POWER_DOMAIN_PIPE_B,
|
||||
POWER_DOMAIN_PIPE_C,
|
||||
POWER_DOMAIN_PIPE_D,
|
||||
POWER_DOMAIN_PIPE_A_PANEL_FITTER,
|
||||
POWER_DOMAIN_PIPE_B_PANEL_FITTER,
|
||||
POWER_DOMAIN_PIPE_C_PANEL_FITTER,
|
||||
POWER_DOMAIN_PIPE_D_PANEL_FITTER,
|
||||
POWER_DOMAIN_PIPE_PANEL_FITTER_A,
|
||||
POWER_DOMAIN_PIPE_PANEL_FITTER_B,
|
||||
POWER_DOMAIN_PIPE_PANEL_FITTER_C,
|
||||
POWER_DOMAIN_PIPE_PANEL_FITTER_D,
|
||||
POWER_DOMAIN_TRANSCODER_A,
|
||||
POWER_DOMAIN_TRANSCODER_B,
|
||||
POWER_DOMAIN_TRANSCODER_C,
|
||||
@ -40,46 +42,34 @@ enum intel_display_power_domain {
|
||||
/* VDSC/joining for eDP/DSI transcoder (ICL) or pipe A (TGL) */
|
||||
POWER_DOMAIN_TRANSCODER_VDSC_PW2,
|
||||
|
||||
POWER_DOMAIN_PORT_DDI_A_LANES,
|
||||
POWER_DOMAIN_PORT_DDI_B_LANES,
|
||||
POWER_DOMAIN_PORT_DDI_C_LANES,
|
||||
POWER_DOMAIN_PORT_DDI_D_LANES,
|
||||
POWER_DOMAIN_PORT_DDI_E_LANES,
|
||||
POWER_DOMAIN_PORT_DDI_F_LANES,
|
||||
POWER_DOMAIN_PORT_DDI_G_LANES,
|
||||
POWER_DOMAIN_PORT_DDI_H_LANES,
|
||||
POWER_DOMAIN_PORT_DDI_I_LANES,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_A,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_B,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_C,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_D,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_E,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_F,
|
||||
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC1 = POWER_DOMAIN_PORT_DDI_D_LANES, /* tgl+ */
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC1,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC2,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC3,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC4,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC5,
|
||||
POWER_DOMAIN_PORT_DDI_LANES_TC6,
|
||||
|
||||
POWER_DOMAIN_PORT_DDI_LANES_D_XELPD = POWER_DOMAIN_PORT_DDI_LANES_TC5, /* XELPD */
|
||||
POWER_DOMAIN_PORT_DDI_LANES_E_XELPD,
|
||||
POWER_DOMAIN_PORT_DDI_IO_A,
|
||||
POWER_DOMAIN_PORT_DDI_IO_B,
|
||||
POWER_DOMAIN_PORT_DDI_IO_C,
|
||||
POWER_DOMAIN_PORT_DDI_IO_D,
|
||||
POWER_DOMAIN_PORT_DDI_IO_E,
|
||||
POWER_DOMAIN_PORT_DDI_IO_F,
|
||||
|
||||
POWER_DOMAIN_PORT_DDI_A_IO,
|
||||
POWER_DOMAIN_PORT_DDI_B_IO,
|
||||
POWER_DOMAIN_PORT_DDI_C_IO,
|
||||
POWER_DOMAIN_PORT_DDI_D_IO,
|
||||
POWER_DOMAIN_PORT_DDI_E_IO,
|
||||
POWER_DOMAIN_PORT_DDI_F_IO,
|
||||
POWER_DOMAIN_PORT_DDI_G_IO,
|
||||
POWER_DOMAIN_PORT_DDI_H_IO,
|
||||
POWER_DOMAIN_PORT_DDI_I_IO,
|
||||
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC1 = POWER_DOMAIN_PORT_DDI_D_IO, /* tgl+ */
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC1,
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC2,
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC3,
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC4,
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC5,
|
||||
POWER_DOMAIN_PORT_DDI_IO_TC6,
|
||||
|
||||
POWER_DOMAIN_PORT_DDI_IO_D_XELPD = POWER_DOMAIN_PORT_DDI_IO_TC5, /* XELPD */
|
||||
POWER_DOMAIN_PORT_DDI_IO_E_XELPD,
|
||||
|
||||
POWER_DOMAIN_PORT_DSI,
|
||||
POWER_DOMAIN_PORT_CRT,
|
||||
POWER_DOMAIN_PORT_OTHER,
|
||||
@ -92,30 +82,17 @@ enum intel_display_power_domain {
|
||||
POWER_DOMAIN_AUX_D,
|
||||
POWER_DOMAIN_AUX_E,
|
||||
POWER_DOMAIN_AUX_F,
|
||||
POWER_DOMAIN_AUX_G,
|
||||
POWER_DOMAIN_AUX_H,
|
||||
POWER_DOMAIN_AUX_I,
|
||||
|
||||
POWER_DOMAIN_AUX_USBC1 = POWER_DOMAIN_AUX_D, /* tgl+ */
|
||||
POWER_DOMAIN_AUX_USBC1,
|
||||
POWER_DOMAIN_AUX_USBC2,
|
||||
POWER_DOMAIN_AUX_USBC3,
|
||||
POWER_DOMAIN_AUX_USBC4,
|
||||
POWER_DOMAIN_AUX_USBC5,
|
||||
POWER_DOMAIN_AUX_USBC6,
|
||||
|
||||
POWER_DOMAIN_AUX_D_XELPD = POWER_DOMAIN_AUX_USBC5, /* XELPD */
|
||||
POWER_DOMAIN_AUX_E_XELPD,
|
||||
|
||||
POWER_DOMAIN_AUX_IO_A,
|
||||
POWER_DOMAIN_AUX_C_TBT,
|
||||
POWER_DOMAIN_AUX_D_TBT,
|
||||
POWER_DOMAIN_AUX_E_TBT,
|
||||
POWER_DOMAIN_AUX_F_TBT,
|
||||
POWER_DOMAIN_AUX_G_TBT,
|
||||
POWER_DOMAIN_AUX_H_TBT,
|
||||
POWER_DOMAIN_AUX_I_TBT,
|
||||
|
||||
POWER_DOMAIN_AUX_TBT1 = POWER_DOMAIN_AUX_D_TBT, /* tgl+ */
|
||||
POWER_DOMAIN_AUX_TBT1,
|
||||
POWER_DOMAIN_AUX_TBT2,
|
||||
POWER_DOMAIN_AUX_TBT3,
|
||||
POWER_DOMAIN_AUX_TBT4,
|
||||
@ -130,15 +107,20 @@ enum intel_display_power_domain {
|
||||
POWER_DOMAIN_INIT,
|
||||
|
||||
POWER_DOMAIN_NUM,
|
||||
POWER_DOMAIN_INVALID = POWER_DOMAIN_NUM,
|
||||
};
|
||||
|
||||
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
|
||||
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
|
||||
((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
|
||||
((pipe) + POWER_DOMAIN_PIPE_PANEL_FITTER_A)
|
||||
#define POWER_DOMAIN_TRANSCODER(tran) \
|
||||
((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
|
||||
(tran) + POWER_DOMAIN_TRANSCODER_A)
|
||||
|
||||
struct intel_power_domain_mask {
|
||||
DECLARE_BITMAP(bits, POWER_DOMAIN_NUM);
|
||||
};
|
||||
|
||||
struct i915_power_domains {
|
||||
/*
|
||||
* Power wells needed for initialization at driver init and suspend
|
||||
@ -156,41 +138,21 @@ struct i915_power_domains {
|
||||
|
||||
struct delayed_work async_put_work;
|
||||
intel_wakeref_t async_put_wakeref;
|
||||
u64 async_put_domains[2];
|
||||
struct intel_power_domain_mask async_put_domains[2];
|
||||
|
||||
struct i915_power_well *power_wells;
|
||||
};
|
||||
|
||||
struct intel_display_power_domain_set {
|
||||
u64 mask;
|
||||
struct intel_power_domain_mask mask;
|
||||
#ifdef CONFIG_DRM_I915_DEBUG_RUNTIME_PM
|
||||
intel_wakeref_t wakerefs[POWER_DOMAIN_NUM];
|
||||
#endif
|
||||
};
|
||||
|
||||
#define for_each_power_domain(domain, mask) \
|
||||
for ((domain) = 0; (domain) < POWER_DOMAIN_NUM; (domain)++) \
|
||||
for_each_if(BIT_ULL(domain) & (mask))
|
||||
|
||||
#define for_each_power_well(__dev_priv, __power_well) \
|
||||
for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
|
||||
(__power_well) - (__dev_priv)->power_domains.power_wells < \
|
||||
(__dev_priv)->power_domains.power_well_count; \
|
||||
(__power_well)++)
|
||||
|
||||
#define for_each_power_well_reverse(__dev_priv, __power_well) \
|
||||
for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
|
||||
(__dev_priv)->power_domains.power_well_count - 1; \
|
||||
(__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
|
||||
(__power_well)--)
|
||||
|
||||
#define for_each_power_domain_well(__dev_priv, __power_well, __domain_mask) \
|
||||
for_each_power_well(__dev_priv, __power_well) \
|
||||
for_each_if((__power_well)->desc->domains & (__domain_mask))
|
||||
|
||||
#define for_each_power_domain_well_reverse(__dev_priv, __power_well, __domain_mask) \
|
||||
for_each_power_well_reverse(__dev_priv, __power_well) \
|
||||
for_each_if((__power_well)->desc->domains & (__domain_mask))
|
||||
#define for_each_power_domain(__domain, __mask) \
|
||||
for ((__domain) = 0; (__domain) < POWER_DOMAIN_NUM; (__domain)++) \
|
||||
for_each_if(test_bit((__domain), (__mask)->bits))
|
||||
|

 int intel_power_domains_init(struct drm_i915_private *dev_priv);
 void intel_power_domains_cleanup(struct drm_i915_private *dev_priv);
@@ -271,17 +233,26 @@ intel_display_power_get_in_set_if_enabled(struct drm_i915_private *i915,
 void
 intel_display_power_put_mask_in_set(struct drm_i915_private *i915,
 				    struct intel_display_power_domain_set *power_domain_set,
-				    u64 mask);
+				    struct intel_power_domain_mask *mask);

 static inline void
 intel_display_power_put_all_in_set(struct drm_i915_private *i915,
 				   struct intel_display_power_domain_set *power_domain_set)
 {
-	intel_display_power_put_mask_in_set(i915, power_domain_set, power_domain_set->mask);
+	intel_display_power_put_mask_in_set(i915, power_domain_set, &power_domain_set->mask);
 }

 void intel_display_power_debug(struct drm_i915_private *i915, struct seq_file *m);

+enum intel_display_power_domain
+intel_display_power_ddi_lanes_domain(struct drm_i915_private *i915, enum port port);
+enum intel_display_power_domain
+intel_display_power_ddi_io_domain(struct drm_i915_private *i915, enum port port);
+enum intel_display_power_domain
+intel_display_power_legacy_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+enum intel_display_power_domain
+intel_display_power_tbt_aux_domain(struct drm_i915_private *i915, enum aux_ch aux_ch);
+
 /*
  * FIXME: We should probably switch this to a 0-based scheme to be consistent
  * with how we now name/number DBUF_CTL instances.
@@ -305,9 +276,4 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
 	for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \
 	     intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)

-void chv_phy_powergate_lanes(struct intel_encoder *encoder,
-			     bool override, unsigned int mask);
-bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
-			  enum dpio_channel ch, bool override);
-
 #endif /* __INTEL_DISPLAY_POWER_H__ */
 drivers/gpu/drm/i915/display/intel_display_power_map.c | 1501 ++++++++++++++++ (new file; diff suppressed because it is too large)
 drivers/gpu/drm/i915/display/intel_display_power_map.h |   14 ++ (new file)
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_POWER_MAP_H__
+#define __INTEL_DISPLAY_POWER_MAP_H__
+
+struct i915_power_domains;
+
+int intel_display_power_map_init(struct i915_power_domains *power_domains);
+void intel_display_power_map_cleanup(struct i915_power_domains *power_domains);
+
+#endif
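The new header pairs an init with a cleanup hook, suggesting the per-platform power-well tables are now built (and freed) at runtime rather than referenced statically. A hedged sketch of how the caller side presumably wires it up (the real intel_power_domains_init() also sets up locking and module-param policy first):

	int intel_power_domains_init(struct drm_i915_private *i915)
	{
		/* ...mutex/param setup elided (assumption)... */
		return intel_display_power_map_init(&i915->power_domains);
	}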
 File diff suppressed because it is too large
@@ -8,10 +8,23 @@
 #include <linux/types.h>

+#include "intel_display.h"
 #include "intel_display_power.h"

 struct drm_i915_private;
 struct i915_power_well;

+#define for_each_power_well(__dev_priv, __power_well) \
+	for ((__power_well) = (__dev_priv)->power_domains.power_wells; \
+	     (__power_well) - (__dev_priv)->power_domains.power_wells < \
+	     (__dev_priv)->power_domains.power_well_count; \
+	     (__power_well)++)
+
+#define for_each_power_well_reverse(__dev_priv, __power_well) \
+	for ((__power_well) = (__dev_priv)->power_domains.power_wells + \
+	     (__dev_priv)->power_domains.power_well_count - 1; \
+	     (__power_well) - (__dev_priv)->power_domains.power_wells >= 0; \
+	     (__power_well)--)
+
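Both iterators live off nothing but the array base and power_well_count, using pointer differences for the bounds test so no extra index variable is needed. A minimal standalone rendering of the same idiom (generic hypothetical types, not the driver's):

	struct well { int id; };

	/* forward walk: stop once the cursor has advanced count elements */
	#define for_each_well(wells, count, w) \
		for ((w) = (wells); (w) - (wells) < (count); (w)++)

	/* reverse walk: start at the last element, stop before the base */
	#define for_each_well_reverse(wells, count, w) \
		for ((w) = (wells) + (count) - 1; (w) - (wells) >= 0; (w)--)

	static int sum_ids(struct well *wells, int count)
	{
		struct well *w;
		int sum = 0;

		for_each_well(wells, count, w)
			sum += w->id;
		return sum;
	}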
 /*
  * i915_power_well_id:
  *
@@ -20,7 +33,7 @@ struct i915_power_well;
  * wells must be assigned DISP_PW_ID_NONE.
  */
 enum i915_power_well_id {
-	DISP_PW_ID_NONE,
+	DISP_PW_ID_NONE = 0, /* must be kept zero */

 	VLV_DISP_PW_DISP2D,
 	BXT_DISP_PW_DPIO_CMN_A,
@@ -36,45 +49,13 @@ enum i915_power_well_id {
 	TGL_DISP_PW_TC_COLD_OFF,
 };

-struct i915_power_well_regs {
-	i915_reg_t bios;
-	i915_reg_t driver;
-	i915_reg_t kvmr;
-	i915_reg_t debug;
-};
-
 struct i915_power_well_ops {
+	const struct i915_power_well_regs *regs;
 	/*
 	 * Synchronize the well's hw state to match the current sw state, for
 	 * example enable/disable it based on the current refcount. Called
 	 * during driver init and resume time, possibly after first calling
 	 * the enable/disable handlers.
 	 */
 	void (*sync_hw)(struct drm_i915_private *i915,
 			struct i915_power_well *power_well);
 	/*
 	 * Enable the well and resources that depend on it (for example
 	 * interrupts located on the well). Called after the 0->1 refcount
 	 * transition.
 	 */
 	void (*enable)(struct drm_i915_private *i915,
 		       struct i915_power_well *power_well);
 	/*
 	 * Disable the well and resources that depend on it. Called after
 	 * the 1->0 refcount transition.
 	 */
 	void (*disable)(struct drm_i915_private *i915,
 			struct i915_power_well *power_well);
 	/* Returns the hw enabled state. */
 	bool (*is_enabled)(struct drm_i915_private *i915,
 			   struct i915_power_well *power_well);
 };

-struct i915_power_well_desc {
+struct i915_power_well_instance {
 	const char *name;
-	bool always_on;
-	u64 domains;
+	const struct i915_power_domain_list {
+		const enum intel_display_power_domain *list;
+		u8 count;
+	} *domain_list;

 	/* unique identifier for this power well */
 	enum i915_power_well_id id;
 	/*
@@ -98,33 +79,45 @@ struct i915_power_well_desc {
 		 * control/status registers.
 		 */
 		u8 idx;
-		/* Mask of pipes whose IRQ logic is backed by the pw */
-		u8 irq_pipe_mask;
-		/*
-		 * Instead of waiting for the status bit to ack enables,
-		 * just wait a specific amount of time and then consider
-		 * the well enabled.
-		 */
-		u16 fixed_enable_delay;
-		/* The pw is backing the VGA functionality */
-		bool has_vga:1;
-		bool has_fuses:1;
-		/*
-		 * The pw is for an ICL+ TypeC PHY port in
-		 * Thunderbolt mode.
-		 */
-		bool is_tc_tbt:1;
 	} hsw;
 };
+
+struct i915_power_well_desc {
+	const struct i915_power_well_ops *ops;
+	const struct i915_power_well_instance_list {
+		const struct i915_power_well_instance *list;
+		u8 count;
+	} *instances;
+
+	/* Mask of pipes whose IRQ logic is backed by the pw */
+	u16 irq_pipe_mask:4;
+	u16 always_on:1;
+	/*
+	 * Instead of waiting for the status bit to ack enables,
+	 * just wait a specific amount of time and then consider
+	 * the well enabled.
+	 */
+	u16 fixed_enable_delay:1;
+	/* The pw is backing the VGA functionality */
+	u16 has_vga:1;
+	u16 has_fuses:1;
+	/*
+	 * The pw is for an ICL+ TypeC PHY port in
+	 * Thunderbolt mode.
+	 */
+	u16 is_tc_tbt:1;
+};
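The old flat descriptor repeated ops and flag state for every well; the split keeps one descriptor per class of identical wells plus a small per-instance record, and packs the one-bit flags into a single u16 of bitfields. A rough, hypothetical illustration of that layout (not the driver's actual tables):

	struct my_well_ops;	/* shared vtable, forward-declared */

	/* per-well part: only what differs between otherwise equal wells */
	struct my_well_instance {
		const char *name;
		unsigned char idx;	/* which control/status register copy */
	};

	/* shared part: one descriptor covers a whole class of wells */
	struct my_well_desc {
		const struct my_well_ops *ops;
		const struct my_instance_list {
			const struct my_well_instance *list;
			unsigned char count;
		} *instances;
		unsigned short irq_pipe_mask:4;	/* all flags pack into one u16 */
		unsigned short always_on:1;
		unsigned short has_vga:1;
	};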

 struct i915_power_well {
 	const struct i915_power_well_desc *desc;
+	struct intel_power_domain_mask domains;
 	/* power well enable/disable usage count */
 	int count;
 	/* cached hw enabled state */
 	bool hw_enabled;
+	/* index into desc->instances->list */
+	u8 instance_idx;
 };

 struct i915_power_well *lookup_power_well(struct drm_i915_private *i915,
@@ -147,7 +140,34 @@ bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
 					 enum i915_power_well_id power_well_id);
 bool intel_power_well_is_always_on(struct i915_power_well *power_well);
 const char *intel_power_well_name(struct i915_power_well *power_well);
-u64 intel_power_well_domains(struct i915_power_well *power_well);
+struct intel_power_domain_mask *intel_power_well_domains(struct i915_power_well *power_well);
 int intel_power_well_refcount(struct i915_power_well *power_well);

+void chv_phy_powergate_lanes(struct intel_encoder *encoder,
+			     bool override, unsigned int mask);
+bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+			  enum dpio_channel ch, bool override);
+
+void gen9_enable_dc5(struct drm_i915_private *dev_priv);
+void skl_enable_dc6(struct drm_i915_private *dev_priv);
+void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
+void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state);
+void gen9_disable_dc_states(struct drm_i915_private *dev_priv);
+void bxt_enable_dc9(struct drm_i915_private *dev_priv);
+void bxt_disable_dc9(struct drm_i915_private *dev_priv);
+
+extern const struct i915_power_well_ops i9xx_always_on_power_well_ops;
+extern const struct i915_power_well_ops chv_pipe_power_well_ops;
+extern const struct i915_power_well_ops chv_dpio_cmn_power_well_ops;
+extern const struct i915_power_well_ops i830_pipes_power_well_ops;
+extern const struct i915_power_well_ops hsw_power_well_ops;
+extern const struct i915_power_well_ops gen9_dc_off_power_well_ops;
+extern const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops;
+extern const struct i915_power_well_ops vlv_display_power_well_ops;
+extern const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops;
+extern const struct i915_power_well_ops vlv_dpio_power_well_ops;
+extern const struct i915_power_well_ops icl_aux_power_well_ops;
+extern const struct i915_power_well_ops icl_ddi_power_well_ops;
+extern const struct i915_power_well_ops tgl_tc_cold_off_ops;
+
 #endif
@@ -52,6 +52,10 @@

 #define DISPLAY_VER12_DMC_MAX_FW_SIZE	ICL_DMC_MAX_FW_SIZE

+#define DG2_DMC_PATH			DMC_PATH(dg2, 2, 06)
+#define DG2_DMC_VERSION_REQUIRED	DMC_VERSION(2, 06)
+MODULE_FIRMWARE(DG2_DMC_PATH);
+
 #define ADLP_DMC_PATH			DMC_PATH(adlp, 2, 16)
 #define ADLP_DMC_VERSION_REQUIRED	DMC_VERSION(2, 16)
 MODULE_FIRMWARE(ADLP_DMC_PATH);
@@ -374,6 +378,44 @@ static void dmc_set_fw_offset(struct intel_dmc *dmc,
 	}
 }

+static bool dmc_mmio_addr_sanity_check(struct intel_dmc *dmc,
+				       const u32 *mmioaddr, u32 mmio_count,
+				       int header_ver, u8 dmc_id)
+{
+	struct drm_i915_private *i915 = container_of(dmc, typeof(*i915), dmc);
+	u32 start_range, end_range;
+	int i;
+
+	if (dmc_id >= DMC_FW_MAX) {
+		drm_warn(&i915->drm, "Unsupported firmware id %u\n", dmc_id);
+		return false;
+	}
+
+	if (header_ver == 1) {
+		start_range = DMC_MMIO_START_RANGE;
+		end_range = DMC_MMIO_END_RANGE;
+	} else if (dmc_id == DMC_FW_MAIN) {
+		start_range = TGL_MAIN_MMIO_START;
+		end_range = TGL_MAIN_MMIO_END;
+	} else if (DISPLAY_VER(i915) >= 13) {
+		start_range = ADLP_PIPE_MMIO_START;
+		end_range = ADLP_PIPE_MMIO_END;
+	} else if (DISPLAY_VER(i915) >= 12) {
+		start_range = TGL_PIPE_MMIO_START(dmc_id);
+		end_range = TGL_PIPE_MMIO_END(dmc_id);
+	} else {
+		drm_warn(&i915->drm, "Unknown mmio range for sanity check");
+		return false;
+	}
+
+	for (i = 0; i < mmio_count; i++) {
+		if (mmioaddr[i] < start_range || mmioaddr[i] > end_range)
+			return false;
+	}
+
+	return true;
+}
+
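The check rejects a firmware blob if any MMIO address falls outside the window for that display version and firmware id; e.g. a version-2+ pipe firmware with dmc_id 1 on TGL must keep every entry within 0x92000-0x93FFF. The core test, as a standalone sketch (hypothetical helper, not the driver's code):

	#include <stdbool.h>
	#include <stdint.h>

	static bool addrs_in_range(const uint32_t *addr, unsigned int count,
				   uint32_t start, uint32_t end)
	{
		unsigned int i;

		for (i = 0; i < count; i++)
			if (addr[i] < start || addr[i] > end)
				return false;	/* one bad register fails the blob */
		return true;
	}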
 static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
 			       const struct intel_dmc_header_base *dmc_header,
 			       size_t rem_size, u8 dmc_id)
@@ -443,6 +485,12 @@ static u32 parse_dmc_fw_header(struct intel_dmc *dmc,
 		return 0;
 	}

+	if (!dmc_mmio_addr_sanity_check(dmc, mmioaddr, mmio_count,
+					dmc_header->header_ver, dmc_id)) {
+		drm_err(&i915->drm, "DMC firmware has wrong MMIO addresses\n");
+		return 0;
+	}
+
 	for (i = 0; i < mmio_count; i++) {
 		dmc_info->mmioaddr[i] = _MMIO(mmioaddr[i]);
 		dmc_info->mmiodata[i] = mmiodata[i];
@@ -688,7 +736,11 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv)
 	 */
 	intel_dmc_runtime_pm_get(dev_priv);

-	if (IS_ALDERLAKE_P(dev_priv)) {
+	if (IS_DG2(dev_priv)) {
+		dmc->fw_path = DG2_DMC_PATH;
+		dmc->required_version = DG2_DMC_VERSION_REQUIRED;
+		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
+	} else if (IS_ALDERLAKE_P(dev_priv)) {
 		dmc->fw_path = ADLP_DMC_PATH;
 		dmc->required_version = ADLP_DMC_VERSION_REQUIRED;
 		dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE;
@ -16,7 +16,23 @@
 #define DMC_LAST_WRITE		_MMIO(0x8F034)
 #define DMC_LAST_WRITE_VALUE	0xc003b400
 #define DMC_MMIO_START_RANGE	0x80000
 #define DMC_MMIO_END_RANGE	0x8FFFF
+#define DMC_V1_MMIO_START_RANGE	0x80000
+#define TGL_MAIN_MMIO_START	0x8F000
+#define TGL_MAIN_MMIO_END	0x8FFFF
+#define _TGL_PIPEA_MMIO_START	0x92000
+#define _TGL_PIPEA_MMIO_END	0x93FFF
+#define _TGL_PIPEB_MMIO_START	0x96000
+#define _TGL_PIPEB_MMIO_END	0x97FFF
+#define ADLP_PIPE_MMIO_START	0x5F000
+#define ADLP_PIPE_MMIO_END	0x5FFFF
+
+#define TGL_PIPE_MMIO_START(dmc_id)	_PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_START,\
+						   _TGL_PIPEB_MMIO_START)
+
+#define TGL_PIPE_MMIO_END(dmc_id)	_PICK_EVEN(((dmc_id) - 1), _TGL_PIPEA_MMIO_END,\
+						   _TGL_PIPEB_MMIO_END)
+
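With i915's usual definition _PICK_EVEN(i, a, b) = (a) + (i) * ((b) - (a)) (an assumption here; the macro itself is defined elsewhere in the driver), the per-pipe windows work out as:

	TGL_PIPE_MMIO_START(1) = _PICK_EVEN(0, 0x92000, 0x96000) = 0x92000  /* pipe A */
	TGL_PIPE_MMIO_START(2) = _PICK_EVEN(1, 0x92000, 0x96000) = 0x96000  /* pipe B */

so dmc_id selects the matching pipe's MMIO range without a lookup table.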
 #define SKL_DMC_DC3_DC5_COUNT	_MMIO(0x80030)
 #define SKL_DMC_DC5_DC6_COUNT	_MMIO(0x8002C)
 #define BXT_DMC_DC3_DC5_COUNT	_MMIO(0x80038)
@@ -97,6 +97,14 @@

 #define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1	0x359

+enum intel_dp_aux_backlight_modparam {
+	INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
+	INTEL_DP_AUX_BACKLIGHT_OFF = 0,
+	INTEL_DP_AUX_BACKLIGHT_ON = 1,
+	INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
+	INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
+};
+
 /* Intel EDP backlight callbacks */
 static bool
 intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
@@ -126,6 +134,24 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
 		return false;
 	}

+	/*
+	 * If we don't have HDR static metadata there is no way to
+	 * runtime detect used range for nits based control. For now
+	 * do not use Intel proprietary eDP backlight control if we
+	 * don't have this data in panel EDID. In case we find panel
+	 * which supports only nits based control, but doesn't provide
+	 * HDR static metadata we need to start maintaining table of
+	 * ranges for such panels.
+	 */
+	if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
+	    !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
+	      BIT(HDMI_STATIC_METADATA_TYPE1))) {
+		drm_info(&i915->drm,
+			 "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. If your system needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+			 INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
+		return false;
+	}
+
 	panel->backlight.edp.intel.sdr_uses_aux =
 		tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;

@@ -413,14 +439,6 @@ static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
 	.get = intel_dp_aux_vesa_get_backlight,
 };

-enum intel_dp_aux_backlight_modparam {
-	INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
-	INTEL_DP_AUX_BACKLIGHT_OFF = 0,
-	INTEL_DP_AUX_BACKLIGHT_ON = 1,
-	INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
-	INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
-};
-
 int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
 {
 	struct drm_device *dev = connector->base.dev;
@@ -82,19 +82,8 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp,
 					    const u8 dpcd[DP_RECEIVER_CAP_SIZE])
 {
-	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
-	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
 	int ret;

-	if (intel_dp_is_edp(intel_dp))
-		return false;
-
-	/*
-	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
-	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
-	 */
-	if (DISPLAY_VER(i915) < 10 || IS_GEMINILAKE(i915))
-		return false;
-
 	ret = drm_dp_read_lttpr_common_caps(&intel_dp->aux, dpcd,
 					    intel_dp->lttpr_common_caps);
 	if (ret < 0)
@@ -197,13 +186,25 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEIVER_CAP_SIZE])
  */
 int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
 {
-	u8 dpcd[DP_RECEIVER_CAP_SIZE];
-	int lttpr_count;
+	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+	int lttpr_count = 0;

-	if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
-		return -EIO;
+	/*
+	 * Detecting LTTPRs must be avoided on platforms with an AUX timeout
+	 * period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
+	 */
+	if (!intel_dp_is_edp(intel_dp) &&
+	    (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
+		u8 dpcd[DP_RECEIVER_CAP_SIZE];

-	lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
+		if (drm_dp_dpcd_probe(&intel_dp->aux, DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
+			return -EIO;
+
+		if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
+			return -EIO;
+
+		lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);
+	}

 	/*
 	 * The DPTX shall read the DPRX caps after LTTPR detection, so re-read
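Net effect: the eDP and AUX-timeout guards now live at the single caller, and a DPCD probe of the LTTPR capability field precedes the receiver-cap reads, so detection runs probe, then read DPRX caps, then init LTTPRs. Condensed (a summary of the hunk above, not new driver code):

	if (drm_dp_dpcd_probe(&intel_dp->aux,
			      DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
		return -EIO;	/* AUX is dead: give up early */
	if (drm_dp_read_dpcd_caps(&intel_dp->aux, dpcd))
		return -EIO;	/* no usable DPRX caps */
	lttpr_count = intel_dp_init_lttpr(intel_dp, dpcd);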
@@ -24,6 +24,7 @@
 #include "intel_ddi.h"
 #include "intel_ddi_buf_trans.h"
 #include "intel_de.h"
+#include "intel_display_power_well.h"
 #include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dpio_phy.h"
@@ -18,7 +18,10 @@
 #include "vlv_sideband.h"

 struct intel_dpll_funcs {
-	int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state);
+	int (*crtc_compute_clock)(struct intel_atomic_state *state,
+				  struct intel_crtc *crtc);
+	int (*crtc_get_shared_dpll)(struct intel_atomic_state *state,
+				    struct intel_crtc *crtc);
 };

 struct intel_limit {
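intel_dpll_funcs is a small per-platform vtable: every platform fills in crtc_compute_clock, and only platforms with shared PLLs provide crtc_get_shared_dpll. A generic sketch of the dispatch pattern (hypothetical types, not the driver's code):

	struct dev;
	struct ops {
		int (*compute)(struct dev *d);
		int (*reserve)(struct dev *d);	/* optional: may be NULL */
	};

	static int run(const struct ops *ops, struct dev *d)
	{
		int ret = ops->compute(d);

		if (ret)
			return ret;
		if (!ops->reserve)	/* platforms without shared PLLs skip this */
			return 0;
		return ops->reserve(d);
	}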
@@ -759,8 +762,8 @@ chv_find_best_dpll(const struct intel_limit *limit,
 bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
 			struct dpll *best_clock)
 {
-	int refclk = 100000;
 	const struct intel_limit *limit = &intel_limits_bxt;
+	int refclk = 100000;

 	return chv_find_best_dpll(limit, crtc_state,
 				  crtc_state->port_clock, refclk,
@@ -927,32 +930,48 @@ static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
 	crtc_state->dpll_hw_state.dpll = dpll;
 }

-static int hsw_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+static int hsw_crtc_compute_clock(struct intel_atomic_state *state,
+				  struct intel_crtc *crtc)
+{
+	return 0;
+}
+
+static int hsw_crtc_get_shared_dpll(struct intel_atomic_state *state,
+				    struct intel_crtc *crtc)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-	struct intel_atomic_state *state =
-		to_intel_atomic_state(crtc_state->uapi.state);
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
 	struct intel_encoder *encoder =
 		intel_get_crtc_new_encoder(state, crtc_state);
+	int ret;

-	if (IS_DG2(dev_priv))
-		return intel_mpllb_calc_state(crtc_state, encoder);
-
 	if (DISPLAY_VER(dev_priv) < 11 &&
 	    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
 		return 0;

-	if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
+	ret = intel_reserve_shared_dplls(state, crtc, encoder);
+	if (ret) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "failed to find PLL for pipe %c\n",
 			    pipe_name(crtc->pipe));
-		return -EINVAL;
+		return ret;
 	}

 	return 0;
 }

+static int dg2_crtc_compute_clock(struct intel_atomic_state *state,
+				  struct intel_crtc *crtc)
+{
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+	struct intel_encoder *encoder =
+		intel_get_crtc_new_encoder(state, crtc_state);
+
+	return intel_mpllb_calc_state(crtc_state, encoder);
+}
+
static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
|
||||
{
|
||||
return dpll->m < factor * dpll->n;
|
||||
@ -1068,18 +1087,15 @@ static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
|
||||
crtc_state->dpll_hw_state.dpll = dpll;
|
||||
}
|
||||
|
||||
static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
static int ilk_crtc_compute_clock(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
struct intel_atomic_state *state =
|
||||
to_intel_atomic_state(crtc_state->uapi.state);
|
||||
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
const struct intel_limit *limit;
|
||||
int refclk = 120000;
|
||||
|
||||
memset(&crtc_state->dpll_hw_state, 0,
|
||||
sizeof(crtc_state->dpll_hw_state));
|
||||
|
||||
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
|
||||
if (!crtc_state->has_pch_encoder)
|
||||
return 0;
|
||||
@ -1118,11 +1134,27 @@ static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
ilk_compute_dpll(crtc_state, &crtc_state->dpll,
|
||||
&crtc_state->dpll);
|
||||
|
||||
if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int ilk_crtc_get_shared_dpll(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
int ret;
|
||||
|
||||
/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
|
||||
if (!crtc_state->has_pch_encoder)
|
||||
return 0;
|
||||
|
||||
ret = intel_reserve_shared_dplls(state, crtc, NULL);
|
||||
if (ret) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"failed to find PLL for pipe %c\n",
|
||||
pipe_name(crtc->pipe));
|
||||
return -EINVAL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -1163,14 +1195,14 @@ void chv_compute_dpll(struct intel_crtc_state *crtc_state)
|
||||
(crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
|
||||
}
|
||||
|
||||
static int chv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
static int chv_crtc_compute_clock(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
int refclk = 100000;
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
const struct intel_limit *limit = &intel_limits_chv;
|
||||
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
|
||||
|
||||
memset(&crtc_state->dpll_hw_state, 0,
|
||||
sizeof(crtc_state->dpll_hw_state));
|
||||
int refclk = 100000;
|
||||
|
||||
if (!crtc_state->clock_set &&
|
||||
!chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
|
||||
@ -1184,14 +1216,14 @@ static int chv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
static int vlv_crtc_compute_clock(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
int refclk = 100000;
|
||||
struct drm_i915_private *i915 = to_i915(state->base.dev);
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
const struct intel_limit *limit = &intel_limits_vlv;
|
||||
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
|
||||
|
||||
memset(&crtc_state->dpll_hw_state, 0,
|
||||
sizeof(crtc_state->dpll_hw_state));
|
||||
int refclk = 100000;
|
||||
|
||||
if (!crtc_state->clock_set &&
|
||||
!vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
|
||||
@ -1205,16 +1237,15 @@ static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
static int g4x_crtc_compute_clock(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
const struct intel_limit *limit;
|
||||
int refclk = 96000;
|
||||
|
||||
memset(&crtc_state->dpll_hw_state, 0,
|
||||
sizeof(crtc_state->dpll_hw_state));
|
||||
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
|
||||
if (intel_panel_use_ssc(dev_priv)) {
|
||||
refclk = dev_priv->vbt.lvds_ssc_freq;
|
||||
@ -1251,16 +1282,15 @@ static int g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
static int pnv_crtc_compute_clock(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
const struct intel_limit *limit;
|
||||
int refclk = 96000;
|
||||
|
||||
memset(&crtc_state->dpll_hw_state, 0,
|
||||
sizeof(crtc_state->dpll_hw_state));
|
||||
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
|
||||
if (intel_panel_use_ssc(dev_priv)) {
|
||||
refclk = dev_priv->vbt.lvds_ssc_freq;
|
||||
@ -1288,16 +1318,15 @@ static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
static int i9xx_crtc_compute_clock(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
const struct intel_limit *limit;
|
||||
int refclk = 96000;
|
||||
|
||||
memset(&crtc_state->dpll_hw_state, 0,
|
||||
sizeof(crtc_state->dpll_hw_state));
|
||||
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
|
||||
if (intel_panel_use_ssc(dev_priv)) {
|
||||
refclk = dev_priv->vbt.lvds_ssc_freq;
|
||||
@ -1325,16 +1354,15 @@ static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i8xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
static int i8xx_crtc_compute_clock(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc)
|
||||
{
|
||||
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
const struct intel_limit *limit;
|
||||
int refclk = 48000;
|
||||
|
||||
memset(&crtc_state->dpll_hw_state, 0,
|
||||
sizeof(crtc_state->dpll_hw_state));
|
||||
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
|
||||
if (intel_panel_use_ssc(dev_priv)) {
|
||||
refclk = dev_priv->vbt.lvds_ssc_freq;
|
||||
@ -1364,12 +1392,18 @@ static int i8xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
|
||||
return 0;
|
||||
}
|
||||
|
||||
+static const struct intel_dpll_funcs dg2_dpll_funcs = {
+	.crtc_compute_clock = dg2_crtc_compute_clock,
+};
+
 static const struct intel_dpll_funcs hsw_dpll_funcs = {
 	.crtc_compute_clock = hsw_crtc_compute_clock,
+	.crtc_get_shared_dpll = hsw_crtc_get_shared_dpll,
 };

 static const struct intel_dpll_funcs ilk_dpll_funcs = {
 	.crtc_compute_clock = ilk_crtc_compute_clock,
+	.crtc_get_shared_dpll = ilk_crtc_get_shared_dpll,
 };

 static const struct intel_dpll_funcs chv_dpll_funcs = {
@@ -1396,18 +1430,54 @@ static const struct intel_dpll_funcs i8xx_dpll_funcs = {
 	.crtc_compute_clock = i8xx_crtc_compute_clock,
 };

-int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state)
+int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
+				  struct intel_crtc *crtc)
 {
-	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
-	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);

-	return i915->dpll_funcs->crtc_compute_clock(crtc_state);
+	drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+
+	if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
+		return 0;
+
+	memset(&crtc_state->dpll_hw_state, 0,
+	       sizeof(crtc_state->dpll_hw_state));
+
+	if (!crtc_state->hw.enable)
+		return 0;
+
+	return i915->dpll_funcs->crtc_compute_clock(state, crtc);
+}
+
+int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
+				    struct intel_crtc *crtc)
+{
+	struct drm_i915_private *i915 = to_i915(state->base.dev);
+	struct intel_crtc_state *crtc_state =
+		intel_atomic_get_new_crtc_state(state, crtc);
+
+	drm_WARN_ON(&i915->drm, !intel_crtc_needs_modeset(crtc_state));
+
+	if (drm_WARN_ON(&i915->drm, crtc_state->shared_dpll))
+		return 0;
+
+	if (!crtc_state->hw.enable)
+		return 0;
+
+	if (!i915->dpll_funcs->crtc_get_shared_dpll)
+		return 0;
+
+	return i915->dpll_funcs->crtc_get_shared_dpll(state, crtc);
 }
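Splitting compute from reservation lets the atomic check compute every CRTC's clock before any shared PLL is claimed, so reservation order no longer depends on the clock math. A sketch of the expected caller ordering (assumed, based on the two entry points above; variables as in the usual i915 atomic-check loops):

	/* phase 1: pure calculation, no shared resources touched */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		ret = intel_dpll_crtc_compute_clock(state, crtc);
		if (ret)
			return ret;
	}

	/* phase 2: claim shared DPLLs once all clocks are known */
	for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
		ret = intel_dpll_crtc_get_shared_dpll(state, crtc);
		if (ret)
			return ret;
	}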

 void
 intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
 {
-	if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
+	if (IS_DG2(dev_priv))
+		dev_priv->dpll_funcs = &dg2_dpll_funcs;
+	else if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
 		dev_priv->dpll_funcs = &hsw_dpll_funcs;
 	else if (HAS_PCH_SPLIT(dev_priv))
 		dev_priv->dpll_funcs = &ilk_dpll_funcs;
@@ -10,12 +10,16 @@

 struct dpll;
 struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
 struct intel_crtc_state;
 enum pipe;

 void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
-int intel_dpll_crtc_compute_clock(struct intel_crtc_state *crtc_state);
+int intel_dpll_crtc_compute_clock(struct intel_atomic_state *state,
+				  struct intel_crtc *crtc);
+int intel_dpll_crtc_get_shared_dpll(struct intel_atomic_state *state,
+				    struct intel_crtc *crtc);
 int vlv_calc_dpll_params(int refclk, struct dpll *clock);
 int pnv_calc_dpll_params(int refclk, struct dpll *clock);
 int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
@@ -90,9 +90,9 @@ struct intel_shared_dpll_funcs {
 struct intel_dpll_mgr {
 	const struct dpll_info *dpll_info;

-	bool (*get_dplls)(struct intel_atomic_state *state,
-			  struct intel_crtc *crtc,
-			  struct intel_encoder *encoder);
+	int (*get_dplls)(struct intel_atomic_state *state,
+			 struct intel_crtc *crtc,
+			 struct intel_encoder *encoder);
 	void (*put_dplls)(struct intel_atomic_state *state,
 			  struct intel_crtc *crtc);
 	void (*update_active_dpll)(struct intel_atomic_state *state,
@@ -514,9 +514,9 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
 	udelay(200);
 }

-static bool ibx_get_dpll(struct intel_atomic_state *state,
-			 struct intel_crtc *crtc,
-			 struct intel_encoder *encoder)
+static int ibx_get_dpll(struct intel_atomic_state *state,
+			struct intel_crtc *crtc,
+			struct intel_encoder *encoder)
 {
 	struct intel_crtc_state *crtc_state =
 		intel_atomic_get_new_crtc_state(state, crtc);
@@ -541,7 +541,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
 	}

 	if (!pll)
-		return false;
+		return -EINVAL;

 	/* reference the pll */
 	intel_reference_shared_dpll(state, crtc,
@@ -549,7 +549,7 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,

 	crtc_state->shared_dpll = pll;

-	return true;
+	return 0;
 }

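The bool-returning get_dplls() hooks all become int so the real errno can travel up instead of collapsing to -EINVAL at the top. The same conversion pattern repeats through the rest of this file (a summary of the idiom, not new driver code):

	/* before: information-free failure */
	if (!get_dplls(state, crtc, encoder))
		return -EINVAL;

	/* after: the hook picks the errno, callers just forward it */
	ret = get_dplls(state, crtc, encoder);
	if (ret)
		return ret;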
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
|
||||
@ -584,7 +584,7 @@ static const struct intel_dpll_mgr pch_pll_mgr = {
|
||||
};
|
||||
|
||||
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
|
||||
struct intel_shared_dpll *pll)
|
||||
struct intel_shared_dpll *pll)
|
||||
{
|
||||
const enum intel_dpll_id id = pll->info->id;
|
||||
|
||||
@ -1060,16 +1060,13 @@ static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
|
||||
return link_clock * 2;
|
||||
}
|
||||
|
||||
static bool hsw_get_dpll(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
static int hsw_get_dpll(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
{
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
struct intel_shared_dpll *pll;
|
||||
|
||||
memset(&crtc_state->dpll_hw_state, 0,
|
||||
sizeof(crtc_state->dpll_hw_state));
|
||||
struct intel_shared_dpll *pll = NULL;
|
||||
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
|
||||
pll = hsw_ddi_wrpll_get_dpll(state, crtc);
|
||||
@ -1077,18 +1074,16 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
|
||||
pll = hsw_ddi_lcpll_get_dpll(crtc_state);
|
||||
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
|
||||
pll = hsw_ddi_spll_get_dpll(state, crtc);
|
||||
else
|
||||
return false;
|
||||
|
||||
if (!pll)
|
||||
return false;
|
||||
return -EINVAL;
|
||||
|
||||
intel_reference_shared_dpll(state, crtc,
|
||||
pll, &crtc_state->dpll_hw_state);
|
||||
|
||||
crtc_state->shared_dpll = pll;
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
|
||||
@ -1493,7 +1488,7 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
|
||||
params->dco_integer * MHz(1)) * 0x8000, MHz(1));
|
||||
}
|
||||
|
||||
static bool
|
||||
static int
|
||||
skl_ddi_calculate_wrpll(int clock /* in Hz */,
|
||||
int ref_clock,
|
||||
struct skl_wrpll_params *wrpll_params)
|
||||
@ -1552,7 +1547,7 @@ skip_remaining_dividers:
|
||||
|
||||
if (!ctx.p) {
|
||||
DRM_DEBUG_DRIVER("No valid divider found for %dHz\n", clock);
|
||||
return false;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1564,14 +1559,15 @@ skip_remaining_dividers:
|
||||
skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
|
||||
ctx.central_freq, p0, p1, p2);
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
|
||||
static int skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
|
||||
struct skl_wrpll_params wrpll_params = {};
|
||||
u32 ctrl1, cfgcr1, cfgcr2;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* See comment in intel_dpll_hw_state to understand why we always use 0
|
||||
@ -1581,10 +1577,10 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
|
||||
|
||||
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
|
||||
|
||||
if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
|
||||
i915->dpll.ref_clks.nssc,
|
||||
&wrpll_params))
|
||||
return false;
|
||||
ret = skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
|
||||
i915->dpll.ref_clks.nssc, &wrpll_params);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
|
||||
DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
|
||||
@ -1596,13 +1592,11 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
|
||||
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
|
||||
wrpll_params.central_freq;
|
||||
|
||||
memset(&crtc_state->dpll_hw_state, 0,
|
||||
sizeof(crtc_state->dpll_hw_state));
|
||||
|
||||
crtc_state->dpll_hw_state.ctrl1 = ctrl1;
|
||||
crtc_state->dpll_hw_state.cfgcr1 = cfgcr1;
|
||||
crtc_state->dpll_hw_state.cfgcr2 = cfgcr2;
|
||||
return true;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
|
||||
@ -1676,7 +1670,7 @@ static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
|
||||
return dco_freq / (p0 * p1 * p2 * 5);
|
||||
}
|
||||
|
||||
static bool
|
||||
static int
|
||||
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
u32 ctrl1;
|
||||
@ -1708,12 +1702,9 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
|
||||
break;
|
||||
}
|
||||
|
||||
memset(&crtc_state->dpll_hw_state, 0,
|
||||
sizeof(crtc_state->dpll_hw_state));
|
||||
|
||||
crtc_state->dpll_hw_state.ctrl1 = ctrl1;
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
|
||||
@ -1750,33 +1741,23 @@ static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
|
||||
return link_clock * 2;
|
||||
}
|
||||
|
||||
static bool skl_get_dpll(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
static int skl_get_dpll(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
{
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
|
||||
struct intel_shared_dpll *pll;
|
||||
bool bret;
|
||||
int ret;
|
||||
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
|
||||
bret = skl_ddi_hdmi_pll_dividers(crtc_state);
|
||||
if (!bret) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Could not get HDMI pll dividers.\n");
|
||||
return false;
|
||||
}
|
||||
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
|
||||
bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
|
||||
if (!bret) {
|
||||
drm_dbg_kms(&i915->drm,
|
||||
"Could not set DP dpll HW state.\n");
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
return false;
|
||||
}
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
|
||||
ret = skl_ddi_hdmi_pll_dividers(crtc_state);
|
||||
else if (intel_crtc_has_dp_encoder(crtc_state))
|
||||
ret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
|
||||
else
|
||||
ret = -EINVAL;
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
|
||||
pll = intel_find_shared_dpll(state, crtc,
|
||||
@ -1789,14 +1770,14 @@ static bool skl_get_dpll(struct intel_atomic_state *state,
|
||||
BIT(DPLL_ID_SKL_DPLL2) |
|
||||
BIT(DPLL_ID_SKL_DPLL1));
|
||||
if (!pll)
|
||||
return false;
|
||||
return -EINVAL;
|
||||
|
||||
intel_reference_shared_dpll(state, crtc,
|
||||
pll, &crtc_state->dpll_hw_state);
|
||||
|
||||
crtc_state->shared_dpll = pll;
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
|
||||
@ -2095,7 +2076,7 @@ static const struct dpll bxt_dp_clk_val[] = {
|
||||
{ .dot = 432000, .p1 = 3, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x819999a /* 32.4 */ },
|
||||
};
|
||||
|
||||
static bool
|
||||
static int
|
||||
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
|
||||
struct dpll *clk_div)
|
||||
{
|
||||
@ -2111,12 +2092,12 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
|
||||
drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
|
||||
crtc_state->port_clock,
|
||||
pipe_name(crtc->pipe));
|
||||
return false;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
drm_WARN_ON(&i915->drm, clk_div->m1 != 2);
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
|
||||
@ -2139,8 +2120,8 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
|
||||
clk_div->dot != crtc_state->port_clock);
|
||||
}
|
||||
|
||||
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
|
||||
const struct dpll *clk_div)
|
||||
static int bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
|
||||
const struct dpll *clk_div)
|
||||
{
|
||||
struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
|
||||
struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
|
||||
@ -2149,8 +2130,6 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
|
||||
u32 prop_coef, int_coef, gain_ctl, targ_cnt;
|
||||
u32 lanestagger;
|
||||
|
||||
memset(dpll_hw_state, 0, sizeof(*dpll_hw_state));
|
||||
|
||||
if (vco >= 6200000 && vco <= 6700000) {
|
||||
prop_coef = 4;
|
||||
int_coef = 9;
|
||||
@ -2169,7 +2148,7 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
|
||||
targ_cnt = 9;
|
||||
} else {
|
||||
drm_err(&i915->drm, "Invalid VCO\n");
|
||||
return false;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (clock > 270000)
|
||||
@ -2206,10 +2185,10 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
|
||||
|
||||
dpll_hw_state->pcsdw12 = LANESTAGGER_STRAP_OVRD | lanestagger;
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool
|
||||
static int
|
||||
bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct dpll clk_div = {};
|
||||
@ -2219,7 +2198,7 @@ bxt_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
|
||||
return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
|
||||
}
|
||||
|
||||
static bool
|
||||
static int
|
||||
bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
|
||||
{
|
||||
struct dpll clk_div = {};
|
||||
@ -2246,23 +2225,25 @@ static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
|
||||
return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
|
||||
}
|
||||
|
||||
static bool bxt_get_dpll(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
static int bxt_get_dpll(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
{
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
|
||||
struct intel_shared_dpll *pll;
|
||||
enum intel_dpll_id id;
|
||||
int ret;
|
||||
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
|
||||
!bxt_ddi_hdmi_set_dpll_hw_state(crtc_state))
|
||||
return false;
|
||||
|
||||
if (intel_crtc_has_dp_encoder(crtc_state) &&
|
||||
!bxt_ddi_dp_set_dpll_hw_state(crtc_state))
|
||||
return false;
|
||||
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
|
||||
ret = bxt_ddi_hdmi_set_dpll_hw_state(crtc_state);
|
||||
else if (intel_crtc_has_dp_encoder(crtc_state))
|
||||
ret = bxt_ddi_dp_set_dpll_hw_state(crtc_state);
|
||||
else
|
||||
ret = -EINVAL;
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* 1:1 mapping between ports and PLLs */
|
||||
id = (enum intel_dpll_id) encoder->port;
|
||||
@ -2276,7 +2257,7 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
|
||||
|
||||
crtc_state->shared_dpll = pll;
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
|
||||
@ -2513,8 +2494,8 @@ static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = {
|
||||
/* the following params are unused */
|
||||
};
|
||||
|
||||
static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
|
||||
struct skl_wrpll_params *pll_params)
|
||||
static int icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
|
||||
struct skl_wrpll_params *pll_params)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
|
||||
const struct icl_combo_pll_params *params =
|
||||
@ -2527,16 +2508,16 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
|
||||
for (i = 0; i < ARRAY_SIZE(icl_dp_combo_pll_24MHz_values); i++) {
|
||||
if (clock == params[i].clock) {
|
||||
*pll_params = params[i].wrpll;
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
MISSING_CASE(clock);
|
||||
return false;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
|
||||
struct skl_wrpll_params *pll_params)
|
||||
static int icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
|
||||
struct skl_wrpll_params *pll_params)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
|
||||
|
||||
@ -2568,7 +2549,7 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
|
||||
@ -2598,7 +2579,7 @@ static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
|
||||
return ref_clock;
|
||||
}
|
||||
|
||||
static bool
|
||||
static int
|
||||
icl_calc_wrpll(struct intel_crtc_state *crtc_state,
|
||||
struct skl_wrpll_params *wrpll_params)
|
||||
{
|
||||
@ -2633,13 +2614,13 @@ icl_calc_wrpll(struct intel_crtc_state *crtc_state,
|
||||
}
|
||||
|
||||
if (best_div == 0)
|
||||
return false;
|
||||
return -EINVAL;
|
||||
|
||||
icl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
|
||||
icl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
|
||||
pdiv, qdiv, kdiv);
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
|
||||
@ -2709,8 +2690,6 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
|
||||
{
|
||||
u32 dco_fraction = pll_params->dco_fraction;
|
||||
|
||||
memset(pll_state, 0, sizeof(*pll_state));
|
||||
|
||||
if (ehl_combo_pll_div_frac_wa_needed(i915))
|
||||
dco_fraction = DIV_ROUND_CLOSEST(dco_fraction, 2);
|
||||
|
||||
@ -2731,10 +2710,10 @@ static void icl_calc_dpll_state(struct drm_i915_private *i915,
|
||||
pll_state->div0 = TGL_DPLL0_DIV0_AFC_STARTUP(i915->vbt.override_afc_startup_val);
|
||||
}
|
||||
|
||||
static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
|
||||
u32 *target_dco_khz,
|
||||
struct intel_dpll_hw_state *state,
|
||||
bool is_dkl)
|
||||
static int icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
|
||||
u32 *target_dco_khz,
|
||||
struct intel_dpll_hw_state *state,
|
||||
bool is_dkl)
|
||||
{
|
||||
static const u8 div1_vals[] = { 7, 5, 3, 2 };
|
||||
u32 dco_min_freq, dco_max_freq;
|
||||
@ -2800,19 +2779,19 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc,
|
||||
hsdiv |
|
||||
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(div2);
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* The specification for this function uses real numbers, so the math had to be
|
||||
* adapted to integer-only calculation, that's why it looks so different.
|
||||
*/
|
||||
static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
|
||||
struct intel_dpll_hw_state *pll_state)
|
||||
static int icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
|
||||
struct intel_dpll_hw_state *pll_state)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
|
||||
int refclk_khz = dev_priv->dpll.ref_clks.nssc;
|
||||
@ -2826,14 +2805,14 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
|
||||
bool use_ssc = false;
|
||||
bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI);
|
||||
bool is_dkl = DISPLAY_VER(dev_priv) >= 12;
|
||||
int ret;
|
||||
|
||||
memset(pll_state, 0, sizeof(*pll_state));
|
||||
|
||||
if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
|
||||
pll_state, is_dkl)) {
|
||||
ret = icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
|
||||
pll_state, is_dkl);
|
||||
if (ret) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Failed to find divisors for clock %d\n", clock);
|
||||
return false;
|
||||
return ret;
|
||||
}
|
||||
|
||||
m1div = 2;
|
||||
@ -2848,7 +2827,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Failed to find mdiv for clock %d\n",
|
||||
clock);
|
||||
return false;
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
m2div_rem = dco_khz % (refclk_khz * m1div);
|
||||
@ -2875,7 +2854,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
|
||||
break;
|
||||
default:
|
||||
MISSING_CASE(refclk_khz);
|
||||
return false;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -3018,7 +2997,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
|
||||
pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask;
|
||||
}
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
|
||||
@ -3140,9 +3119,9 @@ static u32 intel_get_hti_plls(struct drm_i915_private *i915)
|
||||
return REG_FIELD_GET(HDPORT_DPLL_USED_MASK, i915->hti_state);
|
||||
}
|
||||
|
||||
static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
static int icl_get_combo_phy_dpll(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
{
|
||||
struct intel_crtc_state *crtc_state =
|
||||
intel_atomic_get_new_crtc_state(state, crtc);
|
||||
@ -3160,11 +3139,10 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
|
||||
else
|
||||
ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
|
||||
|
||||
if (!ret) {
|
||||
if (ret) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Could not calculate combo PHY PLL state.\n");
|
||||
|
||||
return false;
|
||||
return ret;
|
||||
}
|
||||
|
||||
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
|
||||
@ -3209,7 +3187,7 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"No combo PHY PLL found for [ENCODER:%d:%s]\n",
|
||||
encoder->base.base.id, encoder->base.name);
|
||||
return false;
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
intel_reference_shared_dpll(state, crtc,
|
||||
@ -3217,12 +3195,12 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
|
||||
|
||||
icl_update_active_dpll(state, crtc, encoder);
|
||||
|
||||
return true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
static int icl_get_tc_phy_dplls(struct intel_atomic_state *state,
|
||||
struct intel_crtc *crtc,
|
||||
struct intel_encoder *encoder)
|
||||
{
|
||||
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
|
||||
struct intel_crtc_state *crtc_state =
|
||||
@ -3230,12 +3208,14 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
|
||||
struct skl_wrpll_params pll_params = { };
|
||||
struct icl_port_dpll *port_dpll;
|
||||
enum intel_dpll_id dpll_id;
|
||||
int ret;
|
||||
|
||||
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
|
||||
if (!icl_calc_tbt_pll(crtc_state, &pll_params)) {
|
||||
ret = icl_calc_tbt_pll(crtc_state, &pll_params);
|
||||
if (ret) {
|
||||
drm_dbg_kms(&dev_priv->drm,
|
||||
"Could not calculate TBT PLL state.\n");
|
||||
return false;
|
||||
return ret;
|
||||
}
|
||||
|
||||
icl_calc_dpll_state(dev_priv, &pll_params, &port_dpll->hw_state);
|
||||
@ -3245,14 +3225,15 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
|
||||
BIT(DPLL_ID_ICL_TBTPLL));
|
||||
if (!port_dpll->pll) {
|
||||
 		drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
-		return false;
+		return -EINVAL;
 	}
 	intel_reference_shared_dpll(state, crtc,
 				    port_dpll->pll, &port_dpll->hw_state);

 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
-	if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
+	ret = icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state);
+	if (ret) {
 		drm_dbg_kms(&dev_priv->drm,
 			    "Could not calculate MG PHY PLL state.\n");
 		goto err_unreference_tbt_pll;
@@ -3264,6 +3245,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
 					 &port_dpll->hw_state,
 					 BIT(dpll_id));
 	if (!port_dpll->pll) {
+		ret = -EINVAL;
 		drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
 		goto err_unreference_tbt_pll;
 	}
@@ -3272,18 +3254,18 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,

 	icl_update_active_dpll(state, crtc, encoder);

-	return true;
+	return 0;

 err_unreference_tbt_pll:
 	port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
 	intel_unreference_shared_dpll(state, crtc, port_dpll->pll);

-	return false;
+	return ret;
 }

-static bool icl_get_dplls(struct intel_atomic_state *state,
-			  struct intel_crtc *crtc,
-			  struct intel_encoder *encoder)
+static int icl_get_dplls(struct intel_atomic_state *state,
+			 struct intel_crtc *crtc,
+			 struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
@@ -3295,7 +3277,7 @@ static bool icl_get_dplls(struct intel_atomic_state *state,

 	MISSING_CASE(phy);

-	return false;
+	return -EINVAL;
 }

 static void icl_put_dplls(struct intel_atomic_state *state,
@@ -4081,13 +4063,12 @@ static const struct intel_dpll_mgr adlp_pll_mgr = {

 /**
  * intel_shared_dpll_init - Initialize shared DPLLs
- * @dev: drm device
+ * @dev_priv: i915 device
  *
- * Initialize shared DPLLs for @dev.
+ * Initialize shared DPLLs for @dev_priv.
  */
-void intel_shared_dpll_init(struct drm_device *dev)
+void intel_shared_dpll_init(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	const struct intel_dpll_mgr *dpll_mgr = NULL;
 	const struct dpll_info *dpll_info;
 	int i;
@@ -4126,7 +4107,7 @@ void intel_shared_dpll_init(struct drm_device *dev)
 	dpll_info = dpll_mgr->dpll_info;

 	for (i = 0; dpll_info[i].name; i++) {
-		drm_WARN_ON(dev, i != dpll_info[i].id);
+		drm_WARN_ON(&dev_priv->drm, i != dpll_info[i].id);
 		dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
 	}

@@ -4154,17 +4135,18 @@ void intel_shared_dpll_init(struct drm_device *dev)
  * intel_release_shared_dplls().
  *
  * Returns:
- * True if all required DPLLs were successfully reserved.
+ * 0 if all required DPLLs were successfully reserved,
+ * negative error code otherwise.
  */
-bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
-				struct intel_crtc *crtc,
-				struct intel_encoder *encoder)
+int intel_reserve_shared_dplls(struct intel_atomic_state *state,
+			       struct intel_crtc *crtc,
+			       struct intel_encoder *encoder)
 {
 	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;

 	if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
-		return false;
+		return -EINVAL;

 	return dpll_mgr->get_dplls(state, crtc, encoder);
 }
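
The thread running through the hunks above is the conversion of the shared-DPLL reservation paths from bool to int, so callers receive a real negative errno instead of a bare failure flag. Below is a minimal sketch of that error-propagation idiom with a goto unwind; every name in it (calc_pll_state, reserve_plls, and so on) is a hypothetical stand-in, not an i915 function.

#include <errno.h>
#include <stdio.h>

struct pll_state { int configured; };

static int calc_pll_state(struct pll_state *st)
{
    st->configured = 1;
    return 0;               /* 0 on success, negative errno on failure */
}

static int find_pll(const struct pll_state *st)
{
    return st->configured;  /* pretend lookup: 0 means "not found" */
}

static int reserve_plls(struct pll_state *primary, struct pll_state *secondary)
{
    int ret;

    ret = calc_pll_state(primary);
    if (ret)
        return ret;                 /* propagate the real error */

    ret = calc_pll_state(secondary);
    if (ret)
        goto err_release_primary;

    if (!find_pll(secondary)) {
        ret = -EINVAL;              /* where bool only had "false" */
        goto err_release_primary;
    }

    return 0;

err_release_primary:
    primary->configured = 0;        /* unwind the earlier reservation */
    return ret;
}

int main(void)
{
    struct pll_state a = { 0 }, b = { 0 };

    printf("reserve_plls: %d\n", reserve_plls(&a, &b));
    return 0;
}

The value of this shape over bool is visible in icl_get_tc_phy_dplls() above: the first failure's errno survives the unwind and reaches intel_reserve_shared_dplls()'s caller intact.
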
@@ -37,7 +37,6 @@
 	__a > __b ? (__a - __b) : (__b - __a); })

 enum tc_port;
-struct drm_device;
 struct drm_i915_private;
 struct intel_atomic_state;
 struct intel_crtc;
@@ -337,9 +336,9 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
 			bool state);
 #define assert_shared_dpll_enabled(d, p) assert_shared_dpll(d, p, true)
 #define assert_shared_dpll_disabled(d, p) assert_shared_dpll(d, p, false)
-bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
-				struct intel_crtc *crtc,
-				struct intel_encoder *encoder);
+int intel_reserve_shared_dplls(struct intel_atomic_state *state,
+			       struct intel_crtc *crtc,
+			       struct intel_encoder *encoder);
 void intel_release_shared_dplls(struct intel_atomic_state *state,
 				struct intel_crtc *crtc);
 void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
@@ -356,7 +355,7 @@ bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
 void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
 void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
 void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
-void intel_shared_dpll_init(struct drm_device *dev);
+void intel_shared_dpll_init(struct drm_i915_private *dev_priv);
 void intel_dpll_update_ref_clks(struct drm_i915_private *dev_priv);
 void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
 void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
@@ -811,6 +811,14 @@ static void intel_fbc_program_cfb(struct intel_fbc *fbc)
 	fbc->funcs->program_cfb(fbc);
 }

+static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
+{
+	/* Wa_22014263786:icl,jsl,tgl,dg1,rkl,adls,dg2,adlp */
+	if (DISPLAY_VER(fbc->i915) >= 11)
+		intel_de_rmw(fbc->i915, ILK_DPFC_CHICKEN(fbc->id), 0,
+			     DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
+}
+
 static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc)
 {
 	struct drm_i915_private *i915 = fbc->i915;
@@ -1045,7 +1053,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
 	struct intel_plane_state *plane_state =
 		intel_atomic_get_new_plane_state(state, plane);
 	const struct drm_framebuffer *fb = plane_state->hw.fb;
-	struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+	struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
 	const struct intel_crtc_state *crtc_state;
 	struct intel_fbc *fbc = plane->fbc;

@@ -1086,7 +1094,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
 	 */
 	if (DISPLAY_VER(i915) >= 12 && crtc_state->has_psr2) {
 		plane_state->no_fbc_reason = "PSR2 enabled";
-		return false;
+		return 0;
 	}

 	if (!pixel_format_is_valid(plane_state)) {
@@ -1112,7 +1120,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
 	if (plane_state->hw.pixel_blend_mode != DRM_MODE_BLEND_PIXEL_NONE &&
 	    fb->format->has_alpha) {
 		plane_state->no_fbc_reason = "per-pixel alpha not supported";
-		return false;
+		return 0;
 	}

 	if (!intel_fbc_hw_tracking_covers_screen(plane_state)) {
@@ -1128,7 +1136,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
 	if (DISPLAY_VER(i915) >= 9 &&
 	    plane_state->view.color_plane[0].y & 3) {
 		plane_state->no_fbc_reason = "plane start Y offset misaligned";
-		return false;
+		return 0;
 	}

 	/* Wa_22010751166: icl, ehl, tgl, dg1, rkl */
@@ -1136,7 +1144,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
 	    (plane_state->view.color_plane[0].y +
 	     (drm_rect_height(&plane_state->uapi.src) >> 16)) & 3) {
 		plane_state->no_fbc_reason = "plane end Y offset misaligned";
-		return false;
+		return 0;
 	}

 	/* WaFbcExceedCdClockThreshold:hsw,bdw */
@@ -1462,6 +1470,7 @@ static void __intel_fbc_enable(struct intel_atomic_state *state,

 	intel_fbc_update_state(state, crtc, plane);

+	intel_fbc_program_workarounds(fbc);
 	intel_fbc_program_cfb(fbc);
 }
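
The new intel_fbc_program_workarounds() above is a gated read-modify-write of a chicken register. As a sketch of that rmw pattern against a fake register file (the register index, bit position, and helper below are illustrative stand-ins, not i915 API):

#include <stdint.h>
#include <stdio.h>

#define FORCE_SLB_INVALIDATION (1u << 14)   /* hypothetical bit position */

static uint32_t fake_mmio[16];              /* stand-in for MMIO space */

static void rmw(unsigned int reg, uint32_t clear, uint32_t set)
{
    fake_mmio[reg] = (fake_mmio[reg] & ~clear) | set;
}

static void program_workarounds(int display_ver, unsigned int chicken_reg)
{
    /* Apply the workaround only on new-enough display blocks,
     * mirroring the DISPLAY_VER(i915) >= 11 gate in the diff. */
    if (display_ver >= 11)
        rmw(chicken_reg, 0, FORCE_SLB_INVALIDATION);
}

int main(void)
{
    program_workarounds(12, 3);
    printf("reg3 = 0x%08x\n", fake_mmio[3]);
    return 0;
}
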
@@ -298,7 +298,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
 	 * Mailbox interface.
 	 */
 	if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
-		ret = snb_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1);
+		ret = snb_pcode_write(&dev_priv->uncore, SKL_PCODE_LOAD_HDCP_KEYS, 1);
 		if (ret) {
 			drm_err(&dev_priv->drm,
 				"Failed to initiate HDCP key load (%d)\n",
@@ -75,13 +75,17 @@ const struct drm_display_mode *
 intel_panel_downclock_mode(struct intel_connector *connector,
 			   const struct drm_display_mode *adjusted_mode)
 {
+	struct drm_i915_private *i915 = to_i915(connector->base.dev);
 	const struct drm_display_mode *fixed_mode, *best_mode = NULL;
-	int vrefresh = drm_mode_vrefresh(adjusted_mode);
+	int min_vrefresh = i915->vbt.seamless_drrs_min_refresh_rate;
+	int max_vrefresh = drm_mode_vrefresh(adjusted_mode);

 	/* pick the fixed_mode with the lowest refresh rate */
 	list_for_each_entry(fixed_mode, &connector->panel.fixed_modes, head) {
-		if (drm_mode_vrefresh(fixed_mode) < vrefresh) {
-			vrefresh = drm_mode_vrefresh(fixed_mode);
+		int vrefresh = drm_mode_vrefresh(fixed_mode);
+
+		if (vrefresh >= min_vrefresh && vrefresh < max_vrefresh) {
+			max_vrefresh = vrefresh;
 			best_mode = fixed_mode;
 		}
 	}
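
The reworked loop above no longer picks simply the lowest-refresh fixed mode; it picks the lowest refresh rate that still satisfies the VBT's seamless-DRRS minimum. A standalone sketch of that selection, using plain structs instead of drm_display_mode:

#include <stdio.h>

struct mode { const char *name; int vrefresh; };

static const struct mode *pick_downclock_mode(const struct mode *modes, int n,
                                              int min_vrefresh, int max_vrefresh)
{
    const struct mode *best = NULL;
    int i;

    for (i = 0; i < n; i++) {
        int vrefresh = modes[i].vrefresh;

        /* Keep shrinking max_vrefresh so we end up with the lowest
         * refresh rate that is still >= the seamless DRRS minimum. */
        if (vrefresh >= min_vrefresh && vrefresh < max_vrefresh) {
            max_vrefresh = vrefresh;
            best = &modes[i];
        }
    }
    return best;
}

int main(void)
{
    const struct mode fixed[] = {
        { "60Hz", 60 }, { "48Hz", 48 }, { "30Hz", 30 },
    };
    const struct mode *m = pick_downclock_mode(fixed, 3, 40, 60);

    printf("picked: %s\n", m ? m->name : "(none)");
    return 0;
}

With min_vrefresh = 40, the 30 Hz mode is skipped and the 48 Hz mode wins, which is exactly the behavior the i915->vbt.seamless_drrs_min_refresh_rate bound introduces.
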
@@ -6,6 +6,7 @@
 #include "g4x_dp.h"
 #include "i915_drv.h"
 #include "intel_de.h"
+#include "intel_display_power_well.h"
 #include "intel_display_types.h"
 #include "intel_dp.h"
 #include "intel_dpll.h"
@@ -891,6 +891,20 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		return false;
 	}

+	/* Wa_16011303918:adl-p */
+	if (crtc_state->vrr.enable &&
+	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
+		return false;
+	}
+
+	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
+		drm_dbg_kms(&dev_priv->drm,
+			    "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
+		return false;
+	}
+
 	if (HAS_PSR2_SEL_FETCH(dev_priv)) {
 		if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
 		    !HAS_PSR_HW_TRACKING(dev_priv)) {
@@ -904,12 +918,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 		if (!crtc_state->enable_psr2_sel_fetch &&
 		    IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
 			drm_dbg_kms(&dev_priv->drm, "PSR2 HW tracking is not supported this Display stepping\n");
-			return false;
+			goto unsupported;
 		}

 		if (!psr2_granularity_check(intel_dp, crtc_state)) {
 			drm_dbg_kms(&dev_priv->drm, "PSR2 not enabled, SU granularity not compatible\n");
-			return false;
+			goto unsupported;
 		}

 		if (!crtc_state->enable_psr2_sel_fetch &&
@@ -918,25 +932,15 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
 			    "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
 			    crtc_hdisplay, crtc_vdisplay,
 			    psr_max_h, psr_max_v);
-		return false;
-	}
-
-	if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "PSR2 not enabled, PSR2 SDP indication do not fit in hblank\n");
-		return false;
-	}
-
-	/* Wa_16011303918:adl-p */
-	if (crtc_state->vrr.enable &&
-	    IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
-		drm_dbg_kms(&dev_priv->drm,
-			    "PSR2 not enabled, not compatible with HW stepping + VRR\n");
-		return false;
+		goto unsupported;
 	}

 	tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
 	return true;
+
+unsupported:
+	crtc_state->enable_psr2_sel_fetch = false;
+	return false;
 }

 void intel_psr_compute_config(struct intel_dp *intel_dp,
@@ -1349,6 +1353,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
 		drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, 0);

 	intel_dp->psr.enabled = false;
+	intel_dp->psr.psr2_enabled = false;
+	intel_dp->psr.psr2_sel_fetch_enabled = false;
+	intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
 }

 /**
@@ -1611,8 +1618,12 @@ exit:
 }

 static void clip_area_update(struct drm_rect *overlap_damage_area,
-			     struct drm_rect *damage_area)
+			     struct drm_rect *damage_area,
+			     struct drm_rect *pipe_src)
 {
+	if (!drm_rect_intersect(damage_area, pipe_src))
+		return;
+
 	if (overlap_damage_area->y1 == -1) {
 		overlap_damage_area->y1 = damage_area->y1;
 		overlap_damage_area->y2 = damage_area->y2;
@@ -1678,6 +1689,7 @@ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *c
 int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
 				struct intel_crtc *crtc)
 {
+	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
 	struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
 	struct drm_rect pipe_clip = { .x1 = 0, .y1 = -1, .x2 = INT_MAX, .y2 = -1 };
 	struct intel_plane_state *new_plane_state, *old_plane_state;
@@ -1701,7 +1713,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
 	 */
 	for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
 					     new_plane_state, i) {
-		struct drm_rect src, damaged_area = { .y1 = -1 };
+		struct drm_rect src, damaged_area = { .x1 = 0, .y1 = -1,
+						      .x2 = INT_MAX };
 		struct drm_atomic_helper_damage_iter iter;
 		struct drm_rect clip;

@@ -1728,20 +1741,23 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
 			if (old_plane_state->uapi.visible) {
 				damaged_area.y1 = old_plane_state->uapi.dst.y1;
 				damaged_area.y2 = old_plane_state->uapi.dst.y2;
-				clip_area_update(&pipe_clip, &damaged_area);
+				clip_area_update(&pipe_clip, &damaged_area,
+						 &crtc_state->pipe_src);
 			}

 			if (new_plane_state->uapi.visible) {
 				damaged_area.y1 = new_plane_state->uapi.dst.y1;
 				damaged_area.y2 = new_plane_state->uapi.dst.y2;
-				clip_area_update(&pipe_clip, &damaged_area);
+				clip_area_update(&pipe_clip, &damaged_area,
+						 &crtc_state->pipe_src);
 			}
 			continue;
 		} else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
 			/* If alpha changed mark the whole plane area as damaged */
 			damaged_area.y1 = new_plane_state->uapi.dst.y1;
 			damaged_area.y2 = new_plane_state->uapi.dst.y2;
-			clip_area_update(&pipe_clip, &damaged_area);
+			clip_area_update(&pipe_clip, &damaged_area,
+					 &crtc_state->pipe_src);
 			continue;
 		}

@@ -1752,7 +1768,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
 						   &new_plane_state->uapi);
 		drm_atomic_for_each_plane_damage(&iter, &clip) {
 			if (drm_rect_intersect(&clip, &src))
-				clip_area_update(&damaged_area, &clip);
+				clip_area_update(&damaged_area, &clip,
+						 &crtc_state->pipe_src);
 		}

 		if (damaged_area.y1 == -1)
@@ -1760,7 +1777,20 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,

 		damaged_area.y1 += new_plane_state->uapi.dst.y1 - src.y1;
 		damaged_area.y2 += new_plane_state->uapi.dst.y1 - src.y1;
-		clip_area_update(&pipe_clip, &damaged_area);
+		clip_area_update(&pipe_clip, &damaged_area, &crtc_state->pipe_src);
 	}

+	/*
+	 * TODO: For now we are just using full update in case
+	 * selective fetch area calculation fails. To optimize this we
+	 * should identify cases where this happens and fix the area
+	 * calculation for those.
+	 */
+	if (pipe_clip.y1 == -1) {
+		drm_info_once(&dev_priv->drm,
+			      "Selective fetch area calculation failed in pipe %c\n",
+			      pipe_name(crtc->pipe));
+		full_update = true;
+	}
+
 	if (full_update)
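
The common theme of the selective-fetch hunks is that every damage rectangle is now clamped to the pipe source before being merged into the accumulated clip. A self-contained sketch of that clip_area_update() shape (plain rect struct and a hand-rolled intersect, no drm helpers):

#include <stdbool.h>
#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

static bool rect_intersect(struct rect *r, const struct rect *clip)
{
    if (r->x1 < clip->x1) r->x1 = clip->x1;
    if (r->y1 < clip->y1) r->y1 = clip->y1;
    if (r->x2 > clip->x2) r->x2 = clip->x2;
    if (r->y2 > clip->y2) r->y2 = clip->y2;
    return r->x1 < r->x2 && r->y1 < r->y2;
}

static void clip_area_update(struct rect *overlap, struct rect *damage,
                             const struct rect *pipe_src)
{
    /* New in the diff: damage fully outside the pipe contributes nothing. */
    if (!rect_intersect(damage, pipe_src))
        return;

    if (overlap->y1 == -1) {            /* first contribution */
        overlap->y1 = damage->y1;
        overlap->y2 = damage->y2;
        return;
    }
    if (damage->y1 < overlap->y1)
        overlap->y1 = damage->y1;
    if (damage->y2 > overlap->y2)
        overlap->y2 = damage->y2;
}

int main(void)
{
    struct rect pipe_src = { 0, 0, 1920, 1080 };
    struct rect pipe_clip = { 0, -1, 1920, -1 };
    struct rect damage = { 0, 900, 1920, 2000 };    /* spills past the pipe */

    clip_area_update(&pipe_clip, &damage, &pipe_src);
    printf("pipe_clip: y1=%d y2=%d\n", pipe_clip.y1, pipe_clip.y2);
    return 0;
}

If no damage ever lands inside the pipe, pipe_clip stays at y1 == -1, which is what the new drm_info_once() fallback above detects before forcing a full update.
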
@@ -6,6 +6,7 @@
 #include "i915_drv.h"
 #include "i915_reg.h"
 #include "intel_display.h"
+#include "intel_display_power_map.h"
 #include "intel_display_types.h"
 #include "intel_dp_mst.h"
 #include "intel_tc.h"
@@ -61,10 +62,12 @@ bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
 static enum intel_display_power_domain
 tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode)
 {
+	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
 	if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port))
 		return POWER_DOMAIN_TC_COLD_OFF;

-	return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
+	return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch);
 }

 static intel_wakeref_t
@@ -735,7 +735,7 @@ struct lvds_lfp_data_ptr {
 } __packed;

 struct bdb_lvds_lfp_data_ptrs {
-	u8 lvds_entries; /* followed by one or more lvds_data_ptr structs */
+	u8 lvds_entries;
 	struct lvds_lfp_data_ptr ptr[16];
+	struct lvds_lfp_data_ptr_table panel_name; /* 156-163? */
 } __packed;
@@ -769,6 +769,11 @@ struct lvds_pnp_id {
 	u8 mfg_year;
 } __packed;

+/*
+ * For reference only. fp_timing has variable size so
+ * the data must be accessed using the data table pointers.
+ * Do not use this directly!
+ */
 struct lvds_lfp_data_entry {
 	struct lvds_fp_timing fp_timing;
 	struct lvds_dvo_timing dvo_timing;
@@ -783,6 +788,23 @@ struct lvds_lfp_panel_name {
 	u8 name[13];
 } __packed;

+struct lvds_lfp_black_border {
+	u8 top; /* 227 */
+	u8 bottom; /* 227 */
+	u8 left; /* 238 */
+	u8 right; /* 238 */
+} __packed;
+
+struct bdb_lvds_lfp_data_tail {
+	struct lvds_lfp_panel_name panel_name[16]; /* 156-163? */
+	u16 scaling_enable; /* 187 */
+	u8 seamless_drrs_min_refresh_rate[16]; /* 188 */
+	u8 pixel_overlap_count[16]; /* 208 */
+	struct lvds_lfp_black_border black_border[16]; /* 227 */
+	u16 dual_lfp_port_sync_enable; /* 231 */
+	u16 gpu_dithering_for_banding_artifacts; /* 245 */
+} __packed;
+
 /*
  * Block 43 - LFP Backlight Control Data Block
  */
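
The new bdb_lvds_lfp_data_tail carries one entry per panel-type index for each field. A sketch of how such a per-panel field might be consumed (the packed struct and bounds check below are illustrative; real offsets come from the VBT block itself):

#include <stdint.h>
#include <stdio.h>

struct lfp_data_tail {
    uint8_t seamless_drrs_min_refresh_rate[16];   /* one per panel type */
} __attribute__((packed));

static int min_drrs_refresh(const struct lfp_data_tail *tail, int panel_type)
{
    if (panel_type < 0 || panel_type >= 16)
        return 0;                 /* no restriction known */
    return tail->seamless_drrs_min_refresh_rate[panel_type];
}

int main(void)
{
    struct lfp_data_tail tail = {
        .seamless_drrs_min_refresh_rate = { [2] = 40 },
    };

    printf("panel 2 min DRRS refresh: %d Hz\n", min_drrs_refresh(&tail, 2));
    return 0;
}

This is the same field intel_panel_downclock_mode() consumes above via i915->vbt.seamless_drrs_min_refresh_rate.
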
@@ -138,21 +138,21 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * Alternatively, we can trade that extra information on read/write
 	 * activity with
 	 *	args->busy =
-	 *		!dma_resv_test_signaled(obj->resv, true);
+	 *		!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
 	args->busy = 0;
-	dma_resv_iter_begin(&cursor, obj->base.resv, true);
+	dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_READ);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		if (dma_resv_iter_is_restarted(&cursor))
 			args->busy = 0;

-		if (dma_resv_iter_is_exclusive(&cursor))
-			/* Translate the exclusive fence to the READ *and* WRITE engine */
+		if (dma_resv_iter_usage(&cursor) <= DMA_RESV_USAGE_WRITE)
+			/* Translate the write fences to the READ *and* WRITE engine */
 			args->busy |= busy_check_writer(fence);
 		else
-			/* Translate shared fences to READ set of engines */
+			/* Translate read fences to READ set of engines */
 			args->busy |= busy_check_reader(fence);
 	}
 	dma_resv_iter_end(&cursor);
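
The busy-ioctl hunk leans on the ordering of enum dma_resv_usage: KERNEL < WRITE < READ < BOOKKEEP, so `usage <= DMA_RESV_USAGE_WRITE` selects kernel and write fences in one comparison. A sketch of that classification with a local enum mirroring the ordering (the masks and names are made up):

#include <stdio.h>

enum resv_usage {        /* ordered the same way as enum dma_resv_usage */
    USAGE_KERNEL,
    USAGE_WRITE,
    USAGE_READ,
    USAGE_BOOKKEEP,
};

static unsigned int classify(enum resv_usage usage,
                             unsigned int write_mask, unsigned int read_mask)
{
    /* Write-side fences block both readers and writers ... */
    if (usage <= USAGE_WRITE)
        return write_mask | read_mask;
    /* ... read fences only show up in the READ set of engines. */
    return read_mask;
}

int main(void)
{
    printf("write fence -> 0x%x\n", classify(USAGE_WRITE, 0x10000, 0x1));
    printf("read fence  -> 0x%x\n", classify(USAGE_READ, 0x10000, 0x1));
    return 0;
}
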
@@ -116,7 +116,8 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 						obj->base.resv, NULL, true,
 						i915_fence_timeout(i915),
 						I915_FENCE_GFP);
-		dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
+		dma_resv_add_fence(obj->base.resv, &clflush->base.dma,
+				   DMA_RESV_USAGE_KERNEL);
 		dma_fence_work_commit(&clflush->base);
 		/*
 		 * We must have successfully populated the pages(since we are
@@ -68,7 +68,7 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

 #ifdef CONFIG_LOCKDEP
-	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) &&
+	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_BOOKKEEP) &&
 		    i915_gem_object_evictable(obj));
 #endif
 	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
@@ -745,30 +745,19 @@ static const struct drm_gem_object_funcs i915_gem_object_funcs = {
 /**
  * i915_gem_object_get_moving_fence - Get the object's moving fence if any
  * @obj: The object whose moving fence to get.
+ * @fence: The resulting fence
  *
  * A non-signaled moving fence means that there is an async operation
  * pending on the object that needs to be waited on before setting up
  * any GPU- or CPU PTEs to the object's pages.
  *
- * Return: A refcounted pointer to the object's moving fence if any,
- * NULL otherwise.
+ * Return: Negative error code or 0 for success.
  */
-struct dma_fence *
-i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj)
+int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
+				     struct dma_fence **fence)
 {
-	return dma_fence_get(i915_gem_to_ttm(obj)->moving);
-}
-
-void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
-				      struct dma_fence *fence)
-{
-	struct dma_fence **moving = &i915_gem_to_ttm(obj)->moving;
-
-	if (*moving == fence)
-		return;
-
-	dma_fence_put(*moving);
-	*moving = dma_fence_get(fence);
+	return dma_resv_get_singleton(obj->base.resv, DMA_RESV_USAGE_KERNEL,
+				      fence);
 }

 /**
@@ -786,23 +775,16 @@ void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
 				      bool intr)
 {
-	struct dma_fence *fence = i915_gem_to_ttm(obj)->moving;
-	int ret;
+	long ret;

 	assert_object_held(obj);
-	if (!fence)
-		return 0;

-	ret = dma_fence_wait(fence, intr);
-	if (ret)
-		return ret;
+	ret = dma_resv_wait_timeout(obj->base.resv, DMA_RESV_USAGE_KERNEL,
+				    intr, MAX_SCHEDULE_TIMEOUT);
+	if (!ret)
+		ret = -ETIME;

-	if (fence->error)
-		return fence->error;
-
-	i915_gem_to_ttm(obj)->moving = NULL;
-	dma_fence_put(fence);
-	return 0;
+	return ret < 0 ? ret : 0;
 }

 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
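
The rewritten i915_gem_object_wait_moving_fence() folds dma_resv_wait_timeout()'s jiffies-style result (positive remaining time, 0 on timeout, negative errno) into the usual 0/-errno convention. A sketch of just that conversion, with a fake wait standing in for the dma-resv call:

#include <errno.h>
#include <stdio.h>

static long fake_resv_wait(int signaled, int interrupted)
{
    if (interrupted)
        return -EINTR;          /* stand-in for the kernel's -ERESTARTSYS */
    return signaled ? 1 : 0;    /* remaining timeout; 0 == timed out */
}

static int wait_moving_fence(int signaled, int interrupted)
{
    long ret = fake_resv_wait(signaled, interrupted);

    if (!ret)
        ret = -ETIME;           /* map "timed out" onto an errno */

    return ret < 0 ? ret : 0;   /* success collapses to 0 */
}

int main(void)
{
    printf("signaled:    %d\n", wait_moving_fence(1, 0));
    printf("timed out:   %d\n", wait_moving_fence(0, 0));
    printf("interrupted: %d\n", wait_moving_fence(0, 1));
    return 0;
}

Mapping the 0 timeout onto -ETIME first and then collapsing positives to 0 is exactly the `ret < 0 ? ret : 0` tail above.
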
@@ -520,12 +520,8 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
 	i915_gem_object_unpin_pages(obj);
 }

-struct dma_fence *
-i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj);
-
-void i915_gem_object_set_moving_fence(struct drm_i915_gem_object *obj,
-				      struct dma_fence *fence);
-
+int i915_gem_object_get_moving_fence(struct drm_i915_gem_object *obj,
+				     struct dma_fence **fence);
 int i915_gem_object_wait_moving_fence(struct drm_i915_gem_object *obj,
 				      bool intr);