/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
#ifndef __AMDGPU_DM_H__
#define __AMDGPU_DM_H__
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
/*
* This file contains the definition of amdgpu_display_manager
* and its API for use by the amdgpu driver.
* This component provides all of the display-related functionality,
* and it is the only component that calls the DAL API.
* The API contained here is intended for amdgpu driver use.
* The API that is called directly from the KMS framework is located
* in the amdgpu_dm_kms.h file.
*/
#define AMDGPU_DM_MAX_DISPLAY_INDEX 31
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
*/
#include "irq_types.h"
#include "signal_types.h"
/* Forward declarations */
struct amdgpu_device;
struct drm_device;
struct amdgpu_dm_irq_handler_data;
struct dc;
struct common_irq_params {
struct amdgpu_device *adev;
enum dc_irq_source irq_src;
};
/**
* struct irq_list_head - Linked-list for low context IRQ handlers.
*
* @head: The list_head within &struct amdgpu_dm_irq_handler_data
* @work: A work_struct containing the deferred handler work
*/
struct irq_list_head {
struct list_head head;
/* In case this interrupt needs post-processing, 'work' will be queued */
struct work_struct work;
};
/**
* struct dm_compressor_info - Buffer info used by frame buffer compression
* @cpu_addr: MMIO cpu addr
* @bo_ptr: Pointer to the buffer object
* @gpu_addr: MMIO gpu addr
*/
struct dm_compressor_info {
void *cpu_addr;
struct amdgpu_bo *bo_ptr;
uint64_t gpu_addr;
};
/**
* struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI
* @min_input_signal: minimum possible input in range 0-255
* @max_input_signal: maximum possible input in range 0-255
* @caps_valid: true if these values are from the ACPI interface
*/
struct amdgpu_dm_backlight_caps {
int min_input_signal;
int max_input_signal;
bool caps_valid;
};
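/*
* A minimal sketch of how a 0-255 user brightness value might be mapped
* into the ACPI input-signal range above (illustrative only; 'brightness'
* and 'signal' are hypothetical locals, not actual driver code):
*
*	if (caps->caps_valid)
*		signal = caps->min_input_signal + brightness *
*			 (caps->max_input_signal - caps->min_input_signal) / 255;
*	else
*		signal = brightness; // fall back to the raw 0-255 value
*/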
/**
* struct amdgpu_display_manager - Central amdgpu display manager device
*
* @dc: Display Core control structure
* @adev: AMDGPU base driver structure
* @ddev: DRM base driver structure
* @display_indexes_num: Max number of display streams supported
* @irq_handler_list_table_lock: Synchronizes access to IRQ tables
* @backlight_dev: Backlight control device
* @cached_state: Caches device atomic state for suspend/resume
* @compressor: Frame buffer compression buffer. See &struct dm_compressor_info
*/
struct amdgpu_display_manager {
struct dc *dc;
/**
* @cgs_device:
*
* The Common Graphics Services device. It provides an interface for
* accessing registers.
*/
struct cgs_device *cgs_device;
struct amdgpu_device *adev;
struct drm_device *ddev;
u16 display_indexes_num;
/**
* @atomic_obj:
*
* In combination with &dm_atomic_state it helps manage
* global atomic state that doesn't map cleanly into existing
* drm resources, like &dc_context.
*/
struct drm_private_obj atomic_obj;
struct drm_modeset_lock atomic_obj_lock;
/**
* @irq_handler_list_low_tab:
*
* Low priority IRQ handler table.
*
* It is an n*m table consisting of n IRQ sources, and m handlers per IRQ
* source. Low priority IRQ handlers are deferred to a workqueue to be
* processed. Hence, they can sleep.
*
* Note that handlers are called in the same order as they were
* registered (FIFO).
*/
struct irq_list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER];
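/*
* A hedged sketch of how a low-priority IRQ might be deferred from the
* hard-IRQ path (assumed usage; names other than
* irq_handler_list_low_tab are illustrative):
*
*	struct irq_list_head *lh =
*		&adev->dm.irq_handler_list_low_tab[irq_source];
*
*	schedule_work(&lh->work);
*
* The work function then walks lh->head and invokes each registered
* handler in FIFO order, in process context, where sleeping is allowed.
*/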
/**
* @irq_handler_list_high_tab:
*
* High priority IRQ handler table.
*
* It is an n*m table, the same as &irq_handler_list_low_tab. However,
* handlers in this table are not deferred and are called immediately.
*/
struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER];
/**
* @pflip_params:
*
* Page flip IRQ parameters, passed to registered handlers when
* triggered.
*/
struct common_irq_params
pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1];
/**
* @vblank_params:
*
* Vertical blanking IRQ parameters, passed to registered handlers when
* triggered.
*/
struct common_irq_params
vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1];
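/*
* Illustrative sketch: both parameter tables are indexed by offsetting
* the IRQ source against the first entry of its range (assumed usage,
* not a quote of the driver):
*
*	struct common_irq_params *params =
*		&dm->pflip_params[irq_src - DC_IRQ_SOURCE_PFLIP_FIRST];
*/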
spinlock_t irq_handler_list_table_lock;
struct backlight_device *backlight_dev;
const struct dc_link *backlight_link;
struct amdgpu_dm_backlight_caps backlight_caps;
struct mod_freesync *freesync_module;
struct drm_atomic_state *cached_state;
struct dm_compressor_info compressor;
const struct firmware *fw_dmcu;
uint32_t dmcu_fw_version;
};
struct amdgpu_dm_connector {
struct drm_connector base;
uint32_t connector_id;
/* We need to keep the EDID between detect
 * and get_modes due to analog/digital/tvencoder. */
struct edid *edid;
/* shared with amdgpu */
struct amdgpu_hpd hpd;
/* number of modes generated from EDID at 'dc_sink' */
int num_modes;
/* The 'old' sink - before an HPD.
* The 'current' sink is in dc_link->sink. */
struct dc_sink *dc_sink;
struct dc_link *dc_link;
struct dc_sink *dc_em_sink;
/* DM only */
struct drm_dp_mst_topology_mgr mst_mgr;
struct amdgpu_dm_dp_aux dm_dp_aux;
struct drm_dp_mst_port *port;
struct amdgpu_dm_connector *mst_port;
struct amdgpu_encoder *mst_encoder;
/* TODO see if we can merge with ddc_bus or make a dm_connector */
struct amdgpu_i2c_adapter *i2c;
/* Monitor range limits */
int min_vfreq;
int max_vfreq;
int pixel_clock_mhz;
struct mutex hpd_lock;
bool fake_enable;
};
#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
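/*
* Example use of the downcast above (a hedged sketch; 'connector' stands
* for any &struct drm_connector known to be embedded in an
* amdgpu_dm_connector):
*
*	struct amdgpu_dm_connector *aconnector =
*		to_amdgpu_dm_connector(connector);
*/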
extern const struct amdgpu_ip_block_version dm_ip_block;
struct amdgpu_framebuffer;
struct amdgpu_display_manager;
struct dc_validation_set;
struct dc_plane_state;
struct dm_plane_state {
struct drm_plane_state base;
struct dc_plane_state *dc_state;
};
struct dm_crtc_state {
struct drm_crtc_state base;
struct dc_stream_state *stream;
int crc_skip_count;
bool crc_enabled;
bool freesync_timing_changed;
bool freesync_vrr_info_changed;
bool vrr_supported;
struct mod_freesync_config freesync_config;
struct dc_crtc_timing_adjust adjust;
struct dc_info_packet vrr_infopacket;
int abm_level;
};
#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
struct dm_atomic_state {
struct drm_private_state base;
struct dc_state *context;
};
#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
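/*
* A minimal sketch of how the global DC state might be retrieved in an
* atomic check path via the private object (illustrative; error handling
* abbreviated):
*
*	struct drm_private_state *priv_state =
*		drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
*
*	if (IS_ERR(priv_state))
*		return PTR_ERR(priv_state);
*	dm_state = to_dm_atomic_state(priv_state);
*
* drm_atomic_get_private_obj_state() duplicates the state, and the
* old/new swap happens in drm_atomic_helper_swap_state(), which is what
* keeps back-to-back non-blocking commits consistent.
*/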
struct dm_connector_state {
struct drm_connector_state base;
enum amdgpu_rmx_type scaling;
uint8_t underscan_vborder;
uint8_t underscan_hborder;
uint8_t max_bpc;
bool underscan_enable;
bool freesync_capable;
uint8_t abm_level;
};
#define to_dm_connector_state(x)\
container_of((x), struct dm_connector_state, base)
void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
struct drm_connector_state *
amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
struct drm_connector_state *state,
struct drm_property *property,
uint64_t val);
int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
const struct drm_connector_state *state,
struct drm_property *property,
uint64_t *val);
int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
int connector_type,
struct dc_link *link,
int link_index);
enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode);
void dm_restore_drm_connector_state(struct drm_device *dev,
struct drm_connector *connector);
void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct edid *edid);
/* amdgpu_dm_crc.c */
#ifdef CONFIG_DEBUG_FS
int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *src_name,
size_t *values_cnt);
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
#else
#define amdgpu_dm_crtc_set_crc_source NULL
#define amdgpu_dm_crtc_verify_crc_source NULL
#define amdgpu_dm_crtc_handle_crc_irq(x)
#endif
#define MAX_COLOR_LUT_ENTRIES 4096
/* Legacy gamma LUT users such as X don't like large LUT sizes */
#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
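/*
* Hedged sketch: a color-management path might clamp the LUT size it
* exposes depending on the client (assumed logic, for illustration only):
*
*	lut_entries = legacy_client ? MAX_COLOR_LEGACY_LUT_ENTRIES :
*				      MAX_COLOR_LUT_ENTRIES;
*/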
void amdgpu_dm_init_color_mod(void);
int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
struct dc_plane_state *dc_plane_state);
void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc);
int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc);
extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
#endif /* __AMDGPU_DM_H__ */