drm fixes for 6.5-rc6

amdgpu:
 - S/G display workaround for platforms with >= 64G of memory
 - S0i3 fix
 - SMU 13.0.0 fixes
 - Disable SMU 13.x OD features temporarily while the interface is reworked
   to enable additional functionality
 - Fix cursor gamma issues on DCN3+
 - SMU 13.0.6 fixes
 - Fix possible UAF in CS IOCTL
 - Polaris display regression fix
 - Only enable CP GFX shadowing on SR-IOV
 
 amdkfd:
 - Raven/Picasso KFD regression fix
 
 bridge:
 - it6505: runtime PM fix
 - lt9611: revert "Do not generate HFP/HBP/HSA and EOT packet"
 
 nouveau:
 - enable global memory loads for helper invocations for userspace driver
 - dp 1.3 dpcd+ workaround fix
 - remove unused function
 - revert incorrect NULL check
 
 accel/ivpu:
 - Add set_pages_array_wc/uc for internal buffers
 
 rockchip:
 - Don't spam logs in atomic check
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmTV080ACgkQDHTzWXnE
 hr6JOw//eB0X18Dg86uk95wYCBb6IVcUZSt7gVWUFNmyHgTLFuY01LqGijFhZyia
 Iws1TbShkeBxLzHx80t5VtznfRI69n2WJDkyUPx+bjW4KZob5oA4nN1mwd9RZFli
 4wYja2uAdhTFebRliD3pODWqYb5DfTr0N0jVWRftS0D/Pl6WbtKjKLwyQXdn+r5+
 wf8KUKYOYeTAqlJb7azDG3+7xSbhYOKX1LtUbHlsL02HqSmTA/tzRApBRHpkiU8i
 ks37zsFCb69OzUqaRtJ/sL8MNO2S4stqAq3Ltlxy0tCTKK4xf54y0LIl07Q4JzBK
 G4VKaNssikWtm8K51nN6kf2u5S6wfntttHlftq37tixjGgZRWUGH8SgePKT9oZ8C
 qH82q4BBYKfuF7SwRpo3FrjKijcZXKOgXdb1F/AqP4oHUcE6TDzBCKVbjrE82X6E
 YyZs3WQlTrUiGaFP0sZNkkDH7bSevoDHVXoRdZrEBFSUSQqOC+b/1UEhUW9PDRfu
 zqK/1Lg5MxbEqN1b4hYtao9J2cFjdTZ7/U+BIAvV6l9M13+BbbFm1ire0H/O2Kfd
 dvePpM5sg4Tl+9kHNmwL+jqchtmdh6+19rDg3z6YkYRJXW/zPc2QVS1f5NTzi6w1
 cxvlP1vRO/5RM+ZIPmDVe+gN2q2mAs8e065ItrcWDNcztVIAiFk=
 =iuoN
 -----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2023-08-11' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "This week's fixes, as expected amdgpu is probably a little larger
  since it skipped a week, but otherwise a few nouveau fixes, a couple
  of bridge, rockchip and ivpu fixes.

  amdgpu:
   - S/G display workaround for platforms with >= 64G of memory
   - S0i3 fix
   - SMU 13.0.0 fixes
   - Disable SMU 13.x OD features temporarily while the interface is
     reworked to enable additional functionality
   - Fix cursor gamma issues on DCN3+
   - SMU 13.0.6 fixes
   - Fix possible UAF in CS IOCTL
   - Polaris display regression fix
   - Only enable CP GFX shadowing on SR-IOV

  amdkfd:
   - Raven/Picasso KFD regression fix

  bridge:
   - it6505: runtime PM fix
   - lt9611: revert "Do not generate HFP/HBP/HSA and EOT packet"

  nouveau:
   - enable global memory loads for helper invocations for userspace
     driver
   - dp 1.3 dpcd+ workaround fix
   - remove unused function
   - revert incorrect NULL check

  accel/ivpu:
   - Add set_pages_array_wc/uc for internal buffers

  rockchip:
   - Don't spam logs in atomic check"

* tag 'drm-fixes-2023-08-11' of git://anongit.freedesktop.org/drm/drm: (23 commits)
  drm/shmem-helper: Reset vma->vm_ops before calling dma_buf_mmap()
  drm/amdkfd: disable IOMMUv2 support for Raven
  drm/amdkfd: disable IOMMUv2 support for KV/CZ
  drm/amdkfd: ignore crat by default
  drm/amdgpu/gfx11: only enable CP GFX shadowing on SR-IOV
  drm/amd/display: Fix a regression on Polaris cards
  drm/amdgpu: fix possible UAF in amdgpu_cs_pass1()
  drm/amd/pm: Fix SMU v13.0.6 energy reporting
  drm/amd/display: check attr flag before set cursor degamma on DCN3+
  drm/amd/pm: disable the SMU13 OD feature support temporarily
  drm/amd/pm: correct the pcie width for smu 13.0.0
  drm/amd/display: Don't show stack trace for missing eDP
  drm/amdgpu: Match against exact bootloader status
  drm/amd/pm: skip the RLC stop when S0i3 suspend for SMU v13.0.4/11
  drm/amd: Disable S/G for APUs when 64GB or more host memory
  drm/rockchip: Don't spam logs in atomic check
  accel/ivpu: Add set_pages_array_wc/uc for internal buffers
  drm/nouveau/disp: Revert a NULL check inside nouveau_connector_get_modes
  Revert "drm/bridge: lt9611: Do not generate HFP/HBP/HSA and EOT packet"
  drm/nouveau: remove unused tu102_gr_load() function
  ...
Linus Torvalds 2023-08-11 08:53:58 -07:00
commit 9b1b1b74dd
30 changed files with 173 additions and 82 deletions

View File

@@ -173,6 +173,9 @@ static void internal_free_pages_locked(struct ivpu_bo *bo)
 {
 	unsigned int i, npages = bo->base.size >> PAGE_SHIFT;
 
+	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
+		set_pages_array_wb(bo->pages, bo->base.size >> PAGE_SHIFT);
+
 	for (i = 0; i < npages; i++)
 		put_page(bo->pages[i]);
@@ -587,6 +590,11 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
 	if (ivpu_bo_cache_mode(bo) != DRM_IVPU_BO_CACHED)
 		drm_clflush_pages(bo->pages, bo->base.size >> PAGE_SHIFT);
 
+	if (bo->flags & DRM_IVPU_BO_WC)
+		set_pages_array_wc(bo->pages, bo->base.size >> PAGE_SHIFT);
+	else if (bo->flags & DRM_IVPU_BO_UNCACHED)
+		set_pages_array_uc(bo->pages, bo->base.size >> PAGE_SHIFT);
+
 	prot = ivpu_bo_pgprot(bo, PAGE_KERNEL);
 	bo->kvaddr = vmap(bo->pages, bo->base.size >> PAGE_SHIFT, VM_MAP, prot);
 	if (!bo->kvaddr) {

View File

@@ -1296,6 +1296,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 void amdgpu_device_pci_config_reset(struct amdgpu_device *adev);
 int amdgpu_device_pci_reset(struct amdgpu_device *adev);
 bool amdgpu_device_need_post(struct amdgpu_device *adev);
+bool amdgpu_sg_display_supported(struct amdgpu_device *adev);
 bool amdgpu_device_pcie_dynamic_switching_supported(void);
 bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev);
 bool amdgpu_device_aspm_support_quirk(void);

View File

@@ -295,7 +295,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 	if (!p->gang_size) {
 		ret = -EINVAL;
-		goto free_partial_kdata;
+		goto free_all_kdata;
 	}
 
 	for (i = 0; i < p->gang_size; ++i) {

View File

@@ -1458,6 +1458,32 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
 	return true;
 }
 
+/*
+ * On APUs with >= 64GB white flickering has been observed w/ SG enabled.
+ * Disable S/G on such systems until we have a proper fix.
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2354
+ * https://gitlab.freedesktop.org/drm/amd/-/issues/2735
+ */
+bool amdgpu_sg_display_supported(struct amdgpu_device *adev)
+{
+	switch (amdgpu_sg_display) {
+	case -1:
+		break;
+	case 0:
+		return false;
+	case 1:
+		return true;
+	default:
+		return false;
+	}
+	if ((totalram_pages() << (PAGE_SHIFT - 10)) +
+	    (adev->gmc.real_vram_size / 1024) >= 64000000) {
+		DRM_WARN("Disabling S/G due to >=64GB RAM\n");
+		return false;
+	}
+
+	return true;
+}
+
 /*
  * Intel hosts such as Raptor Lake and Sapphire Rapids don't support dynamic
  * speed switching. Until we have confirmation from Intel that a specific host
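
For reference, the threshold above is computed in KiB: totalram_pages() << (PAGE_SHIFT - 10) is system RAM in KiB, adev->gmc.real_vram_size / 1024 is VRAM in KiB, and 64000000 KiB is a decimal-64GB cutoff (roughly 61 GiB). A minimal standalone sketch of the same arithmetic, using made-up sizes rather than values from the patch:

	/* Illustrative only: the RAM/VRAM sizes below are assumptions, not from the change. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t ram_kib  = 64ULL * 1024 * 1024;  /* 64 GiB of host RAM, in KiB */
		uint64_t vram_kib = 512ULL * 1024;        /* 512 MiB of carve-out VRAM  */

		if (ram_kib + vram_kib >= 64000000ULL)    /* same cutoff as the patch   */
			printf("S/G display would be disabled\n");
		return 0;
	}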

View File

@@ -471,8 +471,12 @@ static void gfx_v11_0_check_fw_cp_gfx_shadow(struct amdgpu_device *adev)
 	case IP_VERSION(11, 0, 3):
 		if ((adev->gfx.me_fw_version >= 1505) &&
 		    (adev->gfx.pfp_fw_version >= 1600) &&
-		    (adev->gfx.mec_fw_version >= 512))
-			adev->gfx.cp_gfx_shadow = true;
+		    (adev->gfx.mec_fw_version >= 512)) {
+			if (amdgpu_sriov_vf(adev))
+				adev->gfx.cp_gfx_shadow = true;
+			else
+				adev->gfx.cp_gfx_shadow = false;
+		}
 		break;
 	default:
 		adev->gfx.cp_gfx_shadow = false;

View File

@@ -137,14 +137,15 @@ static int psp_v13_0_wait_for_bootloader(struct psp_context *psp)
 	int ret;
 	int retry_loop;
 
+	/* Wait for bootloader to signify that it is ready having bit 31 of
+	 * C2PMSG_35 set to 1. All other bits are expected to be cleared.
+	 * If there is an error in processing command, bits[7:0] will be set.
+	 * This is applicable for PSP v13.0.6 and newer.
+	 */
 	for (retry_loop = 0; retry_loop < 10; retry_loop++) {
-		/* Wait for bootloader to signify that is
-		    ready having bit 31 of C2PMSG_35 set to 1 */
-		ret = psp_wait_for(psp,
-				   SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
-				   0x80000000,
-				   0x80000000,
-				   false);
+		ret = psp_wait_for(
+			psp, SOC15_REG_OFFSET(MP0, 0, regMP0_SMN_C2PMSG_35),
+			0x80000000, 0xffffffff, false);
 
 		if (ret == 0)
 			return 0;
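
The functional change in this hunk is the mask argument to psp_wait_for(): with the old mask 0x80000000 any value with bit 31 set counted as ready, whereas 0xffffffff requires C2PMSG_35 to read exactly 0x80000000, so error bits in [7:0] no longer pass as "ready". A minimal sketch of that matching logic (the helper below is illustrative, not the driver's psp_wait_for()):

	#include <stdbool.h>
	#include <stdint.h>

	/* Ready when the masked register value equals the expected value. */
	static bool bootloader_ready(uint32_t reg, uint32_t expected, uint32_t mask)
	{
		return (reg & mask) == expected;
	}

	/* Example: reg = 0x80000001 (bit 31 set, but an error bit also set)
	 *   old: bootloader_ready(0x80000001, 0x80000000, 0x80000000) -> true
	 *   new: bootloader_ready(0x80000001, 0x80000000, 0xffffffff) -> false
	 */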

View File

@@ -1543,11 +1543,7 @@ static bool kfd_ignore_crat(void)
 	if (ignore_crat)
 		return true;
 
-#ifndef KFD_SUPPORT_IOMMU_V2
 	ret = true;
-#else
-	ret = false;
-#endif
 
 	return ret;
 }

View File

@@ -194,11 +194,6 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
 
 		kfd_device_info_set_event_interrupt_class(kfd);
 
-		/* Raven */
-		if (gc_version == IP_VERSION(9, 1, 0) ||
-		    gc_version == IP_VERSION(9, 2, 2))
-			kfd->device_info.needs_iommu_device = true;
-
 		if (gc_version < IP_VERSION(11, 0, 0)) {
 			/* Navi2x+, Navi1x+ */
 			if (gc_version == IP_VERSION(10, 3, 6))
@@ -233,10 +228,6 @@ static void kfd_device_info_init(struct kfd_dev *kfd,
 		    asic_type != CHIP_TONGA)
 			kfd->device_info.supports_cwsr = true;
 
-		if (asic_type == CHIP_KAVERI ||
-		    asic_type == CHIP_CARRIZO)
-			kfd->device_info.needs_iommu_device = true;
-
 		if (asic_type != CHIP_HAWAII && !vf)
 			kfd->device_info.needs_pci_atomics = true;
 	}
@@ -249,7 +240,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 	uint32_t gfx_target_version = 0;
 
 	switch (adev->asic_type) {
-#ifdef KFD_SUPPORT_IOMMU_V2
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_KAVERI:
 		gfx_target_version = 70000;
@@ -262,7 +252,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 		if (!vf)
 			f2g = &gfx_v8_kfd2kgd;
 		break;
-#endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_HAWAII:
 		gfx_target_version = 70001;
@@ -298,7 +287,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 		gfx_target_version = 90000;
 		f2g = &gfx_v9_kfd2kgd;
 		break;
-#ifdef KFD_SUPPORT_IOMMU_V2
 	/* Raven */
 	case IP_VERSION(9, 1, 0):
 	case IP_VERSION(9, 2, 2):
@@ -306,7 +294,6 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
 		if (!vf)
 			f2g = &gfx_v9_kfd2kgd;
 		break;
-#endif
 	/* Vega12 */
 	case IP_VERSION(9, 2, 1):
 		gfx_target_version = 90004;

View File

@@ -2538,18 +2538,12 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev)
 	}
 
 	switch (dev->adev->asic_type) {
-	case CHIP_CARRIZO:
-		device_queue_manager_init_vi(&dqm->asic_ops);
-		break;
-
-	case CHIP_KAVERI:
-		device_queue_manager_init_cik(&dqm->asic_ops);
-		break;
-
 	case CHIP_HAWAII:
 		device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
 		break;
 
+	case CHIP_CARRIZO:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
 	case CHIP_POLARIS10:

View File

@@ -1638,9 +1638,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 		}
 		break;
 	}
 
-	if (init_data.flags.gpu_vm_support &&
-	    (amdgpu_sg_display == 0))
-		init_data.flags.gpu_vm_support = false;
+	if (init_data.flags.gpu_vm_support)
+		init_data.flags.gpu_vm_support = amdgpu_sg_display_supported(adev);
 
 	if (init_data.flags.gpu_vm_support)
 		adev->mode_info.gpu_vm_support = true;

View File

@@ -1320,7 +1320,7 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
 		if (computed_streams[i])
 			continue;
 
-		if (!res_pool->funcs->remove_stream_from_ctx ||
+		if (res_pool->funcs->remove_stream_from_ctx &&
 		    res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK)
 			return -EINVAL;

View File

@@ -777,7 +777,8 @@ void dce110_edp_wait_for_hpd_ready(
 	dal_gpio_destroy_irq(&hpd);
 
 	/* ensure that the panel is detected */
-	ASSERT(edp_hpd_high);
+	if (!edp_hpd_high)
+		DC_LOG_DC("%s: wait timed out!\n", __func__);
 }
 
 void dce110_edp_power_control(

View File

@@ -357,8 +357,11 @@ void dpp3_set_cursor_attributes(
 	int cur_rom_en = 0;
 
 	if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA ||
-	    color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA)
-		cur_rom_en = 1;
+	    color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) {
+		if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) {
+			cur_rom_en = 1;
+		}
+	}
 
 	REG_UPDATE_3(CURSOR0_CONTROL,
 			CUR0_MODE, color_format,

View File

@@ -1581,9 +1581,9 @@ static int smu_disable_dpms(struct smu_context *smu)
 
 	/*
 	 * For SMU 13.0.4/11, PMFW will handle the features disablement properly
-	 * for gpu reset case. Driver involvement is unnecessary.
+	 * for gpu reset and S0i3 cases. Driver involvement is unnecessary.
 	 */
-	if (amdgpu_in_reset(adev)) {
+	if (amdgpu_in_reset(adev) || adev->in_s0ix) {
 		switch (adev->ip_versions[MP1_HWIP][0]) {
 		case IP_VERSION(13, 0, 4):
 		case IP_VERSION(13, 0, 11):

View File

@@ -331,11 +331,13 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
 	struct smu_13_0_0_powerplay_table *powerplay_table =
 		table_context->power_play_table;
 	struct smu_baco_context *smu_baco = &smu->smu_baco;
+#if 0
 	PPTable_t *pptable = smu->smu_table.driver_pptable;
 	const OverDriveLimits_t * const overdrive_upperlimits =
 				&pptable->SkuTable.OverDriveLimitsBasicMax;
 	const OverDriveLimits_t * const overdrive_lowerlimits =
 				&pptable->SkuTable.OverDriveLimitsMin;
+#endif
 
 	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_HARDWAREDC)
 		smu->dc_controlled_by_gpio = true;
@@ -347,18 +349,27 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
 	if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
 		smu_baco->maco_support = true;
 
+	/*
+	 * We are in the transition to a new OD mechanism.
+	 * Disable the OD feature support for SMU13 temporarily.
+	 * TODO: get this reverted when new OD mechanism online
+	 */
+#if 0
 	if (!overdrive_lowerlimits->FeatureCtrlMask ||
 	    !overdrive_upperlimits->FeatureCtrlMask)
 		smu->od_enabled = false;
 
-	table_context->thermal_controller_type =
-		powerplay_table->thermal_controller_type;
-
 	/*
 	 * Instead of having its own buffer space and get overdrive_table copied,
 	 * smu->od_settings just points to the actual overdrive_table
 	 */
 	smu->od_settings = &powerplay_table->overdrive_table;
+#else
+	smu->od_enabled = false;
+#endif
+
+	table_context->thermal_controller_type =
+		powerplay_table->thermal_controller_type;
 
 	return 0;
 }
@@ -1140,7 +1151,6 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
 		(OverDriveTableExternal_t *)smu->smu_table.overdrive_table;
 	struct smu_13_0_dpm_table *single_dpm_table;
 	struct smu_13_0_pcie_table *pcie_table;
-	const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
 	uint32_t gen_speed, lane_width;
 	int i, curr_freq, size = 0;
 	int32_t min_value, max_value;
@@ -1256,7 +1266,7 @@ static int smu_v13_0_0_print_clk_levels(struct smu_context *smu,
 					(pcie_table->pcie_lane[i] == 6) ? "x16" : "",
 					pcie_table->clk_freq[i],
 					(gen_speed == DECODE_GEN_SPEED(pcie_table->pcie_gen[i])) &&
-					(lane_width == DECODE_LANE_WIDTH(link_width[pcie_table->pcie_lane[i]])) ?
+					(lane_width == DECODE_LANE_WIDTH(pcie_table->pcie_lane[i])) ?
 					"*" : "");
 			break;

View File

@@ -1993,9 +1993,8 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
 	gpu_metrics->average_socket_power =
 		SMUQ10_TO_UINT(metrics->SocketPower);
 
-	/* Energy is reported in 15.625mJ units */
-	gpu_metrics->energy_accumulator =
-		SMUQ10_TO_UINT(metrics->SocketEnergyAcc);
+	/* Energy counter reported in 15.259uJ (2^-16) units */
+	gpu_metrics->energy_accumulator = metrics->SocketEnergyAcc;
 
 	gpu_metrics->current_gfxclk =
 		SMUQ10_TO_UINT(metrics->GfxclkFrequency[xcc0]);
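
Arithmetic behind the updated comment: one count of the accumulator is 2^-16 J, i.e. 1/65536 J, about 15.259 uJ; the old comment's 15.625 mJ is the same counter after a Q10 right shift (2^-16 J x 2^10 = 2^-6 J = 15.625 mJ). A small worked conversion, with an arbitrary assumed counter value:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t counter = 6553600;          /* assumed raw SocketEnergyAcc reading */
		double uj_per_tick = 1e6 / 65536.0;  /* ~15.259 uJ per count (2^-16 J)      */

		printf("%.3f uJ/count, %.1f J total\n", uj_per_tick, counter / 65536.0);
		return 0;
	}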

View File

@@ -323,10 +323,12 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
 	struct smu_baco_context *smu_baco = &smu->smu_baco;
 	PPTable_t *smc_pptable = table_context->driver_pptable;
 	BoardTable_t *BoardTable = &smc_pptable->BoardTable;
+#if 0
 	const OverDriveLimits_t * const overdrive_upperlimits =
 				&smc_pptable->SkuTable.OverDriveLimitsBasicMax;
 	const OverDriveLimits_t * const overdrive_lowerlimits =
 				&smc_pptable->SkuTable.OverDriveLimitsMin;
+#endif
 
 	if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
 		smu->dc_controlled_by_gpio = true;
@@ -338,18 +340,22 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
 	if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
 		smu_baco->maco_support = true;
 
+#if 0
 	if (!overdrive_lowerlimits->FeatureCtrlMask ||
 	    !overdrive_upperlimits->FeatureCtrlMask)
 		smu->od_enabled = false;
 
-	table_context->thermal_controller_type =
-		powerplay_table->thermal_controller_type;
-
 	/*
 	 * Instead of having its own buffer space and get overdrive_table copied,
 	 * smu->od_settings just points to the actual overdrive_table
 	 */
 	smu->od_settings = &powerplay_table->overdrive_table;
+#else
+	smu->od_enabled = false;
+#endif
+
+	table_context->thermal_controller_type =
+		powerplay_table->thermal_controller_type;
 
 	return 0;
 }

View File

@@ -2517,9 +2517,11 @@ static irqreturn_t it6505_int_threaded_handler(int unused, void *data)
 	};
 	int int_status[3], i;
 
-	if (it6505->enable_drv_hold || pm_runtime_get_if_in_use(dev) <= 0)
+	if (it6505->enable_drv_hold || !it6505->powered)
 		return IRQ_HANDLED;
 
+	pm_runtime_get_sync(dev);
+
 	int_status[0] = it6505_read(it6505, INT_STATUS_01);
 	int_status[1] = it6505_read(it6505, INT_STATUS_02);
 	int_status[2] = it6505_read(it6505, INT_STATUS_03);

View File

@@ -774,9 +774,7 @@ static struct mipi_dsi_device *lt9611_attach_dsi(struct lt9611 *lt9611,
 	dsi->lanes = 4;
 	dsi->format = MIPI_DSI_FMT_RGB888;
 	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
-			  MIPI_DSI_MODE_VIDEO_HSE | MIPI_DSI_MODE_VIDEO_NO_HSA |
-			  MIPI_DSI_MODE_VIDEO_NO_HFP | MIPI_DSI_MODE_VIDEO_NO_HBP |
-			  MIPI_DSI_MODE_NO_EOT_PACKET;
+			  MIPI_DSI_MODE_VIDEO_HSE;
 
 	ret = devm_mipi_dsi_attach(dev, dsi);
 	if (ret < 0) {

View File

@@ -623,7 +623,13 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
 	int ret;
 
 	if (obj->import_attach) {
+		/* Reset both vm_ops and vm_private_data, so we don't end up with
+		 * vm_ops pointing to our implementation if the dma-buf backend
+		 * doesn't set those fields.
+		 */
 		vma->vm_private_data = NULL;
+		vma->vm_ops = NULL;
+
 		ret = dma_buf_mmap(obj->dma_buf, vma, 0);
 
 		/* Drop the reference drm_gem_mmap_obj() acquired.*/

View File

@@ -967,7 +967,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
 	/* Determine display colour depth for everything except LVDS now,
 	 * DP requires this before mode_valid() is called.
 	 */
-	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode)
+	if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
 		nouveau_connector_detect_depth(connector);
 
 	/* Find the native mode if this is a digital panel, if we didn't

View File

@@ -26,6 +26,8 @@
 #include "head.h"
 #include "ior.h"
 
+#include <drm/display/drm_dp.h>
+
 #include <subdev/bios.h>
 #include <subdev/bios/init.h>
 #include <subdev/gpio.h>
@@ -634,6 +636,50 @@ nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
 	return outp->dp.rates != 0;
 }
 
+/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
+ * converted to work inside nvkm. This is a temporary holdover until we start
+ * passing the drm_dp_aux device through NVKM
+ */
+static int
+nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
+{
+	struct nvkm_i2c_aux *aux = outp->dp.aux;
+	u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
+	int ret;
+
+	ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * Prior to DP1.3 the bit represented by
+	 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
+	 * If it is set DP_DPCD_REV at 0000h could be at a value less than
+	 * the true capability of the panel. The only way to check is to
+	 * then compare 0000h and 2200h.
+	 */
+	if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+	      DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
+		return 0;
+
+	ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
+	if (ret < 0)
+		return ret;
+
+	if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
+		OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
+			 outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
+		return 0;
+	}
+
+	if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
+		return 0;
+
+	memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));
+
+	return 0;
+}
+
 void
 nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
 {
@@ -689,7 +735,7 @@ nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
 		memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
 	}
 
-	if (!nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, sizeof(outp->dp.dpcd))) {
+	if (!nvkm_dp_read_dpcd_caps(outp)) {
 		const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
 		const u8 *rate;
 		int rate_max;

View File

@@ -117,6 +117,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gk110_grctx;
 void gk110_grctx_generate_r419eb0(struct gf100_gr *);
+void gk110_grctx_generate_r419f78(struct gf100_gr *);
 
 extern const struct gf100_grctx_func gk110b_grctx;
 extern const struct gf100_grctx_func gk208_grctx;

View File

@@ -906,7 +906,9 @@ static void
 gk104_grctx_generate_r419f78(struct gf100_gr *gr)
 {
 	struct nvkm_device *device = gr->base.engine.subdev.device;
-	nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000);
+
+	/* bit 3 set disables loads in fp helper invocations, we need it enabled */
+	nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000);
 }
 
 void

View File

@@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr)
 	nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000);
 }
 
+void
+gk110_grctx_generate_r419f78(struct gf100_gr *gr)
+{
+	struct nvkm_device *device = gr->base.engine.subdev.device;
+
+	/* bit 3 set disables loads in fp helper invocations, we need it enabled */
+	nvkm_mask(device, 0x419f78, 0x00000008, 0x00000000);
+}
+
 const struct gf100_grctx_func
 gk110_grctx = {
 	.main = gf100_grctx_generate_main,
@@ -854,4 +863,5 @@ gk110_grctx = {
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r418800 = gk104_grctx_generate_r418800,
 	.r419eb0 = gk110_grctx_generate_r419eb0,
+	.r419f78 = gk110_grctx_generate_r419f78,
 };

View File

@@ -103,4 +103,5 @@ gk110b_grctx = {
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r418800 = gk104_grctx_generate_r418800,
 	.r419eb0 = gk110_grctx_generate_r419eb0,
+	.r419f78 = gk110_grctx_generate_r419f78,
 };

View File

@@ -568,4 +568,5 @@ gk208_grctx = {
 	.dist_skip_table = gf117_grctx_generate_dist_skip_table,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r418800 = gk104_grctx_generate_r418800,
+	.r419f78 = gk110_grctx_generate_r419f78,
 };

View File

@@ -988,4 +988,5 @@ gm107_grctx = {
 	.r406500 = gm107_grctx_generate_r406500,
 	.gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr,
 	.r419e00 = gm107_grctx_generate_r419e00,
+	.r419f78 = gk110_grctx_generate_r419f78,
 };

View File

@@ -206,19 +206,6 @@ tu102_gr_av_to_init_veid(struct nvkm_blob *blob, struct gf100_gr_pack **ppack)
 	return gk20a_gr_av_to_init_(blob, 64, 0x00100000, ppack);
 }
 
-int
-tu102_gr_load(struct gf100_gr *gr, int ver, const struct gf100_gr_fwif *fwif)
-{
-	int ret;
-
-	ret = gm200_gr_load(gr, ver, fwif);
-	if (ret)
-		return ret;
-
-	return gk20a_gr_load_net(gr, "gr/", "sw_veid_bundle_init", ver, tu102_gr_av_to_init_veid,
-				 &gr->bundle_veid);
-}
-
 static const struct gf100_gr_fwif
 tu102_gr_fwif[] = {
 	{ 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },

View File

@@ -833,12 +833,12 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 	 * need align with 2 pixel.
 	 */
 	if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) {
-		DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
+		DRM_DEBUG_KMS("Invalid Source: Yuv format not support odd xpos\n");
 		return -EINVAL;
 	}
 
 	if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) {
-		DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n");
+		DRM_DEBUG_KMS("Invalid Source: Yuv format does not support this rotation\n");
 		return -EINVAL;
 	}
 
@@ -846,7 +846,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 		struct vop *vop = to_vop(crtc);
 
 		if (!vop->data->afbc) {
-			DRM_ERROR("vop does not support AFBC\n");
+			DRM_DEBUG_KMS("vop does not support AFBC\n");
 			return -EINVAL;
 		}
 
@@ -855,15 +855,16 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 			return ret;
 
 		if (new_plane_state->src.x1 || new_plane_state->src.y1) {
-			DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n",
-				  new_plane_state->src.x1,
-				  new_plane_state->src.y1, fb->offsets[0]);
+			DRM_DEBUG_KMS("AFBC does not support offset display, " \
+				      "xpos=%d, ypos=%d, offset=%d\n",
+				      new_plane_state->src.x1, new_plane_state->src.y1,
+				      fb->offsets[0]);
 			return -EINVAL;
 		}
 
 		if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) {
-			DRM_ERROR("No rotation support in AFBC, rotation=%d\n",
-				  new_plane_state->rotation);
+			DRM_DEBUG_KMS("No rotation support in AFBC, rotation=%d\n",
+				      new_plane_state->rotation);
 			return -EINVAL;
 		}
 	}
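
These checks reject userspace-supplied atomic state, so the messages are demoted from DRM_ERROR to DRM_DEBUG_KMS; the debug variants are only emitted when the KMS category (bit 0x04) of the drm.debug module parameter is enabled, e.g. drm.debug=0x04 on the kernel command line or via /sys/module/drm/parameters/debug at runtime.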