drm fixes for 6.10-rc2
shmem:
 - fix BUG_ON in COW handling
 - Warn when trying to pin imported objects

buddy:
 - fix page size handling

dma-buf:
 - sw-sync: Don't interfere with IRQ handling
 - Fix kthreads-handling error path

i915:
 - Fix a race in audio component by registering it later
 - Make DPT object unshrinkable to avoid shrinking when framebuffer has not shrunk
 - Fix CCS id calculation to fix a perf regression
 - Fix selftest caching mode
 - Fix FIELD_PREP compiler warnings
 - Fix indefinite wait for GT wakeref release
 - Revert overeager multi-gt pm reference removal

xe:
 - One pcode polling timeout change
 - One fix for deadlocks for faulting VMs
 - One error-path lock imbalance fix

amdgpu:
 - RAS fix
 - Fix colorspace property for MST connectors
 - Fix for PCIe DPM
 - Silence UBSAN warning
 - GPUVM robustness fix
 - Partition fix
 - Drop deprecated I2C_CLASS_SPD

amdkfd:
 - Revert unused changes for certain 11.0.3 devices
 - Simplify APU VRAM handling

lima:
 - Fix dma_resv-related deadlock in object pin

msm:
 - Remove build-time dependency on Python 3.9

nouveau:
 - nvif: Fix possible integer overflow

panel:
 - lg-sw43408: Select DP helpers; Declare backlight ops as static
 - sitronix-st7789v: Various fixes for jt240mhqs_hwt_ek_e3 panel

panfrost:
 - Fix dma_resv-related deadlock in object pin

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEEKbZHaGwW9KfbeusDHTzWXnEhr4FAmZaMqYACgkQDHTzWXnE
hr6laxAAkw/fxO2/QZkqVcO1QvOOg7++zkiLBqMgPe3LZ8u1DpJ4Y6DZijAVA025
7vP1wgS7LUIHka0u3wToj1On18qU2obw1+EP78asTM0E4Yp8y+uR08sUyuJWIKl2
2rfrvry8rV2a0T3trntbQhsqQ11WIvEaEcbhYvJrYW3YR6bTzdoFDxS/7uC7rwMq
qdS3uGt5jZySj+HuQP9O3eBdrfRrieZpG/q2gylypj/7L1e28qlamvhDf9vHTYm9
nlaFvfMuW+WeX7FUPh4wyMZG57HVQRHijvaJcpEKophj9VlWD+daNR5VC/fNpyOb
qFt5H/438rxDNgWh8i6J6b9+e5PPuobv0O/+drCzOam0xm3sMD90lCiBhCE8OBSv
HsXhJFEE7Yye2E0bKU7P4sMTfpIqG7EyEUeD9ML0QWLN5hnznVY36BwTPmdYqyKN
Pil3XQtRy7N1xcv1osszctHZot4xZ8M8Iw7YnmZVBbVraNLYjiakJuSYD8r81NA7
f4o2roq6OI/Y3zIaNetUQjMk3GO6y0gcAv0GzQxXb4hvlOQ0r5mU8C2P2TsHMaqn
/7Tv9CLDyNdS/QUkQJTDRouPwfzW2K2TV/K4xRB+bFCZatKw2/WCNmD9IM0EtaqP
EBi+FhXW805dWLZmw4Ca4y1IoVLSPn6FvsAe5QkDdELNgreK6KQ=
=hp1i
-----END PGP SIGNATURE-----

Merge tag 'drm-fixes-2024-06-01' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "This is the weekly fixes. Lots of small fixes across the board, one
  BUG_ON fix in shmem seems most important, otherwise amdgpu, i915, xe
  mostly with small fixes to all the other drivers.

  shmem:
   - fix BUG_ON in COW handling
   - warn when trying to pin imported objects

  buddy:
   - fix page size handling

  dma-buf:
   - sw-sync: Don't interfere with IRQ handling
   - fix kthreads-handling error path

  i915:
   - fix a race in audio component by registering it later
   - make DPT object unshrinkable to avoid shrinking when framebuffer has not shrunk
   - fix CCS id calculation to fix a perf regression
   - fix selftest caching mode
   - fix FIELD_PREP compiler warnings
   - fix indefinite wait for GT wakeref release
   - revert overeager multi-gt pm reference removal

  xe:
   - pcode polling timeout change
   - fix for deadlocks for faulting VMs
   - error-path lock imbalance fix

  amdgpu:
   - RAS fix
   - fix colorspace property for MST connectors
   - fix for PCIe DPM
   - silence UBSAN warning
   - GPUVM robustness fix
   - partition fix
   - drop deprecated I2C_CLASS_SPD

  amdkfd:
   - revert unused changes for certain 11.0.3 devices
   - simplify APU VRAM handling

  lima:
   - fix dma_resv-related deadlock in object pin

  msm:
   - remove build-time dependency on Python 3.9

  nouveau:
   - nvif: Fix possible integer overflow

  panel:
   - lg-sw43408: Select DP helpers; Declare backlight ops as static
   - sitronix-st7789v: Various fixes for jt240mhqs_hwt_ek_e3 panel

  panfrost:
   - fix dma_resv-related deadlock in object pin"

* tag 'drm-fixes-2024-06-01' of https://gitlab.freedesktop.org/drm/kernel: (35 commits)
  drm/msm: remove python 3.9 dependency for compiling msm
  drm/panel: sitronix-st7789v: fix display size for jt240mhqs_hwt_ek_e3 panel
  drm/panel: sitronix-st7789v: tweak timing for jt240mhqs_hwt_ek_e3 panel
  drm/panel: sitronix-st7789v: fix timing for jt240mhqs_hwt_ek_e3 panel
  drm/amd/pm: remove deprecated I2C_CLASS_SPD support from newly added SMU_14_0_2
  drm/amdgpu: Make CPX mode auto default in NPS4
  drm/amdkfd: simplify APU VRAM handling
  Revert "drm/amdkfd: fix gfx_target_version for certain 11.0.3 devices"
  drm/amdgpu: fix dereference null return value for the function amdgpu_vm_pt_parent
  drm/amdgpu: silence UBSAN warning
  drm/amdgpu: Adjust logic in amdgpu_device_partner_bandwidth()
  drm/i915: Fix audio component initialization
  drm/i915/dpt: Make DPT object unshrinkable
  drm/i915/gt: Fix CCS id's calculation for CCS mode setting
  drm/panel/lg-sw43408: mark sw43408_backlight_ops as static
  drm/i915/selftests: Set always_coherent to false when reading from CPU
  drm/panel/lg-sw43408: select CONFIG_DRM_DISPLAY_DP_HELPER
  drm/i915/guc: avoid FIELD_PREP warning
  drm/i915/gt: Disarm breadcrumbs if engines are already idle
  Revert "drm/i915: Remove extra multi-gt pm-references"
  ...
commit cc8ed4d0a8
@@ -540,6 +540,12 @@ static int race_signal_callback(void *arg)
         t[i].before = pass;
         t[i].task = kthread_run(thread_signal_callback, &t[i],
                     "dma-fence:%d", i);
+        if (IS_ERR(t[i].task)) {
+            ret = PTR_ERR(t[i].task);
+            while (--i >= 0)
+                kthread_stop_put(t[i].task);
+            return ret;
+        }
         get_task_struct(t[i].task);
     }
@@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
     seq_printf(s, "%s: %d\n", obj->name, obj->value);

-    spin_lock_irq(&obj->lock);
+    spin_lock(&obj->lock); /* Caller already disabled IRQ. */
     list_for_each(pos, &obj->pt_list) {
         struct sync_pt *pt = container_of(pos, struct sync_pt, link);
         sync_print_fence(s, &pt->base, false);
     }
-    spin_unlock_irq(&obj->lock);
+    spin_unlock(&obj->lock);
 }

 static void sync_print_sync_file(struct seq_file *s,
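Aside, for context (an illustrative sketch by the editor, not part of this commit): the sync_debug change works because the caller of sync_print_obj() already holds a lock taken with interrupts disabled, so the inner lock must use the plain spin_lock()/spin_unlock() pair; an inner spin_unlock_irq() would re-enable interrupts while the outer lock is still held. A minimal sketch of that rule, assuming a kernel build context and hypothetical lock names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(outer_lock);
    static DEFINE_SPINLOCK(inner_lock);

    static void dump_inner(void)
    {
            /* Correct: leave the caller's IRQ state untouched. */
            spin_lock(&inner_lock);
            /* ... print some state ... */
            spin_unlock(&inner_lock);

            /*
             * Buggy variant: spin_unlock_irq() here would unconditionally
             * re-enable interrupts even though dump_all() still holds
             * outer_lock and expects IRQs to stay off.
             */
    }

    static void dump_all(void)
    {
            spin_lock_irq(&outer_lock);     /* IRQs disabled from here on */
            dump_inner();                   /* must not toggle the IRQ state */
            spin_unlock_irq(&outer_lock);   /* IRQs re-enabled here */
    }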
@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
         return -EINVAL;

     vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
-    if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+    if (adev->flags & AMD_IS_APU) {
         system_mem_needed = size;
         ttm_mem_needed = size;
     }
@@ -233,7 +233,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
     if (adev && xcp_id >= 0) {
         adev->kfd.vram_used[xcp_id] += vram_needed;
         adev->kfd.vram_used_aligned[xcp_id] +=
-            (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+            (adev->flags & AMD_IS_APU) ?
             vram_needed :
             ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
     }
@@ -261,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,

     if (adev) {
         adev->kfd.vram_used[xcp_id] -= size;
-        if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+        if (adev->flags & AMD_IS_APU) {
             adev->kfd.vram_used_aligned[xcp_id] -= size;
             kfd_mem_limit.system_mem_used -= size;
             kfd_mem_limit.ttm_mem_used -= size;
@@ -890,7 +890,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
      * if peer device has large BAR. In contrast, access over xGMI is
      * allowed for both small and large BAR configurations of peer device
      */
-    if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) &&
+    if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
         ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
          (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
          (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
@@ -1658,7 +1658,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
         - atomic64_read(&adev->vram_pin_size)
         - reserved_for_pt;

-    if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+    if (adev->flags & AMD_IS_APU) {
         system_mem_available = no_system_mem_limit ?
             kfd_mem_limit.max_system_mem_limit :
             kfd_mem_limit.max_system_mem_limit -
@@ -1706,7 +1706,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
     if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
         domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;

-        if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+        if (adev->flags & AMD_IS_APU) {
             domain = AMDGPU_GEM_DOMAIN_GTT;
             alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
             alloc_flags = 0;
@@ -1953,7 +1953,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
     if (size) {
         if (!is_imported &&
            (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
-           ((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) &&
+           ((adev->flags & AMD_IS_APU) &&
             mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
             *size = bo_size;
         else
@@ -2376,7 +2376,7 @@ static int import_obj_create(struct amdgpu_device *adev,
     (*mem)->bo = bo;
     (*mem)->va = va;
     (*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
-             !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+             !(adev->flags & AMD_IS_APU) ?
              AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;

     (*mem)->mapped_to_gpu_memory = 0;
@@ -5944,13 +5944,18 @@ static void amdgpu_device_partner_bandwidth(struct amdgpu_device *adev,
     *speed = PCI_SPEED_UNKNOWN;
     *width = PCIE_LNK_WIDTH_UNKNOWN;

-    while ((parent = pci_upstream_bridge(parent))) {
-        /* skip upstream/downstream switches internal to dGPU*/
-        if (parent->vendor == PCI_VENDOR_ID_ATI)
-            continue;
-        *speed = pcie_get_speed_cap(parent);
-        *width = pcie_get_width_cap(parent);
-        break;
+    if (amdgpu_device_pcie_dynamic_switching_supported(adev)) {
+        while ((parent = pci_upstream_bridge(parent))) {
+            /* skip upstream/downstream switches internal to dGPU*/
+            if (parent->vendor == PCI_VENDOR_ID_ATI)
+                continue;
+            *speed = pcie_get_speed_cap(parent);
+            *width = pcie_get_width_cap(parent);
+            break;
+        }
+    } else {
+        /* use the current speeds rather than max if switching is not supported */
+        pcie_bandwidth_available(adev->pdev, NULL, speed, width);
     }
 }
@@ -46,7 +46,7 @@ struct amdgpu_iv_entry;
 #define AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(x)    AMDGPU_GET_REG_FIELD(x, 7, 7)
 #define AMDGPU_RAS_GPU_ERR_SOCKET_ID(x)        AMDGPU_GET_REG_FIELD(x, 10, 8)
 #define AMDGPU_RAS_GPU_ERR_AID_ID(x)           AMDGPU_GET_REG_FIELD(x, 12, 11)
-#define AMDGPU_RAS_GPU_ERR_HBM_ID(x)           AMDGPU_GET_REG_FIELD(x, 13, 13)
+#define AMDGPU_RAS_GPU_ERR_HBM_ID(x)           AMDGPU_GET_REG_FIELD(x, 14, 13)
 #define AMDGPU_RAS_GPU_ERR_BOOT_STATUS(x)      AMDGPU_GET_REG_FIELD(x, 31, 31)

 #define AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT   1000
@@ -706,11 +706,15 @@ int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params,
              struct amdgpu_vm_bo_base *entry)
 {
     struct amdgpu_vm_bo_base *parent = amdgpu_vm_pt_parent(entry);
-    struct amdgpu_bo *bo = parent->bo, *pbo;
+    struct amdgpu_bo *bo, *pbo;
     struct amdgpu_vm *vm = params->vm;
     uint64_t pde, pt, flags;
     unsigned int level;

+    if (WARN_ON(!parent))
+        return -EINVAL;
+
+    bo = parent->bo;
     for (level = 0, pbo = bo->parent; pbo; ++level)
         pbo = pbo->parent;
@@ -422,7 +422,7 @@ __aqua_vanjaram_get_auto_mode(struct amdgpu_xcp_mgr *xcp_mgr)

     if (adev->gmc.num_mem_partitions == num_xcc / 2)
         return (adev->flags & AMD_IS_APU) ? AMDGPU_TPX_PARTITION_MODE :
-                            AMDGPU_QPX_PARTITION_MODE;
+                            AMDGPU_CPX_PARTITION_MODE;

     if (adev->gmc.num_mem_partitions == 2 && !(adev->flags & AMD_IS_APU))
         return AMDGPU_DPX_PARTITION_MODE;
@@ -408,15 +408,8 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
         f2g = &gfx_v11_kfd2kgd;
         break;
     case IP_VERSION(11, 0, 3):
-        if ((adev->pdev->device == 0x7460 &&
-             adev->pdev->revision == 0x00) ||
-            (adev->pdev->device == 0x7461 &&
-             adev->pdev->revision == 0x00))
-            /* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
-            gfx_target_version = 110005;
-        else
-            /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
-            gfx_target_version = 110001;
+        /* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
+        gfx_target_version = 110001;
         f2g = &gfx_v11_kfd2kgd;
         break;
     case IP_VERSION(11, 5, 0):
@@ -1023,7 +1023,7 @@ int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
     if (amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(9, 0, 1))
         return -EINVAL;

-    if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)
+    if (adev->flags & AMD_IS_APU)
         return 0;

     pgmap = &kfddev->pgmap;
@@ -2619,8 +2619,7 @@ svm_range_best_restore_location(struct svm_range *prange,
         return -1;
     }

-    if (node->adev->gmc.is_app_apu ||
-        node->adev->flags & AMD_IS_APU)
+    if (node->adev->flags & AMD_IS_APU)
         return 0;

     if (prange->preferred_loc == gpuid ||
@@ -3338,8 +3337,7 @@ svm_range_best_prefetch_location(struct svm_range *prange)
         goto out;
     }

-    if (bo_node->adev->gmc.is_app_apu ||
-        bo_node->adev->flags & AMD_IS_APU) {
+    if (bo_node->adev->flags & AMD_IS_APU) {
         best_loc = 0;
         goto out;
     }
@@ -201,7 +201,6 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s
  * is initialized to not 0 when page migration register device memory.
  */
 #define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\
-                    (adev)->gmc.is_app_apu ||\
                     ((adev)->flags & AMD_IS_APU))

 void svm_range_bo_unref_async(struct svm_range_bo *svm_bo);
@@ -613,6 +613,9 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
                  &connector->base,
                  dev->mode_config.tile_property,
                  0);
+    connector->colorspace_property = master->base.colorspace_property;
+    if (connector->colorspace_property)
+        drm_connector_attach_colorspace_property(connector);

     drm_connector_set_path_property(connector, pathprop);
@@ -3583,7 +3583,7 @@ struct atom_gpio_voltage_object_v4
    uint8_t  phase_delay_us;   // phase delay in unit of micro second
    uint8_t  reserved;
    uint32_t gpio_mask_val;    // GPIO Mask value
-   struct atom_voltage_gpio_map_lut voltage_gpio_lut[1];
+   struct atom_voltage_gpio_map_lut voltage_gpio_lut[] __counted_by(gpio_entry_num);
 };

 struct atom_svid2_voltage_object_v4
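Aside, for context (an illustrative sketch by the editor, not part of this commit): the atomfirmware change replaces a one-element array that is really a variable-length trailer with a flexible array member annotated with __counted_by(), which tells UBSAN and FORTIFY bounds checking how many elements are valid. A hedged sketch of the general pattern, with hypothetical struct names rather than the AMD header:

    #include <linux/overflow.h>     /* struct_size() */
    #include <linux/slab.h>

    struct map_entry {
            u32 val;
    };

    struct map_table {
            u8 gpio_entry_num;
            struct map_entry entries[] __counted_by(gpio_entry_num);
    };

    static struct map_table *map_table_alloc(u8 n)
    {
            /* struct_size() does the header + n * entry math without overflow */
            struct map_table *t = kzalloc(struct_size(t, entries, n), GFP_KERNEL);

            if (t)
                    t->gpio_entry_num = n;  /* set the count before indexing entries[] */
            return t;
    }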
@@ -1562,7 +1562,6 @@ static int smu_v14_0_2_i2c_control_init(struct smu_context *smu)
         smu_i2c->port = i;
         mutex_init(&smu_i2c->mutex);
         control->owner = THIS_MODULE;
-        control->class = I2C_CLASS_SPD;
         control->dev.parent = &adev->pdev->dev;
         control->algo = &smu_v14_0_2_i2c_algo;
         snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
@@ -239,7 +239,7 @@ int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
     if (size < chunk_size)
         return -EINVAL;

-    if (chunk_size < PAGE_SIZE)
+    if (chunk_size < SZ_4K)
         return -EINVAL;

     if (!is_power_of_2(chunk_size))
@@ -233,6 +233,8 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem)

     dma_resv_assert_held(shmem->base.resv);

+    drm_WARN_ON(shmem->base.dev, shmem->base.import_attach);
+
     ret = drm_gem_shmem_get_pages(shmem);

     return ret;
@@ -611,6 +613,9 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct
         return ret;
     }

+    if (is_cow_mapping(vma->vm_flags))
+        return -EINVAL;
+
     dma_resv_lock(shmem->base.resv, NULL);
     ret = drm_gem_shmem_get_pages(shmem);
     dma_resv_unlock(shmem->base.resv);
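Aside, for context (an illustrative note by the editor, not part of this commit): the new is_cow_mapping() check is what turns the old BUG_ON into a clean -EINVAL for copy-on-write mappings, whose pages would be duplicated on write and stop aliasing the GEM object's backing store. For reference, is_cow_mapping() in <linux/mm.h> is essentially:

    static inline bool is_cow_mapping(vm_flags_t flags)
    {
            /* a private mapping that may become writable => copy-on-write */
            return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
    }

So a MAP_PRIVATE userspace mapping of the GEM mmap offset now fails up front instead of blowing up later in the fault path.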
@@ -1252,17 +1252,6 @@ static const struct component_ops i915_audio_component_bind_ops = {
 static void i915_audio_component_init(struct drm_i915_private *i915)
 {
     u32 aud_freq, aud_freq_init;
-    int ret;
-
-    ret = component_add_typed(i915->drm.dev,
-                  &i915_audio_component_bind_ops,
-                  I915_COMPONENT_AUDIO);
-    if (ret < 0) {
-        drm_err(&i915->drm,
-            "failed to add audio component (%d)\n", ret);
-        /* continue with reduced functionality */
-        return;
-    }

     if (DISPLAY_VER(i915) >= 9) {
         aud_freq_init = intel_de_read(i915, AUD_FREQ_CNTRL);
@@ -1285,6 +1274,21 @@ static void i915_audio_component_init(struct drm_i915_private *i915)

     /* init with current cdclk */
     intel_audio_cdclk_change_post(i915);
 }

+static void i915_audio_component_register(struct drm_i915_private *i915)
+{
+    int ret;
+
+    ret = component_add_typed(i915->drm.dev,
+                  &i915_audio_component_bind_ops,
+                  I915_COMPONENT_AUDIO);
+    if (ret < 0) {
+        drm_err(&i915->drm,
+            "failed to add audio component (%d)\n", ret);
+        /* continue with reduced functionality */
+        return;
+    }
+
+    i915->display.audio.component_registered = true;
+}
@@ -1317,6 +1321,12 @@ void intel_audio_init(struct drm_i915_private *i915)
         i915_audio_component_init(i915);
 }

+void intel_audio_register(struct drm_i915_private *i915)
+{
+    if (!i915->display.audio.lpe.platdev)
+        i915_audio_component_register(i915);
+}
+
 /**
  * intel_audio_deinit() - deinitialize the audio driver
  * @i915: the i915 drm device private data
@@ -28,6 +28,7 @@ void intel_audio_codec_get_config(struct intel_encoder *encoder,
 void intel_audio_cdclk_change_pre(struct drm_i915_private *dev_priv);
 void intel_audio_cdclk_change_post(struct drm_i915_private *dev_priv);
 void intel_audio_init(struct drm_i915_private *dev_priv);
+void intel_audio_register(struct drm_i915_private *i915);
 void intel_audio_deinit(struct drm_i915_private *dev_priv);
 void intel_audio_sdp_split_update(const struct intel_crtc_state *crtc_state);
@@ -540,6 +540,8 @@ void intel_display_driver_register(struct drm_i915_private *i915)

     intel_display_driver_enable_user_access(i915);

+    intel_audio_register(i915);
+
     intel_display_debugfs_register(i915);

     /*
@@ -255,6 +255,7 @@ struct i915_execbuffer {
     struct intel_context *context; /* logical state for the request */
     struct i915_gem_context *gem_context; /** caller's context */
     intel_wakeref_t wakeref;
+    intel_wakeref_t wakeref_gt0;

     /** our requests to build */
     struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
@@ -2685,6 +2686,7 @@ static int
 eb_select_engine(struct i915_execbuffer *eb)
 {
     struct intel_context *ce, *child;
+    struct intel_gt *gt;
     unsigned int idx;
     int err;

@@ -2708,10 +2710,17 @@ eb_select_engine(struct i915_execbuffer *eb)
         }
     }
     eb->num_batches = ce->parallel.number_children + 1;
+    gt = ce->engine->gt;

     for_each_child(ce, child)
         intel_context_get(child);
     eb->wakeref = intel_gt_pm_get(ce->engine->gt);
+    /*
+     * Keep GT0 active on MTL so that i915_vma_parked() doesn't
+     * free VMAs while execbuf ioctl is validating VMAs.
+     */
+    if (gt->info.id)
+        eb->wakeref_gt0 = intel_gt_pm_get(to_gt(gt->i915));

     if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
         err = intel_context_alloc_state(ce);
@@ -2750,6 +2759,9 @@ eb_select_engine(struct i915_execbuffer *eb)
     return err;

 err:
+    if (gt->info.id)
+        intel_gt_pm_put(to_gt(gt->i915), eb->wakeref_gt0);
+
     intel_gt_pm_put(ce->engine->gt, eb->wakeref);
     for_each_child(ce, child)
         intel_context_put(child);
@@ -2763,6 +2775,12 @@ eb_put_engine(struct i915_execbuffer *eb)
     struct intel_context *child;

     i915_vm_put(eb->context->vm);
+    /*
+     * This works in conjunction with eb_select_engine() to prevent
+     * i915_vma_parked() from interfering while execbuf validates vmas.
+     */
+    if (eb->gt->info.id)
+        intel_gt_pm_put(to_gt(eb->gt->i915), eb->wakeref_gt0);
     intel_gt_pm_put(eb->context->engine->gt, eb->wakeref);
     for_each_child(eb->context, child)
         intel_context_put(child);
@@ -284,7 +284,9 @@ bool i915_gem_object_has_iomem(const struct drm_i915_gem_object *obj);
 static inline bool
 i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
 {
-    return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE);
+    /* TODO: make DPT shrinkable when it has no bound vmas */
+    return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE) &&
+           !obj->is_dpt;
 }

 static inline bool
@@ -196,7 +196,7 @@ static int verify_access(struct drm_i915_private *i915,
     if (err)
         goto out_file;

-    mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, true);
+    mode = intel_gt_coherent_map_type(to_gt(i915), native_obj, false);
     vaddr = i915_gem_object_pin_map_unlocked(native_obj, mode);
     if (IS_ERR(vaddr)) {
         err = PTR_ERR(vaddr);
@@ -263,8 +263,13 @@ static void signal_irq_work(struct irq_work *work)
         i915_request_put(rq);
     }

+    /* Lazy irq enabling after HW submission */
     if (!READ_ONCE(b->irq_armed) && !list_empty(&b->signalers))
         intel_breadcrumbs_arm_irq(b);
+
+    /* And confirm that we still want irqs enabled before we yield */
+    if (READ_ONCE(b->irq_armed) && !atomic_read(&b->active))
+        intel_breadcrumbs_disarm_irq(b);
 }

 struct intel_breadcrumbs *
@@ -315,13 +320,7 @@ void __intel_breadcrumbs_park(struct intel_breadcrumbs *b)
         return;

     /* Kick the work once more to drain the signalers, and disarm the irq */
-    irq_work_sync(&b->irq_work);
-    while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) {
-        local_irq_disable();
-        signal_irq_work(&b->irq_work);
-        local_irq_enable();
-        cond_resched();
-    }
+    irq_work_queue(&b->irq_work);
 }

 void intel_breadcrumbs_free(struct kref *kref)
@@ -404,7 +403,7 @@ static void insert_breadcrumb(struct i915_request *rq)
      * the request as it may have completed and raised the interrupt as
      * we were attaching it into the lists.
      */
-    if (!b->irq_armed || __i915_request_is_complete(rq))
+    if (!READ_ONCE(b->irq_armed) || __i915_request_is_complete(rq))
         irq_work_queue(&b->irq_work);
 }
@@ -885,6 +885,12 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
     if (IS_DG2(gt->i915)) {
         u8 first_ccs = __ffs(CCS_MASK(gt));

+        /*
+         * Store the number of active cslices before
+         * changing the CCS engine configuration
+         */
+        gt->ccs.cslices = CCS_MASK(gt);
+
         /* Mask off all the CCS engine */
         info->engine_mask &= ~GENMASK(CCS3, CCS0);
         /* Put back in the first CCS engine */
@@ -19,7 +19,7 @@ unsigned int intel_gt_apply_ccs_mode(struct intel_gt *gt)

     /* Build the value for the fixed CCS load balancing */
     for (cslice = 0; cslice < I915_MAX_CCS; cslice++) {
-        if (CCS_MASK(gt) & BIT(cslice))
+        if (gt->ccs.cslices & BIT(cslice))
             /*
              * If available, assign the cslice
              * to the first available engine...
@@ -207,6 +207,14 @@ struct intel_gt {
                         [MAX_ENGINE_INSTANCE + 1];
     enum intel_submission_method submission_method;

+    struct {
+        /*
+         * Mask of the non fused CCS slices
+         * to be used for the load balancing
+         */
+        intel_engine_mask_t cslices;
+    } ccs;
+
     /*
      * Default address space (either GGTT or ppGTT depending on arch).
      *
@@ -29,9 +29,9 @@
  */

 #define GUC_KLV_LEN_MIN             1u
-#define GUC_KLV_0_KEY               (0xffff << 16)
-#define GUC_KLV_0_LEN               (0xffff << 0)
-#define GUC_KLV_n_VALUE             (0xffffffff << 0)
+#define GUC_KLV_0_KEY               (0xffffu << 16)
+#define GUC_KLV_0_LEN               (0xffffu << 0)
+#define GUC_KLV_n_VALUE             (0xffffffffu << 0)

 /**
  * DOC: GuC Self Config KLVs
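Aside, for context (an illustrative sketch by the editor, not part of this commit): the 'u' suffixes matter because 0xffff and 0xffffffff are signed int constants, and shifting the signed 0xffff left by 16 overflows int's positive range, which newer compilers and FIELD_PREP()'s compile-time checks can warn about; with the suffix the shift is done on an unsigned type and is well-defined. A small illustration with hypothetical field names:

    #include <linux/bitfield.h>
    #include <linux/types.h>

    #define KEY_MASK    (0xffffu << 16)   /* unsigned: bits 31..16, no signed overflow */
    #define LEN_MASK    (0xffffu << 0)    /* unsigned: bits 15..0 */

    static u32 pack_klv_header(u16 key, u16 len)
    {
            /* FIELD_PREP() shifts each value into its mask's bit position */
            return FIELD_PREP(KEY_MASK, key) | FIELD_PREP(LEN_MASK, len);
    }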
@@ -185,7 +185,7 @@ static int lima_gem_pin(struct drm_gem_object *obj)
     if (bo->heap_size)
         return -EINVAL;

-    return drm_gem_shmem_pin(&bo->base);
+    return drm_gem_shmem_pin_locked(&bo->base);
 }

 static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map)
@@ -538,7 +538,7 @@ class Parser(object):
             self.variants.add(reg.domain)

     def do_validate(self, schemafile):
-        if self.validate == False:
+        if not self.validate:
             return

         try:
@@ -948,7 +948,8 @@ def main():
     parser = argparse.ArgumentParser()
     parser.add_argument('--rnn', type=str, required=True)
     parser.add_argument('--xml', type=str, required=True)
-    parser.add_argument('--validate', action=argparse.BooleanOptionalAction)
+    parser.add_argument('--validate', default=False, action='store_true')
+    parser.add_argument('--no-validate', dest='validate', action='store_false')

     subparsers = parser.add_subparsers()
     subparsers.required = True
@@ -142,11 +142,16 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
         struct nvif_ioctl_v0 ioctl;
         struct nvif_ioctl_mthd_v0 mthd;
     } *args;
+    u32 args_size;
     u8 stack[128];
     int ret;

-    if (sizeof(*args) + size > sizeof(stack)) {
-        if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
+    if (check_add_overflow(sizeof(*args), size, &args_size))
         return -ENOMEM;
+
+    if (args_size > sizeof(stack)) {
+        args = kmalloc(args_size, GFP_KERNEL);
+        if (!args)
+            return -ENOMEM;
     } else {
         args = (void *)stack;
@@ -157,7 +162,7 @@ nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
     args->mthd.method = mthd;

     memcpy(args->mthd.data, data, size);
-    ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
+    ret = nvif_object_ioctl(object, args, args_size, NULL);
     memcpy(data, args->mthd.data, size);
     if (args != (void *)stack)
         kfree(args);
@@ -276,7 +281,15 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
     object->map.size = 0;

     if (parent) {
-        if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL))) {
+        u32 args_size;
+
+        if (check_add_overflow(sizeof(*args), size, &args_size)) {
             nvif_object_dtor(object);
             return -ENOMEM;
         }
+
+        args = kmalloc(args_size, GFP_KERNEL);
+        if (!args) {
+            nvif_object_dtor(object);
+            return -ENOMEM;
+        }
@@ -293,8 +306,7 @@ nvif_object_ctor(struct nvif_object *parent, const char *name, u32 handle,
         args->new.oclass = oclass;

         memcpy(args->new.data, data, size);
-        ret = nvif_object_ioctl(parent, args, sizeof(*args) + size,
-                    &object->priv);
+        ret = nvif_object_ioctl(parent, args, args_size, &object->priv);
         memcpy(data, args->new.data, size);
         kfree(args);
         if (ret == 0)
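Aside, for context (an illustrative sketch by the editor, not part of this commit): check_add_overflow() from <linux/overflow.h> is the pattern both nvif hunks switch to; it returns true if the addition wrapped and otherwise stores the sum in its third argument, so an attacker-influenced size can no longer produce an undersized allocation. A minimal sketch of the same pattern with a hypothetical helper, not nouveau code:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    static void *alloc_args(size_t header, u32 payload, u32 *total_out)
    {
            u32 total;

            /* true means header + payload wrapped around the u32 range */
            if (check_add_overflow(header, payload, &total))
                    return NULL;

            *total_out = total;
            return kmalloc(total, GFP_KERNEL);
    }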
@@ -340,6 +340,8 @@ config DRM_PANEL_LG_SW43408
     depends on OF
     depends on DRM_MIPI_DSI
     depends on BACKLIGHT_CLASS_DEVICE
+    select DRM_DISPLAY_DP_HELPER
+    select DRM_DISPLAY_HELPER
     help
       Say Y here if you want to enable support for LG sw43408 panel.
       The panel has a 1080x2160@60Hz resolution and uses 24 bit RGB per
@@ -182,7 +182,7 @@ static int sw43408_backlight_update_status(struct backlight_device *bl)
     return mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
 }

-const struct backlight_ops sw43408_backlight_ops = {
+static const struct backlight_ops sw43408_backlight_ops = {
     .update_status = sw43408_backlight_update_status,
 };
@@ -282,15 +282,15 @@ static const struct drm_display_mode et028013dma_mode = {
 static const struct drm_display_mode jt240mhqs_hwt_ek_e3_mode = {
     .clock = 6000,
     .hdisplay = 240,
-    .hsync_start = 240 + 28,
-    .hsync_end = 240 + 28 + 10,
-    .htotal = 240 + 28 + 10 + 10,
+    .hsync_start = 240 + 38,
+    .hsync_end = 240 + 38 + 10,
+    .htotal = 240 + 38 + 10 + 10,
     .vdisplay = 280,
-    .vsync_start = 280 + 8,
-    .vsync_end = 280 + 8 + 4,
-    .vtotal = 280 + 8 + 4 + 4,
-    .width_mm = 43,
-    .height_mm = 37,
+    .vsync_start = 280 + 48,
+    .vsync_end = 280 + 48 + 4,
+    .vtotal = 280 + 48 + 4 + 4,
+    .width_mm = 37,
+    .height_mm = 43,
     .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
 };
@@ -192,7 +192,7 @@ static int panfrost_gem_pin(struct drm_gem_object *obj)
     if (bo->is_heap)
         return -EINVAL;

-    return drm_gem_shmem_pin(&bo->base);
+    return drm_gem_shmem_pin_locked(&bo->base);
 }

 static enum drm_gem_object_status panfrost_gem_status(struct drm_gem_object *obj)
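Aside, for context (an illustrative sketch by the editor, not part of this commit): the lima and panfrost hunks are the same fix; the GEM .pin callback runs with the object's dma_resv already held by its caller, so calling drm_gem_shmem_pin(), which takes that lock itself, can deadlock, and the _locked variant must be used instead. A hedged sketch of such a callback with a hypothetical driver name:

    #include <drm/drm_gem_shmem_helper.h>

    static int mydrv_gem_pin(struct drm_gem_object *obj)
    {
            struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);

            dma_resv_assert_held(obj->resv);        /* caller already holds the lock */
            return drm_gem_shmem_pin_locked(shmem); /* drm_gem_shmem_pin() would re-take it */
    }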
@@ -505,8 +505,8 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
      * Eventually we will have a fully 50% fragmented mm.
      */

-    mm_size = PAGE_SIZE << max_order;
-    KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+    mm_size = SZ_4K << max_order;
+    KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
                    "buddy_init failed\n");

     KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
@@ -520,7 +520,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
     }

     for (order = top; order--;) {
-        size = get_size(order, PAGE_SIZE);
+        size = get_size(order, mm.chunk_size);
         KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
                                     mm_size, size, size,
                                     &tmp, flags),
@@ -534,7 +534,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)
     }

     /* There should be one final page for this sub-allocation */
-    size = get_size(0, PAGE_SIZE);
+    size = get_size(0, mm.chunk_size);
     KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                 size, size, &tmp, flags),
                    "buddy_alloc hit -ENOMEM for hole\n");
@@ -544,7 +544,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)

     list_move_tail(&block->link, &holes);

-    size = get_size(top, PAGE_SIZE);
+    size = get_size(top, mm.chunk_size);
     KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                size, size, &tmp, flags),
                   "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
@@ -555,7 +555,7 @@ static void drm_test_buddy_alloc_pathological(struct kunit *test)

     /* Nothing larger than blocks of chunk_size now available */
     for (order = 1; order <= max_order; order++) {
-        size = get_size(order, PAGE_SIZE);
+        size = get_size(order, mm.chunk_size);
         KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                    size, size, &tmp, flags),
                       "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
@@ -584,14 +584,14 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
      * page left.
      */

-    mm_size = PAGE_SIZE << max_order;
-    KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+    mm_size = SZ_4K << max_order;
+    KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
                    "buddy_init failed\n");

     KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

     for (order = 0; order < max_order; order++) {
-        size = get_size(order, PAGE_SIZE);
+        size = get_size(order, mm.chunk_size);
         KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                     size, size, &tmp, flags),
                        "buddy_alloc hit -ENOMEM with order=%d\n",
@@ -604,7 +604,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
     }

     /* And now the last remaining block available */
-    size = get_size(0, PAGE_SIZE);
+    size = get_size(0, mm.chunk_size);
     KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                 size, size, &tmp, flags),
                    "buddy_alloc hit -ENOMEM on final alloc\n");
@@ -616,7 +616,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)

     /* Should be completely full! */
     for (order = max_order; order--;) {
-        size = get_size(order, PAGE_SIZE);
+        size = get_size(order, mm.chunk_size);
         KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                    size, size, &tmp, flags),
                       "buddy_alloc unexpectedly succeeded, it should be full!");
@@ -632,7 +632,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
     list_del(&block->link);
     drm_buddy_free_block(&mm, block);

-    size = get_size(order, PAGE_SIZE);
+    size = get_size(order, mm.chunk_size);
     KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                 size, size, &tmp, flags),
                    "buddy_alloc hit -ENOMEM with order=%d\n",
@@ -647,7 +647,7 @@ static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
     }

     /* To confirm, now the whole mm should be available */
-    size = get_size(max_order, PAGE_SIZE);
+    size = get_size(max_order, mm.chunk_size);
     KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                 size, size, &tmp, flags),
                    "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
@@ -678,15 +678,15 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
      * try to allocate them all.
      */

-    mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
+    mm_size = SZ_4K * ((1 << (max_order + 1)) - 1);

-    KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+    KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K),
                    "buddy_init failed\n");

     KUNIT_EXPECT_EQ(test, mm.max_order, max_order);

     for (order = 0; order <= max_order; order++) {
-        size = get_size(order, PAGE_SIZE);
+        size = get_size(order, mm.chunk_size);
         KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                     size, size, &tmp, flags),
                        "buddy_alloc hit -ENOMEM with order=%d\n",
@@ -699,7 +699,7 @@ static void drm_test_buddy_alloc_optimistic(struct kunit *test)
     }

     /* Should be completely full! */
-    size = get_size(0, PAGE_SIZE);
+    size = get_size(0, mm.chunk_size);
     KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
                                size, size, &tmp, flags),
                   "buddy_alloc unexpectedly succeeded, it should be full!");
@@ -716,7 +716,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
     LIST_HEAD(allocated);
     struct drm_buddy mm;

-    KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
+    KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K));

     KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
                 "mm.max_order(%d) != %d\n", mm.max_order,
@@ -724,7 +724,7 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)

     size = mm.chunk_size << mm.max_order;
     KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
-                            PAGE_SIZE, &allocated, flags));
+                            mm.chunk_size, &allocated, flags));

     block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
     KUNIT_EXPECT_TRUE(test, block);
@@ -734,10 +734,10 @@ static void drm_test_buddy_alloc_limit(struct kunit *test)
                 drm_buddy_block_order(block), mm.max_order);

     KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
-                BIT_ULL(mm.max_order) * PAGE_SIZE,
+                BIT_ULL(mm.max_order) * mm.chunk_size,
                 "block size(%llu) != %llu\n",
                 drm_buddy_block_size(&mm, block),
-                BIT_ULL(mm.max_order) * PAGE_SIZE);
+                BIT_ULL(mm.max_order) * mm.chunk_size);

     drm_buddy_free_list(&mm, &allocated, 0);
     drm_buddy_fini(&mm);
@@ -1240,6 +1240,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
     return 0;

 err_entity:
+    mutex_unlock(&guc->submission_state.lock);
     xe_sched_entity_fini(&ge->entity);
 err_sched:
     xe_sched_fini(&ge->sched);
@@ -34,7 +34,6 @@
 #include "xe_sync.h"
 #include "xe_trace.h"
 #include "xe_vm.h"
-#include "xe_wa.h"

 /**
  * struct xe_migrate - migrate context.
@@ -300,10 +299,6 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 }

 /*
- * Due to workaround 16017236439, odd instance hardware copy engines are
- * faster than even instance ones.
- * This function returns the mask involving all fast copy engines and the
- * reserved copy engine to be used as logical mask for migrate engine.
  * Including the reserved copy engine is required to avoid deadlocks due to
  * migrate jobs servicing the faults gets stuck behind the job that faulted.
  */
@@ -317,8 +312,7 @@ static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
         if (hwe->class != XE_ENGINE_CLASS_COPY)
             continue;

-        if (!XE_WA(gt, 16017236439) ||
-            xe_gt_is_usm_hwe(gt, hwe) || hwe->instance & 1)
+        if (xe_gt_is_usm_hwe(gt, hwe))
             logical_mask |= BIT(hwe->logical_instance);
     }

@@ -369,6 +363,10 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
     if (!hwe || !logical_mask)
         return ERR_PTR(-EINVAL);

+    /*
+     * XXX: Currently only reserving 1 (likely slow) BCS instance on
+     * PVC, may want to revisit if performance is needed.
+     */
     m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
                     EXEC_QUEUE_FLAG_KERNEL |
                     EXEC_QUEUE_FLAG_PERMANENT |
@@ -191,7 +191,7 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
     drm_WARN_ON_ONCE(&gt_to_xe(gt)->drm, timeout_base_ms > 1);
     preempt_disable();
     ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
-                true, timeout_base_ms * 1000, true);
+                true, 50 * 1000, true);
     preempt_enable();

 out:
@@ -56,8 +56,8 @@ struct drm_buddy_block {
     struct list_head tmp_link;
 };

-/* Order-zero must be at least PAGE_SIZE */
-#define DRM_BUDDY_MAX_ORDER (63 - PAGE_SHIFT)
+/* Order-zero must be at least SZ_4K */
+#define DRM_BUDDY_MAX_ORDER (63 - 12)

 /*
  * Binary Buddy System.
@@ -85,7 +85,7 @@ struct drm_buddy {
     unsigned int n_roots;
     unsigned int max_order;

-    /* Must be at least PAGE_SIZE */
+    /* Must be at least SZ_4K */
     u64 chunk_size;
     u64 size;
     u64 avail;