commit c0f98d2f8b
Merge tag 'drm-misc-next-2020-11-05' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 5.11:

UAPI Changes:

Cross-subsystem Changes:
- arch/arm64: Describe G12b GPU as coherent
- iommu: Support coherency for Mali LPAE

Core Changes:
- atomic: Pass full state to CRTC atomic_{check,begin,flush}(); Use atomic-state pointers
- drm: Remove SCATTERLIST_MAX_SEGMENT; Cleanups
- doc: Document legacy_cursor_update better; Cleanups
- edid: Don't warn on EDIDs of zero
- ttm: New backend allocation pool; Remove old page allocator; Rework no_retry handling; Replace flags with booleans in struct ttm_operation_ctx
- vram-helper: Cleanups
- fbdev: Cleanups
- console: Store font size as unsigned value

Driver Changes:
- ast: Support new display mode
- amdgpu: Switch to new TTM allocator
- hisilicon: Cleanups
- nouveau: Switch to new TTM allocator; Fix include of swiotlb.h and limits.h; Use state helper instead of CRTC state pointer
- panfrost: Support cache-coherent integrations; Fix mutex corruption on open/close; Cleanups
- qxl: Cleanups
- radeon: Switch to new TTM allocator
- tilcdc: Fix build failure
- vmwgfx: Switch to new TTM allocator
- xlnx: Use dma_request_chan
- fbdev/sh_mobile: Cleanups

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20201105101641.GA13099@linux-uq9g
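For background on the conversion that dominates this pull: CRTC helper callbacks
used to receive their own struct drm_crtc_state and now receive the full
struct drm_atomic_state, from which the old or new CRTC state is looked up
explicitly. A minimal sketch of the pattern, assuming a driver of our own --
the foo_* names are hypothetical; only the drm_atomic_get_*_crtc_state()
helpers are the real API used in the hunks below:

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_atomic_state *state)
{
	/* Look up this CRTC's new state from the global transaction. */
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->enable)
		return 0;

	/* ... validate crtc_state->mode, attached planes, etc. ... */
	return 0;
}

static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	/* The old state is fetched the same way when a callback needs it. */
	struct drm_crtc_state *old_crtc_state =
		drm_atomic_get_old_crtc_state(state, crtc);

	(void)old_crtc_state; /* program hardware from the new state */
}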
@@ -105,6 +105,10 @@ converted over to the new infrastructure.
 One issue with the helpers is that they require that drivers handle completion
 events for atomic commits correctly. But fixing these bugs is good anyway.
 
+Somewhat related is the legacy_cursor_update hack, which should be replaced with
+the new atomic_async_check/commit functionality in the helpers in drivers that
+still look at that flag.
+
 Contact: Daniel Vetter, respective driver maintainers
 
 Level: Advanced
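For reference, the async path mentioned in this TODO entry lives in the plane
helpers. A driver that still relies on legacy_cursor_update would instead wire
up the two async hooks; a minimal sketch, assuming a hypothetical bar_* driver
(the vtable fields are the real drm_plane_helper_funcs members of this era):

static int bar_plane_atomic_async_check(struct drm_plane *plane,
					struct drm_plane_state *state)
{
	/* Reject anything the hardware cannot flip without a full commit. */
	return 0;
}

static void bar_plane_atomic_async_update(struct drm_plane *plane,
					  struct drm_plane_state *new_state)
{
	/* Update scanout address/position directly, without a vblank wait. */
}

static const struct drm_plane_helper_funcs bar_plane_helper_funcs = {
	/* ... regular .atomic_check/.atomic_update hooks ... */
	.atomic_async_check = bar_plane_atomic_async_check,
	.atomic_async_update = bar_plane_atomic_async_update,
};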
@@ -135,3 +135,7 @@
 };
 };
 };
+
+&mali {
+	dma-coherent;
+};
@@ -182,13 +182,6 @@ config DRM_TTM
 	  GPU memory types. Will be enabled automatically if a device driver
 	  uses it.
 
-config DRM_TTM_DMA_PAGE_POOL
-	bool
-	depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU)
-	default y
-	help
-	  Choose this if you need the TTM dma page pool
-
 config DRM_VRAM_HELPER
 	tristate
 	depends on DRM
@@ -404,8 +404,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
 	struct ttm_operation_ctx ctx = {
 		.interruptible = true,
 		.no_wait_gpu = false,
-		.resv = bo->tbo.base.resv,
-		.flags = 0
+		.resv = bo->tbo.base.resv
 	};
 	uint32_t domain;
 	int r;
@@ -516,9 +516,10 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	struct ttm_operation_ctx ctx = {
 		.interruptible = (bp->type != ttm_bo_type_kernel),
 		.no_wait_gpu = bp->no_wait_gpu,
-		.resv = bp->resv,
-		.flags = bp->type != ttm_bo_type_kernel ?
-			TTM_OPT_FLAG_ALLOW_RES_EVICT : 0
+		/* We opt to avoid OOM on system pages allocations */
+		.gfp_retry_mayfail = true,
+		.allow_res_evict = bp->type != ttm_bo_type_kernel,
+		.resv = bp->resv
 	};
 	struct amdgpu_bo *bo;
 	unsigned long page_align, size = bp->size;
@@ -47,7 +47,6 @@
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
 
 #include <drm/drm_debugfs.h>
 #include <drm/amdgpu_drm.h>
@@ -1389,15 +1388,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
 		return 0;
 	}
 
-#ifdef CONFIG_SWIOTLB
-	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
-		return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
-	}
-#endif
-
-	/* fall back to generic helper to populate the page array
-	 * and map them to the device */
-	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
+	return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
 }
 
 /**
@@ -1406,7 +1397,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
  * Unmaps pages of a ttm_tt object from the device address space and
  * unpopulates the page array backing it.
  */
-static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+				     struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	struct amdgpu_device *adev;
@@ -1431,16 +1423,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *
 		return;
 
 	adev = amdgpu_ttm_adev(bdev);
 
-#ifdef CONFIG_SWIOTLB
-	if (adev->need_swiotlb && swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
-		return;
-	}
-#endif
-
-	/* fall back to generic helper to unmap and unpopulate array */
-	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
+	return ttm_pool_free(&adev->mman.bdev.pool, ttm);
 }
 
 /**
@@ -1920,10 +1903,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	mutex_init(&adev->mman.gtt_window_lock);
 
 	/* No others user of address space so set it to 0 */
-	r = ttm_bo_device_init(&adev->mman.bdev,
-			       &amdgpu_bo_driver,
+	r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
 			       adev_to_drm(adev)->anon_inode->i_mapping,
 			       adev_to_drm(adev)->vma_offset_manager,
+			       adev->need_swiotlb,
 			       dma_addressing_limited(adev->dev));
 	if (r) {
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
@@ -1931,9 +1914,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 	}
 	adev->mman.initialized = true;
 
-	/* We opt to avoid OOM on system pages allocations */
-	adev->mman.bdev.no_retry = true;
-
 	/* Initialize VRAM pool with all of VRAM divided into pages */
 	r = amdgpu_vram_mgr_init(adev);
 	if (r) {
@@ -2353,16 +2333,22 @@ static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct amdgpu_device *adev = drm_to_adev(dev);
+
+	return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
+}
+
 static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
 	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
 	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
 	{"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
 	{"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
 	{"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
-	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
-#ifdef CONFIG_SWIOTLB
-	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
-#endif
+	{"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
 };
 
 /**
@@ -2655,12 +2641,6 @@ int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 	}
 
 	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
-
-#ifdef CONFIG_SWIOTLB
-	if (!(adev->need_swiotlb && swiotlb_nr_tbl()))
-		--count;
-#endif
-
 	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
 #else
 	return 0;
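The same TTM allocator switch is repeated below for nouveau and radeon. The
recurring shape of the change, as a hedged sketch with a hypothetical foo_*
driver (ttm_pool_alloc()/ttm_pool_free() are the real new-pool entry points):

static int foo_ttm_tt_populate(struct ttm_bo_device *bdev,
			       struct ttm_tt *ttm,
			       struct ttm_operation_ctx *ctx)
{
	struct foo_device *fdev = foo_bdev(bdev);

	/* One call replaces the AGP/SWIOTLB/generic special cases. */
	return ttm_pool_alloc(&fdev->bdev.pool, ttm, ctx);
}

static void foo_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
				  struct ttm_tt *ttm)
{
	struct foo_device *fdev = foo_bdev(bdev);

	ttm_pool_free(&fdev->bdev.pool, ttm);
}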
@@ -5514,17 +5514,19 @@ static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
 }
 
 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
-				       struct drm_crtc_state *state)
+				       struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
 	struct dc *dc = adev->dm.dc;
-	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(state);
+	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
 	int ret = -EINVAL;
 
-	dm_update_crtc_active_planes(crtc, state);
+	dm_update_crtc_active_planes(crtc, crtc_state);
 
 	if (unlikely(!dm_crtc_state->stream &&
-		     modeset_required(state, NULL, dm_crtc_state->stream))) {
+		     modeset_required(crtc_state, NULL, dm_crtc_state->stream))) {
 		WARN_ON(1);
 		return ret;
 	}
@@ -5535,8 +5537,8 @@ static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
 	 * planes are disabled, which is not supported by the hardware. And there is legacy
 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
 	 */
-	if (state->enable &&
-	    !(state->plane_mask & drm_plane_mask(crtc->primary)))
+	if (crtc_state->enable &&
+	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary)))
 		return -EINVAL;
 
 	/* In some use cases, like reset, no stream is attached */
@@ -74,16 +74,18 @@ static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st)
  */
 static int
 komeda_crtc_atomic_check(struct drm_crtc *crtc,
-			 struct drm_crtc_state *state)
+			 struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct komeda_crtc *kcrtc = to_kcrtc(crtc);
-	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(state);
+	struct komeda_crtc_state *kcrtc_st = to_kcrtc_st(crtc_state);
 	int err;
 
-	if (drm_atomic_crtc_needs_modeset(state))
+	if (drm_atomic_crtc_needs_modeset(crtc_state))
 		komeda_crtc_update_clock_ratio(kcrtc_st);
 
-	if (state->active) {
+	if (crtc_state->active) {
 		err = komeda_build_display_data_flow(kcrtc, kcrtc_st);
 		if (err)
 			return err;
@@ -383,8 +385,10 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc,
 
 static void
 komeda_crtc_atomic_flush(struct drm_crtc *crtc,
-			 struct drm_crtc_state *old)
+			 struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *old = drm_atomic_get_old_crtc_state(state,
+								   crtc);
 	/* commit with modeset will be handled in enable/disable */
 	if (drm_atomic_crtc_needs_modeset(crtc->state))
 		return;
@@ -205,7 +205,7 @@ static enum drm_mode_status hdlcd_crtc_mode_valid(struct drm_crtc *crtc,
 }
 
 static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
-				    struct drm_crtc_state *state)
+				    struct drm_atomic_state *state)
 {
 	struct drm_pending_vblank_event *event = crtc->state->event;
 
@@ -337,8 +337,10 @@ mclk_calc:
 }
 
 static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
-				    struct drm_crtc_state *state)
+				    struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
 	struct malidp_hw_device *hwdev = malidp->dev;
 	struct drm_plane *plane;
@@ -373,7 +375,7 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
 	 */
 
 	/* first count the number of rotated planes */
-	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
 		struct drm_framebuffer *fb = pstate->fb;
 
 		if ((pstate->rotation & MALIDP_ROTATED_MASK) || fb->modifier)
@@ -389,7 +391,7 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
 		rot_mem_free += hwdev->rotation_memory[1];
 
 	/* now validate the rotation memory requirements */
-	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
 		struct malidp_plane *mp = to_malidp_plane(plane);
 		struct malidp_plane_state *ms = to_malidp_plane_state(pstate);
 		struct drm_framebuffer *fb = pstate->fb;
@@ -417,18 +419,18 @@ static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
 	}
 
 	/* If only the writeback routing has changed, we don't need a modeset */
-	if (state->connectors_changed) {
+	if (crtc_state->connectors_changed) {
 		u32 old_mask = crtc->state->connector_mask;
-		u32 new_mask = state->connector_mask;
+		u32 new_mask = crtc_state->connector_mask;
 
 		if ((old_mask ^ new_mask) ==
 		    (1 << drm_connector_index(&malidp->mw_connector.base)))
-			state->connectors_changed = false;
+			crtc_state->connectors_changed = false;
 	}
 
-	ret = malidp_crtc_atomic_check_gamma(crtc, state);
-	ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, state);
-	ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, state);
+	ret = malidp_crtc_atomic_check_gamma(crtc, crtc_state);
+	ret = ret ? ret : malidp_crtc_atomic_check_ctm(crtc, crtc_state);
+	ret = ret ? ret : malidp_crtc_atomic_check_scaling(crtc, crtc_state);
 
 	return ret;
 }
@@ -413,21 +413,23 @@ static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
 }
 
 static int armada_drm_crtc_atomic_check(struct drm_crtc *crtc,
-					struct drm_crtc_state *state)
+					struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
 
-	if (state->gamma_lut && drm_color_lut_size(state->gamma_lut) != 256)
+	if (crtc_state->gamma_lut && drm_color_lut_size(crtc_state->gamma_lut) != 256)
 		return -EINVAL;
 
-	if (state->color_mgmt_changed)
-		state->planes_changed = true;
+	if (crtc_state->color_mgmt_changed)
+		crtc_state->planes_changed = true;
 
 	return 0;
 }
 
 static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
-					 struct drm_crtc_state *old_crtc_state)
+					 struct drm_atomic_state *state)
 {
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 
@@ -441,7 +443,7 @@ static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
 }
 
 static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
-					 struct drm_crtc_state *old_crtc_state)
+					 struct drm_atomic_state *state)
 {
 	struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
 
@@ -751,24 +751,26 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode)
 }
 
 static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
-					struct drm_crtc_state *state)
+					struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct drm_device *dev = crtc->dev;
 	struct ast_crtc_state *ast_state;
 	const struct drm_format_info *format;
 	bool succ;
 
-	if (!state->enable)
+	if (!crtc_state->enable)
 		return 0; /* no mode checks if CRTC is being disabled */
 
-	ast_state = to_ast_crtc_state(state);
+	ast_state = to_ast_crtc_state(crtc_state);
 
 	format = ast_state->format;
 	if (drm_WARN_ON_ONCE(dev, !format))
 		return -EINVAL; /* BUG: We didn't set format in primary check(). */
 
-	succ = ast_get_vbios_mode_info(format, &state->mode,
-				       &state->adjusted_mode,
+	succ = ast_get_vbios_mode_info(format, &crtc_state->mode,
+				       &crtc_state->adjusted_mode,
 				       &ast_state->vbios_mode_info);
 	if (!succ)
 		return -EINVAL;
@@ -777,8 +779,11 @@ static int ast_crtc_helper_atomic_check(struct drm_crtc *crtc,
 }
 
 static void
-ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state)
+ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
+			     struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+									      crtc);
 	struct ast_private *ast = to_ast_private(crtc->dev);
 	struct ast_crtc_state *ast_crtc_state = to_ast_crtc_state(crtc->state);
 	struct ast_crtc_state *old_ast_crtc_state = to_ast_crtc_state(old_crtc_state);
@@ -282,6 +282,8 @@ static const struct ast_vbios_enhtable res_1360x768[] = {
 };
 
 static const struct ast_vbios_enhtable res_1600x900[] = {
+	{1800, 1600, 24, 80, 1000, 900, 1, 3, VCLK108,	/* 60Hz */
+	 (SyncPP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo), 60, 3, 0x3A },
 	{1760, 1600, 48, 32, 926, 900, 3, 5, VCLK97_75,	/* 60Hz CVT RB */
 	 (SyncNP | Charx8Dot | LineCompareOff | WideScreenMode | NewModeInfo |
 	  AST2500PreCatchCRT), 60, 1, 0x3A },
@@ -325,8 +325,9 @@ static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
 }
 
 static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
-					 struct drm_crtc_state *s)
+					 struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *s = drm_atomic_get_new_crtc_state(state, c);
 	int ret;
 
 	ret = atmel_hlcdc_crtc_select_output_mode(s);
@@ -341,7 +342,7 @@ static int atmel_hlcdc_crtc_atomic_check(struct drm_crtc *c,
 }
 
 static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
-					  struct drm_crtc_state *old_s)
+					  struct drm_atomic_state *state)
 {
 	struct atmel_hlcdc_crtc *crtc = drm_crtc_to_atmel_hlcdc_crtc(c);
 
@@ -356,7 +357,7 @@ static void atmel_hlcdc_crtc_atomic_begin(struct drm_crtc *c,
 }
 
 static void atmel_hlcdc_crtc_atomic_flush(struct drm_crtc *crtc,
-					  struct drm_crtc_state *old_s)
+					  struct drm_atomic_state *state)
 {
 	/* TODO: write common plane control register if available */
 }
@@ -918,7 +918,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
 		if (!funcs || !funcs->atomic_check)
 			continue;
 
-		ret = funcs->atomic_check(crtc, new_crtc_state);
+		ret = funcs->atomic_check(crtc, state);
 		if (ret) {
 			DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
 					 crtc->base.id, crtc->name);
@@ -2521,7 +2521,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 		if (active_only && !new_crtc_state->active)
 			continue;
 
-		funcs->atomic_begin(crtc, old_crtc_state);
+		funcs->atomic_begin(crtc, old_state);
 	}
 
 	for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
@@ -2579,7 +2579,7 @@ void drm_atomic_helper_commit_planes(struct drm_device *dev,
 		if (active_only && !new_crtc_state->active)
 			continue;
 
-		funcs->atomic_flush(crtc, old_crtc_state);
+		funcs->atomic_flush(crtc, old_state);
 	}
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
@@ -2617,7 +2617,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
 
 	crtc_funcs = crtc->helper_private;
 	if (crtc_funcs && crtc_funcs->atomic_begin)
-		crtc_funcs->atomic_begin(crtc, old_crtc_state);
+		crtc_funcs->atomic_begin(crtc, old_state);
 
 	drm_for_each_plane_mask(plane, crtc->dev, plane_mask) {
 		struct drm_plane_state *old_plane_state =
@@ -2643,7 +2643,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
 	}
 
 	if (crtc_funcs && crtc_funcs->atomic_flush)
-		crtc_funcs->atomic_flush(crtc, old_crtc_state);
+		crtc_funcs->atomic_flush(crtc, old_state);
 }
 EXPORT_SYMBOL(drm_atomic_helper_commit_planes_on_crtc);
 
@@ -241,7 +241,7 @@ static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector,
 		goto no_edid;
 
 	edid = bridge->funcs->get_edid(bridge, connector);
-	if (!edid || !drm_edid_is_valid(edid)) {
+	if (!drm_edid_is_valid(edid)) {
 		kfree(edid);
 		goto no_edid;
 	}
@@ -97,12 +97,12 @@
  * &drm_plane specific COLOR_ENCODING and COLOR_RANGE properties. They
  * are set up by calling drm_plane_create_color_properties().
  *
- * "COLOR_ENCODING"
+ * "COLOR_ENCODING":
  *	Optional plane enum property to support different non RGB
  *	color encodings. The driver can provide a subset of standard
  *	enum values supported by the DRM plane.
  *
- * "COLOR_RANGE"
+ * "COLOR_RANGE":
  *	Optional plane enum property to support different non RGB
  *	color parameter ranges. The driver can provide a subset of
  *	standard enum values supported by the DRM plane.
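The setup call referenced in this documentation hunk takes bitmasks of
supported enum values plus defaults. A hedged example of how a driver might
advertise both properties -- the plane pointer and the chosen subsets are
illustrative; the function and enum names are the real DRM API:

	/* Expose BT.601/BT.709 encodings and both ranges on a YUV plane. */
	drm_plane_create_color_properties(plane,
			BIT(DRM_COLOR_YCBCR_BT601) |
			BIT(DRM_COLOR_YCBCR_BT709),
			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
			DRM_COLOR_YCBCR_BT709,
			DRM_COLOR_YCBCR_LIMITED_RANGE);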
@@ -1844,7 +1844,7 @@ static void connector_bad_edid(struct drm_connector *connector,
 	if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
 		return;
 
-	drm_warn(connector->dev, "%s: EDID is invalid:\n", connector->name);
+	drm_dbg_kms(connector->dev, "%s: EDID is invalid:\n", connector->name);
 	for (i = 0; i < num_blocks; i++) {
 		u8 *block = edid + i * EDID_LENGTH;
 		char prefix[20];
@@ -1856,7 +1856,7 @@ static void connector_bad_edid(struct drm_connector *connector,
 		else
 			sprintf(prefix, "\t[%02x] GOOD ", i);
 
-		print_hex_dump(KERN_WARNING,
+		print_hex_dump(KERN_DEBUG,
 			       prefix, DUMP_PREFIX_NONE, 16, 1,
 			       block, EDID_LENGTH, false);
 	}
@@ -15,7 +15,6 @@
 #include <drm/drm_plane.h>
 #include <drm/drm_prime.h>
 #include <drm/drm_simple_kms_helper.h>
-#include <drm/ttm/ttm_page_alloc.h>
 
 static const struct drm_gem_object_funcs drm_gem_vram_object_funcs;
 
@@ -1045,10 +1044,10 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
 	vmm->vram_base = vram_base;
 	vmm->vram_size = vram_size;
 
-	ret = ttm_bo_device_init(&vmm->bdev, &bo_driver,
+	ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev,
 				 dev->anon_inode->i_mapping,
 				 dev->vma_offset_manager,
-				 true);
+				 false, true);
 	if (ret)
 		return ret;
 
@@ -820,8 +820,8 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
 
 	if (dev)
 		max_segment = dma_max_mapping_size(dev->dev);
-	if (max_segment == 0 || max_segment > SCATTERLIST_MAX_SEGMENT)
-		max_segment = SCATTERLIST_MAX_SEGMENT;
+	if (max_segment == 0)
+		max_segment = UINT_MAX;
 	sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0,
 					  nr_pages << PAGE_SHIFT,
 					  max_segment,
@@ -86,16 +86,18 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
 }
 
 static int drm_simple_kms_crtc_check(struct drm_crtc *crtc,
-				     struct drm_crtc_state *state)
+				     struct drm_atomic_state *state)
 {
-	bool has_primary = state->plane_mask &
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
+	bool has_primary = crtc_state->plane_mask &
 			   drm_plane_mask(crtc->primary);
 
 	/* We always want to have an active plane with an active CRTC */
-	if (has_primary != state->enable)
+	if (has_primary != crtc_state->enable)
 		return -EINVAL;
 
-	return drm_atomic_add_affected_planes(state->state, crtc);
+	return drm_atomic_add_affected_planes(state, crtc);
 }
 
 static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc,
@@ -49,21 +49,23 @@ static void exynos_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static int exynos_crtc_atomic_check(struct drm_crtc *crtc,
-				     struct drm_crtc_state *state)
+				     struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
-	if (!state->enable)
+	if (!crtc_state->enable)
 		return 0;
 
 	if (exynos_crtc->ops->atomic_check)
-		return exynos_crtc->ops->atomic_check(exynos_crtc, state);
+		return exynos_crtc->ops->atomic_check(exynos_crtc, crtc_state);
 
 	return 0;
 }
 
 static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
-				      struct drm_crtc_state *old_crtc_state)
+				      struct drm_atomic_state *state)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
@@ -72,7 +74,7 @@ static void exynos_crtc_atomic_begin(struct drm_crtc *crtc,
 }
 
 static void exynos_crtc_atomic_flush(struct drm_crtc *crtc,
-				      struct drm_crtc_state *old_crtc_state)
+				      struct drm_atomic_state *state)
 {
 	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
 
@@ -21,7 +21,7 @@
 #include "fsl_dcu_drm_plane.h"
 
 static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
-					  struct drm_crtc_state *old_crtc_state)
+					  struct drm_atomic_state *state)
 {
 	struct drm_device *dev = crtc->dev;
 	struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
@@ -139,7 +139,7 @@ static const u32 channel_formats1[] = {
 	DRM_FORMAT_ABGR8888
 };
 
-static struct drm_plane_funcs hibmc_plane_funcs = {
+static const struct drm_plane_funcs hibmc_plane_funcs = {
 	.update_plane	= drm_atomic_helper_update_plane,
 	.disable_plane	= drm_atomic_helper_disable_plane,
 	.destroy = drm_plane_cleanup,
@@ -390,7 +390,7 @@ static void hibmc_crtc_mode_set_nofb(struct drm_crtc *crtc)
 }
 
 static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc,
-				    struct drm_crtc_state *old_state)
+				    struct drm_atomic_state *state)
 {
 	u32 reg;
 	struct drm_device *dev = crtc->dev;
@@ -410,7 +410,7 @@ static void hibmc_crtc_atomic_begin(struct drm_crtc *crtc,
 }
 
 static void hibmc_crtc_atomic_flush(struct drm_crtc *crtc,
-				    struct drm_crtc_state *old_state)
+				    struct drm_atomic_state *state)
 
 {
 	unsigned long flags;
@@ -369,7 +369,7 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
 	drm_dev_put(dev);
 }
 
-static struct pci_device_id hibmc_pci_table[] = {
+static const struct pci_device_id hibmc_pci_table[] = {
 	{ PCI_VDEVICE(HUAWEI, 0x1711) },
 	{0,}
 };
@@ -485,7 +485,7 @@ static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
 }
 
 static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
-				  struct drm_crtc_state *old_state)
+				  struct drm_atomic_state *state)
 {
 	struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
 	struct ade_hw_ctx *ctx = kcrtc->hw_ctx;
@@ -498,7 +498,7 @@ static void ade_crtc_atomic_begin(struct drm_crtc *crtc,
 }
 
 static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
-				  struct drm_crtc_state *old_state)
+				  struct drm_atomic_state *state)
 
 {
 	struct kirin_crtc *kcrtc = to_kirin_crtc(crtc);
@@ -112,7 +112,7 @@ static inline unsigned int i915_sg_segment_size(void)
 	unsigned int size = swiotlb_max_segment();
 
 	if (size == 0)
-		return SCATTERLIST_MAX_SEGMENT;
+		size = UINT_MAX;
 
 	size = rounddown(size, PAGE_SIZE);
 	/* swiotlb_max_segment_size can return 1 byte when it means one page. */
@@ -53,13 +53,13 @@ static const struct drm_crtc_funcs dcss_crtc_funcs = {
 };
 
 static void dcss_crtc_atomic_begin(struct drm_crtc *crtc,
-				   struct drm_crtc_state *old_crtc_state)
+				   struct drm_atomic_state *state)
 {
 	drm_crtc_vblank_on(crtc);
 }
 
 static void dcss_crtc_atomic_flush(struct drm_crtc *crtc,
-				   struct drm_crtc_state *old_crtc_state)
+				   struct drm_atomic_state *state)
 {
 	struct dcss_crtc *dcss_crtc = container_of(crtc, struct dcss_crtc,
 						   base);
@@ -227,24 +227,26 @@ static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
 }
 
 static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
-				 struct drm_crtc_state *state)
+				 struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	u32 primary_plane_mask = drm_plane_mask(crtc->primary);
 
-	if (state->active && (primary_plane_mask & state->plane_mask) == 0)
+	if (crtc_state->active && (primary_plane_mask & crtc_state->plane_mask) == 0)
 		return -EINVAL;
 
 	return 0;
 }
 
 static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
-				  struct drm_crtc_state *old_crtc_state)
+				  struct drm_atomic_state *state)
 {
 	drm_crtc_vblank_on(crtc);
 }
 
 static void ipu_crtc_atomic_flush(struct drm_crtc *crtc,
-				  struct drm_crtc_state *old_crtc_state)
+				  struct drm_atomic_state *state)
 {
 	spin_lock_irq(&crtc->dev->event_lock);
 	if (crtc->state->event) {
@@ -239,28 +239,33 @@ static void ingenic_drm_crtc_update_timings(struct ingenic_drm *priv,
 }
 
 static int ingenic_drm_crtc_atomic_check(struct drm_crtc *crtc,
-					 struct drm_crtc_state *state)
+					 struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
 	struct drm_plane_state *f1_state, *f0_state, *ipu_state = NULL;
 
-	if (state->gamma_lut &&
-	    drm_color_lut_size(state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) {
+	if (crtc_state->gamma_lut &&
+	    drm_color_lut_size(crtc_state->gamma_lut) != ARRAY_SIZE(priv->dma_hwdescs->palette)) {
 		dev_dbg(priv->dev, "Invalid palette size\n");
 		return -EINVAL;
 	}
 
-	if (drm_atomic_crtc_needs_modeset(state) && priv->soc_info->has_osd) {
-		f1_state = drm_atomic_get_plane_state(state->state, &priv->f1);
+	if (drm_atomic_crtc_needs_modeset(crtc_state) && priv->soc_info->has_osd) {
+		f1_state = drm_atomic_get_plane_state(crtc_state->state,
+						      &priv->f1);
 		if (IS_ERR(f1_state))
 			return PTR_ERR(f1_state);
 
-		f0_state = drm_atomic_get_plane_state(state->state, &priv->f0);
+		f0_state = drm_atomic_get_plane_state(crtc_state->state,
+						      &priv->f0);
 		if (IS_ERR(f0_state))
 			return PTR_ERR(f0_state);
 
 		if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && priv->ipu_plane) {
-			ipu_state = drm_atomic_get_plane_state(state->state, priv->ipu_plane);
+			ipu_state = drm_atomic_get_plane_state(crtc_state->state,
+							       priv->ipu_plane);
 			if (IS_ERR(ipu_state))
 				return PTR_ERR(ipu_state);
 
@@ -298,7 +303,7 @@ ingenic_drm_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode
 }
 
 static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc,
-					  struct drm_crtc_state *oldstate)
+					  struct drm_atomic_state *state)
 {
 	struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
 	u32 ctrl = 0;
@@ -318,26 +323,27 @@ static void ingenic_drm_crtc_atomic_begin(struct drm_crtc *crtc,
 }
 
 static void ingenic_drm_crtc_atomic_flush(struct drm_crtc *crtc,
-					  struct drm_crtc_state *oldstate)
+					  struct drm_atomic_state *state)
 {
 	struct ingenic_drm *priv = drm_crtc_get_priv(crtc);
-	struct drm_crtc_state *state = crtc->state;
-	struct drm_pending_vblank_event *event = state->event;
+	struct drm_crtc_state *crtc_state = crtc->state;
+	struct drm_pending_vblank_event *event = crtc_state->event;
 
-	if (drm_atomic_crtc_needs_modeset(state)) {
-		ingenic_drm_crtc_update_timings(priv, &state->mode);
+	if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+		ingenic_drm_crtc_update_timings(priv, &crtc_state->mode);
 		priv->update_clk_rate = true;
 	}
 
 	if (priv->update_clk_rate) {
 		mutex_lock(&priv->clk_mutex);
-		clk_set_rate(priv->pix_clk, state->adjusted_mode.clock * 1000);
+		clk_set_rate(priv->pix_clk,
+			     crtc_state->adjusted_mode.clock * 1000);
 		priv->update_clk_rate = false;
 		mutex_unlock(&priv->clk_mutex);
 	}
 
 	if (event) {
-		state->event = NULL;
+		crtc_state->event = NULL;
 
 		spin_lock_irq(&crtc->dev->event_lock);
 		if (drm_crtc_vblank_get(crtc) == 0)
@@ -575,24 +575,24 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static void mtk_drm_crtc_atomic_begin(struct drm_crtc *crtc,
-				      struct drm_crtc_state *old_crtc_state)
+				      struct drm_atomic_state *state)
 {
-	struct mtk_crtc_state *state = to_mtk_crtc_state(crtc->state);
+	struct mtk_crtc_state *crtc_state = to_mtk_crtc_state(crtc->state);
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 
-	if (mtk_crtc->event && state->base.event)
+	if (mtk_crtc->event && crtc_state->base.event)
 		DRM_ERROR("new event while there is still a pending event\n");
 
-	if (state->base.event) {
-		state->base.event->pipe = drm_crtc_index(crtc);
+	if (crtc_state->base.event) {
+		crtc_state->base.event->pipe = drm_crtc_index(crtc);
 		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
-		mtk_crtc->event = state->base.event;
-		state->base.event = NULL;
+		mtk_crtc->event = crtc_state->base.event;
+		crtc_state->base.event = NULL;
 	}
 }
 
 static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
-				      struct drm_crtc_state *old_crtc_state)
+				      struct drm_atomic_state *state)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
 	int i;
@@ -201,7 +201,7 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
-				    struct drm_crtc_state *state)
+				    struct drm_atomic_state *state)
 {
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	unsigned long flags;
@@ -217,7 +217,7 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
 }
 
 static void meson_crtc_atomic_flush(struct drm_crtc *crtc,
-				    struct drm_crtc_state *old_crtc_state)
+				    struct drm_atomic_state *state)
 {
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	struct meson_drm *priv = meson_crtc->priv;
@@ -486,7 +486,7 @@ static void _dpu_crtc_setup_cp_blocks(struct drm_crtc *crtc)
 }
 
 static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_state)
+		struct drm_atomic_state *state)
 {
 	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc->state);
 	struct drm_encoder *encoder;
@@ -527,7 +527,7 @@ static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
 }
 
 static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
-		struct drm_crtc_state *old_crtc_state)
+		struct drm_atomic_state *state)
 {
 	struct dpu_crtc *dpu_crtc;
 	struct drm_device *dev;
@@ -815,10 +815,12 @@ struct plane_state {
 };
 
 static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
+		struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
-	struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
+	struct dpu_crtc_state *cstate = to_dpu_crtc_state(crtc_state);
 	struct plane_state *pstates;
 
 	const struct drm_plane_state *pstate;
@@ -835,32 +837,33 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 
 	pstates = kzalloc(sizeof(*pstates) * DPU_STAGE_MAX * 4, GFP_KERNEL);
 
-	if (!state->enable || !state->active) {
+	if (!crtc_state->enable || !crtc_state->active) {
 		DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
-				crtc->base.id, state->enable, state->active);
+				crtc->base.id, crtc_state->enable,
+				crtc_state->active);
 		goto end;
 	}
 
-	mode = &state->adjusted_mode;
+	mode = &crtc_state->adjusted_mode;
 	DPU_DEBUG("%s: check", dpu_crtc->name);
 
 	/* force a full mode set if active state changed */
-	if (state->active_changed)
-		state->mode_changed = true;
+	if (crtc_state->active_changed)
+		crtc_state->mode_changed = true;
 
 	memset(pipe_staged, 0, sizeof(pipe_staged));
 
 	if (cstate->num_mixers) {
 		mixer_width = mode->hdisplay / cstate->num_mixers;
 
-		_dpu_crtc_setup_lm_bounds(crtc, state);
+		_dpu_crtc_setup_lm_bounds(crtc, crtc_state);
 	}
 
 	crtc_rect.x2 = mode->hdisplay;
 	crtc_rect.y2 = mode->vdisplay;
 
 	/* get plane state for all drm planes associated with crtc state */
-	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
 		struct drm_rect dst, clip = crtc_rect;
 
 		if (IS_ERR_OR_NULL(pstate)) {
@@ -966,7 +969,7 @@ static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
 
 	atomic_inc(&_dpu_crtc_get_kms(crtc)->bandwidth_ref);
 
-	rc = dpu_core_perf_crtc_check(crtc, state);
+	rc = dpu_core_perf_crtc_check(crtc, crtc_state);
 	if (rc) {
 		DPU_ERROR("crtc%d failed performance check %d\n",
 				crtc->base.id, rc);
@@ -307,7 +307,7 @@ static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
 }
 
 static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
+		struct drm_atomic_state *state)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	DBG("%s: check", mdp4_crtc->name);
@@ -316,14 +316,14 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
 }
 
 static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
-				   struct drm_crtc_state *old_crtc_state)
+				   struct drm_atomic_state *state)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	DBG("%s: begin", mdp4_crtc->name);
 }
 
 static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
-				   struct drm_crtc_state *old_crtc_state)
+				   struct drm_atomic_state *state)
 {
 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
@@ -7,6 +7,7 @@
 
 #include <linux/sort.h>
 
+#include <drm/drm_atomic.h>
 #include <drm/drm_mode.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_flip_work.h>
@@ -682,15 +683,17 @@ static enum mdp_mixer_stage_id get_start_stage(struct drm_crtc *crtc,
 }
 
 static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
-		struct drm_crtc_state *state)
+		struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_plane *plane;
 	struct drm_device *dev = crtc->dev;
 	struct plane_state pstates[STAGE_MAX + 1];
 	const struct mdp5_cfg_hw *hw_cfg;
 	const struct drm_plane_state *pstate;
-	const struct drm_display_mode *mode = &state->adjusted_mode;
+	const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
 	bool cursor_plane = false;
 	bool need_right_mixer = false;
 	int cnt = 0, i;
@@ -699,7 +702,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 
 	DBG("%s: check", crtc->name);
 
-	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+	drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
 		if (!pstate->visible)
 			continue;
 
@@ -731,7 +734,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	if (mode->hdisplay > hw_cfg->lm.max_width)
 		need_right_mixer = true;
 
-	ret = mdp5_crtc_setup_pipeline(crtc, state, need_right_mixer);
+	ret = mdp5_crtc_setup_pipeline(crtc, crtc_state, need_right_mixer);
 	if (ret) {
 		DRM_DEV_ERROR(dev->dev, "couldn't assign mixers %d\n", ret);
 		return ret;
@@ -744,7 +747,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 	WARN_ON(cursor_plane &&
 		(pstates[cnt - 1].plane->type != DRM_PLANE_TYPE_CURSOR));
 
-	start = get_start_stage(crtc, state, &pstates[0].state->base);
+	start = get_start_stage(crtc, crtc_state, &pstates[0].state->base);
 
 	/* verify that there are not too many planes attached to crtc
 	 * and that we don't have conflicting mixer stages:
@@ -769,13 +772,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 }
 
 static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc,
-				   struct drm_crtc_state *old_crtc_state)
+				   struct drm_atomic_state *state)
 {
 	DBG("%s: begin", crtc->name);
 }
 
 static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc,
-				   struct drm_crtc_state *old_crtc_state)
+				   struct drm_atomic_state *state)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
 	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
@@ -269,21 +269,23 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
 }
 
 static int mxsfb_crtc_atomic_check(struct drm_crtc *crtc,
-				   struct drm_crtc_state *state)
+				   struct drm_atomic_state *state)
 {
-	bool has_primary = state->plane_mask &
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
+	bool has_primary = crtc_state->plane_mask &
 			   drm_plane_mask(crtc->primary);
 
 	/* The primary plane has to be enabled when the CRTC is active. */
-	if (state->active && !has_primary)
+	if (crtc_state->active && !has_primary)
 		return -EINVAL;
 
 	/* TODO: Is this needed ? */
-	return drm_atomic_add_affected_planes(state->state, crtc);
+	return drm_atomic_add_affected_planes(state, crtc);
 }
 
 static void mxsfb_crtc_atomic_flush(struct drm_crtc *crtc,
-				    struct drm_crtc_state *old_state)
+				    struct drm_atomic_state *state)
 {
 	struct drm_pending_vblank_event *event;
 
@@ -30,6 +30,7 @@
 #include <nvif/event.h>
 #include <nvif/cl0046.h>
 
+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_vblank.h>
@@ -310,12 +311,16 @@ nv50_head_atomic_check_mode(struct nv50_head *head, struct nv50_head_atom *asyh)
 }
 
 static int
-nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
+nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+									      crtc);
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct nouveau_drm *drm = nouveau_drm(crtc->dev);
 	struct nv50_head *head = nv50_head(crtc);
-	struct nv50_head_atom *armh = nv50_head_atom(crtc->state);
-	struct nv50_head_atom *asyh = nv50_head_atom(state);
+	struct nv50_head_atom *armh = nv50_head_atom(old_crtc_state);
+	struct nv50_head_atom *asyh = nv50_head_atom(crtc_state);
 	struct nouveau_conn_atom *asyc = NULL;
 	struct drm_connector_state *conns;
 	struct drm_connector *conn;
@@ -28,7 +28,6 @@
  */
 
 #include <linux/dma-mapping.h>
-#include <linux/swiotlb.h>
 
 #include "nouveau_drv.h"
 #include "nouveau_chan.h"
@@ -1327,25 +1326,13 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
 	drm = nouveau_bdev(bdev);
 	dev = drm->dev->dev;
 
-#if IS_ENABLED(CONFIG_AGP)
-	if (drm->agp.bridge) {
-		return ttm_pool_populate(ttm, ctx);
-	}
-#endif
-
-#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
-	if (swiotlb_nr_tbl()) {
-		return ttm_dma_populate((void *)ttm, dev, ctx);
-	}
-#endif
-	return ttm_populate_and_map_pages(dev, ttm_dma, ctx);
+	return ttm_pool_alloc(&drm->ttm.bdev.pool, ttm, ctx);
 }
 
 static void
 nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
 			  struct ttm_tt *ttm)
 {
-	struct ttm_tt *ttm_dma = (void *)ttm;
 	struct nouveau_drm *drm;
 	struct device *dev;
 	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1356,21 +1343,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
 	drm = nouveau_bdev(bdev);
 	dev = drm->dev->dev;
 
-#if IS_ENABLED(CONFIG_AGP)
-	if (drm->agp.bridge) {
-		ttm_pool_unpopulate(ttm);
-		return;
-	}
-#endif
-
-#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
-	if (swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate((void *)ttm, dev);
-		return;
-	}
-#endif
-
-	ttm_unmap_and_unpopulate_pages(dev, ttm_dma);
+	return ttm_pool_free(&drm->ttm.bdev.pool, ttm);
 }
 
 static void
@@ -56,7 +56,6 @@
 #include <drm/ttm/ttm_placement.h>
 #include <drm/ttm/ttm_memory.h>
 #include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
 
 #include <drm/drm_audio_component.h>
 
@@ -22,6 +22,10 @@
  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  * USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
+
+#include <linux/limits.h>
+#include <linux/swiotlb.h>
+
 #include "nouveau_drv.h"
 #include "nouveau_gem.h"
 #include "nouveau_mem.h"
@@ -281,6 +285,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 	struct nvkm_pci *pci = device->pci;
 	struct nvif_mmu *mmu = &drm->client.mmu;
 	struct drm_device *dev = drm->dev;
+	bool need_swiotlb = false;
 	int typei, ret;
 
 	ret = nouveau_ttm_init_host(drm, 0);
@@ -315,11 +320,14 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 		drm->agp.cma = pci->agp.cma;
 	}
 
-	ret = ttm_bo_device_init(&drm->ttm.bdev,
-				 &nouveau_bo_driver,
-				 dev->anon_inode->i_mapping,
-				 dev->vma_offset_manager,
-				 drm->client.mmu.dmabits <= 32 ? true : false);
+#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
+	need_swiotlb = !!swiotlb_nr_tbl();
+#endif
+
+	ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
+				 drm->dev->dev, dev->anon_inode->i_mapping,
+				 dev->vma_offset_manager, need_swiotlb,
+				 drm->client.mmu.dmabits <= 32);
 	if (ret) {
 		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
 		return ret;
@@ -569,22 +569,25 @@ static bool omap_crtc_is_manually_updated(struct drm_crtc *crtc)
 }
 
 static int omap_crtc_atomic_check(struct drm_crtc *crtc,
-				struct drm_crtc_state *state)
+				struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct drm_plane_state *pri_state;
 
-	if (state->color_mgmt_changed && state->gamma_lut) {
-		unsigned int length = state->gamma_lut->length /
+	if (crtc_state->color_mgmt_changed && crtc_state->gamma_lut) {
+		unsigned int length = crtc_state->gamma_lut->length /
 			sizeof(struct drm_color_lut);
 
 		if (length < 2)
 			return -EINVAL;
 	}
 
-	pri_state = drm_atomic_get_new_plane_state(state->state, crtc->primary);
+	pri_state = drm_atomic_get_new_plane_state(state,
+						   crtc->primary);
 	if (pri_state) {
 		struct omap_crtc_state *omap_crtc_state =
-			to_omap_crtc_state(state);
+			to_omap_crtc_state(crtc_state);
 
 		/* Mirror new values for zpos and rotation in omap_crtc_state */
 		omap_crtc_state->zpos = pri_state->zpos;
@@ -598,12 +601,12 @@ static int omap_crtc_atomic_check(struct drm_crtc *crtc,
 }
 
 static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
-				  struct drm_crtc_state *old_crtc_state)
+				  struct drm_atomic_state *state)
 {
 }
 
 static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
-				   struct drm_crtc_state *old_crtc_state)
+				   struct drm_atomic_state *state)
 {
 	struct omap_drm_private *priv = crtc->dev->dev_private;
 	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
@@ -18,7 +18,7 @@
 
 static int panfrost_reset_init(struct panfrost_device *pfdev)
 {
-	pfdev->rstc = devm_reset_control_array_get(pfdev->dev, false, true);
+	pfdev->rstc = devm_reset_control_array_get_optional_exclusive(pfdev->dev);
 	if (IS_ERR(pfdev->rstc)) {
 		dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
 		return PTR_ERR(pfdev->rstc);
@@ -88,6 +88,7 @@ struct panfrost_device {
 	/* pm_domains for devices with more than one. */
 	struct device *pm_domain_devs[MAX_PM_DOMAINS];
 	struct device_link *pm_domain_links[MAX_PM_DOMAINS];
+	bool coherent;
 
 	struct panfrost_features features;
 	const struct panfrost_compatible *comp;
@@ -587,6 +587,8 @@ static int panfrost_probe(struct platform_device *pdev)
 	if (!pfdev->comp)
 		return -ENODEV;
 
+	pfdev->coherent = device_get_dma_attr(&pdev->dev) == DEV_DMA_COHERENT;
+
 	/* Allocate and initialize the DRM device. */
 	ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
 	if (IS_ERR(ddev))
@@ -220,6 +220,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = {
  */
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
 {
+	struct panfrost_device *pfdev = dev->dev_private;
 	struct panfrost_gem_object *obj;
 
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
@@ -229,6 +230,7 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t
 	INIT_LIST_HEAD(&obj->mappings.list);
 	mutex_init(&obj->mappings.lock);
 	obj->base.base.funcs = &panfrost_gem_funcs;
+	obj->base.map_cached = pfdev->coherent;
 
 	return &obj->base.base;
 }
@@ -554,6 +554,8 @@ int panfrost_job_init(struct panfrost_device *pfdev)
 	}
 
 	for (j = 0; j < NUM_JOB_SLOTS; j++) {
+		mutex_init(&js->queue[j].lock);
+
 		js->queue[j].fence_context = dma_fence_context_alloc(1);
 
 		ret = drm_sched_init(&js->queue[j].sched,
@@ -584,8 +586,10 @@ void panfrost_job_fini(struct panfrost_device *pfdev)
 
 	job_write(pfdev, JOB_INT_MASK, 0);
 
-	for (j = 0; j < NUM_JOB_SLOTS; j++)
+	for (j = 0; j < NUM_JOB_SLOTS; j++) {
 		drm_sched_fini(&js->queue[j].sched);
+		mutex_destroy(&js->queue[j].lock);
+	}
 
 }
 
@@ -597,7 +601,6 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
 	int ret, i;
 
 	for (i = 0; i < NUM_JOB_SLOTS; i++) {
-		mutex_init(&js->queue[i].lock);
 		sched = &js->queue[i].sched;
 		ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i],
 					    DRM_SCHED_PRIORITY_NORMAL, &sched,
@@ -610,14 +613,10 @@ int panfrost_job_open(struct panfrost_file_priv *panfrost_priv)
 
 void panfrost_job_close(struct panfrost_file_priv *panfrost_priv)
 {
-	struct panfrost_device *pfdev = panfrost_priv->pfdev;
-	struct panfrost_job_slot *js = pfdev->js;
 	int i;
 
-	for (i = 0; i < NUM_JOB_SLOTS; i++) {
+	for (i = 0; i < NUM_JOB_SLOTS; i++)
 		drm_sched_entity_destroy(&panfrost_priv->sched_entity[i]);
-		mutex_destroy(&js->queue[i].lock);
-	}
 }
 
 int panfrost_job_is_idle(struct panfrost_device *pfdev)
@@ -371,6 +371,7 @@ int panfrost_mmu_pgtable_alloc(struct panfrost_file_priv *priv)
 		.pgsize_bitmap	= SZ_4K | SZ_2M,
 		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
 		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
+		.coherent_walk	= pfdev->coherent,
 		.tlb		= &mmu_tlb_ops,
 		.iommu_dev	= pfdev->dev,
 	};
@@ -372,7 +372,7 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
 }
 
 static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
-				  struct drm_crtc_state *old_crtc_state)
+				  struct drm_atomic_state *state)
 {
 	qxl_crtc_update_monitors_config(crtc, "flush");
 }
@@ -32,7 +32,6 @@
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
 #include <drm/ttm/ttm_placement.h>
 
 #include "qxl_drv.h"
@@ -194,11 +193,10 @@ int qxl_ttm_init(struct qxl_device *qdev)
 	int num_io_pages; /* != rom->num_io_pages, we include surface0 */
 
 	/* No others user of address space so set it to 0 */
-	r = ttm_bo_device_init(&qdev->mman.bdev,
-			       &qxl_bo_driver,
+	r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
 			       qdev->ddev.anon_inode->i_mapping,
 			       qdev->ddev.vma_offset_manager,
-			       false);
+			       false, false);
 	if (r) {
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 		return r;
@ -47,7 +47,6 @@
|
||||
#include <drm/ttm/ttm_bo_api.h>
|
||||
#include <drm/ttm/ttm_bo_driver.h>
|
||||
#include <drm/ttm/ttm_module.h>
|
||||
#include <drm/ttm/ttm_page_alloc.h>
|
||||
#include <drm/ttm/ttm_placement.h>
|
||||
|
||||
#include "radeon_reg.h"
|
||||
@@ -679,19 +678,7 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
 		return 0;
 	}

-#if IS_ENABLED(CONFIG_AGP)
-	if (rdev->flags & RADEON_IS_AGP) {
-		return ttm_pool_populate(ttm, ctx);
-	}
-#endif
-
-#ifdef CONFIG_SWIOTLB
-	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
-		return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
-	}
-#endif
-
-	return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
+	return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
 }

 static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
@@ -709,21 +696,7 @@ static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 	if (slave)
 		return;

-#if IS_ENABLED(CONFIG_AGP)
-	if (rdev->flags & RADEON_IS_AGP) {
-		ttm_pool_unpopulate(ttm);
-		return;
-	}
-#endif
-
-#ifdef CONFIG_SWIOTLB
-	if (rdev->need_swiotlb && swiotlb_nr_tbl()) {
-		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
-		return;
-	}
-#endif
-
-	ttm_unmap_and_unpopulate_pages(rdev->dev, &gtt->ttm);
+	return ttm_pool_free(&rdev->mman.bdev.pool, ttm);
 }

 int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
@@ -846,10 +819,10 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	int r;

 	/* No others user of address space so set it to 0 */
-	r = ttm_bo_device_init(&rdev->mman.bdev,
-			       &radeon_bo_driver,
+	r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
 			       rdev->ddev->anon_inode->i_mapping,
 			       rdev->ddev->vma_offset_manager,
+			       rdev->need_swiotlb,
 			       dma_addressing_limited(&rdev->pdev->dev));
 	if (r) {
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);

@@ -857,6 +830,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
 	}
 	rdev->mman.initialized = true;

+	ttm_pool_init(&rdev->mman.bdev.pool, rdev->dev, rdev->need_swiotlb,
+		      dma_addressing_limited(&rdev->pdev->dev));
+
 	r = radeon_ttm_init_vram(rdev);
 	if (r) {
 		DRM_ERROR("Failed initializing VRAM heap.\n");

@@ -1004,6 +980,14 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
 	return 0;
 }

+static int radeon_ttm_pool_debugfs(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *)m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return ttm_pool_debugfs(&rdev->mman.bdev.pool, m);
+}
+
 static int ttm_pl_vram = TTM_PL_VRAM;
 static int ttm_pl_tt = TTM_PL_TT;

@@ -1011,10 +995,7 @@ static int ttm_pl_tt = TTM_PL_TT;

 static struct drm_info_list radeon_ttm_debugfs_list[] = {
 	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
 	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
-	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
-#ifdef CONFIG_SWIOTLB
-	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
-#endif
+	{"ttm_page_pool", radeon_ttm_pool_debugfs, 0, NULL}
 };

 static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)

@@ -1142,11 +1123,6 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)

 	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

-#ifdef CONFIG_SWIOTLB
-	if (!(rdev->need_swiotlb && swiotlb_nr_tbl()))
-		--count;
-#endif
-
 	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
 #else
@@ -682,20 +682,23 @@ static void rcar_du_crtc_stop(struct rcar_du_crtc *rcrtc)
 */

 static int rcar_du_crtc_atomic_check(struct drm_crtc *crtc,
-				     struct drm_crtc_state *state)
+				     struct drm_atomic_state *state)
 {
-	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(state);
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
+	struct rcar_du_crtc_state *rstate = to_rcar_crtc_state(crtc_state);
 	struct drm_encoder *encoder;
 	int ret;

-	ret = rcar_du_cmm_check(crtc, state);
+	ret = rcar_du_cmm_check(crtc, crtc_state);
 	if (ret)
 		return ret;

 	/* Store the routes from the CRTC output to the DU outputs. */
 	rstate->outputs = 0;

-	drm_for_each_encoder_mask(encoder, crtc->dev, state->encoder_mask) {
+	drm_for_each_encoder_mask(encoder, crtc->dev,
+				  crtc_state->encoder_mask) {
 		struct rcar_du_encoder *renc;

 		/* Skip the writeback encoder. */

@@ -782,7 +785,7 @@ static void rcar_du_crtc_atomic_disable(struct drm_crtc *crtc,
 }

 static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
-				      struct drm_crtc_state *old_crtc_state)
+				      struct drm_atomic_state *state)
 {
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);

@@ -811,7 +814,7 @@ static void rcar_du_crtc_atomic_begin(struct drm_crtc *crtc,
 }

 static void rcar_du_crtc_atomic_flush(struct drm_crtc *crtc,
-				      struct drm_crtc_state *old_crtc_state)
+				      struct drm_atomic_state *state)
 {
 	struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
 	struct drm_device *dev = rcrtc->crtc.dev;
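
The CRTC-hook rewrites in this and the following drivers all follow the same mechanical pattern: atomic_check/atomic_begin/atomic_flush now receive the full struct drm_atomic_state, and each driver pulls its own old or new CRTC state out of it with the drm_atomic_get_{old,new}_crtc_state() helpers visible above. A minimal sketch for a hypothetical driver (the foo_* names are placeholders, not taken from this diff):

static int foo_crtc_atomic_check(struct drm_crtc *crtc,
				 struct drm_atomic_state *state)
{
	/* Look the CRTC's new (to-be-committed) state up in the full state. */
	struct drm_crtc_state *crtc_state =
		drm_atomic_get_new_crtc_state(state, crtc);

	if (!crtc_state->enable)
		return 0;

	/* ... validate crtc_state->adjusted_mode, planes, etc. ... */
	return 0;
}

static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	/* Hooks that need the pre-commit state query the old state instead. */
	struct drm_crtc_state *old_crtc_state =
		drm_atomic_get_old_crtc_state(state, crtc);

	/* ... program the hardware, complete crtc->state->event ... */
}
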
@@ -1246,8 +1246,10 @@ static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc,
 }

 static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
-				  struct drm_crtc_state *old_crtc_state)
+				  struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+									      crtc);
 	struct vop *vop = to_vop(crtc);

 	/*

@@ -1415,8 +1417,10 @@ static void vop_wait_for_irq_handler(struct vop *vop)
 }

 static int vop_crtc_atomic_check(struct drm_crtc *crtc,
-				 struct drm_crtc_state *crtc_state)
+				 struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct vop *vop = to_vop(crtc);
 	struct drm_plane *plane;
 	struct drm_plane_state *plane_state;

@@ -1460,8 +1464,10 @@ static int vop_crtc_atomic_check(struct drm_crtc *crtc,
 }

 static void vop_crtc_atomic_flush(struct drm_crtc *crtc,
-				  struct drm_crtc_state *old_crtc_state)
+				  struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+									      crtc);
 	struct drm_atomic_state *old_state = old_crtc_state->state;
 	struct drm_plane_state *old_plane_state, *new_plane_state;
 	struct vop *vop = to_vop(crtc);
@@ -133,7 +133,7 @@ sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
 }

 static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
-				  struct drm_crtc_state *old_crtc_state)
+				  struct drm_atomic_state *state)
 {
 	struct drm_device *drm_dev = crtc->dev;
 	struct sti_mixer *mixer = to_sti_mixer(crtc);
@@ -596,7 +596,7 @@ static void ltdc_crtc_mode_set_nofb(struct drm_crtc *crtc)
 }

 static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
-				   struct drm_crtc_state *old_crtc_state)
+				   struct drm_atomic_state *state)
 {
 	struct ltdc_device *ldev = crtc_to_ltdc(crtc);
 	struct drm_device *ddev = crtc->dev;
@@ -15,6 +15,7 @@

 #include <video/videomode.h>

+#include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_modes.h>

@@ -45,21 +46,25 @@ static struct drm_encoder *sun4i_crtc_get_encoder(struct drm_crtc *crtc)
 }

 static int sun4i_crtc_atomic_check(struct drm_crtc *crtc,
-				   struct drm_crtc_state *state)
+				   struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
 	struct sunxi_engine *engine = scrtc->engine;
 	int ret = 0;

 	if (engine && engine->ops && engine->ops->atomic_check)
-		ret = engine->ops->atomic_check(engine, state);
+		ret = engine->ops->atomic_check(engine, crtc_state);

 	return ret;
 }

 static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
-				    struct drm_crtc_state *old_state)
+				    struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+									 crtc);
 	struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
 	struct drm_device *dev = crtc->dev;
 	struct sunxi_engine *engine = scrtc->engine;

@@ -79,7 +84,7 @@ static void sun4i_crtc_atomic_begin(struct drm_crtc *crtc,
 }

 static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
-				    struct drm_crtc_state *old_state)
+				    struct drm_atomic_state *state)
 {
 	struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
 	struct drm_pending_vblank_event *event = crtc->state->event;
@@ -1918,7 +1918,7 @@ static void tegra_crtc_atomic_enable(struct drm_crtc *crtc,
 }

 static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
-				    struct drm_crtc_state *old_crtc_state)
+				    struct drm_atomic_state *state)
 {
 	unsigned long flags;

@@ -1937,17 +1937,17 @@ static void tegra_crtc_atomic_begin(struct drm_crtc *crtc,
 }

 static void tegra_crtc_atomic_flush(struct drm_crtc *crtc,
-				    struct drm_crtc_state *old_crtc_state)
+				    struct drm_atomic_state *state)
 {
-	struct tegra_dc_state *state = to_dc_state(crtc->state);
+	struct tegra_dc_state *crtc_state = to_dc_state(crtc->state);
 	struct tegra_dc *dc = to_tegra_dc(crtc);
 	u32 value;

-	value = state->planes << 8 | GENERAL_UPDATE;
+	value = crtc_state->planes << 8 | GENERAL_UPDATE;
 	tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
 	value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);

-	value = state->planes | GENERAL_ACT_REQ;
+	value = crtc_state->planes | GENERAL_ACT_REQ;
 	tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
 	value = tegra_dc_readl(dc, DC_CMD_STATE_CONTROL);
 }
@@ -85,8 +85,10 @@ void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus)
 /* drm_crtc_helper_funcs */

 static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
-				   struct drm_crtc_state *state)
+				   struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct drm_device *ddev = crtc->dev;
 	struct tidss_device *tidss = to_tidss(ddev);
 	struct dispc_device *dispc = tidss->dispc;

@@ -97,10 +99,10 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,

 	dev_dbg(ddev->dev, "%s\n", __func__);

-	if (!state->enable)
+	if (!crtc_state->enable)
 		return 0;

-	mode = &state->adjusted_mode;
+	mode = &crtc_state->adjusted_mode;

 	ok = dispc_vp_mode_valid(dispc, hw_videoport, mode);
 	if (ok != MODE_OK) {

@@ -109,7 +111,7 @@ static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
 		return -EINVAL;
 	}

-	return dispc_vp_bus_check(dispc, hw_videoport, state);
+	return dispc_vp_bus_check(dispc, hw_videoport, crtc_state);
 }

 /*

@@ -161,8 +163,10 @@ static void tidss_crtc_position_planes(struct tidss_device *tidss,
 }

 static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
-				    struct drm_crtc_state *old_crtc_state)
+				    struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *old_crtc_state = drm_atomic_get_old_crtc_state(state,
+									      crtc);
 	struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
 	struct drm_device *ddev = crtc->dev;
 	struct tidss_device *tidss = to_tidss(ddev);
@@ -535,7 +535,7 @@ static void tilcdc_crtc_atomic_disable(struct drm_crtc *crtc,
 }

 static void tilcdc_crtc_atomic_flush(struct drm_crtc *crtc,
-				     struct drm_crtc_state *old_state)
+				     struct drm_atomic_state *state)
 {
 	if (!crtc->state->event)
 		return;

@@ -657,15 +657,17 @@ static bool tilcdc_crtc_mode_fixup(struct drm_crtc *crtc,
 }

 static int tilcdc_crtc_atomic_check(struct drm_crtc *crtc,
-				    struct drm_crtc_state *state)
+				    struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	/* If we are not active we don't care */
-	if (!state->active)
+	if (!crtc_state->active)
 		return 0;

-	if (state->state->planes[0].ptr != crtc->primary ||
-	    state->state->planes[0].state == NULL ||
-	    state->state->planes[0].state->crtc != crtc) {
+	if (state->planes[0].ptr != crtc->primary ||
+	    state->planes[0].state == NULL ||
+	    state->planes[0].state->crtc != crtc) {
 		dev_dbg(crtc->dev->dev, "CRTC primary plane must be present");
 		return -EINVAL;
 	}
@@ -432,8 +432,8 @@ static int tilcdc_mm_show(struct seq_file *m, void *arg)
 }

 static struct drm_info_list tilcdc_debugfs_list[] = {
-	{ "regs", tilcdc_regs_show, 0 },
-	{ "mm",   tilcdc_mm_show,   0 },
+	{ "regs", tilcdc_regs_show, 0, NULL },
+	{ "mm",   tilcdc_mm_show,   0, NULL },
 };

 static void tilcdc_debugfs_init(struct drm_minor *minor)
@@ -4,9 +4,8 @@

 ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
 	ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
-	ttm_execbuf_util.o ttm_page_alloc.o ttm_range_manager.o \
-	ttm_resource.o
+	ttm_execbuf_util.o ttm_range_manager.o \
+	ttm_resource.o ttm_pool.o
 ttm-$(CONFIG_AGP) += ttm_agp_backend.o
-ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o

 obj-$(CONFIG_DRM_TTM) += ttm.o
@@ -34,7 +34,6 @@

 #include <drm/ttm/ttm_module.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>
 #include <drm/ttm/ttm_placement.h>
 #include <linux/agp_backend.h>
 #include <linux/module.h>
@@ -637,7 +637,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,

 	if (bo->base.resv == ctx->resv) {
 		dma_resv_assert_held(bo->base.resv);
-		if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
+		if (ctx->allow_res_evict)
 			ret = true;
 		*locked = false;
 		if (busy)

@@ -1283,6 +1283,8 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 			pr_debug("Swap list %d was clean\n", i);
 	spin_unlock(&glob->lru_lock);

+	ttm_pool_fini(&bdev->pool);
+
 	if (!ret)
 		ttm_bo_global_release();

@@ -1307,9 +1309,10 @@ static void ttm_bo_init_sysman(struct ttm_bo_device *bdev)

 int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		       struct ttm_bo_driver *driver,
+		       struct device *dev,
 		       struct address_space *mapping,
 		       struct drm_vma_offset_manager *vma_manager,
-		       bool need_dma32)
+		       bool use_dma_alloc, bool use_dma32)
 {
 	struct ttm_bo_global *glob = &ttm_bo_glob;
 	int ret;

@@ -1324,12 +1327,12 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	bdev->driver = driver;

 	ttm_bo_init_sysman(bdev);
+	ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);

 	bdev->vma_manager = vma_manager;
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	INIT_LIST_HEAD(&bdev->ddestroy);
 	bdev->dev_mapping = mapping;
-	bdev->need_dma32 = need_dma32;
 	mutex_lock(&ttm_global_mutex);
 	list_add_tail(&bdev->device_list, &glob->device_list);
 	mutex_unlock(&ttm_global_mutex);
@@ -315,8 +315,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		struct ttm_operation_ctx ctx = {
 			.interruptible = false,
 			.no_wait_gpu = false,
-			.flags = TTM_OPT_FLAG_FORCE_ALLOC
-
+			.force_alloc = true
 		};

 		ttm = bo->ttm;
@@ -30,7 +30,6 @@

 #include <drm/ttm/ttm_memory.h>
 #include <drm/ttm/ttm_module.h>
-#include <drm/ttm/ttm_page_alloc.h>
 #include <linux/spinlock.h>
 #include <linux/sched.h>
 #include <linux/wait.h>

@@ -38,6 +37,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
+#include <drm/ttm/ttm_pool.h>

 #define TTM_MEMORY_ALLOC_RETRIES	4

@@ -451,8 +451,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
 		pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
 			zone->name, (unsigned long long)zone->max_mem >> 10);
 	}
-	ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
-	ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+	ttm_pool_mgr_init(glob->zone_kernel->max_mem/(2*PAGE_SIZE));
 	return 0;
 out_no_zone:
 	ttm_mem_global_release(glob);

@@ -465,8 +464,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
 	unsigned int i;

 	/* let the page allocator first stop the shrink work. */
-	ttm_page_alloc_fini();
-	ttm_dma_page_alloc_fini();
+	ttm_pool_mgr_fini();

 	flush_workqueue(glob->swap_queue);
 	destroy_workqueue(glob->swap_queue);

@@ -544,7 +542,8 @@ ttm_check_under_lowerlimit(struct ttm_mem_global *glob,
 {
 	int64_t available;

-	if (ctx->flags & TTM_OPT_FLAG_FORCE_ALLOC)
+	/* We allow over commit during suspend */
+	if (ctx->force_alloc)
 		return false;

 	available = get_nr_swap_pages() + si_mem_available();
[Two file diffs suppressed because they are too large.]

drivers/gpu/drm/ttm/ttm_pool.c (new file, 667 lines)
@@ -0,0 +1,667 @@
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition, allocations from the DMA coherent API are pooled as well
 * because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping and order in the lower bits
 */
struct ttm_pool_dma {
	dma_addr_t addr;
	unsigned long vaddr;
};

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
					unsigned int order)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	struct page *p;
	void *vaddr;

	if (order) {
		gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY |
			__GFP_KSWAPD_RECLAIM;
		gfp_flags &= ~__GFP_MOVABLE;
		gfp_flags &= ~__GFP_COMP;
	}

	if (!pool->use_dma_alloc) {
		p = alloc_pages(gfp_flags, order);
		if (p)
			p->private = order;
		return p;
	}

	dma = kmalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
				&dma->addr, gfp_flags, attr);
	if (!vaddr)
		goto error_free;

	/* TODO: This is an illegal abuse of the DMA API, but we need to rework
	 * TTM page fault handling and extend the DMA API to clean this up.
	 */
	if (is_vmalloc_addr(vaddr))
		p = vmalloc_to_page(vaddr);
	else
		p = virt_to_page(vaddr);

	dma->vaddr = (unsigned long)vaddr | order;
	p->private = (unsigned long)dma;
	return p;

error_free:
	kfree(dma);
	return NULL;
}

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
			       unsigned int order, struct page *p)
{
	unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
	struct ttm_pool_dma *dma;
	void *vaddr;

#ifdef CONFIG_X86
	/* We don't care that set_pages_wb is inefficient here. This is only
	 * used when we have to shrink and CPU overhead is irrelevant then.
	 */
	if (caching != ttm_cached && !PageHighMem(p))
		set_pages_wb(p, 1 << order);
#endif

	if (!pool->use_dma_alloc) {
		__free_pages(p, order);
		return;
	}

	if (order)
		attr |= DMA_ATTR_NO_WARN;

	dma = (void *)p->private;
	vaddr = (void *)(dma->vaddr & PAGE_MASK);
	dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
		       attr);
	kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
				  enum ttm_caching caching)
{
#ifdef CONFIG_X86
	unsigned int num_pages = last - first;

	if (!num_pages)
		return 0;

	switch (caching) {
	case ttm_cached:
		break;
	case ttm_write_combined:
		return set_pages_array_wc(first, num_pages);
	case ttm_uncached:
		return set_pages_array_uc(first, num_pages);
	}
#endif
	return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
			struct page *p, dma_addr_t **dma_addr)
{
	dma_addr_t addr;
	unsigned int i;

	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		addr = dma->addr;
	} else {
		size_t size = (1ULL << order) * PAGE_SIZE;

		addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(pool->dev, **dma_addr))
			return -EFAULT;
	}

	for (i = 1 << order; i; --i) {
		*(*dma_addr)++ = addr;
		addr += PAGE_SIZE;
	}

	return 0;
}

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
			   unsigned int num_pages)
{
	/* Unmapped while freeing the page */
	if (pool->use_dma_alloc)
		return;

	dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
		       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
	spin_lock(&pt->lock);
	list_add(&p->lru, &pt->pages);
	spin_unlock(&pt->lock);
	atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
	struct page *p;

	spin_lock(&pt->lock);
	p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
	if (p) {
		atomic_long_sub(1 << pt->order, &allocated_pages);
		list_del(&p->lru);
	}
	spin_unlock(&pt->lock);

	return p;
}

/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
	unsigned int count = 0;
	struct page *p;

	spin_lock(&pt->lock);
	/* Only used for debugfs, the overhead doesn't matter */
	list_for_each_entry(p, &pt->pages, lru)
		++count;
	spin_unlock(&pt->lock);

	return count;
}

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
			       enum ttm_caching caching, unsigned int order)
{
	pt->pool = pool;
	pt->caching = caching;
	pt->order = order;
	spin_lock_init(&pt->lock);
	INIT_LIST_HEAD(&pt->pages);

	spin_lock(&shrinker_lock);
	list_add_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
	struct page *p, *tmp;

	spin_lock(&shrinker_lock);
	list_del(&pt->shrinker_list);
	spin_unlock(&shrinker_lock);

	list_for_each_entry_safe(p, tmp, &pt->pages, lru)
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
						  enum ttm_caching caching,
						  unsigned int order)
{
	if (pool->use_dma_alloc)
		return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
	switch (caching) {
	case ttm_write_combined:
		return &global_write_combined[order];
	case ttm_uncached:
		return &global_uncached[order];
	default:
		break;
	}
#endif

	return NULL;
}

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
	struct ttm_pool_type *pt;
	unsigned int num_freed;
	struct page *p;

	spin_lock(&shrinker_lock);
	pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);

	p = ttm_pool_type_take(pt);
	if (p) {
		ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
		num_freed = 1 << pt->order;
	} else {
		num_freed = 0;
	}

	list_move_tail(&pt->shrinker_list, &shrinker_list);
	spin_unlock(&shrinker_lock);

	return num_freed;
}

/* Return the allocation order for a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
	if (pool->use_dma_alloc) {
		struct ttm_pool_dma *dma = (void *)p->private;

		return dma->vaddr & ~PAGE_MASK;
	}

	return p->private;
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
		   struct ttm_operation_ctx *ctx)
{
	unsigned long num_pages = tt->num_pages;
	dma_addr_t *dma_addr = tt->dma_address;
	struct page **caching = tt->pages;
	struct page **pages = tt->pages;
	gfp_t gfp_flags = GFP_USER;
	unsigned int i, order;
	struct page *p;
	int r;

	WARN_ON(!num_pages || ttm_tt_is_populated(tt));
	WARN_ON(dma_addr && !pool->dev);

	if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (ctx->gfp_retry_mayfail)
		gfp_flags |= __GFP_RETRY_MAYFAIL;

	if (pool->use_dma32)
		gfp_flags |= GFP_DMA32;
	else
		gfp_flags |= GFP_HIGHUSER;

	for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
	     order = min_t(unsigned int, order, __fls(num_pages))) {
		bool apply_caching = false;
		struct ttm_pool_type *pt;

		pt = ttm_pool_select_type(pool, tt->caching, order);
		p = pt ? ttm_pool_type_take(pt) : NULL;
		if (p) {
			apply_caching = true;
		} else {
			p = ttm_pool_alloc_page(pool, gfp_flags, order);
			if (p && PageHighMem(p))
				apply_caching = true;
		}

		if (!p) {
			if (order) {
				--order;
				continue;
			}
			r = -ENOMEM;
			goto error_free_all;
		}

		if (apply_caching) {
			r = ttm_pool_apply_caching(caching, pages,
						   tt->caching);
			if (r)
				goto error_free_page;
			caching = pages + (1 << order);
		}

		r = ttm_mem_global_alloc_page(&ttm_mem_glob, p,
					      (1 << order) * PAGE_SIZE,
					      ctx);
		if (r)
			goto error_free_page;

		if (dma_addr) {
			r = ttm_pool_map(pool, order, p, &dma_addr);
			if (r)
				goto error_global_free;
		}

		num_pages -= 1 << order;
		for (i = 1 << order; i; --i)
			*(pages++) = p++;
	}

	r = ttm_pool_apply_caching(caching, pages, tt->caching);
	if (r)
		goto error_free_all;

	return 0;

error_global_free:
	ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE);

error_free_page:
	ttm_pool_free_page(pool, tt->caching, order, p);

error_free_all:
	num_pages = tt->num_pages - num_pages;
	for (i = 0; i < num_pages; ) {
		order = ttm_pool_page_order(pool, tt->pages[i]);
		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
		i += 1 << order;
	}

	return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
	unsigned int i;

	for (i = 0; i < tt->num_pages; ) {
		struct page *p = tt->pages[i];
		unsigned int order, num_pages;
		struct ttm_pool_type *pt;

		order = ttm_pool_page_order(pool, p);
		num_pages = 1ULL << order;
		ttm_mem_global_free_page(&ttm_mem_glob, p,
					 num_pages * PAGE_SIZE);
		if (tt->dma_address)
			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);

		pt = ttm_pool_select_type(pool, tt->caching, order);
		if (pt)
			ttm_pool_type_give(pt, tt->pages[i]);
		else
			ttm_pool_free_page(pool, tt->caching, order,
					   tt->pages[i]);

		i += num_pages;
	}

	while (atomic_long_read(&allocated_pages) > page_pool_size)
		ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
		   bool use_dma_alloc, bool use_dma32)
{
	unsigned int i, j;

	WARN_ON(!dev && use_dma_alloc);

	pool->dev = dev;
	pool->use_dma_alloc = use_dma_alloc;
	pool->use_dma32 = use_dma32;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
		for (j = 0; j < MAX_ORDER; ++j)
			ttm_pool_type_init(&pool->caching[i].orders[j],
					   pool, i, j);
}
EXPORT_SYMBOL(ttm_pool_init);

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
	unsigned int i, j;

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
		for (j = 0; j < MAX_ORDER; ++j)
			ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
EXPORT_SYMBOL(ttm_pool_fini);

#ifdef CONFIG_DEBUG_FS

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
				    struct seq_file *m)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
	seq_puts(m, "\n");
}

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
	unsigned int i;

	spin_lock(&shrinker_lock);

	seq_puts(m, "\t ");
	for (i = 0; i < MAX_ORDER; ++i)
		seq_printf(m, " ---%2u---", i);
	seq_puts(m, "\n");

	seq_puts(m, "wc\t:");
	ttm_pool_debugfs_orders(global_write_combined, m);
	seq_puts(m, "uc\t:");
	ttm_pool_debugfs_orders(global_uncached, m);

	for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
		seq_puts(m, "DMA ");
		switch (i) {
		case ttm_cached:
			seq_puts(m, "\t:");
			break;
		case ttm_write_combined:
			seq_puts(m, "wc\t:");
			break;
		case ttm_uncached:
			seq_puts(m, "uc\t:");
			break;
		}
		ttm_pool_debugfs_orders(pool->caching[i].orders, m);
	}

	seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
		   atomic_long_read(&allocated_pages), page_pool_size);

	spin_unlock(&shrinker_lock);

	return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

#endif

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
{
	unsigned long num_freed = 0;

	do
		num_freed += ttm_pool_shrink();
	while (!num_freed && atomic_long_read(&allocated_pages));

	return num_freed;
}

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
					     struct shrink_control *sc)
{
	unsigned long num_pages = atomic_long_read(&allocated_pages);

	return num_pages ? num_pages : SHRINK_EMPTY;
}

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
	unsigned int i;

	if (!page_pool_size)
		page_pool_size = num_pages;

	spin_lock_init(&shrinker_lock);
	INIT_LIST_HEAD(&shrinker_list);

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_init(&global_write_combined[i], NULL,
				   ttm_write_combined, i);
		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
	}

	mm_shrinker.count_objects = ttm_pool_shrinker_count;
	mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
	mm_shrinker.seeks = 1;
	return register_shrinker(&mm_shrinker);
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
	unsigned int i;

	for (i = 0; i < MAX_ORDER; ++i) {
		ttm_pool_type_fini(&global_write_combined[i]);
		ttm_pool_type_fini(&global_uncached[i]);
	}

	unregister_shrinker(&mm_shrinker);
	WARN_ON(!list_empty(&shrinker_list));
}
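
The surface of the new allocator, as exercised by the radeon and ttm_bo.c hunks in this series, is deliberately small: init, alloc, free, fini, plus the optional debugfs dump. A condensed lifecycle sketch (pool, dev, tt and ctx stand in for the driver's own objects):

	struct ttm_pool pool;

	ttm_pool_init(&pool, dev, true /* use_dma_alloc */, false /* use_dma32 */);

	r = ttm_pool_alloc(&pool, tt, &ctx);	/* fill tt->pages (+ DMA map) */
	if (!r) {
		/* ... buffer in use ... */
		ttm_pool_free(&pool, tt);	/* pages return to the pool */
	}

	ttm_pool_fini(&pool);	/* frees pooled pages, unhooks the shrinker */
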
@@ -89,7 +89,7 @@ int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
 	struct ttm_operation_ctx ctx = {
 		.interruptible = false,
 		.no_wait_gpu = false,
-		.flags = TTM_OPT_FLAG_FORCE_ALLOC
+		.force_alloc = true
 	};
 	struct ttm_bo_global *glob = &ttm_bo_glob;
 	struct dma_fence *fence;
@@ -1,84 +0,0 @@
/**************************************************************************
 *
 * Copyright (c) 2018 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Huang Rui <ray.huang@amd.com>
 */

#ifndef TTM_SET_MEMORY
#define TTM_SET_MEMORY

#include <linux/mm.h>

#ifdef CONFIG_X86

#include <asm/set_memory.h>

static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
{
	return set_pages_array_wb(pages, addrinarray);
}

static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
{
	return set_pages_array_wc(pages, addrinarray);
}

static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
{
	return set_pages_array_uc(pages, addrinarray);
}

static inline int ttm_set_pages_wb(struct page *page, int numpages)
{
	return set_pages_wb(page, numpages);
}

#else /* for CONFIG_X86 */

static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
{
	return 0;
}

static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
{
	return 0;
}

static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
{
	return 0;
}

static inline int ttm_set_pages_wb(struct page *page, int numpages)
{
	return 0;
}

#endif /* for CONFIG_X86 */

#endif
@@ -37,7 +37,6 @@
 #include <linux/file.h>
 #include <drm/drm_cache.h>
 #include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_page_alloc.h>

 /**
  * Allocates a ttm structure for the given BO.

@@ -52,12 +51,6 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
 	if (bo->ttm)
 		return 0;

-	if (bdev->need_dma32)
-		page_flags |= TTM_PAGE_FLAG_DMA32;
-
-	if (bdev->no_retry)
-		page_flags |= TTM_PAGE_FLAG_NO_RETRY;
-
 	switch (bo->type) {
 	case ttm_bo_type_device:
 		if (zero_alloc)

@@ -142,7 +135,6 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm,
 	ttm->dma_address = NULL;
 	ttm->swap_storage = NULL;
 	ttm->sg = bo->sg;
-	INIT_LIST_HEAD(&ttm->pages_list);
 	ttm->caching = caching;
 }

@@ -216,8 +208,6 @@ int ttm_tt_swapin(struct ttm_tt *ttm)

 	swap_space = swap_storage->f_mapping;
 	gfp_mask = mapping_gfp_mask(swap_space);
-	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
-		gfp_mask |= __GFP_RETRY_MAYFAIL;

 	for (i = 0; i < ttm->num_pages; ++i) {
 		from_page = shmem_read_mapping_page_gfp(swap_space, i,

@@ -265,8 +255,6 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm)

 	swap_space = swap_storage->f_mapping;
 	gfp_mask = mapping_gfp_mask(swap_space);
-	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
-		gfp_mask |= __GFP_RETRY_MAYFAIL;

 	for (i = 0; i < ttm->num_pages; ++i) {
 		from_page = ttm->pages[i];

@@ -321,7 +309,7 @@ int ttm_tt_populate(struct ttm_bo_device *bdev,
 	if (bdev->driver->ttm_tt_populate)
 		ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
 	else
-		ret = ttm_pool_populate(ttm, ctx);
+		ret = ttm_pool_alloc(&bdev->pool, ttm, ctx);
 	if (ret)
 		return ret;

@@ -363,6 +351,6 @@ void ttm_tt_unpopulate(struct ttm_bo_device *bdev,
 	if (bdev->driver->ttm_tt_unpopulate)
 		bdev->driver->ttm_tt_unpopulate(bdev, ttm);
 	else
-		ttm_pool_unpopulate(ttm);
+		ttm_pool_free(&bdev->pool, ttm);
 	ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
 }
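
With the TTM_OPT_FLAG_* word gone, an operation context now spells each option out as a named boolean; gfp_retry_mayfail (consumed by ttm_pool_alloc() above) takes over from the removed device-wide no_retry/TTM_PAGE_FLAG_NO_RETRY plumbing. A sketch of how a caller fills it in:

	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.force_alloc = false,		/* was TTM_OPT_FLAG_FORCE_ALLOC */
		.allow_res_evict = false,	/* was TTM_OPT_FLAG_ALLOW_RES_EVICT */
		.gfp_retry_mayfail = true,	/* fail allocation rather than retry hard */
	};

	ret = ttm_tt_populate(bdev, ttm, &ctx);
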
@@ -223,7 +223,7 @@ static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
 }

 static void vbox_crtc_atomic_flush(struct drm_crtc *crtc,
-				   struct drm_crtc_state *old_crtc_state)
+				   struct drm_atomic_state *state)
 {
 }
@@ -584,18 +584,21 @@ void vc4_crtc_get_margins(struct drm_crtc_state *state,
 }

 static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
-				 struct drm_crtc_state *state)
+				 struct drm_atomic_state *state)
 {
-	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
 	struct drm_connector *conn;
 	struct drm_connector_state *conn_state;
 	int ret, i;

-	ret = vc4_hvs_atomic_check(crtc, state);
+	ret = vc4_hvs_atomic_check(crtc, crtc_state);
 	if (ret)
 		return ret;

-	for_each_new_connector_in_state(state->state, conn, conn_state, i) {
+	for_each_new_connector_in_state(state, conn, conn_state,
+					i) {
 		if (conn_state->crtc != crtc)
 			continue;
@@ -916,7 +916,8 @@ int vc4_hvs_get_fifo_from_output(struct drm_device *dev, unsigned int output);
 int vc4_hvs_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state);
 void vc4_hvs_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
 void vc4_hvs_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old_state);
-void vc4_hvs_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *state);
+void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
+			  struct drm_atomic_state *state);
 void vc4_hvs_dump_state(struct drm_device *dev);
 void vc4_hvs_unmask_underrun(struct drm_device *dev, int channel);
 void vc4_hvs_mask_underrun(struct drm_device *dev, int channel);
@@ -414,8 +414,10 @@ void vc4_hvs_atomic_disable(struct drm_crtc *crtc,
 }

 void vc4_hvs_atomic_flush(struct drm_crtc *crtc,
-			  struct drm_crtc_state *old_state)
+			  struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *old_state = drm_atomic_get_old_crtc_state(state,
+									 crtc);
 	struct drm_device *dev = crtc->dev;
 	struct vc4_dev *vc4 = to_vc4_dev(dev);
 	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state);
@@ -386,16 +386,18 @@ static const struct drm_crtc_funcs vc4_txp_crtc_funcs = {
 };

 static int vc4_txp_atomic_check(struct drm_crtc *crtc,
-				struct drm_crtc_state *state)
+				struct drm_atomic_state *state)
 {
-	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state);
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
+	struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc_state);
 	int ret;

-	ret = vc4_hvs_atomic_check(crtc, state);
+	ret = vc4_hvs_atomic_check(crtc, crtc_state);
 	if (ret)
 		return ret;

-	state->no_vblank = true;
+	crtc_state->no_vblank = true;
 	vc4_state->feed_txp = true;

 	return 0;
@@ -111,13 +111,13 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
 }

 static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
-					struct drm_crtc_state *state)
+					struct drm_atomic_state *state)
 {
 	return 0;
 }

 static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
-					 struct drm_crtc_state *old_state)
+					 struct drm_atomic_state *state)
 {
 	struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
@@ -168,9 +168,11 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = {
 };

 static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
-				  struct drm_crtc_state *state)
+				  struct drm_atomic_state *state)
 {
-	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
+	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
+	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state);
 	struct drm_plane *plane;
 	struct drm_plane_state *plane_state;
 	int i = 0, ret;

@@ -178,12 +180,12 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
 	if (vkms_state->active_planes)
 		return 0;

-	ret = drm_atomic_add_affected_planes(state->state, crtc);
+	ret = drm_atomic_add_affected_planes(crtc_state->state, crtc);
 	if (ret < 0)
 		return ret;

-	drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
-		plane_state = drm_atomic_get_existing_plane_state(state->state,
+	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
+		plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
								  plane);
 		WARN_ON(!plane_state);

@@ -199,8 +201,8 @@ static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
 	vkms_state->num_active_planes = i;

 	i = 0;
-	drm_for_each_plane_mask(plane, crtc->dev, state->plane_mask) {
-		plane_state = drm_atomic_get_existing_plane_state(state->state,
+	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
+		plane_state = drm_atomic_get_existing_plane_state(crtc_state->state,
								  plane);

 		if (!plane_state->visible)

@@ -226,7 +228,7 @@ static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
 }

 static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
-				   struct drm_crtc_state *old_crtc_state)
+				   struct drm_atomic_state *state)
 {
 	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

@@ -237,7 +239,7 @@ static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
 }

 static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
-				   struct drm_crtc_state *old_crtc_state)
+				   struct drm_atomic_state *state)
 {
 	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
@@ -595,10 +595,6 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 	else
 		dev_priv->map_mode = vmw_dma_map_populate;

-	if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) &&
-	    (dev_priv->map_mode == vmw_dma_alloc_coherent))
-		return -EINVAL;
-
 	DRM_INFO("DMA map mode: %s\n", names[dev_priv->map_mode]);
 	return 0;
 }

@@ -798,8 +794,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (unlikely(ret != 0))
 		goto out_err0;

-	dma_set_max_seg_size(dev->dev, min_t(unsigned int, U32_MAX & PAGE_MASK,
-					     SCATTERLIST_MAX_SEGMENT));
+	dma_set_max_seg_size(dev->dev, U32_MAX);

 	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 		DRM_INFO("Max GMR ids is %u\n",

@@ -878,10 +873,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	drm_vma_offset_manager_init(&dev_priv->vma_manager,
 				    DRM_FILE_PAGE_OFFSET_START,
 				    DRM_FILE_PAGE_OFFSET_SIZE);
-	ret = ttm_bo_device_init(&dev_priv->bdev,
-				 &vmw_bo_driver,
+	ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver,
+				 dev_priv->dev->dev,
 				 dev->anon_inode->i_mapping,
 				 &dev_priv->vma_manager,
+				 dev_priv->map_mode == vmw_dma_alloc_coherent,
 				 false);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
@@ -522,8 +522,10 @@ int vmw_du_cursor_plane_atomic_check(struct drm_plane *plane,


 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
-			     struct drm_crtc_state *new_state)
+			     struct drm_atomic_state *state)
 {
+	struct drm_crtc_state *new_state = drm_atomic_get_new_crtc_state(state,
+									  crtc);
 	struct vmw_display_unit *du = vmw_crtc_to_du(new_state->crtc);
 	int connector_mask = drm_connector_mask(&du->connector);
 	bool has_primary = new_state->plane_mask &

@@ -552,13 +554,13 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,


 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
-			      struct drm_crtc_state *old_crtc_state)
+			      struct drm_atomic_state *state)
 {
 }


 void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
-			      struct drm_crtc_state *old_crtc_state)
+			      struct drm_atomic_state *state)
 {
 	struct drm_pending_vblank_event *event = crtc->state->event;
@@ -473,11 +473,11 @@ void vmw_du_plane_unpin_surf(struct vmw_plane_state *vps,
 			     bool unreference);

 int vmw_du_crtc_atomic_check(struct drm_crtc *crtc,
-			     struct drm_crtc_state *state);
+			     struct drm_atomic_state *state);
 void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc,
-			      struct drm_crtc_state *old_crtc_state);
+			      struct drm_atomic_state *state);
 void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc,
-			      struct drm_crtc_state *old_crtc_state);
+			      struct drm_atomic_state *state);
 void vmw_du_crtc_reset(struct drm_crtc *crtc);
 struct drm_crtc_state *vmw_du_crtc_duplicate_state(struct drm_crtc *crtc);
 void vmw_du_crtc_destroy_state(struct drm_crtc *crtc,
@@ -28,7 +28,6 @@
 #include "vmwgfx_drv.h"
 #include <drm/ttm/ttm_bo_driver.h>
 #include <drm/ttm/ttm_placement.h>
-#include <drm/ttm/ttm_page_alloc.h>

 static const struct ttm_place vram_placement_flags = {
 	.fpfn = 0,

@@ -576,30 +575,11 @@ static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
 static int vmw_ttm_populate(struct ttm_bo_device *bdev,
 			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
 {
-	struct vmw_ttm_tt *vmw_tt =
-		container_of(ttm, struct vmw_ttm_tt, dma_ttm);
-	struct vmw_private *dev_priv = vmw_tt->dev_priv;
-	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
-	int ret;
-
 	/* TODO: maybe completely drop this ? */
 	if (ttm_tt_is_populated(ttm))
 		return 0;

-	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
-		size_t size =
-			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
-		ret = ttm_mem_global_alloc(glob, size, ctx);
-		if (unlikely(ret != 0))
-			return ret;
-
-		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
-				       ctx);
-		if (unlikely(ret != 0))
-			ttm_mem_global_free(glob, size);
-	} else
-		ret = ttm_pool_populate(ttm, ctx);
-
-	return ret;
+	return ttm_pool_alloc(&bdev->pool, ttm, ctx);
 }

 static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,

@@ -607,9 +587,6 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
 {
 	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
 						 dma_ttm);
-	struct vmw_private *dev_priv = vmw_tt->dev_priv;
-	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
-

 	if (vmw_tt->mob) {
 		vmw_mob_destroy(vmw_tt->mob);

@@ -617,14 +594,7 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev,
 	}

 	vmw_ttm_unmap_dma(vmw_tt);
-	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
-		size_t size =
-			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
-
-		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
-		ttm_mem_global_free(glob, size);
-	} else
-		ttm_pool_unpopulate(ttm);
+	ttm_pool_free(&bdev->pool, ttm);
 }

 static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo,
@@ -28,7 +28,6 @@
 #include <linux/dmaengine.h>
 #include <linux/module.h>
 #include <linux/of.h>
-#include <linux/of_dma.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/spinlock.h>

@@ -1316,8 +1315,7 @@ static int zynqmp_disp_layer_request_dma(struct zynqmp_disp *disp,

 		snprintf(dma_channel_name, sizeof(dma_channel_name),
 			 "%s%u", dma_names[layer->id], i);
-		dma->chan = of_dma_request_slave_channel(disp->dev->of_node,
-							 dma_channel_name);
+		dma->chan = dma_request_chan(disp->dev, dma_channel_name);
 		if (IS_ERR(dma->chan)) {
 			dev_err(disp->dev, "failed to request dma channel\n");
 			ret = PTR_ERR(dma->chan);

@@ -1506,21 +1504,21 @@ zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc,
 }

 static int zynqmp_disp_crtc_atomic_check(struct drm_crtc *crtc,
-					 struct drm_crtc_state *state)
+					 struct drm_atomic_state *state)
 {
-	return drm_atomic_add_affected_planes(state->state, crtc);
+	return drm_atomic_add_affected_planes(state, crtc);
 }

 static void
 zynqmp_disp_crtc_atomic_begin(struct drm_crtc *crtc,
-			      struct drm_crtc_state *old_crtc_state)
+			      struct drm_atomic_state *state)
 {
 	drm_crtc_vblank_on(crtc);
 }

 static void
 zynqmp_disp_crtc_atomic_flush(struct drm_crtc *crtc,
-			      struct drm_crtc_state *old_crtc_state)
+			      struct drm_atomic_state *state)
 {
 	if (crtc->state->event) {
 		struct drm_pending_vblank_event *event;
@ -473,7 +473,7 @@ static void zx_crtc_atomic_disable(struct drm_crtc *crtc,
 }
 
 static void zx_crtc_atomic_flush(struct drm_crtc *crtc,
-                                 struct drm_crtc_state *old_state)
+                                 struct drm_atomic_state *state)
 {
         struct drm_pending_vblank_event *event = crtc->state->event;
 
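Both conversions above follow the same recipe for the new CRTC hook signatures: the hook now receives the full atomic commit, and any per-CRTC state it still needs is looked up from there. A minimal sketch, assuming a hypothetical driver that wants both old and new CRTC state in its flush hook:

/* Sketch only: deriving per-CRTC state from the full atomic state. */
static void foo_crtc_atomic_flush(struct drm_crtc *crtc,
                                  struct drm_atomic_state *state)
{
        struct drm_crtc_state *old_crtc_state =
                drm_atomic_get_old_crtc_state(state, crtc);
        struct drm_crtc_state *new_crtc_state =
                drm_atomic_get_new_crtc_state(state, crtc);

        /*
         * ... program the hardware from new_crtc_state, comparing
         * against old_crtc_state where needed ...
         */
}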
@ -417,7 +417,13 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
                         << ARM_LPAE_PTE_ATTRINDX_SHIFT);
         }
 
-        if (prot & IOMMU_CACHE)
+        /*
+         * Also Mali has its own notions of shareability wherein its Inner
+         * domain covers the cores within the GPU, and its Outer domain is
+         * "outside the GPU" (i.e. either the Inner or System domain in CPU
+         * terms, depending on coherency).
+         */
+        if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
                 pte |= ARM_LPAE_PTE_SH_IS;
         else
                 pte |= ARM_LPAE_PTE_SH_OS;
@ -1021,6 +1027,9 @@ arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
         cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
                                           ARM_MALI_LPAE_TTBR_READ_INNER |
                                           ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
+        if (cfg->coherent_walk)
+                cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;
+
         return &data->iop;
 
 out_free_data:
@ -502,7 +502,7 @@ sti_select_fbfont(struct sti_cooked_rom *cooked_rom, const char *fbfont_name)
         if (!fbfont)
                 return NULL;
 
-        pr_info("STI selected %dx%d framebuffer font %s for sticon\n",
+        pr_info("STI selected %ux%u framebuffer font %s for sticon\n",
                 fbfont->width, fbfont->height, fbfont->name);
 
         bpc = ((fbfont->width+7)/8) * fbfont->height;
@ -240,14 +240,6 @@ static int *MV300_reg = MV300_reg_8bit;
 
 static int inverse;
 
-extern int fontheight_8x8;
-extern int fontwidth_8x8;
-extern unsigned char fontdata_8x8[];
-
-extern int fontheight_8x16;
-extern int fontwidth_8x16;
-extern unsigned char fontdata_8x16[];
-
 /*
  * struct fb_ops {
  *  * open/release and usage marking
@ -16,7 +16,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
-#include <linux/fbcon.h>
 #include <linux/gpio.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/ioctl.h>
@ -221,14 +221,18 @@ EXPORT_SYMBOL(hdmi_avi_infoframe_pack);
 int hdmi_spd_infoframe_init(struct hdmi_spd_infoframe *frame,
                             const char *vendor, const char *product)
 {
+        size_t len;
+
         memset(frame, 0, sizeof(*frame));
 
         frame->type = HDMI_INFOFRAME_TYPE_SPD;
         frame->version = 1;
         frame->length = HDMI_SPD_INFOFRAME_SIZE;
 
-        strncpy(frame->vendor, vendor, sizeof(frame->vendor));
-        strncpy(frame->product, product, sizeof(frame->product));
+        len = strlen(vendor);
+        memcpy(frame->vendor, vendor, min(len, sizeof(frame->vendor)));
+        len = strlen(product);
+        memcpy(frame->product, product, min(len, sizeof(frame->product)));
 
         return 0;
 }
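The strncpy() calls are replaced because the SPD vendor/product fields are fixed-size and need not be NUL-terminated: bounding strncpy() by exactly the field size trips gcc's -Wstringop-truncation and implies termination semantics the format does not have. The memcpy()+min() form copies at most the field size and relies on the earlier memset() for zero padding. A standalone sketch of the same pattern (helper name invented):

#include <string.h>

/* Copy src into a fixed-size field that need not be NUL-terminated. */
static void copy_bounded(char *dst, size_t dst_size, const char *src)
{
        size_t len = strlen(src);

        memset(dst, 0, dst_size);       /* zero-pad, as the infoframe code does */
        memcpy(dst, src, len < dst_size ? len : dst_size);
}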
@ -308,7 +308,6 @@ struct __drm_private_objs_state {
  * struct drm_atomic_state - the global state object for atomic updates
  * @ref: count of all references to this state (will not be freed until zero)
  * @dev: parent DRM device
- * @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
  * @async_update: hint for asynchronous plane update
  * @planes: pointer to array of structures with per-plane data
  * @crtcs: pointer to array of CRTC pointers
@ -336,6 +335,17 @@ struct drm_atomic_state {
          * drm_atomic_crtc_needs_modeset().
          */
         bool allow_modeset : 1;
+        /**
+         * @legacy_cursor_update:
+         *
+         * Hint to enforce legacy cursor IOCTL semantics.
+         *
+         * WARNING: This is thoroughly broken and pretty much impossible to
+         * implement correctly. Drivers must ignore this and should instead
+         * implement &drm_plane_helper_funcs.atomic_async_check and
+         * &drm_plane_helper_funcs.atomic_async_commit hooks. New users of this
+         * flag are not allowed.
+         */
         bool legacy_cursor_update : 1;
         bool async_update : 1;
         /**
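For drivers still consulting legacy_cursor_update, the replacement named in the new kernel-doc is the async plane-update path (the actual vtable member is .atomic_async_update, even though the comment cross-references "atomic_async_commit"). A minimal sketch of wiring it up for a cursor plane, with hypothetical foo_ names and the hook signatures as of this kernel version:

/* Sketch only: async cursor-update hooks instead of legacy_cursor_update. */
static int foo_cursor_async_check(struct drm_plane *plane,
                                  struct drm_plane_state *state)
{
        /* Accept only in-place updates, e.g. reject size/format changes. */
        return 0;
}

static void foo_cursor_async_update(struct drm_plane *plane,
                                    struct drm_plane_state *new_state)
{
        /* Apply the new cursor position/image to the hardware immediately. */
}

static const struct drm_plane_helper_funcs foo_cursor_helper_funcs = {
        .atomic_async_check = foo_cursor_async_check,
        .atomic_async_update = foo_cursor_async_update,
};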
@ -336,8 +336,7 @@ struct drm_crtc_helper_funcs {
  *
  * This function is called in the check phase of an atomic update. The
  * driver is not allowed to change anything outside of the free-standing
- * state objects passed-in or assembled in the overall &drm_atomic_state
- * update tracking structure.
+ * state object passed-in.
  *
  * Also beware that userspace can request its own custom modes, neither
  * core nor helpers filter modes to the list of probe modes reported by
@ -353,7 +352,7 @@ struct drm_crtc_helper_funcs {
  * deadlock.
  */
 int (*atomic_check)(struct drm_crtc *crtc,
-                    struct drm_crtc_state *state);
+                    struct drm_atomic_state *state);
 
 /**
  * @atomic_begin:
@ -374,7 +373,7 @@ struct drm_crtc_helper_funcs {
  * transitional plane helpers, but it is optional.
  */
 void (*atomic_begin)(struct drm_crtc *crtc,
-                     struct drm_crtc_state *old_crtc_state);
+                     struct drm_atomic_state *state);
 /**
  * @atomic_flush:
  *
@ -398,7 +397,7 @@ struct drm_crtc_helper_funcs {
  * transitional plane helpers, but it is optional.
  */
 void (*atomic_flush)(struct drm_crtc *crtc,
-                     struct drm_crtc_state *old_crtc_state);
+                     struct drm_atomic_state *state);
 
 /**
  * @atomic_enable:
@ -195,8 +195,12 @@ struct ttm_bo_kmap_obj {
  *
  * @interruptible: Sleep interruptible if sleeping.
  * @no_wait_gpu: Return immediately if the GPU is busy.
+ * @gfp_retry_mayfail: Set the __GFP_RETRY_MAYFAIL when allocation pages.
+ * @allow_res_evict: Allow eviction of reserved BOs. Can be used when multiple
+ * BOs share the same reservation object.
+ * @force_alloc: Don't check the memory account during suspend or CPU page
+ * faults. Should only be used by TTM internally.
  * @resv: Reservation object to allow reserved evictions with.
- * @flags: Including the following flags
  *
  * Context for TTM operations like changing buffer placement or general memory
  * allocation.
@ -204,16 +208,13 @@ struct ttm_bo_kmap_obj {
 struct ttm_operation_ctx {
         bool interruptible;
         bool no_wait_gpu;
+        bool gfp_retry_mayfail;
+        bool allow_res_evict;
+        bool force_alloc;
         struct dma_resv *resv;
         uint64_t bytes_moved;
-        uint32_t flags;
 };
 
-/* Allow eviction of reserved BOs */
-#define TTM_OPT_FLAG_ALLOW_RES_EVICT        0x1
-/* when serving page fault or suspend, allow alloc anyway */
-#define TTM_OPT_FLAG_FORCE_ALLOC            0x2
-
 /**
  * ttm_bo_get - reference a struct ttm_buffer_object
  *
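With the flags word gone, callers spell out the behaviour as booleans when they build the context. An illustrative initialization (the field values and the resv source are made up for the example):

/* Sketch only: an eviction path that may evict co-reserved BOs. */
struct ttm_operation_ctx ctx = {
        .interruptible = true,
        .no_wait_gpu = false,
        .allow_res_evict = true,        /* was TTM_OPT_FLAG_ALLOW_RES_EVICT */
        .resv = bo->base.resv,          /* shared reservation object */
};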
@ -42,6 +42,7 @@
 #include "ttm_module.h"
 #include "ttm_placement.h"
 #include "ttm_tt.h"
+#include "ttm_pool.h"
 
 /**
  * struct ttm_bo_driver
@ -275,7 +276,6 @@ extern struct ttm_bo_global {
  * @dev_mapping: A pointer to the struct address_space representing the
  * device address space.
  * @wq: Work queue structure for the delayed delete workqueue.
- * @no_retry: Don't retry allocation if it fails
  *
  */
 
@ -295,6 +295,7 @@ struct ttm_bo_device {
          * Protected by internal locks.
          */
         struct drm_vma_offset_manager *vma_manager;
+        struct ttm_pool pool;
 
         /*
          * Protected by the global:lru lock.
@ -312,10 +313,6 @@ struct ttm_bo_device {
          */
 
         struct delayed_work wq;
-
-        bool need_dma32;
-
-        bool no_retry;
 };
 
 static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev,
@ -395,11 +392,11 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
  * @bdev: A pointer to a struct ttm_bo_device to initialize.
  * @glob: A pointer to an initialized struct ttm_bo_global.
  * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
+ * @dev: The core kernel device pointer for DMA mappings and allocations.
  * @mapping: The address space to use for this bo.
  * @vma_manager: A pointer to a vma manager.
  * @file_page_offset: Offset into the device address space that is available
  * for buffer data. This ensures compatibility with other users of the
  * address space.
+ * @use_dma_alloc: If coherent DMA allocation API should be used.
+ * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
  *
  * Initializes a struct ttm_bo_device:
  * Returns:
@ -407,9 +404,10 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
  */
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
                        struct ttm_bo_driver *driver,
+                       struct device *dev,
                        struct address_space *mapping,
                        struct drm_vma_offset_manager *vma_manager,
-                       bool need_dma32);
+                       bool use_dma_alloc, bool use_dma32);
 
 /**
  * ttm_bo_unmap_virtual
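Call sites gain the struct device pointer and trade need_dma32 for the two pool knobs. An illustrative updated call (names hypothetical, argument order per the declaration above):

/* Sketch only: device initialization against the new signature. */
ret = ttm_bo_device_init(&foo->bdev, &foo_bo_driver, foo->dev,
                         mapping, vma_manager,
                         true /* use_dma_alloc */, false /* use_dma32 */);
if (ret)
        return ret;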
@ -25,6 +25,8 @@
 #ifndef _TTM_CACHING_H_
 #define _TTM_CACHING_H_
 
+#define TTM_NUM_CACHING_TYPES        3
+
 enum ttm_caching {
         ttm_uncached,
         ttm_write_combined,
@ -1,122 +0,0 @@
-/*
- * Copyright (c) Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sub license,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors: Dave Airlie <airlied@redhat.com>
- *          Jerome Glisse <jglisse@redhat.com>
- */
-#ifndef TTM_PAGE_ALLOC
-#define TTM_PAGE_ALLOC
-
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_memory.h>
-
-struct device;
-
-/**
- * Initialize pool allocator.
- */
-int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
-/**
- * Free pool allocator.
- */
-void ttm_page_alloc_fini(void);
-
-/**
- * ttm_pool_populate:
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Add backing pages to all of @ttm
- */
-int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
-
-/**
- * ttm_pool_unpopulate:
- *
- * @ttm: The struct ttm_tt which to free backing pages.
- *
- * Free all pages of @ttm
- */
-void ttm_pool_unpopulate(struct ttm_tt *ttm);
-
-/**
- * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
- */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt,
-                               struct ttm_operation_ctx *ctx);
-
-/**
- * Unpopulates and DMA unmaps pages as part of a
- * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
-
-#if defined(CONFIG_DRM_TTM_DMA_PAGE_POOL)
-/**
- * Initialize pool allocator.
- */
-int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
-
-/**
- * Free pool allocator.
- */
-void ttm_dma_page_alloc_fini(void);
-
-/**
- * Output the state of pools to debugfs file
- */
-int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
-
-int ttm_dma_populate(struct ttm_tt *ttm_dma, struct device *dev,
-                     struct ttm_operation_ctx *ctx);
-void ttm_dma_unpopulate(struct ttm_tt *ttm_dma, struct device *dev);
-
-#else
-static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
-                                          unsigned max_pages)
-{
-        return -ENODEV;
-}
-
-static inline void ttm_dma_page_alloc_fini(void) { return; }
-
-static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
-{
-        return 0;
-}
-static inline int ttm_dma_populate(struct ttm_tt *ttm_dma,
-                                   struct device *dev,
-                                   struct ttm_operation_ctx *ctx)
-{
-        return -ENOMEM;
-}
-static inline void ttm_dma_unpopulate(struct ttm_tt *ttm_dma,
-                                      struct device *dev)
-{
-}
-#endif
-
-#endif
include/drm/ttm/ttm_pool.h (new file, 91 lines)
@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_PAGE_POOL_H_
+#define _TTM_PAGE_POOL_H_
+
+#include <linux/mmzone.h>
+#include <linux/llist.h>
+#include <linux/spinlock.h>
+#include <drm/ttm/ttm_caching.h>
+
+struct device;
+struct ttm_tt;
+struct ttm_pool;
+struct ttm_operation_ctx;
+
+/**
+ * ttm_pool_type - Pool for a certain memory type
+ *
+ * @pool: the pool we belong to, might be NULL for the global ones
+ * @order: the allocation order our pages have
+ * @caching: the caching type our pages have
+ * @shrinker_list: our place on the global shrinker list
+ * @lock: protection of the page list
+ * @pages: the list of pages in the pool
+ */
+struct ttm_pool_type {
+        struct ttm_pool *pool;
+        unsigned int order;
+        enum ttm_caching caching;
+
+        struct list_head shrinker_list;
+
+        spinlock_t lock;
+        struct list_head pages;
+};
+
+/**
+ * ttm_pool - Pool for all caching and orders
+ *
+ * @use_dma_alloc: if coherent DMA allocations should be used
+ * @use_dma32: if GFP_DMA32 should be used
+ * @caching: pools for each caching/order
+ */
+struct ttm_pool {
+        struct device *dev;
+
+        bool use_dma_alloc;
+        bool use_dma32;
+
+        struct {
+                struct ttm_pool_type orders[MAX_ORDER];
+        } caching[TTM_NUM_CACHING_TYPES];
+};
+
+int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
+                   struct ttm_operation_ctx *ctx);
+void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
+
+void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
+                   bool use_dma_alloc, bool use_dma32);
+void ttm_pool_fini(struct ttm_pool *pool);
+
+int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
+
+int ttm_pool_mgr_init(unsigned long num_pages);
+void ttm_pool_mgr_fini(void);
+
+#endif
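Taken together, the new header gives a device-private pool a small lifecycle: init once per device, alloc/free per ttm_tt, fini on teardown. A minimal sketch under those assumptions (dev, tt and error handling are stand-ins):

/* Sketch only: lifecycle of a device-private TTM pool. */
struct ttm_pool pool;
struct ttm_operation_ctx ctx = { .interruptible = true };
int err;

ttm_pool_init(&pool, dev, true /* use_dma_alloc */, false /* use_dma32 */);

err = ttm_pool_alloc(&pool, tt, &ctx);  /* back a ttm_tt with pages */
if (!err)
        ttm_pool_free(&pool, tt);       /* pages return to the pool */

ttm_pool_fini(&pool);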
@ -37,7 +37,6 @@ struct ttm_operation_ctx;
 
 #define TTM_PAGE_FLAG_SWAPPED        (1 << 4)
 #define TTM_PAGE_FLAG_ZERO_ALLOC     (1 << 6)
-#define TTM_PAGE_FLAG_DMA32          (1 << 7)
 #define TTM_PAGE_FLAG_SG             (1 << 8)
 #define TTM_PAGE_FLAG_NO_RETRY       (1 << 9)
 
@ -66,7 +65,6 @@ struct ttm_tt {
         struct sg_table *sg;
         dma_addr_t *dma_address;
         struct file *swap_storage;
-        struct list_head pages_list;
         enum ttm_caching caching;
 };
 
@ -16,7 +16,7 @@
 struct font_desc {
         int idx;
         const char *name;
-        int width, height;
+        unsigned int width, height;
         const void *data;
         int pref;
 };
@ -18,12 +18,6 @@ struct scatterlist {
 #endif
 };
 
-/*
- * Since the above length field is an unsigned int, below we define the maximum
- * length in bytes that can be stored in one scatterlist entry.
- */
-#define SCATTERLIST_MAX_SEGMENT (UINT_MAX & PAGE_MASK)
-
 /*
  * These macros should be used after a dma_map_sg call has been done
  * to get bus addresses of each of the SG entries and their lengths.
@ -50,7 +50,7 @@ static void fail(struct test *test, struct sg_table *st, const char *cond)
 
 int main(void)
 {
-        const unsigned int sgmax = SCATTERLIST_MAX_SEGMENT;
+        const unsigned int sgmax = UINT_MAX;
         struct test *test, tests[] = {
                 { -EINVAL, 1, pfn(0), PAGE_SIZE, PAGE_SIZE + 1, 1 },
                 { -EINVAL, 1, pfn(0), PAGE_SIZE, 0, 1 },