drm/vmwgfx: Remove usage of MOBFMT_RANGE
Using MOBFMT_RANGE was a major performance win in the early days of guest backed objects, but that has long since changed. There is no longer a performance reason to use MOBFMT_RANGE: the device can and will still profit from the pages being contiguous, but marking them as MOBFMT_RANGE no longer matters. Benchmarks (e.g. Heaven, Valley) show that creating page tables for MOB memory is actually faster than using MOBFMT_RANGE.

Signed-off-by: Zack Rusin <zackr@vmware.com>
Reviewed-by: Martin Krastev <krastevm@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211206172620.3139754-12-zack@kde.org
commit 9ca476acd5 (parent bf625870b8)
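For context, the choice this patch deletes can be modeled with the minimal standalone sketch below. This is not driver code: pick_mob_format and the FMT_* constants are illustrative stand-ins for the driver's VMW_MOBFMT_PTDEPTH_0/SVGA3D_MOBFMT_RANGE values, and the selection logic mirrors the two call sites changed in the hunks that follow.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the SVGA3D_MOBFMT_* device constants. */
    enum mob_fmt { FMT_PTDEPTH_0, FMT_RANGE, FMT_PTDEPTH_N };

    /*
     * Pre-patch selection: a single page needs no page table, a fully
     * contiguous allocation (num_regions == 1) could be described to the
     * device as one base+length range, and everything else got a page
     * table. Post-patch the middle arm is gone, so multi-page MOBs always
     * get page tables, which benchmarks showed to be the faster path.
     */
    static enum mob_fmt pick_mob_format(unsigned long num_pages,
                                        unsigned long num_regions,
                                        bool range_fmt_available)
    {
            if (num_pages == 1)
                    return FMT_PTDEPTH_0;
            if (range_fmt_available && num_regions == 1)
                    return FMT_RANGE;
            return FMT_PTDEPTH_N;
    }

    int main(void)
    {
            /* 16 contiguous pages: RANGE before the patch, page tables after. */
            printf("pre-patch:  %d\n", pick_mob_format(16, 1, true));
            printf("post-patch: %d\n", pick_mob_format(16, 1, false));
            return 0;
    }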
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -333,7 +333,6 @@ struct vmw_sg_table {
 	struct page **pages;
 	const dma_addr_t *addrs;
 	struct sg_table *sgt;
-	unsigned long num_regions;
 	unsigned long num_pages;
 };
 
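The num_regions field can go from the structure because this patch removes its last users: its only readers were the two vsgt->num_regions == 1 checks in the next two hunks, and its only writer was the counting loop removed from vmw_ttm_map_dma() in the final hunk.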
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -146,9 +146,6 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
 	if (otable->size <= PAGE_SIZE) {
 		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
 		mob->pt_root_page = vmw_piter_dma_addr(&iter);
-	} else if (vsgt->num_regions == 1) {
-		mob->pt_level = SVGA3D_MOBFMT_RANGE;
-		mob->pt_root_page = vmw_piter_dma_addr(&iter);
 	} else {
 		ret = vmw_mob_pt_populate(dev_priv, mob);
 		if (unlikely(ret != 0))
@@ -623,9 +620,6 @@ int vmw_mob_bind(struct vmw_private *dev_priv,
 	if (likely(num_data_pages == 1)) {
 		mob->pt_level = VMW_MOBFMT_PTDEPTH_0;
 		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
-	} else if (vsgt->num_regions == 1) {
-		mob->pt_level = SVGA3D_MOBFMT_RANGE;
-		mob->pt_root_page = vmw_piter_dma_addr(&data_iter);
 	} else if (unlikely(mob->pt_bo == NULL)) {
 		ret = vmw_mob_pt_populate(dev_priv, mob);
 		if (unlikely(ret != 0))
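With both SVGA3D_MOBFMT_RANGE arms removed, any otable or MOB larger than a single page now takes the vmw_mob_pt_populate() path unconditionally, whether or not its backing pages happen to be contiguous. Per the commit message, the device still benefits from contiguous pages; only the explicit range format goes away.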
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -288,8 +288,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 {
 	struct vmw_private *dev_priv = vmw_tt->dev_priv;
 	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
-	struct vmw_piter iter;
-	dma_addr_t old;
 	int ret = 0;
 
 	if (vmw_tt->mapped)
@@ -321,16 +319,6 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
 		break;
 	}
 
-	old = ~((dma_addr_t) 0);
-	vmw_tt->vsgt.num_regions = 0;
-	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
-		dma_addr_t cur = vmw_piter_dma_addr(&iter);
-
-		if (cur != old + PAGE_SIZE)
-			vmw_tt->vsgt.num_regions++;
-		old = cur;
-	}
-
 	vmw_tt->mapped = true;
 	return 0;
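The loop removed above was the only writer of num_regions: it walked the DMA addresses of the backing pages and opened a new region whenever an address did not follow the previous one by exactly PAGE_SIZE. Here is a minimal standalone model of that counting (an address array stands in for the vmw_piter iterator; count_regions and the test values are illustrative, not driver code):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    typedef uint64_t dma_addr_t;

    /*
     * Count contiguous runs in a list of per-page DMA addresses, mirroring
     * the removed loop: a new region starts whenever the current address
     * is not exactly PAGE_SIZE past the previous one. Seeding old with ~0
     * guarantees the first page always opens a region.
     */
    static unsigned long count_regions(const dma_addr_t *addrs, size_t n)
    {
            dma_addr_t old = ~((dma_addr_t)0);
            unsigned long num_regions = 0;

            for (size_t i = 0; i < n; i++) {
                    if (addrs[i] != old + PAGE_SIZE)
                            num_regions++;
                    old = addrs[i];
            }
            return num_regions;
    }

    int main(void)
    {
            /* Two contiguous pages, a gap, then one more page: 2 regions. */
            dma_addr_t addrs[] = { 0x10000, 0x11000, 0x20000 };
            printf("%lu\n", count_regions(addrs, 3));
            return 0;
    }

After this patch the count is never consulted, so the driver can skip the extra pass over the page list entirely.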