drm/ttm: introduce callback for ttm_tt populate & unpopulate V4
Move the page allocation and freeing into driver callbacks and provide TTM helper functions for them (ttm_pool_populate / ttm_pool_unpopulate). The most intrusive change is that an object is now only ever fully populated; this simplifies code that was designed around populating pages one at a time from the page fault handler.

V2: rebase on top of the memory accounting overhaul.
V3: new rebase on top of more memory accounting changes.
V4: rebase on top of no memory accounting changes (where/when is my DeLorean when I need it?).

Signed-off-by: Jerome Glisse <jglisse@redhat.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Parent: 649bf3ca77
Commit: b1e5f17232
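In practice, a driver with no special allocation requirements simply points the two new ttm_bo_driver callbacks at the pool helpers exported by ttm_page_alloc.c, exactly as the nouveau, radeon and vmwgfx hunks below do. A minimal sketch, assuming a hypothetical "foo" driver (foo_ttm_tt_create and the elided members are placeholders, not part of this patch):

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

/* Driver-specific ttm_tt constructor; assumed to be defined elsewhere. */
static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page);

static struct ttm_bo_driver foo_bo_driver = {
	.ttm_tt_create = &foo_ttm_tt_create,
	/* New callbacks: allocate/free every backing page in one go. */
	.ttm_tt_populate = &ttm_pool_populate,
	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
	/* ... remaining ttm_bo_driver members unchanged ... */
};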
drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,6 +28,7 @@
  */
 
 #include "drmP.h"
+#include "ttm/ttm_page_alloc.h"
 
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
@@ -1050,6 +1051,8 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
 
 struct ttm_bo_driver nouveau_bo_driver = {
 	.ttm_tt_create = &nouveau_ttm_tt_create,
+	.ttm_tt_populate = &ttm_pool_populate,
+	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
 	.invalidate_caches = nouveau_bo_invalidate_caches,
 	.init_mem_type = nouveau_bo_init_mem_type,
 	.evict_flags = nouveau_bo_evict_flags,
drivers/gpu/drm/radeon/radeon_ttm.c
@@ -581,6 +581,8 @@ struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
 
 static struct ttm_bo_driver radeon_bo_driver = {
 	.ttm_tt_create = &radeon_ttm_tt_create,
+	.ttm_tt_populate = &ttm_pool_populate,
+	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
 	.invalidate_caches = &radeon_invalidate_caches,
 	.init_mem_type = &radeon_init_mem_type,
 	.evict_flags = &radeon_evict_flags,
drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 				unsigned long page,
 				pgprot_t prot)
 {
-	struct page *d = ttm_tt_get_page(ttm, page);
+	struct page *d = ttm->pages[page];
 	void *dst;
 
 	if (!d)
@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 				unsigned long page,
 				pgprot_t prot)
 {
-	struct page *s = ttm_tt_get_page(ttm, page);
+	struct page *s = ttm->pages[page];
 	void *src;
 
 	if (!s)
@@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	if (old_iomap == NULL && ttm == NULL)
 		goto out2;
 
+	if (ttm->state == tt_unpopulated) {
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		if (ret)
+			goto out1;
+	}
+
 	add = 0;
 	dir = 1;
 
@@ -502,10 +508,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 {
 	struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
 	struct ttm_tt *ttm = bo->ttm;
-	struct page *d;
-	int i;
+	int ret;
 
 	BUG_ON(!ttm);
 
+	if (ttm->state == tt_unpopulated) {
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		if (ret)
+			return ret;
+	}
+
 	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
 		/*
 		 * We're mapping a single page, and the desired
@@ -513,18 +525,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 		 */
 
 		map->bo_kmap_type = ttm_bo_map_kmap;
-		map->page = ttm_tt_get_page(ttm, start_page);
+		map->page = ttm->pages[start_page];
 		map->virtual = kmap(map->page);
 	} else {
-		/*
-		 * Populate the part we're mapping;
-		 */
-		for (i = start_page; i < start_page + num_pages; ++i) {
-			d = ttm_tt_get_page(ttm, i);
-			if (!d)
-				return -ENOMEM;
-		}
-
 		/*
 		 * We need to use vmap to get the desired page protection
 		 * or to make the buffer object look contiguous.
drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -174,18 +174,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
 		    vm_get_page_prot(vma->vm_flags) :
 		    ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+
+		/* Allocate all page at once, most common usage */
+		if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+			retval = VM_FAULT_OOM;
+			goto out_io_unlock;
+		}
 	}
 
 	/*
 	 * Speculatively prefault a number of pages. Only error on
 	 * first page.
 	 */
 
 	for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
 		if (bo->mem.bus.is_iomem)
 			pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
 		else {
-			page = ttm_tt_get_page(ttm, page_offset);
+			page = ttm->pages[page_offset];
 			if (unlikely(!page && i == 0)) {
 				retval = VM_FAULT_OOM;
 				goto out_io_unlock;
drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -855,6 +855,63 @@ void ttm_page_alloc_fini(void)
 	_manager = NULL;
 }
 
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+	unsigned i;
+	int ret;
+
+	if (ttm->state != tt_unpopulated)
+		return 0;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		ret = ttm_get_pages(&ttm->pages[i], ttm->page_flags,
+				    ttm->caching_state, 1,
+				    &ttm->dma_address[i]);
+		if (ret != 0) {
+			ttm_pool_unpopulate(ttm);
+			return -ENOMEM;
+		}
+
+		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+						false, false);
+		if (unlikely(ret != 0)) {
+			ttm_pool_unpopulate(ttm);
+			return -ENOMEM;
+		}
+	}
+
+	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+		ret = ttm_tt_swapin(ttm);
+		if (unlikely(ret != 0)) {
+			ttm_pool_unpopulate(ttm);
+			return ret;
+		}
+	}
+
+	ttm->state = tt_unbound;
+	return 0;
+}
+EXPORT_SYMBOL(ttm_pool_populate);
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+	unsigned i;
+
+	for (i = 0; i < ttm->num_pages; ++i) {
+		if (ttm->pages[i]) {
+			ttm_mem_global_free_page(ttm->glob->mem_glob,
+						 ttm->pages[i]);
+			ttm_put_pages(&ttm->pages[i], 1,
+				      ttm->page_flags,
+				      ttm->caching_state,
+				      ttm->dma_address);
+		}
+	}
+	ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL(ttm_pool_unpopulate);
+
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 {
 	struct ttm_page_pool *p;
drivers/gpu/drm/ttm/ttm_tt.c
@@ -43,8 +43,6 @@
 #include "ttm/ttm_placement.h"
 #include "ttm/ttm_page_alloc.h"
 
-static int ttm_tt_swapin(struct ttm_tt *ttm);
-
 /**
  * Allocates storage for pointers to the pages that back the ttm.
  */
@@ -63,69 +61,6 @@ static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
 	ttm->dma_address = NULL;
 }
 
-static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
-	struct page *p;
-	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-	int ret;
-
-	if (NULL == (p = ttm->pages[index])) {
-
-		ret = ttm_get_pages(&p, ttm->page_flags, ttm->caching_state, 1,
-				    &ttm->dma_address[index]);
-		if (ret != 0)
-			return NULL;
-
-		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
-		if (unlikely(ret != 0))
-			goto out_err;
-
-		ttm->pages[index] = p;
-	}
-	return p;
-out_err:
-	ttm_put_pages(&p, 1, ttm->page_flags,
-		      ttm->caching_state, &ttm->dma_address[index]);
-	return NULL;
-}
-
-struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
-	int ret;
-
-	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-		ret = ttm_tt_swapin(ttm);
-		if (unlikely(ret != 0))
-			return NULL;
-	}
-	return __ttm_tt_get_page(ttm, index);
-}
-
-int ttm_tt_populate(struct ttm_tt *ttm)
-{
-	struct page *page;
-	unsigned long i;
-	int ret;
-
-	if (ttm->state != tt_unpopulated)
-		return 0;
-
-	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
-		ret = ttm_tt_swapin(ttm);
-		if (unlikely(ret != 0))
-			return ret;
-	}
-
-	for (i = 0; i < ttm->num_pages; ++i) {
-		page = __ttm_tt_get_page(ttm, i);
-		if (!page)
-			return -ENOMEM;
-	}
-	ttm->state = tt_unbound;
-	return 0;
-}
-EXPORT_SYMBOL(ttm_tt_populate);
-
 #ifdef CONFIG_X86
 static inline int ttm_tt_set_page_caching(struct page *p,
 					  enum ttm_caching_state c_old,
@@ -227,21 +162,6 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
 }
 EXPORT_SYMBOL(ttm_tt_set_placement_caching);
 
-static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
-{
-	unsigned i;
-
-	for (i = 0; i < ttm->num_pages; ++i) {
-		if (ttm->pages[i]) {
-			ttm_mem_global_free_page(ttm->glob->mem_glob,
-						 ttm->pages[i]);
-			ttm_put_pages(&ttm->pages[i], 1, ttm->page_flags,
-				      ttm->caching_state, &ttm->dma_address[i]);
-		}
-	}
-	ttm->state = tt_unpopulated;
-}
-
 void ttm_tt_destroy(struct ttm_tt *ttm)
 {
 	if (unlikely(ttm == NULL))
@@ -252,7 +172,7 @@ void ttm_tt_destroy(struct ttm_tt *ttm)
 	}
 
 	if (likely(ttm->pages != NULL)) {
-		ttm_tt_free_alloced_pages(ttm);
+		ttm->bdev->driver->ttm_tt_unpopulate(ttm);
 		ttm_tt_free_page_directory(ttm);
 	}
 
@@ -307,7 +227,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 	if (ttm->state == tt_bound)
 		return 0;
 
-	ret = ttm_tt_populate(ttm);
+	ret = ttm->bdev->driver->ttm_tt_populate(ttm);
 	if (ret)
 		return ret;
 
@@ -321,7 +241,7 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
 }
 EXPORT_SYMBOL(ttm_tt_bind);
 
-static int ttm_tt_swapin(struct ttm_tt *ttm)
+int ttm_tt_swapin(struct ttm_tt *ttm)
 {
 	struct address_space *swap_space;
 	struct file *swap_storage;
@@ -343,7 +263,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 			ret = PTR_ERR(from_page);
 			goto out_err;
 		}
-		to_page = __ttm_tt_get_page(ttm, i);
+		to_page = ttm->pages[i];
 		if (unlikely(to_page == NULL))
 			goto out_err;
 
@@ -364,7 +284,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
 
 	return 0;
 out_err:
-	ttm_tt_free_alloced_pages(ttm);
 	return ret;
 }
 
@@ -416,7 +335,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 		page_cache_release(to_page);
 	}
 
-	ttm_tt_free_alloced_pages(ttm);
+	ttm->bdev->driver->ttm_tt_unpopulate(ttm);
 	ttm->swap_storage = swap_storage;
 	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
 	if (persistent_swap_storage)
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -28,6 +28,7 @@
 #include "vmwgfx_drv.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
 
 static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
 	TTM_PL_FLAG_CACHED;
@@ -334,6 +335,8 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
 
 struct ttm_bo_driver vmw_bo_driver = {
 	.ttm_tt_create = &vmw_ttm_tt_create,
+	.ttm_tt_populate = &ttm_pool_populate,
+	.ttm_tt_unpopulate = &ttm_pool_unpopulate,
 	.invalidate_caches = vmw_invalidate_caches,
 	.init_mem_type = vmw_init_mem_type,
 	.evict_flags = vmw_evict_flags,
include/drm/ttm/ttm_bo_driver.h
@@ -318,6 +318,26 @@ struct ttm_bo_driver {
 					uint32_t page_flags,
 					struct page *dummy_read_page);
 
+	/**
+	 * ttm_tt_populate
+	 *
+	 * @ttm: The struct ttm_tt to contain the backing pages.
+	 *
+	 * Allocate all backing pages
+	 * Returns:
+	 * -ENOMEM: Out of memory.
+	 */
+	int (*ttm_tt_populate)(struct ttm_tt *ttm);
+
+	/**
+	 * ttm_tt_unpopulate
+	 *
+	 * @ttm: The struct ttm_tt to contain the backing pages.
+	 *
+	 * Free all backing page
+	 */
+	void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
+
 	/**
 	 * struct ttm_bo_driver member invalidate_caches
 	 *
@@ -584,15 +604,6 @@ extern int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
 */
 extern int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem);
 
-/**
- * ttm_tt_populate:
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Add backing pages to all of @ttm
- */
-extern int ttm_tt_populate(struct ttm_tt *ttm);
-
 /**
  * ttm_ttm_destroy:
  *
@@ -612,19 +623,13 @@ extern void ttm_tt_destroy(struct ttm_tt *ttm);
 extern void ttm_tt_unbind(struct ttm_tt *ttm);
 
 /**
- * ttm_ttm_destroy:
+ * ttm_tt_swapin:
  *
  * @ttm: The struct ttm_tt.
- * @index: Index of the desired page.
  *
- * Return a pointer to the struct page backing @ttm at page
- * index @index. If the page is unpopulated, one will be allocated to
- * populate that index.
- *
- * Returns:
- * NULL on OOM.
+ * Swap in a previously swap out ttm_tt.
  */
-extern struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index);
+extern int ttm_tt_swapin(struct ttm_tt *ttm);
 
 /**
  * ttm_tt_cache_flush:
include/drm/ttm/ttm_page_alloc.h
@@ -67,6 +67,24 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages);
 */
void ttm_page_alloc_fini(void);
 
+/**
+ * ttm_pool_populate:
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Add backing pages to all of @ttm
+ */
+extern int ttm_pool_populate(struct ttm_tt *ttm);
+
+/**
+ * ttm_pool_unpopulate:
+ *
+ * @ttm: The struct ttm_tt which to free backing pages.
+ *
+ * Free all pages of @ttm
+ */
+extern void ttm_pool_unpopulate(struct ttm_tt *ttm);
+
 /**
  * Output the state of pools to debugfs file
  */
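For reference, the caller-side pattern this patch introduces in TTM core (see ttm_bo_move_memcpy and ttm_bo_kmap_ttm above) boils down to the sketch below; example_touch_pages is a hypothetical helper, not part of the patch:

/*
 * Before dereferencing ttm->pages, make sure the driver has fully
 * populated the ttm_tt; afterwards every entry of ttm->pages is valid.
 */
static int example_touch_pages(struct ttm_tt *ttm)
{
	int ret;

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}
	return 0;
}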