/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

#include "drm_legacy.h"
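
/*
 * TTM memory-type manager backend for VRAM.  Buffer placements are
 * serviced by the nvkm fb subdev's RAM allocator rather than TTM's
 * generic range manager.
 */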
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	man->priv = pfb;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	man->priv = NULL;
	return 0;
}
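
/*
 * Unmap and release any GPU virtual address ranges still held by a
 * memory node before its backing storage is freed.
 */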
static inline void
nvkm_mem_node_cleanup(struct nvkm_mem *node)
{
	if (node->vma[0].node) {
		nvkm_vm_unmap(&node->vma[0]);
		nvkm_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nvkm_vm_unmap(&node->vma[1]);
		nvkm_vm_put(&node->vma[1]);
	}
}
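
/* Return a VRAM allocation to the nvkm RAM allocator. */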
static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	nvkm_mem_node_cleanup(mem->mm_node);
	pfb->ram->put(pfb, (struct nvkm_mem **)&mem->mm_node);
}
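
/*
 * Allocate VRAM for a buffer object.  NONCONTIG placements may be
 * satisfied in chunks down to one large page; bits taken from the
 * tile flags select the memory type.  -ENOSPC is reported as success
 * with a NULL mm_node, which tells TTM to retry after eviction.
 */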
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_fb *pfb = nvxx_fb(&drm->device);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = pfb->ram->get(pfb, mem->num_pages << PAGE_SHIFT,
			    mem->page_alignment << PAGE_SHIFT, size_nc,
			    (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}
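
/* Dump the VRAM mm's node list and totals to the kernel log. */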
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nvkm_fb *pfb = man->priv;
	struct nvkm_mm *mm = &pfb->vram;
	struct nvkm_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&nv_subdev(pfb)->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&nv_subdev(pfb)->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};
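
/*
 * TTM memory-type manager backend for the GART aperture on chipsets
 * with a paging MMU.  Nothing is reserved up front: nodes only carry
 * a memory type, and mem->start stays zero, so GPU virtual addresses
 * are assigned elsewhere.
 */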
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nvkm_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}
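
/*
 * Create a memory node for a GART placement.  The memtype mask taken
 * from the bo's tile flags differs between the Tesla (0x7f00) and
 * Fermi/Kepler (0xff00) families.
 */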
static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
			 struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nvkm_mem *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	switch (drm->device.info.family) {
	case NV_DEVICE_INFO_V0_TESLA:
		if (drm->device.info.chipset != 0x50)
			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
		break;
	case NV_DEVICE_INFO_V0_FERMI:
	case NV_DEVICE_INFO_V0_KEPLER:
		node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
		break;
	default:
		break;
	}

	mem->mm_node = node;
	mem->start   = 0;
	return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};
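
/*
 * TTM memory-type manager backend for GART on NV04-class hardware,
 * where a single VM is owned by the nv04 mmu; address space for each
 * node is taken directly from that VM.
 */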
/*XXX*/
#include <subdev/mmu/nv04.h>

static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
	struct nv04_mmu_priv *priv = (void *)mmu;
	struct nvkm_vm *vm = NULL;
	nvkm_vm_ref(priv->vm, &vm, NULL);
	man->priv = vm;
	return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nvkm_vm *vm = man->priv;
	nvkm_vm_ref(NULL, &vm, NULL);
	man->priv = NULL;
	return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node = mem->mm_node;
	if (node->vma[0].node)
		nvkm_vm_put(&node->vma[0]);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
		      struct ttm_mem_reg *mem)
{
	struct nvkm_mem *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	node->page_shift = 12;

	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
			  NV_MEM_ACCESS_RW, &node->vma[0]);
	if (ret) {
		kfree(node);
		return ret;
	}

	mem->mm_node = node;
	mem->start   = node->vma[0].offset >> PAGE_SHIFT;
	return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
	nv04_gart_manager_init,
	nv04_gart_manager_fini,
	nv04_gart_manager_new,
	nv04_gart_manager_del,
	nv04_gart_manager_debug
};
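
/*
 * mmap() entry point for the DRM device node: offsets below
 * DRM_FILE_PAGE_OFFSET are legacy maps, everything above is a TTM
 * buffer object.
 */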
2009-12-11 19:24:15 +10:00
int
nouveau_ttm_mmap ( struct file * filp , struct vm_area_struct * vma )
{
struct drm_file * file_priv = filp - > private_data ;
2012-07-31 16:16:21 +10:00
struct nouveau_drm * drm = nouveau_drm ( file_priv - > minor - > dev ) ;
2009-12-11 19:24:15 +10:00
if ( unlikely ( vma - > vm_pgoff < DRM_FILE_PAGE_OFFSET ) )
2014-12-16 16:33:09 +10:00
return drm_legacy_mmap ( filp , vma ) ;
2009-12-11 19:24:15 +10:00
2012-07-20 08:17:34 +10:00
return ttm_bo_mmap ( filp , vma , & drm - > ttm . bdev ) ;
2009-12-11 19:24:15 +10:00
}
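
/*
 * Wrappers around the refcounted global TTM state (memory accounting
 * and the BO subsystem) shared between all TTM-based drivers.
 */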
static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
	struct drm_global_reference *global_ref;
	int ret;

	global_ref = &drm->ttm.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &nouveau_ttm_mem_global_init;
	global_ref->release = &nouveau_ttm_mem_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM memory accounting\n");
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	drm->ttm.bo_global_ref.mem_glob = global_ref->object;
	global_ref = &drm->ttm.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;

	ret = drm_global_item_ref(global_ref);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed setting up TTM BO subsystem\n");
		drm_global_item_unref(&drm->ttm.mem_global_ref);
		drm->ttm.mem_global_ref.release = NULL;
		return ret;
	}

	return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
	if (drm->ttm.mem_global_ref.release == NULL)
		return;

	drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
	drm_global_item_unref(&drm->ttm.mem_global_ref);
	drm->ttm.mem_global_ref.release = NULL;
}
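
/*
 * Bring up TTM for the device: choose a DMA mask from the mmu's
 * addressing limits (clamped to 32 bits when AGP is enabled or the
 * PCI layer refuses a larger mask), initialise the BO device, then
 * create the VRAM and GART regions and mark VRAM write-combined.
 */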
int
nouveau_ttm_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	u32 bits;
	int ret;

	bits = nvxx_mmu(&drm->device)->dma_bits;
	if (nv_device_is_pci(nvxx_device(&drm->device))) {
		if (drm->agp.stat == ENABLED ||
		    !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
			bits = 32;

		ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
		if (ret)
			return ret;

		ret = pci_set_consistent_dma_mask(dev->pdev,
						  DMA_BIT_MASK(bits));
		if (ret)
			pci_set_consistent_dma_mask(dev->pdev,
						    DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(drm);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&drm->ttm.bdev,
				 drm->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver,
				 dev->anon_inode->i_mapping,
				 DRM_FILE_PAGE_OFFSET,
				 bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
		return ret;
	}

	/* VRAM init */
	drm->gem.vram_available = drm->device.info.ram_user;

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
			     drm->gem.vram_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
		return ret;
	}

	drm->ttm.mtrr = arch_phys_wc_add(nv_device_resource_start(nvxx_device(&drm->device), 1),
					 nv_device_resource_len(nvxx_device(&drm->device), 1));

	/* GART init */
	if (drm->agp.stat != ENABLED) {
		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
	} else {
		drm->gem.gart_available = drm->agp.size;
	}

	ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
			     drm->gem.gart_available >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(drm, "GART mm init failed, %d\n", ret);
		return ret;
	}

	NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
	NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
	return 0;
}
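
/* Tear down the TTM regions and state created by nouveau_ttm_init(). */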
void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
	mutex_lock(&drm->dev->struct_mutex);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
	mutex_unlock(&drm->dev->struct_mutex);

	ttm_bo_device_release(&drm->ttm.bdev);

	nouveau_ttm_global_release(drm);

	arch_phys_wc_del(drm->ttm.mtrr);
	drm->ttm.mtrr = 0;
}