/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"

#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

/*
 * NV10-NV40 tiling helpers
 */
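
/* Reprogram a tile region with a new address/size/pitch: drop the old
 * fence, rebuild PFB's bookkeeping for the region, then idle the card with
 * the FIFO held off and latch the new setup into PFB and PGRAPH. */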
static void
nv10_mem_update_tile_region(struct drm_device *dev,
			    struct nouveau_tile_reg *tile, uint32_t addr,
			    uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	int i = tile - dev_priv->tile.reg;
	unsigned long save;

	nouveau_fence_unref(&tile->fence);

	if (tile->pitch)
		pfb->free_tile_region(dev, i);

	if (pitch)
		pfb->init_tile_region(dev, i, addr, size, pitch, flags);

	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
	pfifo->reassign(dev, false);
	pfifo->cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	pfb->set_tile_region(dev, i);
	pgraph->set_tile_region(dev, i);

	pfifo->cache_pull(dev, true);
	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}
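
/* Try to claim tile region i for reuse.  Returns NULL if the region is
 * still marked as used or is still guarded by an unsignalled fence. */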
static struct nouveau_tile_reg *
nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	spin_lock(&dev_priv->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_signalled(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&dev_priv->tile.lock);
	return tile;
}
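
/* Release a tile region.  If a fence is supplied, the region stays pending
 * until that fence signals, so work that still depends on the old tiling
 * setup is not disturbed. */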
void
nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
			 struct nouveau_fence *fence)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (tile) {
		spin_lock(&dev_priv->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&dev_priv->tile.lock);
	}
}
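
/* Claim a free tile region and program it for addr/size/pitch, disabling
 * any stale regions encountered along the way.  Returns NULL if no region
 * could be claimed. */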
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_tile_reg *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->num_tiles; i++) {
		tile = nv10_mem_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && tile->pitch) {
			/* Kill an unused tile region. */
			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_mem_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_mem_update_tile_region(dev, found, addr, size,
					    pitch, flags);
	return found;
}

/*
 * Cleanup everything
 */
void
nouveau_mem_vram_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	nouveau_bo_unpin(dev_priv->vga_ram);
	nouveau_bo_ref(NULL, &dev_priv->vga_ram);

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	if (dev_priv->fb_mtrr >= 0) {
		drm_mtrr_del(dev_priv->fb_mtrr,
			     pci_resource_start(dev->pdev, 1),
			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = -1;
	}
}
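
/* Tear down the GART: shut down SGDMA and, on AGP systems, unbind and free
 * any remaining AGP memory before releasing the bridge. */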
void
nouveau_mem_gart_fini(struct drm_device *dev)
{
	nouveau_sgdma_takedown(dev);

	if (drm_core_has_AGP(dev) && dev->agp) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			if (entry->bound)
				drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);
			kfree(entry);
		}
		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
	}
}
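
/* Read the VRAM size of NV04-class boards from the PFB_BOOT_0 straps.
 * Returns the size in bytes, or 0 for an unrecognised encoding. */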
static uint32_t
nouveau_mem_detect_nv04(struct drm_device *dev)
{
	uint32_t boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);

	if (boot0 & 0x00000100)
		return (((boot0 >> 12) & 0xf) * 2 + 2) * 1024 * 1024;

	switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
	case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
		return 32 * 1024 * 1024;
	case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
		return 16 * 1024 * 1024;
	case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
		return 8 * 1024 * 1024;
	case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
		return 4 * 1024 * 1024;
	}

	return 0;
}
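
/* nForce/nForce2 IGPs carve their VRAM out of system memory, so the size
 * is read from the host bridge at 00:00.1 rather than from the GPU. */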
static uint32_t
nouveau_mem_detect_nforce(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct pci_dev *bridge;
	uint32_t mem;

	bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
	if (!bridge) {
		NV_ERROR(dev, "no bridge device\n");
		return 0;
	}

	if (dev_priv->flags & NV_NFORCE) {
		pci_read_config_dword(bridge, 0x7C, &mem);
		return (uint64_t)(((mem >> 6) & 31) + 1) * 1024 * 1024;
	} else
	if (dev_priv->flags & NV_NFORCE2) {
		pci_read_config_dword(bridge, 0x84, &mem);
		return (uint64_t)(((mem >> 4) & 127) + 1) * 1024 * 1024;
	}

	NV_ERROR(dev, "impossible!\n");
	return 0;
}
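
/* Determine the amount of VRAM on pre-NV50 boards.  Returns -ENOMEM if no
 * memory could be detected. */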
int
nouveau_mem_detect(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->card_type == NV_04) {
		dev_priv->vram_size = nouveau_mem_detect_nv04(dev);
	} else
	if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
		dev_priv->vram_size = nouveau_mem_detect_nforce(dev);
	} else
	if (dev_priv->card_type < NV_50) {
		dev_priv->vram_size  = nv_rd32(dev, NV04_PFB_FIFO_DATA);
		dev_priv->vram_size &= NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
	}

	if (dev_priv->vram_size)
		return 0;
	return -ENOMEM;
}
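
/* Only tile_flags that do not request a memory-type layout are accepted by
 * the common code here. */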
bool
nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
{
	if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
		return true;

	return false;
}

#if __OS_HAS_AGP
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * FW seems to be broken on nv18, it makes the card lock up
	 * randomly.
	 */
	if (dev_priv->chipset == 0x18)
		mode &= ~PCI_AGP_COMMAND_FW;

	/*
	 * AGP mode set in the command line.
	 */
	if (nouveau_agpmode > 0) {
		bool agpv3 = mode & 0x8;
		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;

		mode = (mode & ~0x7) | (rate & 0x7);
	}

	return mode;
}
#endif
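
/* Put the card's AGP controller back into a sane state: drop fast writes,
 * clear the busmaster and AGP enables, power-cycle PGRAPH and restore the
 * saved PCI config, which amounts to a reset of the AGP link. */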
int
nouveau_mem_reset_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	uint32_t saved_pci_nv_1, pmc_enable;
	int ret;

	/* First of all, disable fast writes, otherwise if it's
	 * already enabled in the AGP bridge and we disable the card's
	 * AGP controller we might be locking ourselves out of it. */
	if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
		struct drm_agp_info info;
		struct drm_agp_mode mode;

		ret = drm_agp_info(dev, &info);
		if (ret)
			return ret;

		mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
		ret = drm_agp_enable(dev, mode);
		if (ret)
			return ret;
	}

	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);

	/* clear busmaster bit */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* disable AGP */
	nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);

	/* power cycle pgraph, if enabled */
	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		nv_wr32(dev, NV03_PMC_ENABLE,
			pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
#endif

	return 0;
}
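
/* Acquire and enable the AGP bridge and record the resulting aperture in
 * gart_info.  Becomes a no-op when the kernel lacks AGP support. */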
int
nouveau_mem_init_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;
	int ret;

	if (!dev->agp->acquired) {
		ret = drm_agp_acquire(dev);
		if (ret) {
			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
			return ret;
		}
	}

	nouveau_mem_reset_agp(dev);

	ret = drm_agp_info(dev, &info);
	if (ret) {
		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
		return ret;
	}

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = get_agp_mode(dev, info.mode);

	ret = drm_agp_enable(dev, mode);
	if (ret) {
		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
		return ret;
	}

	dev_priv->gart_info.type	= NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base	= info.aperture_base;
	dev_priv->gart_info.aper_size	= info.aperture_size;
#endif
	return 0;
}
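
/* Bring up VRAM handling: DMA mask, the TTM bo driver, the PRAMIN
 * reservation at the end of VRAM, the per-chipset vram engine, the
 * TTM_PL_VRAM pool, the VGA scanout buffer and a write-combining MTRR
 * over the BAR1 aperture. */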
int
nouveau_mem_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret, dma_bits;

	if (dev_priv->card_type >= NV_50 &&
	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
		dma_bits = 40;
	else
		dma_bits = 32;

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret)
		return ret;

	dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);

	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
		return ret;
	}

	/* reserve space at end of VRAM for PRAMIN */
	if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
		dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
	else
	if (dev_priv->card_type >= NV_40)
		dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
	else
		dev_priv->ramin_rsvd_vram = (512 * 1024);

	ret = dev_priv->engine.vram.init(dev);
	if (ret)
		return ret;

	NV_INFO(dev, "Detected %dMiB VRAM\n", (int)(dev_priv->vram_size >> 20));
	if (dev_priv->vram_sys_base) {
		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
			dev_priv->vram_sys_base);
	}

	dev_priv->fb_available_size = dev_priv->vram_size;
	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	/* mappable vram */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	ret = nouveau_bo_new(dev, NULL, 256 * 1024, 0, TTM_PL_FLAG_VRAM,
			     0, 0, true, true, &dev_priv->vga_ram);
	if (ret == 0)
		ret = nouveau_bo_pin(dev_priv->vga_ram, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_WARN(dev, "failed to reserve VGA memory\n");
		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
	}

	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
					 pci_resource_len(dev->pdev, 1),
					 DRM_MTRR_WC);
	return 0;
}
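
/* Bring up the GART: use AGP where present and enabled (never on powerpc
 * or ia64), otherwise fall back to SGDMA, then register the TTM_PL_TT
 * pool over the resulting aperture. */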
int
nouveau_mem_gart_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret;

	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
		ret = nouveau_mem_init_agp(dev);
		if (ret)
			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
	}
#endif

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	return 0;
}
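
/* Parse the VBIOS memory timing table (found via the BIT 'P' table) into
 * the common nouveau_pm_memtiming format used by the PM code.  Many fields
 * are still unidentified, hence the tUNK_* names, and several of the
 * register formulas below are admitted guesswork (see the XXX comments). */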
void
nouveau_mem_timing_init(struct drm_device *dev)
{
	/* cards < NVC0 only */
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
	struct nvbios *bios = &dev_priv->vbios;
	struct bit_entry P;
	u8 tUNK_0, tUNK_1, tUNK_2;
	u8 tRP;		/* Byte 3 */
	u8 tRAS;	/* Byte 5 */
	u8 tRFC;	/* Byte 7 */
	u8 tRC;		/* Byte 9 */
	u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
	u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
	u8 *mem = NULL, *entry;
	int i, recordlen, entries;

	if (bios->type == NVBIOS_BIT) {
		if (bit_table(dev, 'P', &P))
			return;

		if (P.version == 1)
			mem = ROMPTR(bios, P.data[4]);
		else
		if (P.version == 2)
			mem = ROMPTR(bios, P.data[8]);
		else {
			NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
		}
	} else {
		NV_DEBUG(dev, "BMP version too old for memory\n");
		return;
	}

	if (!mem) {
		NV_DEBUG(dev, "memory timing table pointer invalid\n");
		return;
	}

	if (mem[0] != 0x10) {
		NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
		return;
	}

	/* validate record length */
	entries   = mem[2];
	recordlen = mem[3];
	if (recordlen < 15) {
		NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
		return;
	}

	/* parse vbios entries into common format */
	memtimings->timing =
		kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
	if (!memtimings->timing)
		return;

	entry = mem + mem[1];
	for (i = 0; i < entries; i++, entry += recordlen) {
		struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
		if (entry[0] == 0)
			continue;

		tUNK_18 = 1;
		tUNK_19 = 1;
		tUNK_20 = 0;
		tUNK_21 = 0;
		switch (min(recordlen, 22)) {
		case 22:
			tUNK_21 = entry[21];
		case 21:
			tUNK_20 = entry[20];
		case 20:
			tUNK_19 = entry[19];
		case 19:
			tUNK_18 = entry[18];
		default:
			tUNK_0  = entry[0];
			tUNK_1  = entry[1];
			tUNK_2  = entry[2];
			tRP     = entry[3];
			tRAS    = entry[5];
			tRFC    = entry[7];
			tRC     = entry[9];
			tUNK_10 = entry[10];
			tUNK_11 = entry[11];
			tUNK_12 = entry[12];
			tUNK_13 = entry[13];
			tUNK_14 = entry[14];
			break;
		}

		timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);

		/* XXX: I don't trust the -1's and +1's... they must come
		 * from somewhere! */
		timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
				      tUNK_18 << 16 |
				      (tUNK_1 + tUNK_19 + 1) << 8 |
				      (tUNK_2 - 1));

		timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
		if (recordlen > 19) {
			timing->reg_100228 += (tUNK_19 - 1) << 24;
		} /* I cannot back-up this else-statement right now
		else {
			timing->reg_100228 += tUNK_12 << 24;
		} */

		/* XXX: reg_10022c */
		timing->reg_10022c = tUNK_2 - 1;

		timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
				      tUNK_13 << 8  | tUNK_13);

		/* XXX: +6? */
		timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
		timing->reg_100234 += max(tUNK_10, tUNK_11) << 16;

		/* XXX; reg_100238, reg_10023c
		 * reg: 0x00??????
		 * reg_10023c:
		 *      0 for pre-NV50 cards
		 *      0x????0202 for NV50+ cards (empirical evidence) */
		if (dev_priv->card_type >= NV_50) {
			timing->reg_10023c = 0x202;
		}

		NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
			 timing->reg_100220, timing->reg_100224,
			 timing->reg_100228, timing->reg_10022c);
		NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
			 timing->reg_100230, timing->reg_100234,
			 timing->reg_100238, timing->reg_10023c);
	}

	memtimings->nr_timing = entries;
	memtimings->supported = true;
}

void
nouveau_mem_timing_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;

	kfree(mem->timing);
}
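
/* TTM memory-type manager for TTM_PL_VRAM: init/fini look after the
 * nouveau_mm kept in man->priv (used by the debug dump below), while
 * allocations themselves go through the chipset vram engine. */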
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long p_size)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_mm *mm;
	u32 b_size;
	int ret;

	p_size = (p_size << PAGE_SHIFT) >> 12;
	b_size = dev_priv->vram_rblock_size >> 12;

	ret = nouveau_mm_init(&mm, 0, p_size, b_size);
	if (ret)
		return ret;

	man->priv = mm;
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	struct nouveau_mm *mm = man->priv;
	int ret;

	ret = nouveau_mm_fini(&mm);
	if (ret)
		return ret;

	man->priv = NULL;
	return 0;
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct drm_device *dev = dev_priv->dev;

	vram->put(dev, (struct nouveau_vram **)&mem->mm_node);
}
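
/* Allocate VRAM for a buffer object.  Buffers flagged
 * NOUVEAU_GEM_TILE_NONCONTIG may be satisfied with non-contiguous chunks
 * no smaller than the bo's VMA page size. */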
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vram *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->vma.node->type;

	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
			mem->page_alignment << PAGE_SHIFT, size_nc,
			(nvbo->tile_flags >> 8) & 0xff, &node);
	if (ret)
		return ret;

	node->page_shift = 12;
	if (nvbo->vma.node)
		node->page_shift = nvbo->vma.node->type;

	mem->mm_node = node;
	mem->start   = node->offset >> PAGE_SHIFT;
	return 0;
}

void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s  block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};