#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
void
2010-10-24 16:14:41 +02:00
nv40_fb_set_tile_region ( struct drm_device * dev , int i )
2009-12-16 12:12:27 +01:00
{
struct drm_nouveau_private * dev_priv = dev - > dev_private ;
2010-10-24 16:14:41 +02:00
struct nouveau_tile_reg * tile = & dev_priv - > tile . reg [ i ] ;
2009-12-16 12:12:27 +01:00
switch ( dev_priv - > chipset ) {
case 0x40 :
2010-10-24 16:14:41 +02:00
nv_wr32 ( dev , NV10_PFB_TLIMIT ( i ) , tile - > limit ) ;
nv_wr32 ( dev , NV10_PFB_TSIZE ( i ) , tile - > pitch ) ;
nv_wr32 ( dev , NV10_PFB_TILE ( i ) , tile - > addr ) ;
2009-12-16 12:12:27 +01:00
break ;
default :
2010-10-24 16:14:41 +02:00
nv_wr32 ( dev , NV40_PFB_TLIMIT ( i ) , tile - > limit ) ;
nv_wr32 ( dev , NV40_PFB_TSIZE ( i ) , tile - > pitch ) ;
nv_wr32 ( dev , NV40_PFB_TILE ( i ) , tile - > addr ) ;
2009-12-16 12:12:27 +01:00
break ;
}
}
2011-01-11 14:52:40 +10:00
static void
nv40_fb_init_gart ( struct drm_device * dev )
{
struct drm_nouveau_private * dev_priv = dev - > dev_private ;
struct nouveau_gpuobj * gart = dev_priv - > gart_info . sg_ctxdma ;
if ( dev_priv - > gart_info . type ! = NOUVEAU_GART_HW ) {
nv_wr32 ( dev , 0x100800 , 0x00000001 ) ;
return ;
}
nv_wr32 ( dev , 0x100800 , gart - > pinst | 0x00000002 ) ;
nv_mask ( dev , 0x10008c , 0x00000100 , 0x00000100 ) ;
nv_wr32 ( dev , 0x100820 , 0x00000000 ) ;
}
static void
nv44_fb_init_gart ( struct drm_device * dev )
{
struct drm_nouveau_private * dev_priv = dev - > dev_private ;
struct nouveau_gpuobj * gart = dev_priv - > gart_info . sg_ctxdma ;
u32 vinst ;
if ( dev_priv - > gart_info . type ! = NOUVEAU_GART_HW ) {
nv_wr32 ( dev , 0x100850 , 0x80000000 ) ;
nv_wr32 ( dev , 0x100800 , 0x00000001 ) ;
return ;
}
/* calculate vram address of this PRAMIN block, object
* must be allocated on 512 KiB alignment , and not exceed
* a total size of 512 KiB for this to work correctly
*/
vinst = nv_rd32 ( dev , 0x10020c ) ;
vinst - = ( ( gart - > pinst > > 19 ) + 1 ) < < 19 ;
nv_wr32 ( dev , 0x100850 , 0x80000000 ) ;
nv_wr32 ( dev , 0x100818 , dev_priv - > gart_info . dummy . addr ) ;
nv_wr32 ( dev , 0x100804 , dev_priv - > gart_info . aper_size ) ;
nv_wr32 ( dev , 0x100850 , 0x00008000 ) ;
nv_mask ( dev , 0x10008c , 0x00000200 , 0x00000200 ) ;
nv_wr32 ( dev , 0x100820 , 0x00000000 ) ;
nv_wr32 ( dev , 0x10082c , 0x00000001 ) ;
nv_wr32 ( dev , 0x100800 , vinst | 0x00000010 ) ;
}
2009-12-11 19:24:15 +10:00
int
nv40_fb_init ( struct drm_device * dev )
{
struct drm_nouveau_private * dev_priv = dev - > dev_private ;
2009-12-16 12:12:27 +01:00
struct nouveau_fb_engine * pfb = & dev_priv - > engine . fb ;
uint32_t tmp ;
2009-12-11 19:24:15 +10:00
int i ;
2011-01-11 14:52:40 +10:00
if ( dev_priv - > chipset ! = 0x40 & & dev_priv - > chipset ! = 0x45 ) {
if ( nv44_graph_class ( dev ) )
nv44_fb_init_gart ( dev ) ;
else
nv40_fb_init_gart ( dev ) ;
}
2009-12-11 19:24:15 +10:00
switch ( dev_priv - > chipset ) {
case 0x40 :
case 0x45 :
tmp = nv_rd32 ( dev , NV10_PFB_CLOSE_PAGE2 ) ;
nv_wr32 ( dev , NV10_PFB_CLOSE_PAGE2 , tmp & ~ ( 1 < < 15 ) ) ;
2009-12-16 12:12:27 +01:00
pfb - > num_tiles = NV10_PFB_TILE__SIZE ;
2009-12-11 19:24:15 +10:00
break ;
case 0x46 : /* G72 */
case 0x47 : /* G70 */
case 0x49 : /* G71 */
case 0x4b : /* G73 */
case 0x4c : /* C51 (G7X version) */
2009-12-16 12:12:27 +01:00
pfb - > num_tiles = NV40_PFB_TILE__SIZE_1 ;
2009-12-11 19:24:15 +10:00
break ;
default :
2009-12-16 12:12:27 +01:00
pfb - > num_tiles = NV40_PFB_TILE__SIZE_0 ;
2009-12-11 19:24:15 +10:00
break ;
}
2009-12-16 12:12:27 +01:00
/* Turn all the tiling regions off. */
for ( i = 0 ; i < pfb - > num_tiles ; i + + )
2010-10-24 16:14:41 +02:00
pfb - > set_tile_region ( dev , i ) ;
2009-12-11 19:24:15 +10:00
return 0 ;
}
/* No teardown work is required for the NV40 FB engine. */
void
nv40_fb_takedown(struct drm_device *dev)
{
}