/*
 * Intel GTT (Graphics Translation Table) routines
 *
 * Caveat: This driver implements the linux agp interface, but this is far from
 * an agp driver! GTT support ended up here for purely historical reasons: The
 * old userspace intel graphics drivers needed an interface to map memory into
 * the GTT. And the drm provides a default interface for graphic devices sitting
 * on an agp port. So it made sense to fake the GTT support as an agp port to
 * avoid having to create a new api.
 *
 * With gem this does not make much sense anymore, just needlessly complicates
 * the code. But as long as the old graphics stack is still supported, it's stuck
 * here.
 *
 * /fairy-tale-mode off
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
#include <drm/intel-gtt.h>

/*
 * If we have Intel graphics, we're not going to have anything other than
 * an Intel IOMMU. So make the correct use of the PCI DMA API contingent
 * on the Intel IOMMU support (CONFIG_INTEL_IOMMU).
 * Only newer chipsets need to bother with this, of course.
 */
#ifdef CONFIG_INTEL_IOMMU
#define USE_PCI_DMA_API 1
#else
#define USE_PCI_DMA_API 0
#endif

struct intel_gtt_driver {
	unsigned int gen : 8;
	unsigned int is_g33 : 1;
	unsigned int is_pineview : 1;
	unsigned int is_ironlake : 1;
	unsigned int has_pgtbl_enable : 1;
	unsigned int dma_mask_size : 8;
	/* Chipset specific GTT setup */
	int (*setup)(void);
	/* This should undo anything done in ->setup() save the unmapping
	 * of the mmio register file, that's done in the generic code. */
	void (*cleanup)(void);
	void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
	/* Flags is a more or less chipset specific opaque value.
	 * For chipsets that need to support old ums (non-gem) code, this
	 * needs to be identical to the various supported agp memory types! */
	bool (*check_flags)(unsigned int flags);
	void (*chipset_flush)(void);
};

static struct _intel_private {
	struct intel_gtt base;
	const struct intel_gtt_driver *driver;
	struct pci_dev *pcidev;	/* device one */
	struct pci_dev *bridge_dev;
	u8 __iomem *registers;
	phys_addr_t gtt_bus_addr;
	phys_addr_t gma_bus_addr;
	u32 PGETBL_save;
	u32 __iomem *gtt;		/* I915G */
	bool clear_fake_agp; /* on first access via agp, fill with scratch */
	int num_dcache_entries;
	void __iomem *i9xx_flush_page;
	char *i81x_gtt_table;
	struct resource ifp_resource;
	int resource_valid;
	struct page *scratch_page;
} intel_private;

#define INTEL_GTT_GEN	intel_private.driver->gen
#define IS_G33		intel_private.driver->is_g33
#define IS_PINEVIEW	intel_private.driver->is_pineview
#define IS_IRONLAKE	intel_private.driver->is_ironlake
#define HAS_PGTBL_EN	intel_private.driver->has_pgtbl_enable

int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
			 struct scatterlist **sg_list, int *num_sg)
{
	struct sg_table st;
	struct scatterlist *sg;
	int i;

	if (*sg_list)
		return 0; /* already mapped (for e.g. resume */

	DBG("try mapping %lu pages\n", (unsigned long)num_entries);

	if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
		goto err;

	*sg_list = sg = st.sgl;

	for (i = 0; i < num_entries; i++, sg = sg_next(sg))
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	*num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
			     num_entries, PCI_DMA_BIDIRECTIONAL);
	if (unlikely(!*num_sg))
		goto err;

	return 0;

err:
	sg_free_table(&st);
	return -ENOMEM;
}
EXPORT_SYMBOL(intel_gtt_map_memory);

void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
{
	struct sg_table st;
	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count);

	pci_unmap_sg(intel_private.pcidev, sg_list,
		     num_sg, PCI_DMA_BIDIRECTIONAL);

	st.sgl = sg_list;
	st.orig_nents = st.nents = num_sg;

	sg_free_table(&st);
}
EXPORT_SYMBOL(intel_gtt_unmap_memory);

static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
	return;
}

/* Exists to support ARGB cursors */
static struct page *i8xx_alloc_pages(void)
{
	struct page *page;

	page = alloc_pages(GFP_KERNEL | GFP_DMA32, 2);
	if (page == NULL)
		return NULL;

	if (set_pages_uc(page, 4) < 0) {
		set_pages_wb(page, 4);
		__free_pages(page, 2);
		return NULL;
	}
	get_page(page);
	atomic_inc(&agp_bridge->current_memory_agp);
	return page;
}

static void i8xx_destroy_pages(struct page *page)
{
	if (page == NULL)
		return;

	set_pages_wb(page, 4);
	put_page(page);
	__free_pages(page, 2);
	atomic_dec(&agp_bridge->current_memory_agp);
}
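
/*
 * Note: the i81x GTT set up below is a fixed 64KB table of 4-byte PTEs, i.e.
 * 16384 entries covering at most a 64MB aperture; the optional 4MB of
 * dedicated video ram is exposed via the 1024 "dcache" entries detected in
 * i810_setup().
 */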
#define I810_GTT_ORDER 4
static int i810_setup(void)
{
	u32 reg_addr;
	char *gtt_table;

	/* i81x does not preallocate the gtt. It's always 64kb in size. */
	gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
	if (gtt_table == NULL)
		return -ENOMEM;
	intel_private.i81x_gtt_table = gtt_table;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
	       intel_private.registers + I810_PGETBL_CTL);

	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;

	if ((readl(intel_private.registers + I810_DRAM_CTL)
	     & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
		dev_info(&intel_private.pcidev->dev,
			 "detected 4MB dedicated video ram\n");
		intel_private.num_dcache_entries = 1024;
	}

	return 0;
}

static void i810_cleanup(void)
{
	writel(0, intel_private.registers + I810_PGETBL_CTL);
	free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
}

static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
				      int type)
{
	int i;

	if ((pg_start + mem->page_count)
			> intel_private.num_dcache_entries)
		return -EINVAL;

	if (!mem->is_flushed)
		global_cache_flush();

	for (i = pg_start; i < (pg_start + mem->page_count); i++) {
		dma_addr_t addr = i << PAGE_SHIFT;
		intel_private.driver->write_entry(addr,
						  i, type);
	}
	readl(intel_private.gtt + i - 1);

	return 0;
}

/*
 * The i810/i830 requires a physical address to program its mouse
 * pointer into hardware.
 * However the Xserver still writes to it through the agp aperture.
 */
static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
{
	struct agp_memory *new;
	struct page *page;

	switch (pg_count) {
	case 1: page = agp_bridge->driver->agp_alloc_page(agp_bridge);
		break;
	case 4:
		/* kludge to get 4 physical pages for ARGB cursor */
		page = i8xx_alloc_pages();
		break;
	default:
		return NULL;
	}

	if (page == NULL)
		return NULL;

	new = agp_create_memory(pg_count);
	if (new == NULL)
		return NULL;

	new->pages[0] = page;
	if (pg_count == 4) {
		/* kludge to get 4 physical pages for ARGB cursor */
		new->pages[1] = new->pages[0] + 1;
		new->pages[2] = new->pages[1] + 1;
		new->pages[3] = new->pages[2] + 1;
	}
	new->page_count = pg_count;
	new->num_scratch_pages = pg_count;
	new->type = AGP_PHYS_MEMORY;
	new->physical = page_to_phys(new->pages[0]);
	return new;
}

static void intel_i810_free_by_type(struct agp_memory *curr)
{
	agp_free_key(curr->key);
	if (curr->type == AGP_PHYS_MEMORY) {
		if (curr->page_count == 4)
			i8xx_destroy_pages(curr->pages[0]);
		else {
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_UNMAP);
			agp_bridge->driver->agp_destroy_page(curr->pages[0],
							     AGP_PAGE_DESTROY_FREE);
		}
		agp_free_page_array(curr);
	}
	kfree(curr);
}
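
/*
 * The scratch page allocated below backs every GTT entry that is not bound
 * to real memory (see intel_gtt_clear_range()), so that stray GPU accesses
 * land on a harmless, uncached page instead of whatever was mapped before.
 */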
static int intel_gtt_setup_scratch_page(void)
{
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

	if (intel_private.base.needs_dmar) {
		dma_addr = pci_map_page(intel_private.pcidev, page, 0,
					PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
			return -EINVAL;

		intel_private.base.scratch_page_dma = dma_addr;
	} else
		intel_private.base.scratch_page_dma = page_to_phys(page);

	intel_private.scratch_page = page;

	return 0;
}

static void i810_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	switch (flags) {
	case AGP_DCACHE_MEMORY:
		pte_flags |= I810_PTE_LOCAL;
		break;
	case AGP_USER_CACHED_MEMORY:
		pte_flags |= I830_PTE_SYSTEM_CACHED;
		break;
	}

	writel(addr | pte_flags, intel_private.gtt + entry);
}
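
/*
 * Each row of the fake AGP aperture size table below appears to be
 * { aperture size in MB, number of 4KB GTT entries, page order of the
 * translation table }; e.g. a 128MB aperture needs 128 * 1024 / 4 = 32768
 * entries, i.e. 128KB of PTEs = 32 pages = order 5.
 */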
static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
	{32, 8192, 3},
	{64, 16384, 4},
	{128, 32768, 5},
	{256, 65536, 6},
	{512, 131072, 7},
};
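
/*
 * "Stolen" memory is main memory the BIOS has set aside for the integrated
 * graphics device; its size is decoded from the GMCH control register of the
 * bridge (or, on Sandybridge, from the new control register at 0x50).
 */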
static unsigned int intel_gtt_stolen_size(void)
{
	u16 gmch_ctrl;
	u8 rdct;
	int local = 0;
	static const int ddt[4] = { 0, 16, 32, 64 };
	unsigned int stolen_size = 0;

	if (INTEL_GTT_GEN == 1)
		return 0; /* no stolen mem on i81x */

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctrl);

	if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
	    intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
		switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
		case I830_GMCH_GMS_STOLEN_512:
			stolen_size = KB(512);
			break;
		case I830_GMCH_GMS_STOLEN_1024:
			stolen_size = MB(1);
			break;
		case I830_GMCH_GMS_STOLEN_8192:
			stolen_size = MB(8);
			break;
		case I830_GMCH_GMS_LOCAL:
			rdct = readb(intel_private.registers + I830_RDRAM_CHANNEL_TYPE);
			stolen_size = (I830_RDRAM_ND(rdct) + 1) *
					MB(ddt[I830_RDRAM_DDT(rdct)]);
			local = 1;
			break;
		default:
			stolen_size = 0;
			break;
		}
	} else if (INTEL_GTT_GEN == 6) {
		/*
		 * SandyBridge has new memory control reg at 0x50.w
		 */
		u16 snb_gmch_ctl;
		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
		case SNB_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case SNB_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case SNB_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case SNB_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case SNB_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case SNB_GMCH_GMS_STOLEN_192M:
			stolen_size = MB(192);
			break;
		case SNB_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case SNB_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case SNB_GMCH_GMS_STOLEN_288M:
			stolen_size = MB(288);
			break;
		case SNB_GMCH_GMS_STOLEN_320M:
			stolen_size = MB(320);
			break;
		case SNB_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		case SNB_GMCH_GMS_STOLEN_384M:
			stolen_size = MB(384);
			break;
		case SNB_GMCH_GMS_STOLEN_416M:
			stolen_size = MB(416);
			break;
		case SNB_GMCH_GMS_STOLEN_448M:
			stolen_size = MB(448);
			break;
		case SNB_GMCH_GMS_STOLEN_480M:
			stolen_size = MB(480);
			break;
		case SNB_GMCH_GMS_STOLEN_512M:
			stolen_size = MB(512);
			break;
		}
	} else {
		switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
		case I855_GMCH_GMS_STOLEN_1M:
			stolen_size = MB(1);
			break;
		case I855_GMCH_GMS_STOLEN_4M:
			stolen_size = MB(4);
			break;
		case I855_GMCH_GMS_STOLEN_8M:
			stolen_size = MB(8);
			break;
		case I855_GMCH_GMS_STOLEN_16M:
			stolen_size = MB(16);
			break;
		case I855_GMCH_GMS_STOLEN_32M:
			stolen_size = MB(32);
			break;
		case I915_GMCH_GMS_STOLEN_48M:
			stolen_size = MB(48);
			break;
		case I915_GMCH_GMS_STOLEN_64M:
			stolen_size = MB(64);
			break;
		case G33_GMCH_GMS_STOLEN_128M:
			stolen_size = MB(128);
			break;
		case G33_GMCH_GMS_STOLEN_256M:
			stolen_size = MB(256);
			break;
		case INTEL_GMCH_GMS_STOLEN_96M:
			stolen_size = MB(96);
			break;
		case INTEL_GMCH_GMS_STOLEN_160M:
			stolen_size = MB(160);
			break;
		case INTEL_GMCH_GMS_STOLEN_224M:
			stolen_size = MB(224);
			break;
		case INTEL_GMCH_GMS_STOLEN_352M:
			stolen_size = MB(352);
			break;
		default:
			stolen_size = 0;
			break;
		}
	}

	if (stolen_size > 0) {
		dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
			 stolen_size / KB(1), local ? "local" : "stolen");
	} else {
		dev_info(&intel_private.bridge_dev->dev,
			 "no pre-allocated video memory detected\n");
		stolen_size = 0;
	}

	return stolen_size;
}

static void i965_adjust_pgetbl_size(unsigned int size_flag)
{
	u32 pgetbl_ctl, pgetbl_ctl2;

	/* ensure that ppgtt is disabled */
	pgetbl_ctl2 = readl(intel_private.registers + I965_PGETBL_CTL2);
	pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
	writel(pgetbl_ctl2, intel_private.registers + I965_PGETBL_CTL2);

	/* write the new ggtt size */
	pgetbl_ctl = readl(intel_private.registers + I810_PGETBL_CTL);
	pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
	pgetbl_ctl |= size_flag;
	writel(pgetbl_ctl, intel_private.registers + I810_PGETBL_CTL);
}
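
/*
 * The page table sizes decoded below are in bytes; each GTT entry is a
 * 4-byte PTE mapping one 4KB page, hence the final size / 4 conversion to
 * get the number of entries.
 */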
static unsigned int i965_gtt_total_entries(void)
{
	int size;
	u32 pgetbl_ctl;
	u16 gmch_ctl;

	pci_read_config_word(intel_private.bridge_dev,
			     I830_GMCH_CTRL, &gmch_ctl);

	if (INTEL_GTT_GEN == 5) {
		switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
		case G4x_GMCH_SIZE_1M:
		case G4x_GMCH_SIZE_VT_1M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
			break;
		case G4x_GMCH_SIZE_VT_1_5M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
			break;
		case G4x_GMCH_SIZE_2M:
		case G4x_GMCH_SIZE_VT_2M:
			i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
			break;
		}
	}

	pgetbl_ctl = readl(intel_private.registers + I810_PGETBL_CTL);

	switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
	case I965_PGETBL_SIZE_128KB:
		size = KB(128);
		break;
	case I965_PGETBL_SIZE_256KB:
		size = KB(256);
		break;
	case I965_PGETBL_SIZE_512KB:
		size = KB(512);
		break;
	/* GTT pagetable sizes bigger than 512KB are not possible on G33! */
	case I965_PGETBL_SIZE_1MB:
		size = KB(1024);
		break;
	case I965_PGETBL_SIZE_2MB:
		size = KB(2048);
		break;
	case I965_PGETBL_SIZE_1_5MB:
		size = KB(1024 + 512);
		break;
	default:
		dev_info(&intel_private.pcidev->dev,
			 "unknown page table size, assuming 512KB\n");
		size = KB(512);
	}

	return size / 4;
}

static unsigned int intel_gtt_total_entries(void)
{
	int size;

	if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
		return i965_gtt_total_entries();
	else if (INTEL_GTT_GEN == 6) {
		u16 snb_gmch_ctl;

		pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
		switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
		default:
		case SNB_GTT_SIZE_0M:
			printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
			size = MB(0);
			break;
		case SNB_GTT_SIZE_1M:
			size = MB(1);
			break;
		case SNB_GTT_SIZE_2M:
			size = MB(2);
			break;
		}
		return size / 4;
	} else {
		/* On previous hardware, the GTT size was just what was
		 * required to map the aperture.
		 */
		return intel_private.base.gtt_mappable_entries;
	}
}

static unsigned int intel_gtt_mappable_entries(void)
{
	unsigned int aperture_size;

	if (INTEL_GTT_GEN == 1) {
		u32 smram_miscc;

		pci_read_config_dword(intel_private.bridge_dev,
				      I810_SMRAM_MISCC, &smram_miscc);

		if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
				== I810_GFX_MEM_WIN_32M)
			aperture_size = MB(32);
		else
			aperture_size = MB(64);
	} else if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);

		if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
			aperture_size = MB(64);
		else
			aperture_size = MB(128);
	} else {
		/* 9xx supports large sizes, just look at the length */
		aperture_size = pci_resource_len(intel_private.pcidev, 2);
	}

	return aperture_size >> PAGE_SHIFT;
}

static void intel_gtt_teardown_scratch_page(void)
{
	set_pages_wb(intel_private.scratch_page, 1);
	pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(intel_private.scratch_page);
	__free_page(intel_private.scratch_page);
}

static void intel_gtt_cleanup(void)
{
	intel_private.driver->cleanup();

	iounmap(intel_private.gtt);
	iounmap(intel_private.registers);

	intel_gtt_teardown_scratch_page();
}

static int intel_gtt_init(void)
{
	u32 gtt_map_size;
	int ret;

	ret = intel_private.driver->setup();
	if (ret != 0)
		return ret;

	intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
	intel_private.base.gtt_total_entries = intel_gtt_total_entries();

	/* save the PGETBL reg for resume */
	intel_private.PGETBL_save =
		readl(intel_private.registers + I810_PGETBL_CTL)
			& ~I810_PGETBL_ENABLED;
	/* we only ever restore the register when enabling the PGTBL... */
	if (HAS_PGTBL_EN)
		intel_private.PGETBL_save |= I810_PGETBL_ENABLED;

	dev_info(&intel_private.bridge_dev->dev,
		 "detected gtt size: %dK total, %dK mappable\n",
		 intel_private.base.gtt_total_entries * 4,
		 intel_private.base.gtt_mappable_entries * 4);

	gtt_map_size = intel_private.base.gtt_total_entries * 4;

	intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
				    gtt_map_size);
	if (!intel_private.gtt) {
		intel_private.driver->cleanup();
		iounmap(intel_private.registers);
		return -ENOMEM;
	}
	intel_private.base.gtt = intel_private.gtt;

	global_cache_flush();   /* FIXME: ? */

	intel_private.base.stolen_size = intel_gtt_stolen_size();

	intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;

	ret = intel_gtt_setup_scratch_page();
	if (ret != 0) {
		intel_gtt_cleanup();
		return ret;
	}

	return 0;
}

static int intel_fake_agp_fetch_size(void)
{
	int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
	unsigned int aper_size;
	int i;

	aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
		    / MB(1);

	for (i = 0; i < num_sizes; i++) {
		if (aper_size == intel_fake_agp_sizes[i].size) {
			agp_bridge->current_size =
				(void *) (intel_fake_agp_sizes + i);
			return aper_size;
		}
	}

	return 0;
}

static void i830_cleanup(void)
{
}

/* The chipset_flush interface needs to get data that has already been
 * flushed out of the CPU all the way out to main memory, because the GPU
 * doesn't snoop those buffers.
 *
 * The 8xx series doesn't have the same lovely interface for flushing the
 * chipset write buffers that the later chips do. According to the 865
 * specs, it's 64 octwords, or 1KB.  So, to get those previous things in
 * that buffer out, we just fill 1KB and clflush it out, on the assumption
 * that it'll push whatever was in there out.  It appears to work.
 */
static void i830_chipset_flush(void)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	/* Forcibly evict everything from the CPU write buffers.
	 * clflush appears to be insufficient.
	 */
	wbinvd_on_all_cpus();

	/* Now we've only seen documents for this magic bit on 855GM,
	 * we hope it exists for the other gen2 chipsets...
	 *
	 * Also works as advertised on my 845G.
	 */
	writel(readl(intel_private.registers + I830_HIC) | (1 << 31),
	       intel_private.registers + I830_HIC);

	while (readl(intel_private.registers + I830_HIC) & (1 << 31)) {
		if (time_after(jiffies, timeout))
			break;

		udelay(50);
	}
}

static void i830_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags = I810_PTE_VALID;

	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	writel(addr | pte_flags, intel_private.gtt + entry);
}
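
/*
 * Note on intel_enable_gtt() below: the PGETBL_CTL value saved at init time
 * (intel_private.PGETBL_save, with the enable bit forced on where the
 * chipset honours it) is written back here, so the same path serves both the
 * initial enable and re-enabling the GTT on resume.
 */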
static bool intel_enable_gtt(void)
{
	u32 gma_addr;
	u8 __iomem *reg;

	if (INTEL_GTT_GEN <= 2)
		pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
				      &gma_addr);
	else
		pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
				      &gma_addr);

	intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);

	if (INTEL_GTT_GEN >= 6)
		return true;

	if (INTEL_GTT_GEN == 2) {
		u16 gmch_ctrl;

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		gmch_ctrl |= I830_GMCH_ENABLED;
		pci_write_config_word(intel_private.bridge_dev,
				      I830_GMCH_CTRL, gmch_ctrl);

		pci_read_config_word(intel_private.bridge_dev,
				     I830_GMCH_CTRL, &gmch_ctrl);
		if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
			dev_err(&intel_private.pcidev->dev,
				"failed to enable the GTT: GMCH_CTRL=%x\n",
				gmch_ctrl);
			return false;
		}
	}

	/* On the resume path we may be adjusting the PGTBL value, so
	 * be paranoid and flush all chipset write buffers...
	 */
	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers + GFX_FLSH_CNTL);

	reg = intel_private.registers + I810_PGETBL_CTL;
	writel(intel_private.PGETBL_save, reg);
	if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
		dev_err(&intel_private.pcidev->dev,
			"failed to enable the GTT: PGETBL=%x [expected %x]\n",
			readl(reg), intel_private.PGETBL_save);
		return false;
	}

	if (INTEL_GTT_GEN >= 3)
		writel(0, intel_private.registers + GFX_FLSH_CNTL);

	return true;
}

static int i830_setup(void)
{
	u32 reg_addr;

	pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
	reg_addr &= 0xfff80000;

	intel_private.registers = ioremap(reg_addr, KB(64));
	if (!intel_private.registers)
		return -ENOMEM;

	intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;

	return 0;
}

static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
	agp_bridge->gatt_table_real = NULL;
	agp_bridge->gatt_table = NULL;
	agp_bridge->gatt_bus_addr = 0;

	return 0;
}

static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
	return 0;
}

static int intel_fake_agp_configure(void)
{
	if (!intel_enable_gtt())
		return -EIO;

	intel_private.clear_fake_agp = true;
	agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;

	return 0;
}

static bool i830_check_flags(unsigned int flags)
{
	switch (flags) {
	case 0:
	case AGP_PHYS_MEMORY:
	case AGP_USER_CACHED_MEMORY:
	case AGP_USER_MEMORY:
		return true;
	}

	return false;
}

void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
				 unsigned int sg_len,
				 unsigned int pg_start,
				 unsigned int flags)
{
	struct scatterlist *sg;
	unsigned int len, m;
	int i, j;

	j = pg_start;

	/* sg may merge pages, but we have to separate
	 * per-page addr for GTT */
	for_each_sg(sg_list, sg, sg_len, i) {
		len = sg_dma_len(sg) >> PAGE_SHIFT;
		for (m = 0; m < len; m++) {
			dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			intel_private.driver->write_entry(addr,
							  j, flags);
			j++;
		}
	}
	readl(intel_private.gtt + j - 1);
}
EXPORT_SYMBOL(intel_gtt_insert_sg_entries);

void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
			    struct page **pages, unsigned int flags)
{
	int i, j;

	for (i = 0, j = first_entry; i < num_entries; i++, j++) {
		dma_addr_t addr = page_to_phys(pages[i]);
		intel_private.driver->write_entry(addr,
						  j, flags);
	}
	readl(intel_private.gtt + j - 1);
}
EXPORT_SYMBOL(intel_gtt_insert_pages);

static int intel_fake_agp_insert_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	int ret = -EINVAL;

	if (intel_private.base.do_idle_maps)
		return -ENODEV;

	if (intel_private.clear_fake_agp) {
		int start = intel_private.base.stolen_size / PAGE_SIZE;
		int end = intel_private.base.gtt_mappable_entries;
		intel_gtt_clear_range(start, end - start);
		intel_private.clear_fake_agp = false;
	}

	if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
		return i810_insert_dcache_entries(mem, pg_start, type);

	if (mem->page_count == 0)
		goto out;

	if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
		goto out_err;

	if (type != mem->type)
		goto out_err;

	if (!intel_private.driver->check_flags(type))
		goto out_err;

	if (!mem->is_flushed)
		global_cache_flush();

	if (intel_private.base.needs_dmar) {
		ret = intel_gtt_map_memory(mem->pages, mem->page_count,
					   &mem->sg_list, &mem->num_sg);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
					    pg_start, type);
	} else
		intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
				       type);

out:
	ret = 0;
out_err:
	mem->is_flushed = true;
	return ret;
}

void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
{
	unsigned int i;

	for (i = first_entry; i < (first_entry + num_entries); i++) {
		intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
						  i, 0);
	}
	readl(intel_private.gtt + i - 1);
}
EXPORT_SYMBOL(intel_gtt_clear_range);

static int intel_fake_agp_remove_entries(struct agp_memory *mem,
					 off_t pg_start, int type)
{
	if (mem->page_count == 0)
		return 0;

	if (intel_private.base.do_idle_maps)
		return -ENODEV;

	intel_gtt_clear_range(pg_start, mem->page_count);

	if (intel_private.base.needs_dmar) {
		intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
		mem->sg_list = NULL;
		mem->num_sg = 0;
	}

	return 0;
}

static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
							int type)
{
	struct agp_memory *new;

	if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
		if (pg_count != intel_private.num_dcache_entries)
			return NULL;

		new = agp_create_memory(1);
		if (new == NULL)
			return NULL;

		new->type = AGP_DCACHE_MEMORY;
		new->page_count = pg_count;
		new->num_scratch_pages = 0;
		agp_free_page_array(new);
		return new;
	}
	if (type == AGP_PHYS_MEMORY)
		return alloc_agpphysmem_i8xx(pg_count, type);
	/* always return NULL for other allocation types for now */
	return NULL;
}

static int intel_alloc_chipset_flush_resource(void)
{
	int ret;
	ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
				     pcibios_align_resource, intel_private.bridge_dev);

	return ret;
}

static void intel_i915_setup_chipset_flush(void)
{
	int ret;
	u32 temp;

	pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
	if (!(temp & 0x1)) {
		intel_alloc_chipset_flush_resource();
		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I915_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		temp &= ~1;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = temp;
		intel_private.ifp_resource.end = temp + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i965_g33_setup_chipset_flush(void)
{
	u32 temp_hi, temp_lo;
	int ret;

	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4, &temp_hi);
	pci_read_config_dword(intel_private.bridge_dev, I965_IFPADDR, &temp_lo);

	if (!(temp_lo & 0x1)) {
		intel_alloc_chipset_flush_resource();

		intel_private.resource_valid = 1;
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR + 4,
				       upper_32_bits(intel_private.ifp_resource.start));
		pci_write_config_dword(intel_private.bridge_dev, I965_IFPADDR, (intel_private.ifp_resource.start & 0xffffffff) | 0x1);
	} else {
		u64 l64;

		temp_lo &= ~0x1;
		l64 = ((u64)temp_hi << 32) | temp_lo;

		intel_private.resource_valid = 1;
		intel_private.ifp_resource.start = l64;
		intel_private.ifp_resource.end = l64 + PAGE_SIZE;
		ret = request_resource(&iomem_resource, &intel_private.ifp_resource);
		/* some BIOSes reserve this area in a pnp some don't */
		if (ret)
			intel_private.resource_valid = 0;
	}
}

static void intel_i9xx_setup_flush(void)
{
	/* return if already configured */
	if (intel_private.ifp_resource.start)
		return;

	if (INTEL_GTT_GEN == 6)
		return;

	/* setup a resource for this object */
	intel_private.ifp_resource.name = "Intel Flush Page";
	intel_private.ifp_resource.flags = IORESOURCE_MEM;

	/* Setup chipset flush for 915 */
	if (IS_G33 || INTEL_GTT_GEN >= 4) {
		intel_i965_g33_setup_chipset_flush();
	} else {
		intel_i915_setup_chipset_flush();
	}

	if (intel_private.ifp_resource.start)
		intel_private.i9xx_flush_page = ioremap_nocache(intel_private.ifp_resource.start, PAGE_SIZE);
	if (!intel_private.i9xx_flush_page)
		dev_err(&intel_private.pcidev->dev,
			"can't ioremap flush page - no chipset flushing\n");
}

static void i9xx_cleanup(void)
{
	if (intel_private.i9xx_flush_page)
		iounmap(intel_private.i9xx_flush_page);
	if (intel_private.resource_valid)
		release_resource(&intel_private.ifp_resource);
	intel_private.ifp_resource.start = 0;
	intel_private.resource_valid = 0;
}

static void i9xx_chipset_flush(void)
{
	if (intel_private.i9xx_flush_page)
		writel(1, intel_private.i9xx_flush_page);
}
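
/*
 * Note on i965_write_entry() below: these chipsets use 36-bit physical
 * addresses but only 32-bit PTEs, so physical address bits 35:32 are folded
 * into PTE bits 7:4 by the (addr >> 28) & 0xf0 shuffle.
 */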
static void i965_write_entry(dma_addr_t addr,
			     unsigned int entry,
			     unsigned int flags)
{
	u32 pte_flags;

	pte_flags = I810_PTE_VALID;
	if (flags == AGP_USER_CACHED_MEMORY)
		pte_flags |= I830_PTE_SYSTEM_CACHED;

	/* Shift high bits down */
	addr |= (addr >> 28) & 0xf0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}

static bool gen6_check_flags(unsigned int flags)
{
	return true;
}
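
/*
 * Gen6 PTE layout as used below: bits 11:4 hold physical address bits 39:32
 * (hence the 40-bit dma mask for sandybridge), the GEN6_PTE_* bits select
 * the cacheability, and the GFDT flag from the caller is carried through as
 * GEN6_PTE_GFDT.
 */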
static void gen6_write_entry(dma_addr_t addr, unsigned int entry,
			     unsigned int flags)
{
	unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT;
	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
	u32 pte_flags;

	if (type_mask == AGP_USER_MEMORY)
		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	} else { /* set 'normal'/'cached' to LLC by default */
		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
		if (gfdt)
			pte_flags |= GEN6_PTE_GFDT;
	}

	/* gen6 has bit11-4 for physical addr bit39-32 */
	addr |= (addr >> 28) & 0xff0;
	writel(addr | pte_flags, intel_private.gtt + entry);
}

static void gen6_cleanup(void)
{
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline int needs_idle_maps(void)
{
#ifdef CONFIG_INTEL_IOMMU
	const unsigned short gpu_devid = intel_private.pcidev->device;
	extern int intel_iommu_gfx_mapped;
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
	     intel_iommu_gfx_mapped)
		return 1;
#endif
	return 0;
}

static int i9xx_setup(void)
{
	u32 reg_addr;
	int size = KB(512);

	pci_read_config_dword(intel_private.pcidev, I915_MMADDR, &reg_addr);

	reg_addr &= 0xfff80000;

	if (INTEL_GTT_GEN >= 7)
		size = MB(2);

	intel_private.registers = ioremap(reg_addr, size);
	if (!intel_private.registers)
		return -ENOMEM;

	if (INTEL_GTT_GEN == 3) {
		u32 gtt_addr;

		pci_read_config_dword(intel_private.pcidev,
				      I915_PTEADDR, &gtt_addr);
		intel_private.gtt_bus_addr = gtt_addr;
	} else {
		u32 gtt_offset;

		switch (INTEL_GTT_GEN) {
		case 5:
		case 6:
			gtt_offset = MB(2);
			break;
		case 4:
		default:
			gtt_offset = KB(512);
			break;
		}
		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
	}

	if (needs_idle_maps())
		intel_private.base.do_idle_maps = 1;

	intel_i9xx_setup_flush();

	return 0;
}

static const struct agp_bridge_driver intel_fake_agp_driver = {
	.owner			= THIS_MODULE,
	.size_type		= FIXED_APER_SIZE,
	.aperture_sizes		= intel_fake_agp_sizes,
	.num_aperture_sizes	= ARRAY_SIZE(intel_fake_agp_sizes),
	.configure		= intel_fake_agp_configure,
	.fetch_size		= intel_fake_agp_fetch_size,
	.cleanup		= intel_gtt_cleanup,
	.agp_enable		= intel_fake_agp_enable,
	.cache_flush		= global_cache_flush,
	.create_gatt_table	= intel_fake_agp_create_gatt_table,
	.free_gatt_table	= intel_fake_agp_free_gatt_table,
	.insert_memory		= intel_fake_agp_insert_entries,
	.remove_memory		= intel_fake_agp_remove_entries,
	.alloc_by_type		= intel_fake_agp_alloc_by_type,
	.free_by_type		= intel_i810_free_by_type,
	.agp_alloc_page		= agp_generic_alloc_page,
	.agp_alloc_pages	= agp_generic_alloc_pages,
	.agp_destroy_page	= agp_generic_destroy_page,
	.agp_destroy_pages	= agp_generic_destroy_pages,
};
2010-09-12 14:34:44 +04:00
static const struct intel_gtt_driver i81x_gtt_driver = {
. gen = 1 ,
2010-11-05 15:30:14 +03:00
. has_pgtbl_enable = 1 ,
2010-09-12 18:38:55 +04:00
. dma_mask_size = 32 ,
2010-11-05 15:30:14 +03:00
. setup = i810_setup ,
. cleanup = i810_cleanup ,
2010-11-04 22:07:57 +03:00
. check_flags = i830_check_flags ,
. write_entry = i810_write_entry ,
2010-09-12 14:34:44 +04:00
} ;
2010-09-08 23:18:53 +04:00
static const struct intel_gtt_driver i8xx_gtt_driver = {
. gen = 2 ,
2010-10-31 13:37:02 +03:00
. has_pgtbl_enable = 1 ,
2010-08-29 19:29:50 +04:00
. setup = i830_setup ,
2010-09-12 19:11:15 +04:00
. cleanup = i830_cleanup ,
2010-09-08 00:41:04 +04:00
. write_entry = i830_write_entry ,
2010-09-12 18:38:55 +04:00
. dma_mask_size = 32 ,
2010-09-11 23:31:04 +04:00
. check_flags = i830_check_flags ,
2010-09-12 02:27:24 +04:00
. chipset_flush = i830_chipset_flush ,
2010-09-08 23:18:53 +04:00
} ;
static const struct intel_gtt_driver i915_gtt_driver = {
. gen = 3 ,
2010-10-31 13:37:02 +03:00
. has_pgtbl_enable = 1 ,
2010-08-29 19:35:30 +04:00
. setup = i9xx_setup ,
2010-09-12 19:11:15 +04:00
. cleanup = i9xx_cleanup ,
2010-09-08 00:41:04 +04:00
/* i945 is the last gpu to need phys mem (for overlay and cursors). */
2010-11-04 22:07:57 +03:00
. write_entry = i830_write_entry ,
2010-09-12 18:38:55 +04:00
. dma_mask_size = 32 ,
2010-09-12 00:12:11 +04:00
. check_flags = i830_check_flags ,
2010-09-12 02:27:24 +04:00
. chipset_flush = i9xx_chipset_flush ,
2010-09-08 23:18:53 +04:00
} ;
static const struct intel_gtt_driver g33_gtt_driver = {
. gen = 3 ,
. is_g33 = 1 ,
2010-08-29 19:35:30 +04:00
. setup = i9xx_setup ,
2010-09-12 19:11:15 +04:00
. cleanup = i9xx_cleanup ,
2010-09-11 16:01:43 +04:00
. write_entry = i965_write_entry ,
2010-09-12 18:38:55 +04:00
. dma_mask_size = 36 ,
2010-09-12 01:48:25 +04:00
. check_flags = i830_check_flags ,
2010-09-12 02:27:24 +04:00
. chipset_flush = i9xx_chipset_flush ,
2010-09-08 23:18:53 +04:00
} ;
static const struct intel_gtt_driver pineview_gtt_driver = {
. gen = 3 ,
. is_pineview = 1 , . is_g33 = 1 ,
2010-08-29 19:35:30 +04:00
. setup = i9xx_setup ,
2010-09-12 19:11:15 +04:00
. cleanup = i9xx_cleanup ,
2010-09-11 16:01:43 +04:00
. write_entry = i965_write_entry ,
2010-09-12 18:38:55 +04:00
. dma_mask_size = 36 ,
2010-09-12 01:48:25 +04:00
. check_flags = i830_check_flags ,
2010-09-12 02:27:24 +04:00
. chipset_flush = i9xx_chipset_flush ,
2010-09-08 23:18:53 +04:00
} ;
static const struct intel_gtt_driver i965_gtt_driver = {
. gen = 4 ,
2010-10-31 13:37:02 +03:00
. has_pgtbl_enable = 1 ,
2010-08-29 19:35:30 +04:00
. setup = i9xx_setup ,
2010-09-12 19:11:15 +04:00
. cleanup = i9xx_cleanup ,
2010-09-11 16:01:43 +04:00
. write_entry = i965_write_entry ,
2010-09-12 18:38:55 +04:00
. dma_mask_size = 36 ,
2010-09-12 01:48:25 +04:00
. check_flags = i830_check_flags ,
2010-09-12 02:27:24 +04:00
. chipset_flush = i9xx_chipset_flush ,
2010-09-08 23:18:53 +04:00
} ;
static const struct intel_gtt_driver g4x_gtt_driver = {
. gen = 5 ,
2010-08-29 19:35:30 +04:00
. setup = i9xx_setup ,
2010-09-12 19:11:15 +04:00
. cleanup = i9xx_cleanup ,
2010-09-11 16:01:43 +04:00
. write_entry = i965_write_entry ,
2010-09-12 18:38:55 +04:00
. dma_mask_size = 36 ,
2010-09-12 01:48:25 +04:00
. check_flags = i830_check_flags ,
2010-09-12 02:27:24 +04:00
. chipset_flush = i9xx_chipset_flush ,
2010-09-08 23:18:53 +04:00
} ;
static const struct intel_gtt_driver ironlake_gtt_driver = {
. gen = 5 ,
. is_ironlake = 1 ,
2010-08-29 19:35:30 +04:00
. setup = i9xx_setup ,
2010-09-12 19:11:15 +04:00
. cleanup = i9xx_cleanup ,
2010-09-11 16:01:43 +04:00
. write_entry = i965_write_entry ,
2010-09-12 18:38:55 +04:00
. dma_mask_size = 36 ,
2010-09-12 01:48:25 +04:00
. check_flags = i830_check_flags ,
2010-09-12 02:27:24 +04:00
. chipset_flush = i9xx_chipset_flush ,
2010-09-08 23:18:53 +04:00
} ;
static const struct intel_gtt_driver sandybridge_gtt_driver = {
. gen = 6 ,
2010-08-29 19:35:30 +04:00
. setup = i9xx_setup ,
2010-09-12 19:11:15 +04:00
. cleanup = gen6_cleanup ,
2010-09-09 19:52:20 +04:00
. write_entry = gen6_write_entry ,
2010-09-12 18:38:55 +04:00
. dma_mask_size = 40 ,
2010-09-12 01:55:20 +04:00
. check_flags = gen6_check_flags ,
2010-09-12 02:27:24 +04:00
. chipset_flush = i9xx_chipset_flush ,
2010-09-08 23:18:53 +04:00
} ;

/* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 * driver and gmch_driver must be non-null, and find_gmch will determine
 * which one should be used if a gmch_chip_id is present.
 */
static const struct intel_gtt_driver_description {
	unsigned int gmch_chip_id;
	char *name;
	const struct intel_gtt_driver *gtt_driver;
} intel_gtt_chipsets[] = {
	{ PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
		&i81x_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82845G_IG, "845G",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82854_IG, "854",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_82865_IG, "865",
		&i8xx_gtt_driver},
	{ PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
		&i915_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
		&i965_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G33_IG, "G33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
		&g33_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
		&pineview_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_G41_IG, "G41",
		&g4x_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
	    "HD Graphics", &ironlake_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
	    "Sandybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT1_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_GT2_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT1_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_M_GT2_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT1_IG,
	    "Ivybridge", &sandybridge_gtt_driver },
	{ 0, NULL, NULL }
};

static int find_gmch(u16 device)
{
	struct pci_dev *gmch_device;

	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
					     device, gmch_device);
	}

	if (!gmch_device)
		return 0;

	intel_private.pcidev = gmch_device;
	return 1;
}

int intel_gmch_probe(struct pci_dev *pdev,
		     struct agp_bridge_data *bridge)
{
	int i, mask;
	intel_private.driver = NULL;

	for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
		if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
			intel_private.driver =
				intel_gtt_chipsets[i].gtt_driver;
			break;
		}
	}

	if (!intel_private.driver)
		return 0;

	bridge->driver = &intel_fake_agp_driver;
	bridge->dev_private_data = &intel_private;
	bridge->dev = pdev;

	intel_private.bridge_dev = pci_dev_get(pdev);

	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_gtt_chipsets[i].name);

	mask = intel_private.driver->dma_mask_size;
	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
		dev_err(&intel_private.pcidev->dev,
			"set gfx device dma mask %d-bit failed!\n", mask);
	else
		pci_set_consistent_dma_mask(intel_private.pcidev,
					    DMA_BIT_MASK(mask));

	/*if (bridge->driver == &intel_810_driver)
		return 1;*/

	if (intel_gtt_init() != 0)
		return 0;

	return 1;
}
EXPORT_SYMBOL(intel_gmch_probe);

const struct intel_gtt *intel_gtt_get(void)
{
	return &intel_private.base;
}
EXPORT_SYMBOL(intel_gtt_get);

void intel_gtt_chipset_flush(void)
{
	if (intel_private.driver->chipset_flush)
		intel_private.driver->chipset_flush();
}
EXPORT_SYMBOL(intel_gtt_chipset_flush);

void intel_gmch_remove(struct pci_dev *pdev)
{
	if (intel_private.pcidev)
		pci_dev_put(intel_private.pcidev);
	if (intel_private.bridge_dev)
		pci_dev_put(intel_private.bridge_dev);
}
EXPORT_SYMBOL(intel_gmch_remove);

MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
MODULE_LICENSE("GPL and additional rights");