/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
# ifndef __RADEON_H__
# define __RADEON_H__
# include "radeon_object.h"

/* TODO: Here are things that need to be done:
 * - surface allocator & initializer: (a bit like scratch reg) should
 *   initialize HDP_ stuff on RS600, R600, R700 hw, well anything
 *   related to surfaces
 * - WB: write back stuff (do it a bit like the scratch reg things)
 * - Vblank: look at Jesse's rework and what we should do
 * - r600/r700: gart & cp
 * - cs: clean up the cs ioctl, use a bitmap & things like that
 * - power management stuff
 * - Barrier in gart code
 * - Unmappable vram?
 * - TESTING, TESTING, TESTING
 */
# include <asm/atomic.h>
# include <linux/wait.h>
# include <linux/list.h>
# include <linux/kref.h>
# include "radeon_mode.h"
# include "radeon_reg.h"
# include "r300.h"

/*
 * Module parameters.
 */
extern int radeon_no_wb;
extern int radeon_modeset;
extern int radeon_dynclks;
extern int radeon_r4xx_atom;
extern int radeon_agpmode;
extern int radeon_vram_limit;
extern int radeon_gart_size;
extern int radeon_benchmarking;
extern int radeon_testing;
extern int radeon_connector_table;

/*
 * Copied from radeon_drv.h so we don't have to include both and have
 * conflicting symbols.
 */
# define RADEON_MAX_USEC_TIMEOUT 100000 /* 100 ms */
# define RADEON_IB_POOL_SIZE 16
# define RADEON_DEBUGFS_MAX_NUM_FILES 32
# define RADEONFB_CONN_LIMIT 4

enum radeon_family {
	CHIP_R100,
	CHIP_RV100,
	CHIP_RS100,
	CHIP_RV200,
	CHIP_RS200,
	CHIP_R200,
	CHIP_RV250,
	CHIP_RS300,
	CHIP_RV280,
	CHIP_R300,
	CHIP_R350,
	CHIP_RV350,
	CHIP_RV380,
	CHIP_R420,
	CHIP_R423,
	CHIP_RV410,
	CHIP_RS400,
	CHIP_RS480,
	CHIP_RS600,
	CHIP_RS690,
	CHIP_RS740,
	CHIP_RV515,
	CHIP_R520,
	CHIP_RV530,
	CHIP_RV560,
	CHIP_RV570,
	CHIP_R580,
	CHIP_R600,
	CHIP_RV610,
	CHIP_RV630,
	CHIP_RV620,
	CHIP_RV635,
	CHIP_RV670,
	CHIP_RS780,
	CHIP_RV770,
	CHIP_RV730,
	CHIP_RV710,
	CHIP_RS880,
	CHIP_LAST,
};

enum radeon_chip_flags {
	RADEON_FAMILY_MASK = 0x0000ffffUL,
	RADEON_FLAGS_MASK = 0xffff0000UL,
	RADEON_IS_MOBILITY = 0x00010000UL,
	RADEON_IS_IGP = 0x00020000UL,
	RADEON_SINGLE_CRTC = 0x00040000UL,
	RADEON_IS_AGP = 0x00080000UL,
	RADEON_HAS_HIERZ = 0x00100000UL,
	RADEON_IS_PCIE = 0x00200000UL,
	RADEON_NEW_MEMMAP = 0x00400000UL,
	RADEON_IS_PCI = 0x00800000UL,
	RADEON_IS_IGPGART = 0x01000000UL,
};

/*
 * Errata workarounds.
 */
enum radeon_pll_errata {
	CHIP_ERRATA_R300_CG = 0x00000001,
	CHIP_ERRATA_PLL_DUMMYREADS = 0x00000002,
	CHIP_ERRATA_PLL_DELAY = 0x00000004
};

struct radeon_device;


/*
 * BIOS.
 */
bool radeon_get_bios(struct radeon_device *rdev);

/*
 * Clocks
 */
struct radeon_clock {
	struct radeon_pll p1pll;
	struct radeon_pll p2pll;
	struct radeon_pll spll;
	struct radeon_pll mpll;
	/* 10 kHz units */
	uint32_t default_mclk;
	uint32_t default_sclk;
};

/*
 * Fences.
 */
struct radeon_fence_driver {
	uint32_t scratch_reg;
	atomic_t seq;
	uint32_t last_seq;
	unsigned long count_timeout;
	wait_queue_head_t queue;
	rwlock_t lock;
	struct list_head created;
	struct list_head emited;
	struct list_head signaled;
};

struct radeon_fence {
	struct radeon_device *rdev;
	struct kref kref;
	struct list_head list;
	/* protected by radeon_fence.lock */
	uint32_t seq;
	unsigned long timeout;
	bool emited;
	bool signaled;
};

int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
void radeon_fence_process(struct radeon_device *rdev);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
int radeon_fence_wait_next(struct radeon_device *rdev);
int radeon_fence_wait_last(struct radeon_device *rdev);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
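
/*
 * Usage sketch (illustrative only, error handling abbreviated): a fence is
 * created, emitted after the commands it should signal, waited on and then
 * dropped.  The helper name is made up for illustration; "false" selects an
 * uninterruptible wait per the radeon_fence_wait() prototype above.
 */
static inline int radeon_fence_usage_sketch(struct radeon_device *rdev)
{
	struct radeon_fence *fence = NULL;
	int r;

	r = radeon_fence_create(rdev, &fence);
	if (r)
		return r;
	/* ... emit the commands this fence should follow ... */
	r = radeon_fence_emit(rdev, fence);
	if (r) {
		radeon_fence_unref(&fence);
		return r;
	}
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}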

/*
 * Tiling registers
 */
struct radeon_surface_reg {
	struct radeon_object *robj;
};

# define RADEON_GEM_MAX_SURFACES 8

/*
 * Radeon buffer.
 */
struct radeon_object;

struct radeon_object_list {
	struct list_head list;
	struct radeon_object *robj;
	uint64_t gpu_offset;
	unsigned rdomain;
	unsigned wdomain;
	uint32_t tiling_flags;
};

int radeon_object_init(struct radeon_device *rdev);
void radeon_object_fini(struct radeon_device *rdev);
int radeon_object_create(struct radeon_device *rdev,
			 struct drm_gem_object *gobj,
			 unsigned long size,
			 bool kernel,
			 uint32_t domain,
			 bool interruptible,
			 struct radeon_object **robj_ptr);
int radeon_object_kmap(struct radeon_object *robj, void **ptr);
void radeon_object_kunmap(struct radeon_object *robj);
void radeon_object_unref(struct radeon_object **robj);
int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
		      uint64_t *gpu_addr);
void radeon_object_unpin(struct radeon_object *robj);
int radeon_object_wait(struct radeon_object *robj);
int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
int radeon_object_evict_vram(struct radeon_device *rdev);
int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
void radeon_object_force_delete(struct radeon_device *rdev);
void radeon_object_list_add_object(struct radeon_object_list *lobj,
				   struct list_head *head);
int radeon_object_list_validate(struct list_head *head, void *fence);
void radeon_object_list_unvalidate(struct list_head *head);
void radeon_object_list_clean(struct list_head *head);
int radeon_object_fbdev_mmap(struct radeon_object *robj,
			     struct vm_area_struct *vma);
unsigned long radeon_object_size(struct radeon_object *robj);
void radeon_object_clear_surface_reg(struct radeon_object *robj);
int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
			       bool force_drop);
void radeon_object_set_tiling_flags(struct radeon_object *robj,
				    uint32_t tiling_flags, uint32_t pitch);
void radeon_object_get_tiling_flags(struct radeon_object *robj,
				    uint32_t *tiling_flags, uint32_t *pitch);
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem);
void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
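
/*
 * Usage sketch (illustrative only): a kernel buffer object is created in a
 * placement domain, pinned to obtain a GPU address and mapped for CPU
 * access.  The domain value is left to the caller on purpose; use whatever
 * RADEON_GEM_DOMAIN_* flag from radeon_drm.h matches the desired placement.
 */
static inline int radeon_object_usage_sketch(struct radeon_device *rdev,
					     uint32_t domain)
{
	struct radeon_object *robj = NULL;
	uint64_t gpu_addr;
	void *cpu_ptr;
	int r;

	r = radeon_object_create(rdev, NULL, 4096, true, domain, true, &robj);
	if (r)
		return r;
	r = radeon_object_pin(robj, domain, &gpu_addr);
	if (r)
		goto out_unref;
	r = radeon_object_kmap(robj, &cpu_ptr);
	if (r)
		goto out_unpin;
	/* ... fill the buffer through cpu_ptr, hand gpu_addr to the hw ... */
	radeon_object_kunmap(robj);
out_unpin:
	radeon_object_unpin(robj);
out_unref:
	radeon_object_unref(&robj);
	return r;
}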

/*
 * GEM objects.
 */
struct radeon_gem {
	struct list_head objects;
};

int radeon_gem_init(struct radeon_device *rdev);
void radeon_gem_fini(struct radeon_device *rdev);
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     bool interruptible,
			     struct drm_gem_object **obj);
int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
			  uint64_t *gpu_addr);
void radeon_gem_object_unpin(struct drm_gem_object *obj);

/*
 * GART structures, functions & helpers
 */
struct radeon_mc;

struct radeon_gart_table_ram {
	volatile uint32_t *ptr;
};

struct radeon_gart_table_vram {
	struct radeon_object *robj;
	volatile uint32_t *ptr;
};

union radeon_gart_table {
	struct radeon_gart_table_ram ram;
	struct radeon_gart_table_vram vram;
};

struct radeon_gart {
	dma_addr_t table_addr;
	unsigned num_gpu_pages;
	unsigned num_cpu_pages;
	unsigned table_size;
	union radeon_gart_table table;
	struct page **pages;
	dma_addr_t *pages_addr;
	bool ready;
};

int radeon_gart_table_ram_alloc(struct radeon_device *rdev);
void radeon_gart_table_ram_free(struct radeon_device *rdev);
int radeon_gart_table_vram_alloc(struct radeon_device *rdev);
void radeon_gart_table_vram_free(struct radeon_device *rdev);
int radeon_gart_init(struct radeon_device *rdev);
void radeon_gart_fini(struct radeon_device *rdev);
void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
			int pages);
int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
		     int pages, struct page **pagelist);
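
/*
 * Usage sketch (illustrative only): a page list is bound at a GART offset
 * and later unbound over the same range when the pages are released.  The
 * GPU can reach the pages through the GART aperture in between; how the
 * offset maps into that aperture is a detail of radeon_gart.c, not shown
 * here.
 */
static inline int radeon_gart_bind_sketch(struct radeon_device *rdev,
					  unsigned offset, int npages,
					  struct page **pagelist)
{
	int r;

	r = radeon_gart_bind(rdev, offset, npages, pagelist);
	if (r)
		return r;
	/* ... GPU accesses through the GART window happen here ... */
	radeon_gart_unbind(rdev, offset, npages);
	return 0;
}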

/*
 * GPU MC structures, functions & helpers
 */
struct radeon_mc {
	resource_size_t aper_size;
	resource_size_t aper_base;
	resource_size_t agp_base;
	unsigned gtt_location;
	unsigned gtt_size;
	unsigned vram_location;
	/* for some chips with <= 32MB we need to lie
	 * about vram size near mc fb location */
	unsigned mc_vram_size;
	unsigned vram_width;
	unsigned real_vram_size;
	int vram_mtrr;
	bool vram_is_ddr;
};

int radeon_mc_setup(struct radeon_device *rdev);

/*
 * GPU scratch registers structures, functions & helpers
 */
struct radeon_scratch {
	unsigned num_reg;
	bool free[32];
	uint32_t reg[32];
};

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg);
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg);
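
/*
 * Usage sketch (illustrative only): a scratch register is borrowed from the
 * pool above, used (for instance as a location the CP can write a sequence
 * number to), and handed back with radeon_scratch_free().  *reg receives a
 * register offset taken from the reg[] array of the pool.
 */
static inline int radeon_scratch_usage_sketch(struct radeon_device *rdev)
{
	uint32_t reg;
	int r;

	r = radeon_scratch_get(rdev, &reg);
	if (r)
		return r;
	/* ... point a fence or writeback slot at "reg", poll it with RREG32(reg) ... */
	radeon_scratch_free(rdev, reg);
	return 0;
}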

/*
 * IRQS.
 */
struct radeon_irq {
	bool installed;
	bool sw_int;
	/* FIXME: use a define for the max number of crtcs rather than hardcoding it */
	bool crtc_vblank_int[2];
};

int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);

/*
 * CP & ring.
 */
struct radeon_ib {
	struct list_head list;
	unsigned long idx;
	uint64_t gpu_addr;
	struct radeon_fence *fence;
	volatile uint32_t *ptr;
	uint32_t length_dw;
};

struct radeon_ib_pool {
	struct mutex mutex;
	struct radeon_object *robj;
	struct list_head scheduled_ibs;
	struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
	bool ready;
	DECLARE_BITMAP(alloc_bm, RADEON_IB_POOL_SIZE);
};

struct radeon_cp {
	struct radeon_object *ring_obj;
	volatile uint32_t *ring;
	unsigned rptr;
	unsigned wptr;
	unsigned wptr_old;
	unsigned ring_size;
	unsigned ring_free_dw;
	int count_dw;
	uint64_t gpu_addr;
	uint32_t align_mask;
	uint32_t ptr_mask;
	struct mutex mutex;
	bool ready;
};

int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
/* Ring access between begin & end cannot sleep */
void radeon_ring_free_size(struct radeon_device *rdev);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
void radeon_ring_unlock_commit(struct radeon_device *rdev);
void radeon_ring_unlock_undo(struct radeon_device *rdev);
int radeon_ring_test(struct radeon_device *rdev);
int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
void radeon_ring_fini(struct radeon_device *rdev);
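
/*
 * Usage sketch (illustrative only): an indirect buffer is taken from the
 * pool, filled with packets through ib->ptr, sized via ib->length_dw and
 * handed to the CP with radeon_ib_schedule(); radeon_ib_free() gives it
 * back to the pool (fence handling and error paths are abbreviated).  The
 * single dword written here is the CP_PACKET2 padding nop documented in
 * the RING helpers further below.
 */
static inline int radeon_ib_usage_sketch(struct radeon_device *rdev)
{
	struct radeon_ib *ib = NULL;
	int r;

	r = radeon_ib_get(rdev, &ib);
	if (r)
		return r;
	ib->ptr[0] = 0x80000000;	/* CP_PACKET2 padding nop */
	ib->length_dw = 1;
	r = radeon_ib_schedule(rdev, ib);
	radeon_ib_free(rdev, &ib);
	return r;
}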

/*
 * CS.
 */
struct radeon_cs_reloc {
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	struct radeon_object_list lobj;
	uint32_t handle;
	uint32_t flags;
};

struct radeon_cs_chunk {
	uint32_t chunk_id;
	uint32_t length_dw;
	uint32_t *kdata;
};

struct radeon_cs_parser {
	struct radeon_device *rdev;
	struct drm_file *filp;
	/* chunks */
	unsigned nchunks;
	struct radeon_cs_chunk *chunks;
	uint64_t *chunks_array;
	/* IB */
	unsigned idx;
	/* relocations */
	unsigned nrelocs;
	struct radeon_cs_reloc *relocs;
	struct radeon_cs_reloc **relocs_ptr;
	struct list_head validated;
	/* indices of various chunks */
	int chunk_ib_idx;
	int chunk_relocs_idx;
	struct radeon_ib *ib;
	void *track;
};

struct radeon_cs_packet {
	unsigned idx;
	unsigned type;
	unsigned reg;
	unsigned opcode;
	int count;
	unsigned one_reg_wr;
};

typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt,
				      unsigned idx, unsigned reg);
typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt);

/*
 * AGP
 */
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_fini(struct radeon_device *rdev);


/*
 * Writeback
 */
struct radeon_wb {
	struct radeon_object *wb_obj;
	volatile uint32_t *wb;
	uint64_t gpu_addr;
};

/**
 * struct radeon_pm - power management data
 * @max_bandwidth: maximum bandwidth the gpu has (MByte/s)
 * @igp_sideport_mclk: sideport memory clock MHz (rs690, rs740, rs780, rs880)
 * @igp_system_mclk: system clock MHz (rs690, rs740, rs780, rs880)
 * @igp_ht_link_clk: ht link clock MHz (rs690, rs740, rs780, rs880)
 * @igp_ht_link_width: ht link width in bits (rs690, rs740, rs780, rs880)
 * @k8_bandwidth: k8 bandwidth the gpu has (MByte/s) (IGP)
 * @sideport_bandwidth: sideport bandwidth the gpu has (MByte/s) (IGP)
 * @ht_bandwidth: ht bandwidth the gpu has (MByte/s) (IGP)
 * @core_bandwidth: core GPU bandwidth the gpu has (MByte/s) (IGP)
 * @sclk: GPU clock MHz (core bandwidth depends on this clock)
 * @needed_bandwidth: current bandwidth needs
 *
 * It keeps track of various data needed to take power management decisions.
 * Bandwidth need is used to determine the minimum clock of the GPU and memory.
 * The equation between gpu/memory clock and available bandwidth is hw
 * dependent (type of memory, bus size, efficiency, ...).
 */
struct radeon_pm {
	fixed20_12 max_bandwidth;
	fixed20_12 igp_sideport_mclk;
	fixed20_12 igp_system_mclk;
	fixed20_12 igp_ht_link_clk;
	fixed20_12 igp_ht_link_width;
	fixed20_12 k8_bandwidth;
	fixed20_12 sideport_bandwidth;
	fixed20_12 ht_bandwidth;
	fixed20_12 core_bandwidth;
	fixed20_12 sclk;
	fixed20_12 needed_bandwidth;
};
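
/*
 * Worked example (illustrative only): the fields above are 20.12 fixed
 * point values.  Assuming fixed20_12 (from radeon_fixed.h) exposes its raw
 * 32-bit value as ".full", a bandwidth of 6400 MByte/s is stored as
 * 6400 << 12, and comparing needed against available bandwidth is a plain
 * integer comparison of the raw values.
 */
static inline bool radeon_pm_bandwidth_fits_sketch(struct radeon_pm *pm)
{
	fixed20_12 example;

	example.full = 6400 << 12;	/* 6400.0 MByte/s in 20.12 fixed point */
	return pm->needed_bandwidth.full <= example.full;
}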

/*
 * Benchmarking
 */
void radeon_benchmark(struct radeon_device *rdev);


/*
 * Testing
 */
void radeon_test_moves(struct radeon_device *rdev);


/*
 * Debugfs
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);
int r100_debugfs_rbbm_init(struct radeon_device *rdev);
int r100_debugfs_cp_init(struct radeon_device *rdev);

/*
 * ASIC specific functions.
 */
struct radeon_asic {
	int (*init)(struct radeon_device *rdev);
	void (*errata)(struct radeon_device *rdev);
	void (*vram_info)(struct radeon_device *rdev);
	int (*gpu_reset)(struct radeon_device *rdev);
	int (*mc_init)(struct radeon_device *rdev);
	void (*mc_fini)(struct radeon_device *rdev);
	int (*wb_init)(struct radeon_device *rdev);
	void (*wb_fini)(struct radeon_device *rdev);
	int (*gart_enable)(struct radeon_device *rdev);
	void (*gart_disable)(struct radeon_device *rdev);
	void (*gart_tlb_flush)(struct radeon_device *rdev);
	int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
	int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
	void (*cp_fini)(struct radeon_device *rdev);
	void (*cp_disable)(struct radeon_device *rdev);
	void (*ring_start)(struct radeon_device *rdev);
	int (*irq_set)(struct radeon_device *rdev);
	int (*irq_process)(struct radeon_device *rdev);
	u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
	void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
	int (*cs_parse)(struct radeon_cs_parser *p);
	int (*copy_blit)(struct radeon_device *rdev,
			 uint64_t src_offset,
			 uint64_t dst_offset,
			 unsigned num_pages,
			 struct radeon_fence *fence);
	int (*copy_dma)(struct radeon_device *rdev,
			uint64_t src_offset,
			uint64_t dst_offset,
			unsigned num_pages,
			struct radeon_fence *fence);
	int (*copy)(struct radeon_device *rdev,
		    uint64_t src_offset,
		    uint64_t dst_offset,
		    unsigned num_pages,
		    struct radeon_fence *fence);
	void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
	void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
	void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
	void (*set_clock_gating)(struct radeon_device *rdev, int enable);
	int (*set_surface_reg)(struct radeon_device *rdev, int reg,
			       uint32_t tiling_flags, uint32_t pitch,
			       uint32_t offset, uint32_t obj_size);
	int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
	void (*bandwidth_update)(struct radeon_device *rdev);
};

struct r100_asic {
	const unsigned *reg_safe_bm;
	unsigned reg_safe_bm_size;
};

union radeon_asic_config {
	struct r300_asic r300;
	struct r100_asic r100;
};

/*
 * IOCTL.
 */
int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp);
int radeon_gem_pin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int radeon_gem_unpin_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int radeon_gem_pread_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);
int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp);

/*
 * Core structure, functions and helpers.
 */
typedef uint32_t (*radeon_rreg_t)(struct radeon_device *, uint32_t);
typedef void (*radeon_wreg_t)(struct radeon_device *, uint32_t, uint32_t);

struct radeon_device {
	struct drm_device *ddev;
	struct pci_dev *pdev;
	/* ASIC */
	union radeon_asic_config config;
	enum radeon_family family;
	unsigned long flags;
	int usec_timeout;
	enum radeon_pll_errata pll_errata;
	int num_gb_pipes;
	int num_z_pipes;
	int disp_priority;
	/* BIOS */
	uint8_t *bios;
	bool is_atom_bios;
	uint16_t bios_header_start;
	struct radeon_object *stollen_vga_memory;
	struct fb_info *fbdev_info;
	struct radeon_object *fbdev_robj;
	struct radeon_framebuffer *fbdev_rfb;
	/* Register mmio */
	resource_size_t rmmio_base;
	resource_size_t rmmio_size;
	void *rmmio;
	radeon_rreg_t mc_rreg;
	radeon_wreg_t mc_wreg;
	radeon_rreg_t pll_rreg;
	radeon_wreg_t pll_wreg;
	uint32_t pcie_reg_mask;
	radeon_rreg_t pciep_rreg;
	radeon_wreg_t pciep_wreg;
	struct radeon_clock clock;
	struct radeon_mc mc;
	struct radeon_gart gart;
	struct radeon_mode_info mode_info;
	struct radeon_scratch scratch;
	struct radeon_mman mman;
	struct radeon_fence_driver fence_drv;
	struct radeon_cp cp;
	struct radeon_ib_pool ib_pool;
	struct radeon_irq irq;
	struct radeon_asic *asic;
	struct radeon_gem gem;
	struct radeon_pm pm;
	struct mutex cs_mutex;
	struct radeon_wb wb;
	bool gpu_lockup;
	bool shutdown;
	bool suspend;
	bool need_dma32;
	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
	const struct firmware *fw;	/* firmware */
};

int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags);
void radeon_device_fini(struct radeon_device *rdev);
int radeon_gpu_wait_for_idle(struct radeon_device *rdev);

/* MMIO register access; registers above the directly mapped range go
 * through the RADEON_MM_INDEX/RADEON_MM_DATA indirect pair. */
static inline uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg)
{
	if (reg < 0x10000)
		return readl(((void __iomem *)rdev->rmmio) + reg);
	else {
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
		return readl(((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	}
}

static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	if (reg < 0x10000)
		writel(v, ((void __iomem *)rdev->rmmio) + reg);
	else {
		writel(reg, ((void __iomem *)rdev->rmmio) + RADEON_MM_INDEX);
		writel(v, ((void __iomem *)rdev->rmmio) + RADEON_MM_DATA);
	}
}
/*
 * Registers read & write functions.
*/
# define RREG8(reg) readb(((void __iomem *)rdev->rmmio) + (reg))
# define WREG8(reg, v) writeb(v, ((void __iomem *)rdev->rmmio) + (reg))
# define RREG32(reg) r100_mm_rreg(rdev, (reg))
# define WREG32(reg, v) r100_mm_wreg(rdev, (reg), (v))
# define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK)
# define REG_GET(FIELD, v) (((v) & FIELD##_MASK) >> FIELD##_SHIFT)
# define RREG32_PLL(reg) rdev->pll_rreg(rdev, (reg))
# define WREG32_PLL(reg, v) rdev->pll_wreg(rdev, (reg), (v))
# define RREG32_MC(reg) rdev->mc_rreg(rdev, (reg))
# define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
# define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
# define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
# define WREG32_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32(reg);		\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32(reg, tmp_);			\
	} while (0)
# define WREG32_PLL_P(reg, val, mask)			\
	do {						\
		uint32_t tmp_ = RREG32_PLL(reg);	\
		tmp_ &= (mask);				\
		tmp_ |= ((val) & ~(mask));		\
		WREG32_PLL(reg, tmp_);			\
	} while (0)
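
/*
 * Usage sketch (illustrative only): WREG32_P() is a read-modify-write that
 * keeps the bits selected by "mask" and replaces the remaining bits with
 * the corresponding bits of "val".  Like RREG32/WREG32 it expects a local
 * variable named rdev in the caller; the register offset below is a
 * placeholder, not a real radeon register.
 */
static inline void radeon_wreg32_p_sketch(struct radeon_device *rdev)
{
	/* keep the low 16 bits of the hypothetical register at 0x1234,
	 * force the high 16 bits to 0xabcd0000 */
	WREG32_P(0x1234, 0xabcd0000, 0x0000ffff);
}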

/*
 * Indirect registers accessor
 */
static inline uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg)
{
	uint32_t r;

	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	r = RREG32(RADEON_PCIE_DATA);
	return r;
}

static inline void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
{
	WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask));
	WREG32(RADEON_PCIE_DATA, (v));
}

void r100_pll_errata_after_index(struct radeon_device *rdev);


/*
 * ASICs helpers.
 */
# define ASIC_IS_RN50(rdev) ((rdev->pdev->device == 0x515e) || \
			     (rdev->pdev->device == 0x5969))
# define ASIC_IS_RV100(rdev) ((rdev->family == CHIP_RV100) || \
			      (rdev->family == CHIP_RV200) || \
			      (rdev->family == CHIP_RS100) || \
			      (rdev->family == CHIP_RS200) || \
			      (rdev->family == CHIP_RV250) || \
			      (rdev->family == CHIP_RV280) || \
			      (rdev->family == CHIP_RS300))
# define ASIC_IS_R300(rdev) ((rdev->family == CHIP_R300) || \
			     (rdev->family == CHIP_RV350) || \
			     (rdev->family == CHIP_R350) || \
			     (rdev->family == CHIP_RV380) || \
			     (rdev->family == CHIP_R420) || \
			     (rdev->family == CHIP_R423) || \
			     (rdev->family == CHIP_RV410) || \
			     (rdev->family == CHIP_RS400) || \
			     (rdev->family == CHIP_RS480))
# define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
# define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
# define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))

/*
 * BIOS helpers.
 */
# define RBIOS8(i) (rdev->bios[i])
# define RBIOS16(i) (RBIOS8(i) | (RBIOS8((i)+1) << 8))
# define RBIOS32(i) ((RBIOS16(i)) | (RBIOS16((i)+2) << 16))

int radeon_combios_init(struct radeon_device *rdev);
void radeon_combios_fini(struct radeon_device *rdev);
int radeon_atombios_init(struct radeon_device *rdev);
void radeon_atombios_fini(struct radeon_device *rdev);
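
/*
 * Usage sketch (illustrative only): the RBIOS* macros read little-endian
 * values out of the copied video BIOS image and, like the register macros,
 * rely on a local rdev variable.  A PCI expansion ROM starts with the
 * 0x55 0xAA signature, so a basic sanity check looks like this.
 */
static inline bool radeon_bios_signature_sketch(struct radeon_device *rdev)
{
	return rdev->bios && RBIOS16(0) == 0xaa55;
}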

/*
 * RING helpers.
 */
# define CP_PACKET0 0x00000000
# define PACKET0_BASE_INDEX_SHIFT 0
# define PACKET0_BASE_INDEX_MASK (0x1ffff << 0)
# define PACKET0_COUNT_SHIFT 16
# define PACKET0_COUNT_MASK (0x3fff << 16)
# define CP_PACKET1 0x40000000
# define CP_PACKET2 0x80000000
# define PACKET2_PAD_SHIFT 0
# define PACKET2_PAD_MASK (0x3fffffff << 0)
# define CP_PACKET3 0xC0000000
# define PACKET3_IT_OPCODE_SHIFT 8
# define PACKET3_IT_OPCODE_MASK (0xff << 8)
# define PACKET3_COUNT_SHIFT 16
# define PACKET3_COUNT_MASK (0x3fff << 16)
/* PACKET3 op code */
# define PACKET3_NOP 0x10
# define PACKET3_3D_DRAW_VBUF 0x28
# define PACKET3_3D_DRAW_IMMD 0x29
# define PACKET3_3D_DRAW_INDX 0x2A
# define PACKET3_3D_LOAD_VBPNTR 0x2F
# define PACKET3_INDX_BUFFER 0x33
# define PACKET3_3D_DRAW_VBUF_2 0x34
# define PACKET3_3D_DRAW_IMMD_2 0x35
# define PACKET3_3D_DRAW_INDX_2 0x36
# define PACKET3_BITBLT_MULTI 0x9B
# define PACKET0(reg, n) (CP_PACKET0 | \
REG_SET ( PACKET0_BASE_INDEX , ( reg ) > > 2 ) | \
REG_SET ( PACKET0_COUNT , ( n ) ) )
# define PACKET2(v) (CP_PACKET2 | REG_SET(PACKET2_PAD, (v)))
# define PACKET3(op, n) (CP_PACKET3 | \
REG_SET ( PACKET3_IT_OPCODE , ( op ) ) | \
REG_SET ( PACKET3_COUNT , ( n ) ) )
# define PACKET_TYPE0 0
# define PACKET_TYPE1 1
# define PACKET_TYPE2 2
# define PACKET_TYPE3 3
# define CP_PACKET_GET_TYPE(h) (((h) >> 30) & 3)
# define CP_PACKET_GET_COUNT(h) (((h) >> 16) & 0x3FFF)
# define CP_PACKET0_GET_REG(h) (((h) & 0x1FFF) << 2)
# define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
# define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)

static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
{
# if DRM_DEBUG_CODE
	if (rdev->cp.count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
# endif
	rdev->cp.ring[rdev->cp.wptr++] = v;
	rdev->cp.wptr &= rdev->cp.ptr_mask;
	rdev->cp.count_dw--;
	rdev->cp.ring_free_dw--;
}
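
/*
 * Usage sketch (illustrative only): ring emission is bracketed by
 * radeon_ring_lock()/radeon_ring_unlock_commit(); the code between the two
 * must not sleep (see the note above the ring prototypes).  PACKET0(reg, n)
 * writes n + 1 data dwords starting at a register offset; the register used
 * here is a placeholder, not a real one.
 */
static inline int radeon_ring_emit_sketch(struct radeon_device *rdev)
{
	int r;

	r = radeon_ring_lock(rdev, 2);	/* reserve two dwords */
	if (r)
		return r;
	radeon_ring_write(rdev, PACKET0(0x1234, 0));	/* header: one register write */
	radeon_ring_write(rdev, 0xcafebabe);		/* payload dword */
	radeon_ring_unlock_commit(rdev);
	return 0;
}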

/*
 * ASICs macros.
 */
# define radeon_init(rdev) (rdev)->asic->init((rdev))
# define radeon_cs_parse(p) rdev->asic->cs_parse((p))
# define radeon_errata(rdev) (rdev)->asic->errata((rdev))
# define radeon_vram_info(rdev) (rdev)->asic->vram_info((rdev))
# define radeon_gpu_reset(rdev) (rdev)->asic->gpu_reset((rdev))
# define radeon_mc_init(rdev) (rdev)->asic->mc_init((rdev))
# define radeon_mc_fini(rdev) (rdev)->asic->mc_fini((rdev))
# define radeon_wb_init(rdev) (rdev)->asic->wb_init((rdev))
# define radeon_wb_fini(rdev) (rdev)->asic->wb_fini((rdev))
# define radeon_gart_enable(rdev) (rdev)->asic->gart_enable((rdev))
# define radeon_gart_disable(rdev) (rdev)->asic->gart_disable((rdev))
# define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
# define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
# define radeon_cp_init(rdev,rsize) (rdev)->asic->cp_init((rdev), (rsize))
# define radeon_cp_fini(rdev) (rdev)->asic->cp_fini((rdev))
# define radeon_cp_disable(rdev) (rdev)->asic->cp_disable((rdev))
# define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
# define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
# define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
# define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
# define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
# define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
# define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
# define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
# define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
# define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
# define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
# define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
# define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
# define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
# define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
# endif