// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2014-2018 Etnaviv Project
 */
#include "etnaviv_cmdbuf.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"

#include "common.xml.h"
#include "state.xml.h"
#include "state_hi.xml.h"
#include "state_3d.xml.h"
#include "cmdstream.xml.h"
/*
 * Command buffer helpers: emit raw FE command words into the ring.
 */

/* Append a single 32-bit word at the current write position. */
static inline void OUT(struct etnaviv_cmdbuf *buffer, u32 data)
{
	u32 *words = (u32 *)buffer->vaddr;

	/* Writing past the end of the buffer is a driver bug. */
	BUG_ON(buffer->user_size >= buffer->size);

	words[buffer->user_size / 4] = data;
	buffer->user_size += 4;
}
/* Emit a LOAD_STATE command writing @value to the register @reg. */
static inline void CMD_LOAD_STATE(struct etnaviv_cmdbuf *buffer,
	u32 reg, u32 value)
{
	u32 header;

	/* Commands are 64-bit aligned in the stream. */
	buffer->user_size = ALIGN(buffer->user_size, 8);

	/* write a register via cmd stream */
	header = VIV_FE_LOAD_STATE_HEADER_OP_LOAD_STATE |
		 VIV_FE_LOAD_STATE_HEADER_COUNT(1) |
		 VIV_FE_LOAD_STATE_HEADER_OFFSET(reg >>
				VIV_FE_LOAD_STATE_HEADER_OFFSET__SHR);

	OUT(buffer, header);
	OUT(buffer, value);
}
static inline void CMD_END ( struct etnaviv_cmdbuf * buffer )
{
buffer - > user_size = ALIGN ( buffer - > user_size , 8 ) ;
OUT ( buffer , VIV_FE_END_HEADER_OP_END ) ;
}
static inline void CMD_WAIT ( struct etnaviv_cmdbuf * buffer )
{
buffer - > user_size = ALIGN ( buffer - > user_size , 8 ) ;
OUT ( buffer , VIV_FE_WAIT_HEADER_OP_WAIT | 200 ) ;
}
/*
 * Emit a LINK command: the FE branches to @address and prefetches
 * @prefetch 64-bit words from there.
 */
static inline void CMD_LINK(struct etnaviv_cmdbuf *buffer,
	u16 prefetch, u32 address)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_LINK_HEADER_OP_LINK |
		    VIV_FE_LINK_HEADER_PREFETCH(prefetch));
	OUT(buffer, address);
}
/* Emit a STALL command: @from waits until @to has passed the semaphore. */
static inline void CMD_STALL(struct etnaviv_cmdbuf *buffer,
	u32 from, u32 to)
{
	buffer->user_size = ALIGN(buffer->user_size, 8);

	OUT(buffer, VIV_FE_STALL_HEADER_OP_STALL);
	OUT(buffer, VIV_FE_STALL_TOKEN_FROM(from) | VIV_FE_STALL_TOKEN_TO(to));
}
2016-01-21 15:20:14 +00:00
static inline void CMD_SEM ( struct etnaviv_cmdbuf * buffer , u32 from , u32 to )
{
CMD_LOAD_STATE ( buffer , VIVS_GL_SEMAPHORE_TOKEN ,
VIVS_GL_SEMAPHORE_TOKEN_FROM ( from ) |
VIVS_GL_SEMAPHORE_TOKEN_TO ( to ) ) ;
}
2016-01-21 15:20:30 +00:00
static void etnaviv_cmd_select_pipe ( struct etnaviv_gpu * gpu ,
struct etnaviv_cmdbuf * buffer , u8 pipe )
2015-12-03 18:21:29 +01:00
{
2016-01-21 15:20:30 +00:00
u32 flush = 0 ;
2015-12-03 18:21:29 +01:00
2017-11-17 17:51:19 +01:00
lockdep_assert_held ( & gpu - > lock ) ;
2015-12-03 18:21:29 +01:00
/*
* This assumes that if we ' re switching to 2 D , we ' re switching
* away from 3 D , and vice versa . Hence , if we ' re switching to
* the 2 D core , we need to flush the 3 D depth and color caches ,
* otherwise we need to flush the 2 D pixel engine cache .
*/
2016-01-21 15:20:30 +00:00
if ( gpu - > exec_state = = ETNA_PIPE_2D )
2015-12-03 18:21:29 +01:00
flush = VIVS_GL_FLUSH_CACHE_PE2D ;
2016-01-21 15:20:30 +00:00
else if ( gpu - > exec_state = = ETNA_PIPE_3D )
flush = VIVS_GL_FLUSH_CACHE_DEPTH | VIVS_GL_FLUSH_CACHE_COLOR ;
2015-12-03 18:21:29 +01:00
CMD_LOAD_STATE ( buffer , VIVS_GL_FLUSH_CACHE , flush ) ;
2016-01-21 15:20:14 +00:00
CMD_SEM ( buffer , SYNC_RECIPIENT_FE , SYNC_RECIPIENT_PE ) ;
2015-12-03 18:21:29 +01:00
CMD_STALL ( buffer , SYNC_RECIPIENT_FE , SYNC_RECIPIENT_PE ) ;
CMD_LOAD_STATE ( buffer , VIVS_GL_PIPE_SELECT ,
VIVS_GL_PIPE_SELECT_PIPE ( pipe ) ) ;
}
/* Debug helper: hex-dump @len words of @buf starting at byte offset @off. */
static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
	struct etnaviv_cmdbuf *buf, u32 off, u32 len)
{
	u32 size = buf->size;
	u32 *ptr = buf->vaddr + off;

	dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
			ptr, etnaviv_cmdbuf_get_va(buf) + off, size - len * 4 - off);

	print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4,
			ptr, len * 4, 0);
}
2016-01-21 15:20:09 +00:00
/*
* Safely replace the WAIT of a waitlink with a new command and argument .
* The GPU may be executing this WAIT while we ' re modifying it , so we have
* to write it in a specific order to avoid the GPU branching to somewhere
* else . ' wl_offset ' is the offset to the first byte of the WAIT command .
*/
static void etnaviv_buffer_replace_wait ( struct etnaviv_cmdbuf * buffer ,
unsigned int wl_offset , u32 cmd , u32 arg )
{
u32 * lw = buffer - > vaddr + wl_offset ;
lw [ 1 ] = arg ;
mb ( ) ;
lw [ 0 ] = cmd ;
mb ( ) ;
}
2016-01-21 15:20:04 +00:00
/*
* Ensure that there is space in the command buffer to contiguously write
* ' cmd_dwords ' 64 - bit words into the buffer , wrapping if necessary .
*/
static u32 etnaviv_buffer_reserve ( struct etnaviv_gpu * gpu ,
struct etnaviv_cmdbuf * buffer , unsigned int cmd_dwords )
{
if ( buffer - > user_size + cmd_dwords * sizeof ( u64 ) > buffer - > size )
buffer - > user_size = 0 ;
2017-01-16 16:52:44 +01:00
return etnaviv_cmdbuf_get_va ( buffer ) + buffer - > user_size ;
2016-01-21 15:20:04 +00:00
}
2015-12-03 18:21:29 +01:00
u16 etnaviv_buffer_init ( struct etnaviv_gpu * gpu )
{
2017-11-24 16:56:37 +01:00
struct etnaviv_cmdbuf * buffer = & gpu - > buffer ;
2015-12-03 18:21:29 +01:00
2017-11-17 17:51:19 +01:00
lockdep_assert_held ( & gpu - > lock ) ;
2015-12-03 18:21:29 +01:00
/* initialize buffer */
buffer - > user_size = 0 ;
CMD_WAIT ( buffer ) ;
2017-01-16 16:52:44 +01:00
CMD_LINK ( buffer , 2 , etnaviv_cmdbuf_get_va ( buffer ) +
2016-08-17 14:57:51 +02:00
buffer - > user_size - 4 ) ;
2015-12-03 18:21:29 +01:00
return buffer - > user_size / 8 ;
}
2016-08-19 23:53:59 +02:00
u16 etnaviv_buffer_config_mmuv2 ( struct etnaviv_gpu * gpu , u32 mtlb_addr , u32 safe_addr )
{
2017-11-24 16:56:37 +01:00
struct etnaviv_cmdbuf * buffer = & gpu - > buffer ;
2016-08-19 23:53:59 +02:00
2017-11-17 17:51:19 +01:00
lockdep_assert_held ( & gpu - > lock ) ;
2016-08-19 23:53:59 +02:00
buffer - > user_size = 0 ;
if ( gpu - > identity . features & chipFeatures_PIPE_3D ) {
CMD_LOAD_STATE ( buffer , VIVS_GL_PIPE_SELECT ,
VIVS_GL_PIPE_SELECT_PIPE ( ETNA_PIPE_3D ) ) ;
CMD_LOAD_STATE ( buffer , VIVS_MMUv2_CONFIGURATION ,
mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K ) ;
CMD_LOAD_STATE ( buffer , VIVS_MMUv2_SAFE_ADDRESS , safe_addr ) ;
CMD_SEM ( buffer , SYNC_RECIPIENT_FE , SYNC_RECIPIENT_PE ) ;
CMD_STALL ( buffer , SYNC_RECIPIENT_FE , SYNC_RECIPIENT_PE ) ;
}
if ( gpu - > identity . features & chipFeatures_PIPE_2D ) {
CMD_LOAD_STATE ( buffer , VIVS_GL_PIPE_SELECT ,
VIVS_GL_PIPE_SELECT_PIPE ( ETNA_PIPE_2D ) ) ;
CMD_LOAD_STATE ( buffer , VIVS_MMUv2_CONFIGURATION ,
mtlb_addr | VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K ) ;
CMD_LOAD_STATE ( buffer , VIVS_MMUv2_SAFE_ADDRESS , safe_addr ) ;
CMD_SEM ( buffer , SYNC_RECIPIENT_FE , SYNC_RECIPIENT_PE ) ;
CMD_STALL ( buffer , SYNC_RECIPIENT_FE , SYNC_RECIPIENT_PE ) ;
}
CMD_END ( buffer ) ;
buffer - > user_size = ALIGN ( buffer - > user_size , 8 ) ;
return buffer - > user_size / 8 ;
}
2018-01-22 12:28:10 +01:00
u16 etnaviv_buffer_config_pta ( struct etnaviv_gpu * gpu )
{
struct etnaviv_cmdbuf * buffer = & gpu - > buffer ;
lockdep_assert_held ( & gpu - > lock ) ;
buffer - > user_size = 0 ;
CMD_LOAD_STATE ( buffer , VIVS_MMUv2_PTA_CONFIG ,
VIVS_MMUv2_PTA_CONFIG_INDEX ( 0 ) ) ;
CMD_END ( buffer ) ;
buffer - > user_size = ALIGN ( buffer - > user_size , 8 ) ;
return buffer - > user_size / 8 ;
}
2015-12-03 18:21:29 +01:00
void etnaviv_buffer_end ( struct etnaviv_gpu * gpu )
{
2017-11-24 16:56:37 +01:00
struct etnaviv_cmdbuf * buffer = & gpu - > buffer ;
2016-01-21 15:20:25 +00:00
unsigned int waitlink_offset = buffer - > user_size - 16 ;
u32 link_target , flush = 0 ;
2015-12-03 18:21:29 +01:00
2017-11-17 17:51:19 +01:00
lockdep_assert_held ( & gpu - > lock ) ;
2016-01-21 15:20:25 +00:00
if ( gpu - > exec_state = = ETNA_PIPE_2D )
flush = VIVS_GL_FLUSH_CACHE_PE2D ;
else if ( gpu - > exec_state = = ETNA_PIPE_3D )
flush = VIVS_GL_FLUSH_CACHE_DEPTH |
VIVS_GL_FLUSH_CACHE_COLOR |
VIVS_GL_FLUSH_CACHE_TEXTURE |
VIVS_GL_FLUSH_CACHE_TEXTUREVS |
VIVS_GL_FLUSH_CACHE_SHADER_L2 ;
if ( flush ) {
unsigned int dwords = 7 ;
link_target = etnaviv_buffer_reserve ( gpu , buffer , dwords ) ;
CMD_SEM ( buffer , SYNC_RECIPIENT_FE , SYNC_RECIPIENT_PE ) ;
CMD_STALL ( buffer , SYNC_RECIPIENT_FE , SYNC_RECIPIENT_PE ) ;
CMD_LOAD_STATE ( buffer , VIVS_GL_FLUSH_CACHE , flush ) ;
if ( gpu - > exec_state = = ETNA_PIPE_3D )
CMD_LOAD_STATE ( buffer , VIVS_TS_FLUSH_CACHE ,
VIVS_TS_FLUSH_CACHE_FLUSH ) ;
CMD_SEM ( buffer , SYNC_RECIPIENT_FE , SYNC_RECIPIENT_PE ) ;
CMD_STALL ( buffer , SYNC_RECIPIENT_FE , SYNC_RECIPIENT_PE ) ;
CMD_END ( buffer ) ;
etnaviv_buffer_replace_wait ( buffer , waitlink_offset ,
VIV_FE_LINK_HEADER_OP_LINK |
VIV_FE_LINK_HEADER_PREFETCH ( dwords ) ,
link_target ) ;
} else {
/* Replace the last link-wait with an "END" command */
etnaviv_buffer_replace_wait ( buffer , waitlink_offset ,
VIV_FE_END_HEADER_OP_END , 0 ) ;
}
2015-12-03 18:21:29 +01:00
}
2017-09-24 15:15:28 +02:00
/* Append a 'sync point' to the ring buffer. */
void etnaviv_sync_point_queue ( struct etnaviv_gpu * gpu , unsigned int event )
{
2017-11-24 16:56:37 +01:00
struct etnaviv_cmdbuf * buffer = & gpu - > buffer ;
2017-09-24 15:15:28 +02:00
unsigned int waitlink_offset = buffer - > user_size - 16 ;
u32 dwords , target ;
2017-11-17 17:51:19 +01:00
lockdep_assert_held ( & gpu - > lock ) ;
2017-09-24 15:15:28 +02:00
/*
* We need at most 3 dwords in the return target :
* 1 event + 1 end + 1 wait + 1 link .
*/
dwords = 4 ;
target = etnaviv_buffer_reserve ( gpu , buffer , dwords ) ;
/* Signal sync point event */
CMD_LOAD_STATE ( buffer , VIVS_GL_EVENT , VIVS_GL_EVENT_EVENT_ID ( event ) |
VIVS_GL_EVENT_FROM_PE ) ;
/* Stop the FE to 'pause' the GPU */
CMD_END ( buffer ) ;
/* Append waitlink */
CMD_WAIT ( buffer ) ;
CMD_LINK ( buffer , 2 , etnaviv_cmdbuf_get_va ( buffer ) +
buffer - > user_size - 4 ) ;
/*
* Kick off the ' sync point ' command by replacing the previous
* WAIT with a link to the address in the ring buffer .
*/
etnaviv_buffer_replace_wait ( buffer , waitlink_offset ,
VIV_FE_LINK_HEADER_OP_LINK |
VIV_FE_LINK_HEADER_PREFETCH ( dwords ) ,
target ) ;
}
2016-01-21 15:20:40 +00:00
/* Append a command buffer to the ring buffer. */
2017-11-24 15:16:58 +01:00
void etnaviv_buffer_queue ( struct etnaviv_gpu * gpu , u32 exec_state ,
unsigned int event , struct etnaviv_cmdbuf * cmdbuf )
2015-12-03 18:21:29 +01:00
{
2017-11-24 16:56:37 +01:00
struct etnaviv_cmdbuf * buffer = & gpu - > buffer ;
2016-01-21 15:20:09 +00:00
unsigned int waitlink_offset = buffer - > user_size - 16 ;
2016-01-21 15:20:40 +00:00
u32 return_target , return_dwords ;
2016-01-21 15:20:35 +00:00
u32 link_target , link_dwords ;
2017-11-24 15:16:58 +01:00
bool switch_context = gpu - > exec_state ! = exec_state ;
2015-12-03 18:21:29 +01:00
2017-11-17 17:51:19 +01:00
lockdep_assert_held ( & gpu - > lock ) ;
2015-12-03 18:21:29 +01:00
if ( drm_debug & DRM_UT_DRIVER )
etnaviv_buffer_dump ( gpu , buffer , 0 , 0x50 ) ;
2017-01-16 16:52:44 +01:00
link_target = etnaviv_cmdbuf_get_va ( cmdbuf ) ;
2016-01-21 15:20:40 +00:00
link_dwords = cmdbuf - > size / 8 ;
2015-12-03 18:21:29 +01:00
/*
2016-01-21 15:20:40 +00:00
* If we need maintanence prior to submitting this buffer , we will
* need to append a mmu flush load state , followed by a new
2015-12-03 18:21:29 +01:00
* link to this buffer - a total of four additional words .
*/
2017-11-17 17:19:50 +01:00
if ( gpu - > mmu - > need_flush | | switch_context ) {
2016-01-21 15:20:40 +00:00
u32 target , extra_dwords ;
2015-12-03 18:21:29 +01:00
/* link command */
2016-01-21 15:20:40 +00:00
extra_dwords = 1 ;
2015-12-03 18:21:29 +01:00
/* flush command */
2016-08-20 00:01:24 +02:00
if ( gpu - > mmu - > need_flush ) {
if ( gpu - > mmu - > version = = ETNAVIV_IOMMU_V1 )
extra_dwords + = 1 ;
else
extra_dwords + = 3 ;
}
2016-01-21 15:20:40 +00:00
2015-12-03 18:21:29 +01:00
/* pipe switch commands */
2017-11-17 17:19:50 +01:00
if ( switch_context )
2016-01-21 15:20:40 +00:00
extra_dwords + = 4 ;
2015-12-03 18:21:29 +01:00
2016-01-21 15:20:40 +00:00
target = etnaviv_buffer_reserve ( gpu , buffer , extra_dwords ) ;
2015-12-03 18:21:29 +01:00
if ( gpu - > mmu - > need_flush ) {
/* Add the MMU flush */
2016-08-20 00:01:24 +02:00
if ( gpu - > mmu - > version = = ETNAVIV_IOMMU_V1 ) {
CMD_LOAD_STATE ( buffer , VIVS_GL_FLUSH_MMU ,
VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
VIVS_GL_FLUSH_MMU_FLUSH_UNK2 |
VIVS_GL_FLUSH_MMU_FLUSH_PEMMU |
VIVS_GL_FLUSH_MMU_FLUSH_UNK4 ) ;
} else {
CMD_LOAD_STATE ( buffer , VIVS_MMUv2_CONFIGURATION ,
VIVS_MMUv2_CONFIGURATION_MODE_MASK |
VIVS_MMUv2_CONFIGURATION_ADDRESS_MASK |
VIVS_MMUv2_CONFIGURATION_FLUSH_FLUSH ) ;
CMD_SEM ( buffer , SYNC_RECIPIENT_FE ,
SYNC_RECIPIENT_PE ) ;
CMD_STALL ( buffer , SYNC_RECIPIENT_FE ,
SYNC_RECIPIENT_PE ) ;
}
2015-12-03 18:21:29 +01:00
gpu - > mmu - > need_flush = false ;
}
2017-11-17 17:19:50 +01:00
if ( switch_context ) {
2017-11-24 15:16:58 +01:00
etnaviv_cmd_select_pipe ( gpu , buffer , exec_state ) ;
gpu - > exec_state = exec_state ;
2015-12-03 18:21:29 +01:00
}
2016-01-21 15:20:35 +00:00
/* And the link to the submitted buffer */
CMD_LINK ( buffer , link_dwords , link_target ) ;
2015-12-03 18:21:29 +01:00
/* Update the link target to point to above instructions */
2016-01-21 15:20:40 +00:00
link_target = target ;
link_dwords = extra_dwords ;
2015-12-03 18:21:29 +01:00
}
2016-01-21 15:20:40 +00:00
/*
* Append a LINK to the submitted command buffer to return to
* the ring buffer . return_target is the ring target address .
2016-10-05 18:30:43 +02:00
* We need at most 7 dwords in the return target : 2 cache flush +
* 2 semaphore stall + 1 event + 1 wait + 1 link .
2016-01-21 15:20:40 +00:00
*/
2016-10-05 18:30:43 +02:00
return_dwords = 7 ;
2016-01-21 15:20:40 +00:00
return_target = etnaviv_buffer_reserve ( gpu , buffer , return_dwords ) ;
CMD_LINK ( cmdbuf , return_dwords , return_target ) ;
/*
2016-10-05 18:30:43 +02:00
* Append a cache flush , stall , event , wait and link pointing back to
* the wait command to the ring buffer .
2016-01-21 15:20:40 +00:00
*/
2016-10-05 18:30:43 +02:00
if ( gpu - > exec_state = = ETNA_PIPE_2D ) {
CMD_LOAD_STATE ( buffer , VIVS_GL_FLUSH_CACHE ,
VIVS_GL_FLUSH_CACHE_PE2D ) ;
} else {
CMD_LOAD_STATE ( buffer , VIVS_GL_FLUSH_CACHE ,
VIVS_GL_FLUSH_CACHE_DEPTH |
VIVS_GL_FLUSH_CACHE_COLOR ) ;
CMD_LOAD_STATE ( buffer , VIVS_TS_FLUSH_CACHE ,
VIVS_TS_FLUSH_CACHE_FLUSH ) ;
}
CMD_SEM ( buffer , SYNC_RECIPIENT_FE , SYNC_RECIPIENT_PE ) ;
CMD_STALL ( buffer , SYNC_RECIPIENT_FE , SYNC_RECIPIENT_PE ) ;
2015-12-03 18:21:29 +01:00
CMD_LOAD_STATE ( buffer , VIVS_GL_EVENT , VIVS_GL_EVENT_EVENT_ID ( event ) |
VIVS_GL_EVENT_FROM_PE ) ;
CMD_WAIT ( buffer ) ;
2017-01-16 16:52:44 +01:00
CMD_LINK ( buffer , 2 , etnaviv_cmdbuf_get_va ( buffer ) +
2016-10-05 18:30:43 +02:00
buffer - > user_size - 4 ) ;
2016-01-21 15:20:40 +00:00
if ( drm_debug & DRM_UT_DRIVER )
pr_info ( " stream link to 0x%08x @ 0x%08x %p \n " ,
2017-01-16 16:52:44 +01:00
return_target , etnaviv_cmdbuf_get_va ( cmdbuf ) ,
cmdbuf - > vaddr ) ;
2016-01-21 15:20:40 +00:00
if ( drm_debug & DRM_UT_DRIVER ) {
print_hex_dump ( KERN_INFO , " cmd " , DUMP_PREFIX_OFFSET , 16 , 4 ,
cmdbuf - > vaddr , cmdbuf - > size , 0 ) ;
pr_info ( " link op: %p \n " , buffer - > vaddr + waitlink_offset ) ;
pr_info ( " addr: 0x%08x \n " , link_target ) ;
pr_info ( " back: 0x%08x \n " , return_target ) ;
pr_info ( " event: %d \n " , event ) ;
}
2015-12-03 18:21:29 +01:00
2016-01-21 15:20:40 +00:00
/*
* Kick off the submitted command by replacing the previous
* WAIT with a link to the address in the ring buffer .
*/
2016-01-21 15:20:09 +00:00
etnaviv_buffer_replace_wait ( buffer , waitlink_offset ,
VIV_FE_LINK_HEADER_OP_LINK |
2016-01-21 15:20:35 +00:00
VIV_FE_LINK_HEADER_PREFETCH ( link_dwords ) ,
2016-01-21 15:20:09 +00:00
link_target ) ;
2015-12-03 18:21:29 +01:00
if ( drm_debug & DRM_UT_DRIVER )
etnaviv_buffer_dump ( gpu , buffer , 0 , 0x50 ) ;
2017-11-17 17:19:50 +01:00
gpu - > lastctx = cmdbuf - > ctx ;
2015-12-03 18:21:29 +01:00
}