// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
2019-06-23 12:23:34 +02:00
# include <linux/sched/signal.h>
2012-10-02 18:01:07 +01:00
# include <drm/ttm/ttm_placement.h>
2009-12-10 00:19:58 +00:00
2019-06-23 12:23:34 +02:00
# include "vmwgfx_drv.h"
2015-08-10 10:39:35 -07:00
/*
 * Command pair used when flushing a DX context through the FIFO; its size is
 * accounted for in vmw_local_fifo_commit() when fifo_state->dx is set.
 */
struct vmw_temp_set_context {
	SVGA3dCmdHeader header;
	SVGA3dCmdDXTempSetContext body;
};
2010-01-30 03:38:06 +00:00
/*
 * vmw_fifo_have_3d - Determine whether the device supports 3D operation.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * For guest-backed-object devices, the SVGA3D_DEVCAP_3D capability is
 * queried through the DEV_CAP register pair under cap_lock. Otherwise the
 * 3D hardware version is read from extended FIFO memory and checked
 * against the minimum supported version.
 *
 * Returns true if 3D is available, false otherwise.
 */
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		if (!dev_priv->has_mob)
			return false;

		/*
		 * DEV_CAP is an index/value register pair; the lock keeps
		 * the write and the subsequent read paired.
		 */
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	/* The hwversion register must lie within the FIFO register area. */
	fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	hwversion = vmw_mmio_read(fifo_mem +
				  ((fifo->capabilities &
				    SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
				   SVGA_FIFO_3D_HWVERSION_REVISED :
				   SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Legacy Display Unit does not support surfaces */
	if (dev_priv->active_display_unit == vmw_du_legacy)
		return false;

	return true;
}
2010-05-28 11:21:59 +02:00
bool vmw_fifo_have_pitchlock ( struct vmw_private * dev_priv )
{
2015-10-28 10:44:04 +01:00
u32 * fifo_mem = dev_priv - > mmio_virt ;
2010-05-28 11:21:59 +02:00
uint32_t caps ;
if ( ! ( dev_priv - > capabilities & SVGA_CAP_EXTENDED_FIFO ) )
return false ;
2015-10-28 10:44:04 +01:00
caps = vmw_mmio_read ( fifo_mem + SVGA_FIFO_CAPABILITIES ) ;
2010-05-28 11:21:59 +02:00
if ( caps & SVGA_FIFO_CAP_PITCHLOCK )
return true ;
return false ;
}
2009-12-10 00:19:58 +00:00
/*
 * vmw_fifo_init - Initialize the device command FIFO and driver FIFO state.
 *
 * @dev_priv: Pointer to the device private structure.
 * @fifo:     FIFO state structure to initialize.
 *
 * Allocates the static bounce buffer, saves the current ENABLE /
 * CONFIG_DONE / TRACES register values for restoration at release time,
 * enables the device, programs the FIFO MIN/MAX/NEXT_CMD/STOP registers
 * in FIFO memory and then signals SVGA_REG_CONFIG_DONE.
 *
 * Returns 0 on success, -ENOMEM if the static bounce buffer cannot be
 * allocated.
 */
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;

	fifo->dx = false;
	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	/* Save register state so vmw_fifo_release() can restore it. */
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);

	vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
		  SVGA_REG_ENABLE_HIDE);
	vmw_write(dev_priv, SVGA_REG_TRACES, 0);

	/* Number of FIFO registers, converted to a byte offset below. */
	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_MIN);
	vmw_mmio_write(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	/* Make MIN/MAX visible before the queue pointers below. */
	wmb();
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	vmw_mmio_write(min, fifo_mem + SVGA_FIFO_STOP);
	vmw_mmio_write(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);

	/* Read back what the device actually accepted. */
	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = vmw_mmio_read(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	/* Seed the marker sequence from the last seqno the driver read. */
	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	vmw_mmio_write(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);

	return 0;
}
2015-01-14 02:33:39 -08:00
/*
 * vmw_fifo_ping_host - Ask the device to start processing FIFO commands.
 *
 * @dev_priv: Pointer to the device private structure.
 * @reason:   Sync reason written to SVGA_REG_SYNC.
 *
 * The cmpxchg on SVGA_FIFO_BUSY makes the SYNC register write happen only
 * on the 0 -> 1 transition, so concurrent callers don't issue redundant
 * syncs while the device is already busy.
 */
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
/*
 * vmw_fifo_release - Drain the FIFO and restore pre-init device state.
 *
 * @dev_priv: Pointer to the device private structure.
 * @fifo:     FIFO state structure to tear down.
 *
 * Issues a generic sync, busy-waits until the device reports idle, records
 * the last fence seqno, restores the register values saved in
 * vmw_fifo_init(), and frees the bounce buffers.
 */
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	u32 *fifo_mem = dev_priv->mmio_virt;

	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	/* Busy-wait for the device to finish processing outstanding work. */
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

	/* Restore the state saved by vmw_fifo_init(). */
	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}
/*
 * vmw_fifo_is_full - Check whether @bytes of FIFO space are unavailable.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes:    Number of bytes the caller wants to enqueue.
 *
 * Returns true if there is not strictly more than @bytes of free space.
 */
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t fifo_min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);

	/* Free space wraps: head..max plus min..tail. */
	return (fifo_max - next_cmd) + (stop - fifo_min) <= bytes;
}
/*
 * vmw_fifo_wait_noirq - Wait for FIFO space without IRQ support.
 *
 * @dev_priv:      Pointer to the device private structure.
 * @bytes:         Number of bytes of FIFO space to wait for.
 * @interruptible: Whether the sleep may be interrupted by signals.
 * @timeout:       Timeout in jiffies.
 *
 * Polls vmw_fifo_is_full() with one-jiffy sleeps, using the
 * prepare_to_wait()/finish_wait() protocol on the fifo queue.
 *
 * Returns 0 on success, -EBUSY on timeout (possible device lockup) or
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	/* Let any other waiters re-evaluate the FIFO state. */
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}
/*
 * vmw_fifo_wait - Wait for @bytes of FIFO space to become available.
 *
 * @dev_priv:      Pointer to the device private structure.
 * @bytes:         Number of bytes of FIFO space to wait for.
 * @interruptible: Whether the sleep may be interrupted by signals.
 * @timeout:       Timeout in jiffies.
 *
 * Fast-path returns immediately if space is already available. Otherwise
 * pings the host with SVGA_SYNC_FIFOFULL and either polls (no IRQ mask
 * support) or sleeps on the fifo queue with a FIFO_PROGRESS waiter
 * registered for the duration of the wait.
 *
 * Returns 0 on success, -EBUSY on timeout, or a negative error from the
 * interruptible wait.
 */
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
			       &dev_priv->fifo_queue_waiters);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	/* Map the wait_event return convention onto 0 / -errno. */
	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FIFO_PROGRESS,
				  &dev_priv->fifo_queue_waiters);

	return ret;
}
2011-10-04 20:13:13 +02:00
/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 *  If it timeouts waiting for fifo space, or if @bytes is larger than the
 *  available fifo space.
 *
 * On success the fifo_mutex is held until the matching commit; on error it
 * is released here. When the requested span would wrap or the FIFO lacks
 * the RESERVE capability, a bounce buffer (static or vmalloc'd) is handed
 * out instead of a direct FIFO pointer.
 *
 * Returns:
 *   Pointer to the fifo, or null on error (possible hardware hang).
 */
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
				    uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);

	/* A request this large can never fit, even in an empty FIFO. */
	if (unlikely(bytes >= (max - min)))
		goto out_err;

	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = vmw_mmio_read(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			/* Free space runs from next_cmd to max, then wraps. */
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {
			/* Free space is the contiguous gap up to stop. */
			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			/*
			 * Writes of a single word are always safe in place;
			 * anything larger needs the RESERVE capability.
			 */
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					vmw_mmio_write(bytes, fifo_mem +
						       SVGA_FIFO_RESERVED);
				return (void __force *) (fifo_mem +
							 (next_cmd >> 2));
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				fifo_state->dynamic_buffer = vmalloc(bytes);
				if (!fifo_state->dynamic_buffer)
					goto out_err;
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);

	return NULL;
}
2015-08-10 10:39:35 -07:00
/*
 * vmw_fifo_reserve_dx - Reserve command space for a given context.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes:    Number of bytes to reserve.
 * @ctx_id:   Hardware context id, or SVGA3D_INVALID_ID for the local FIFO.
 *
 * Routes the reservation through the command buffer manager when one
 * exists, otherwise through the local FIFO (which only supports
 * SVGA3D_INVALID_ID). Error pointers are folded into NULL for callers.
 */
void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes,
			  int ctx_id)
{
	void *cmd_space;

	if (dev_priv->cman) {
		cmd_space = vmw_cmdbuf_reserve(dev_priv->cman, bytes,
					       ctx_id, false, NULL);
	} else if (ctx_id == SVGA3D_INVALID_ID) {
		cmd_space = vmw_local_fifo_reserve(dev_priv, bytes);
	} else {
		WARN(1, "Command buffer has not been allocated.\n");
		cmd_space = NULL;
	}

	return IS_ERR_OR_NULL(cmd_space) ? NULL : cmd_space;
}
2009-12-10 00:19:58 +00:00
/*
 * vmw_fifo_res_copy - Copy a bounce-buffered command into FIFO memory
 * using the SVGA_FIFO_RESERVED protocol.
 *
 * @fifo_state: Driver FIFO state (selects static vs dynamic bounce buffer).
 * @fifo_mem:   Mapped FIFO memory.
 * @next_cmd:   Byte offset at which to start writing.
 * @max:        FIFO max byte offset (exclusive end of the command area).
 * @min:        FIFO min byte offset (start of the command area).
 * @bytes:      Number of bytes to copy.
 *
 * The copy is split into at most two chunks when it wraps past @max.
 */
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      u32 *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	vmw_mmio_write(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	/* Publish the reservation before copying the command data. */
	mb();
	memcpy(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy(fifo_mem + (min >> 2), buffer + (chunk_size >> 2), rest);
}
/*
 * vmw_fifo_slow_copy - Copy a bounce-buffered command into FIFO memory one
 * word at a time, for FIFOs without the RESERVE capability.
 *
 * @fifo_state: Driver FIFO state (selects static vs dynamic bounce buffer).
 * @fifo_mem:   Mapped FIFO memory.
 * @next_cmd:   Byte offset at which to start writing.
 * @max:        FIFO max byte offset (wrap boundary).
 * @min:        FIFO min byte offset (wrap target).
 * @bytes:      Number of bytes to copy (multiple of 4).
 *
 * NEXT_CMD is advanced after every word, with barriers so the device never
 * observes the pointer ahead of the data.
 */
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       u32 *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		vmw_mmio_write(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();	/* Data must be visible before the pointer update. */
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}
2015-04-02 02:39:45 -07:00
/*
 * vmw_local_fifo_commit - Commit a reservation made by
 * vmw_local_fifo_reserve() and kick the device.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes:    Number of bytes actually used (multiple of 4).
 *
 * Copies bounce-buffered data into FIFO memory if needed, advances
 * NEXT_CMD, clears SVGA_FIFO_RESERVED, pings the host, and releases the
 * fifo_mutex taken at reserve time.
 */
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	u32 *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = vmw_mmio_read(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = vmw_mmio_read(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = vmw_mmio_read(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	/* Account for the DX context-flush command appended at reserve. */
	if (fifo_state->dx)
		bytes += sizeof(struct vmw_temp_set_context);

	fifo_state->dx = false;
	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;	/* wrap around */
		mb();	/* Data must be visible before the pointer update. */
		vmw_mmio_write(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		vmw_mmio_write(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}
2015-06-25 11:57:56 -07:00
/*
 * vmw_fifo_commit - Commit previously reserved command space.
 *
 * @dev_priv: Pointer to the device private structure.
 * @bytes:    Number of bytes to commit.
 *
 * Dispatches to the command buffer manager when present, otherwise to the
 * local FIFO. Buffered commands are not flushed.
 */
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	if (dev_priv->cman) {
		vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, false);
		return;
	}

	vmw_local_fifo_commit(dev_priv, bytes);
}
/**
* vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands .
*
* @ dev_priv : Pointer to device private structure .
* @ bytes : Number of bytes to commit .
*/
2015-08-10 10:39:35 -07:00
void vmw_fifo_commit_flush ( struct vmw_private * dev_priv , uint32_t bytes )
2015-06-25 11:57:56 -07:00
{
if ( dev_priv - > cman )
vmw_cmdbuf_commit ( dev_priv - > cman , bytes , NULL , true ) ;
else
vmw_local_fifo_commit ( dev_priv , bytes ) ;
}
/**
 * vmw_fifo_flush - Flush any buffered commands and make sure command processing
 * starts.
 *
 * @dev_priv: Pointer to device private structure.
 * @interruptible: Whether to wait interruptible if function needs to sleep.
 */
int vmw_fifo_flush(struct vmw_private *dev_priv, bool interruptible)
{
	might_sleep();

	/* Nothing is buffered without a command buffer manager. */
	if (!dev_priv->cman)
		return 0;

	return vmw_cmdbuf_cur_flush(dev_priv->cman, interruptible);
}
2011-09-01 20:18:42 +00:00
/*
 * vmw_fifo_send_fence - Emit an SVGA_CMD_FENCE and return its seqno.
 *
 * @dev_priv: Pointer to the device private structure.
 * @seqno:    Out parameter receiving the new fence sequence number.
 *
 * On reservation failure, *seqno is set to the current marker sequence, a
 * fallback wait is issued, and -ENOMEM is returned. Zero is never used as
 * a seqno. If the FIFO lacks SVGA_FIFO_CAP_FENCE, the reservation is
 * committed empty and the IRQ code is expected to emulate fencing.
 *
 * Returns 0 on success, -ENOMEM if FIFO space could not be reserved.
 */
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	u32 *fm;
	int ret = 0;
	uint32_t bytes = sizeof(u32) + sizeof(*cmd_fence);

	fm = VMW_FIFO_RESERVE(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void) vmw_fallback_wait(dev_priv, false, true, *seqno,
					 false, 3*HZ);
		goto out_err;
	}

	/* Skip 0: it is reserved and must never be handed out. */
	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*fm++ = SVGA_CMD_FENCE;
	cmd_fence = (struct svga_fifo_cmd_fence *) fm;
	cmd_fence->fence = *seqno;
	vmw_fifo_commit_flush(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}
2011-10-04 20:13:30 +02:00
/**
2012-11-21 11:26:55 +01:00
* vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
* legacy query commands .
2011-10-04 20:13:30 +02:00
*
* @ dev_priv : The device private structure .
* @ cid : The hardware context id used for the query .
*
2012-11-21 11:26:55 +01:00
* See the vmw_fifo_emit_dummy_query documentation .
2011-10-04 20:13:30 +02:00
*/
2012-11-21 11:26:55 +01:00
static int vmw_fifo_emit_dummy_legacy_query ( struct vmw_private * dev_priv ,
uint32_t cid )
2011-10-04 20:13:30 +02:00
{
/*
* A query wait without a preceding query end will
* actually finish all queries for this cid
* without writing to the query result structure .
*/
2015-06-26 00:25:37 -07:00
struct ttm_buffer_object * bo = & dev_priv - > dummy_query_bo - > base ;
2011-10-04 20:13:30 +02:00
struct {
SVGA3dCmdHeader header ;
SVGA3dCmdWaitForQuery body ;
} * cmd ;
2019-02-14 16:15:39 -08:00
cmd = VMW_FIFO_RESERVE ( dev_priv , sizeof ( * cmd ) ) ;
if ( unlikely ( cmd = = NULL ) )
2011-10-04 20:13:30 +02:00
return - ENOMEM ;
cmd - > header . id = SVGA_3D_CMD_WAIT_FOR_QUERY ;
cmd - > header . size = sizeof ( cmd - > body ) ;
cmd - > body . cid = cid ;
cmd - > body . type = SVGA3D_QUERYTYPE_OCCLUSION ;
if ( bo - > mem . mem_type = = TTM_PL_VRAM ) {
cmd - > body . guestResult . gmrId = SVGA_GMR_FRAMEBUFFER ;
2020-06-24 20:26:43 +02:00
cmd - > body . guestResult . offset = bo - > mem . start < < PAGE_SHIFT ;
2011-10-04 20:13:30 +02:00
} else {
cmd - > body . guestResult . gmrId = bo - > mem . start ;
cmd - > body . guestResult . offset = 0 ;
}
vmw_fifo_commit ( dev_priv , sizeof ( * cmd ) ) ;
return 0 ;
}
2012-11-21 11:26:55 +01:00
/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 *
 * Returns 0 on success, -ENOMEM on FIFO reservation failure.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
					uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */
	struct ttm_buffer_object *bo = &dev_priv->dummy_query_bo->base;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = VMW_FIFO_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	/* The dummy query bo is expected to live in MOB memory here. */
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->mem.start;
	cmd->body.offset = 0;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}
/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A Query results structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object. And that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	/* Guest-backed devices use the GB query path; others use legacy. */
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}