// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/pci.h>
#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)
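
/**
 * vmw_irqflag_fence_goal - Select the fence goal irq flag for the device
 *
 * @vmw: Pointer to the vmw_private device.
 * Return: SVGA_IRQFLAG_REG_FENCE_GOAL when SVGA_CAP2_EXTRA_REGS is
 * advertised in capabilities2, SVGA_IRQFLAG_FENCE_GOAL otherwise.
 */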
static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		return SVGA_IRQFLAG_REG_FENCE_GOAL;
	else
		return SVGA_IRQFLAG_FENCE_GOAL;
}

/**
 * vmw_thread_fn - Deferred (process context) irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the deferred part of irq processing.
 * The function is guaranteed to run at least once after the
 * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
 *
 */
static irqreturn_t vmw_thread_fn(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	irqreturn_t ret = IRQ_NONE;

	if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
			       dev_priv->irqthread_pending)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
		ret = IRQ_HANDLED;
	}

	if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
			       dev_priv->irqthread_pending)) {
		vmw_cmdbuf_irqthread(dev_priv->cman);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/**
 * vmw_irq_handler: irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the quick part of irq processing.
 * The function performs fast actions like clearing the device interrupt
 * flags and also reasonably quick actions like waking processes waiting for
 * FIFO space. Other IRQ actions are deferred to the IRQ thread.
 */
static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;
	irqreturn_t ret = IRQ_HANDLED;

	status = vmw_irq_status_read(dev_priv);
	masked_status = status & READ_ONCE(dev_priv->irq_mask);

	if (likely(status))
		vmw_irq_status_write(dev_priv, status);

	if (!status)
		return IRQ_NONE;

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			      vmw_irqflag_fence_goal(dev_priv))) &&
	    !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

	if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
			      SVGA_IRQFLAG_ERROR)) &&
	    !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
			      dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

	return ret;
}
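
/**
 * vmw_fifo_idle - Check whether the device is idle
 *
 * @dev_priv: Pointer to the vmw_private device.
 * @seqno: Unused; present only to match the vmw_fallback_wait() wait
 * condition signature.
 * Return: True if SVGA_REG_BUSY reads back zero.
 */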
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
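
/**
 * vmw_update_seqno - Refresh the last read fence seqno
 *
 * @dev_priv: Pointer to the vmw_private device.
 *
 * Reads the current fence seqno from the device and, if it has changed,
 * updates @dev_priv->last_read_seqno and runs vmw_fences_update() on the
 * fence manager.
 */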
void vmw_update_seqno(struct vmw_private *dev_priv)
{
	uint32_t seqno = vmw_fence_read(dev_priv);

	if (dev_priv->last_read_seqno != seqno) {
		dev_priv->last_read_seqno = seqno;
		vmw_fences_update(dev_priv->fman);
	}
}
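
/**
 * vmw_seqno_passed - Check whether a fence seqno has signaled
 *
 * @dev_priv: Pointer to the vmw_private device.
 * @seqno: The seqno to check.
 * Return: True if @seqno has passed the last read seqno (taking wrap into
 * account), if the device has no fence support and the fifo is idle, or if
 * the seqno is stale; false otherwise.
 */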
bool vmw_seqno_passed(struct vmw_private *dev_priv,
			 uint32_t seqno)
{
	bool ret;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	vmw_update_seqno(dev_priv);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
		return true;

	/**
	 * Then check if the seqno is higher than what we've actually
	 * emitted; in that case the fence is stale and signaled.
	 */

	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}
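
/**
 * vmw_fallback_wait - Wait for a seqno or for fifo idle by polling
 *
 * @dev_priv: Pointer to the vmw_private device.
 * @lazy: Sleep one jiffy between checks rather than busy-waiting.
 * @fifo_idle: Wait for the fifo to go idle instead of for @seqno to pass.
 * @seqno: The seqno to wait for when @fifo_idle is false.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies; expiry is reported as an SVGA device lockup.
 * Return: Zero on success, -ERESTARTSYS if interrupted by a signal, or the
 * error returned by vmw_cmdbuf_idle().
 */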
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	bool fifo_down = false;
	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/**
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle) {
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10 * HZ);
			if (ret)
				goto out_err;
		} else if (fifo_state) {
			down_read(&fifo_state->rwsem);
			fifo_down = true;
		}
	}

	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle && fifo_state)
		vmw_fence_write(dev_priv, signal_seq);

	wake_up_all(&dev_priv->fence_queue);
out_err:
	if (fifo_down)
		up_read(&fifo_state->rwsem);

	return ret;
}
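
/**
 * vmw_generic_waiter_add - Account for a new waiter on an irq flag
 *
 * @dev_priv: Pointer to the vmw_private device.
 * @flag: The SVGA_IRQFLAG_* flag being waited on.
 * @waiter_count: Waiter count to increment.
 *
 * When the first waiter is added, the device irq status is written with
 * @flag and @flag is enabled in SVGA_REG_IRQMASK.
 */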
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
			    u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if ((*waiter_count)++ == 0) {
		vmw_irq_status_write(dev_priv, flag);
		dev_priv->irq_mask |= flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}
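
/**
 * vmw_generic_waiter_remove - Account for a removed waiter on an irq flag
 *
 * @dev_priv: Pointer to the vmw_private device.
 * @flag: The SVGA_IRQFLAG_* flag being waited on.
 * @waiter_count: Waiter count to decrement.
 *
 * When the last waiter is removed, @flag is masked out of SVGA_REG_IRQMASK.
 */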
void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count)
{
	spin_lock_bh(&dev_priv->waiter_lock);
	if (--(*waiter_count) == 0) {
		dev_priv->irq_mask &= ~flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_bh(&dev_priv->waiter_lock);
}
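
/*
 * Thin wrappers tracking the fence (SVGA_IRQFLAG_ANY_FENCE) and fence goal
 * irq waiters on top of vmw_generic_waiter_add() and
 * vmw_generic_waiter_remove().
 */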
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
			       &dev_priv->fence_queue_waiters);
}

void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
				  &dev_priv->fence_queue_waiters);
}

void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
			       &dev_priv->goal_queue_waiters);
}

void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
				  &dev_priv->goal_queue_waiters);
}
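
/**
 * vmw_irq_preinstall - Acknowledge pending device irq status
 *
 * @dev: Pointer to the drm device.
 *
 * Reads the irq status register and writes the value back, acknowledging
 * anything pending before the irq handlers are installed.
 */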
static void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	status = vmw_irq_status_read(dev_priv);
	vmw_irq_status_write(dev_priv, status);
}
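
/**
 * vmw_irq_uninstall - Mask irqs, acknowledge pending status and free vectors
 *
 * @dev: Pointer to the drm device.
 *
 * Clears SVGA_REG_IRQMASK, acknowledges any outstanding irq status, frees
 * all requested irq lines and releases the PCI irq vectors.
 */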
void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	uint32_t status;
	u32 i;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

	status = vmw_irq_status_read(dev_priv);
	vmw_irq_status_write(dev_priv, status);

	for (i = 0; i < dev_priv->num_irq_vectors; ++i)
		free_irq(dev_priv->irqs[i], dev);

	pci_free_irq_vectors(pdev);
	dev_priv->num_irq_vectors = 0;
}

/**
 * vmw_irq_install - Install the irq handlers
 *
 * @dev_priv: Pointer to the vmw_private device.
 * Return: Zero if successful. Negative number otherwise.
 */
int vmw_irq_install(struct vmw_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	int nvec;
	int i = 0;

	BUILD_BUG_ON((SVGA_IRQFLAG_MAX >> VMWGFX_MAX_NUM_IRQS) != 1);
	BUG_ON(VMWGFX_MAX_NUM_IRQS != get_count_order(SVGA_IRQFLAG_MAX));

	nvec = pci_alloc_irq_vectors(pdev, 1, VMWGFX_MAX_NUM_IRQS,
				     PCI_IRQ_ALL_TYPES);
	if (nvec <= 0) {
		drm_err(&dev_priv->drm,
			"IRQ's are unavailable, nvec: %d\n", nvec);
		ret = nvec;
		goto done;
	}

	vmw_irq_preinstall(dev);

	for (i = 0; i < nvec; ++i) {
		ret = pci_irq_vector(pdev, i);
		if (ret < 0) {
			drm_err(&dev_priv->drm,
				"failed getting irq vector: %d\n", ret);
			goto done;
		}
		dev_priv->irqs[i] = ret;

		ret = request_threaded_irq(dev_priv->irqs[i], vmw_irq_handler, vmw_thread_fn,
					   IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
		if (ret != 0) {
			drm_err(&dev_priv->drm,
				"Failed installing irq(%d): %d\n",
				dev_priv->irqs[i], ret);
			goto done;
		}
	}

done:
	dev_priv->num_irq_vectors = i;
	return ret;
}