/**************************************************************************
 *
 * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
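
/**
 * struct vmw_marker - tracks one submitted command-stream sequence number.
 * @head: List head used to link the marker into the queue.
 * @seqno: Sequence number this marker was submitted with.
 * @submitted: Raw monotonic timestamp (ns) taken at submission time.
 */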
struct vmw_marker {
        struct list_head head;
        uint32_t seqno;
        u64 submitted;
};
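
/**
 * vmw_marker_queue_init - initialize an empty marker queue.
 * @queue: The queue to initialize.
 *
 * Resets the accumulated lag and stamps @queue->lag_time with the
 * current raw monotonic time, so later lag calculations start from now.
 */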
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
        INIT_LIST_HEAD(&queue->head);
        queue->lag = 0;
        queue->lag_time = ktime_get_raw_ns();
        spin_lock_init(&queue->lock);
}
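
/**
 * vmw_marker_queue_takedown - free all markers still on the queue.
 * @queue: The queue to tear down.
 */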
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
        struct vmw_marker *marker, *next;

        spin_lock(&queue->lock);
        list_for_each_entry_safe(marker, next, &queue->head, head) {
                kfree(marker);
        }
        spin_unlock(&queue->lock);
}
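
/**
 * vmw_marker_push - append a marker for a newly submitted seqno.
 * @queue: The queue to append to.
 * @seqno: The sequence number the marker represents.
 *
 * Records the submission time so vmw_marker_pull() can later compute
 * how far the device lags behind command submission.
 *
 * Returns 0 on success, -ENOMEM if the marker could not be allocated.
 */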
int vmw_marker_push(struct vmw_marker_queue *queue,
                    uint32_t seqno)
{
        struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);

        if (unlikely(!marker))
                return -ENOMEM;

        marker->seqno = seqno;
        marker->submitted = ktime_get_raw_ns();
        spin_lock(&queue->lock);
        list_add_tail(&marker->head, &queue->head);
        spin_unlock(&queue->lock);

        return 0;
}
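
/**
 * vmw_marker_pull - retire all markers covered by a signaled seqno.
 * @queue: The queue to pull from.
 * @signaled_seqno: The most recent sequence number the device has signaled.
 *
 * Updates the queue lag from the retired markers and frees every marker
 * whose seqno has already signaled. An empty queue means the device has
 * caught up, so the lag is reset to zero.
 *
 * Returns 0 if the lag was updated, -EBUSY if no marker could be retired.
 */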
int vmw_marker_pull(struct vmw_marker_queue *queue,
                    uint32_t signaled_seqno)
{
        struct vmw_marker *marker, *next;
        bool updated = false;
        u64 now;

        spin_lock(&queue->lock);
        now = ktime_get_raw_ns();

        if (list_empty(&queue->head)) {
                queue->lag = 0;
                queue->lag_time = now;
                updated = true;
                goto out_unlock;
        }

        list_for_each_entry_safe(marker, next, &queue->head, head) {
                /*
                 * Unsigned wraparound test: a difference larger than
                 * 1 << 30 means this marker's seqno is still ahead of
                 * the signaled one, so it must not be retired yet.
                 */
                if (signaled_seqno - marker->seqno > (1 << 30))
                        continue;

                queue->lag = now - marker->submitted;
                queue->lag_time = now;
                updated = true;
                list_del(&marker->head);
                kfree(marker);
        }

out_unlock:
        spin_unlock(&queue->lock);

        return (updated) ? 0 : -EBUSY;
}
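
/**
 * vmw_fifo_lag - return the current accumulated lag.
 * @queue: The queue to query.
 *
 * Folds the time elapsed since the last lag update into @queue->lag
 * before returning it, so the reported lag keeps growing as long as
 * no marker has been retired.
 */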
static u64 vmw_fifo_lag(struct vmw_marker_queue *queue)
{
        u64 now;

        spin_lock(&queue->lock);
        now = ktime_get_raw_ns();
        queue->lag += now - queue->lag_time;
        queue->lag_time = now;
        spin_unlock(&queue->lock);
        return queue->lag;
}
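
/**
 * vmw_lag_lt - check whether the current lag is below a threshold.
 * @queue: The queue to query.
 * @us: The threshold in microseconds.
 */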
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
                       uint32_t us)
{
        u64 cond = (u64) us * NSEC_PER_USEC;

        return vmw_fifo_lag(queue) <= cond;
}
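
/**
 * vmw_wait_lag - wait until the command-stream lag drops below a threshold.
 * @dev_priv: The device private structure.
 * @queue: The marker queue to wait on.
 * @us: The maximum acceptable lag in microseconds.
 *
 * Repeatedly waits on the oldest outstanding marker's seqno (or on the
 * most recently issued seqno if the queue is empty), retiring markers as
 * they signal, until the lag is within bounds.
 *
 * Returns 0 on success, or the error returned by vmw_wait_seqno().
 */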
int vmw_wait_lag(struct vmw_private *dev_priv,
                 struct vmw_marker_queue *queue, uint32_t us)
{
        struct vmw_marker *marker;
        uint32_t seqno;
        int ret;

        while (!vmw_lag_lt(queue, us)) {
                spin_lock(&queue->lock);
                if (list_empty(&queue->head))
                        seqno = atomic_read(&dev_priv->marker_seq);
                else {
                        marker = list_first_entry(&queue->head,
                                                  struct vmw_marker, head);
                        seqno = marker->seqno;
                }
                spin_unlock(&queue->lock);

                ret = vmw_wait_seqno(dev_priv, false, seqno,
                                     true, 3 * HZ);

                if (unlikely(ret != 0))
                        return ret;

                (void) vmw_marker_pull(queue, seqno);
        }

        return 0;
}