// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_dl.c -- R-Car VSP1 Display List
 *
 * Copyright (C) 2015 Renesas Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */
# include <linux/device.h>
# include <linux/dma-mapping.h>
# include <linux/gfp.h>
2018-05-18 16:42:00 -04:00
# include <linux/refcount.h>
2015-09-07 01:40:25 -03:00
# include <linux/slab.h>
2016-05-13 19:17:02 -03:00
# include <linux/workqueue.h>
2015-09-07 01:40:25 -03:00
# include "vsp1.h"
# include "vsp1_dl.h"
2016-03-03 13:36:34 -03:00
# define VSP1_DL_NUM_ENTRIES 256
2015-09-07 01:40:25 -03:00
/* Display list header flags, written to the hardware header 'flags' field. */
#define VSP1_DLH_INT_ENABLE		(1 << 1)
#define VSP1_DLH_AUTO_START		(1 << 0)

/*
 * One body descriptor in the hardware display list header: the size in bytes
 * and DMA address of a body. Layout is consumed by the VSP1 hardware, hence
 * __packed and fixed-width u32 fields.
 */
struct vsp1_dl_header_list {
	u32 num_bytes;
	u32 addr;
} __packed;

/*
 * Hardware display list header. Holds up to 8 body descriptors, the DMA
 * address of the next header to chain to, and the VSP1_DLH_* flags.
 * Layout is hardware-defined, hence __packed.
 */
struct vsp1_dl_header {
	u32 num_lists;
	struct vsp1_dl_header_list lists[8];
	u32 next_header;
	u32 flags;
} __packed;
2015-11-14 22:48:27 -02:00
2015-09-07 01:40:25 -03:00
/*
 * A single display list entry: one register address/value pair, in the
 * format processed by the hardware (__packed, two u32 fields).
 */
struct vsp1_dl_entry {
	u32 addr;
	u32 data;
} __packed;
2015-09-07 01:40:25 -03:00
2016-03-03 13:36:34 -03:00
/**
 * struct vsp1_dl_body - Display list body
 * @list: entry in the display list list of bodies
 * @free: entry in the pool free body list
 * @refcnt: reference tracking for the body
 * @pool: pool to which this body belongs
 * @entries: array of entries
 * @dma: DMA address of the entries
 * @size: size of the DMA memory in bytes
 * @num_entries: number of stored entries
 * @max_entries: number of entries available
 */
struct vsp1_dl_body {
	struct list_head list;
	struct list_head free;

	refcount_t refcnt;

	struct vsp1_dl_body_pool *pool;

	/* Entries and dma point into the pool's single DMA allocation. */
	struct vsp1_dl_entry *entries;
	dma_addr_t dma;
	size_t size;

	unsigned int num_entries;
	unsigned int max_entries;
};
2015-09-07 01:40:25 -03:00
2018-05-18 16:41:58 -04:00
/**
 * struct vsp1_dl_body_pool - display list body pool
 * @dma: DMA address of the entries
 * @size: size of the full DMA memory pool in bytes
 * @mem: CPU memory pointer for the pool
 * @bodies: Array of DLB structures for the pool
 * @free: List of free DLB entries
 * @lock: Protects the free list
 * @vsp1: the VSP1 device
 */
struct vsp1_dl_body_pool {
	/* DMA allocation (one contiguous region shared by all bodies) */
	dma_addr_t dma;
	size_t size;
	void *mem;

	/* Body management */
	struct vsp1_dl_body *bodies;
	struct list_head free;
	spinlock_t lock;

	struct vsp1_device *vsp1;
};
2016-03-03 13:36:34 -03:00
/**
 * struct vsp1_dl_list - Display list
 * @list: entry in the display list manager lists
 * @dlm: the display list manager
 * @header: display list header, NULL for headerless lists
 * @dma: DMA address for the header
 * @body0: first display list body
 * @bodies: list of extra display list bodies
 * @has_chain: if true, indicates that there's a partition chain
 * @chain: entry in the display list partition chain
 * @internal: whether the display list is used for internal purpose
 */
struct vsp1_dl_list {
	struct list_head list;
	struct vsp1_dl_manager *dlm;

	struct vsp1_dl_header *header;
	dma_addr_t dma;

	struct vsp1_dl_body *body0;
	struct list_head bodies;

	bool has_chain;
	struct list_head chain;

	bool internal;
};
2015-11-14 22:48:27 -02:00
enum vsp1_dl_mode {
VSP1_DL_MODE_HEADER ,
VSP1_DL_MODE_HEADERLESS ,
} ;
2015-11-14 22:27:52 -02:00
/**
 * struct vsp1_dl_manager - Display List manager
 * @index: index of the related WPF
 * @mode: display list operation mode (header or headerless)
 * @singleshot: execute the display list in single-shot mode
 * @vsp1: the VSP1 device
 * @lock: protects the free, active, queued, and pending lists
 * @free: array of all free display lists
 * @active: list currently being processed (loaded) by hardware
 * @queued: list queued to the hardware (written to the DL registers)
 * @pending: list waiting to be queued to the hardware
 * @pool: body pool for the display list bodies
 */
struct vsp1_dl_manager {
	unsigned int index;
	enum vsp1_dl_mode mode;
	bool singleshot;
	struct vsp1_device *vsp1;

	spinlock_t lock;
	struct list_head free;
	struct vsp1_dl_list *active;
	struct vsp1_dl_list *queued;
	struct vsp1_dl_list *pending;

	struct vsp1_dl_body_pool *pool;
};
2016-03-03 13:36:34 -03:00
/* -----------------------------------------------------------------------------
* Display List Body Management
*/
2018-05-18 16:41:58 -04:00
/**
 * vsp1_dl_body_pool_create - Create a pool of bodies from a single allocation
 * @vsp1: The VSP1 device
 * @num_bodies: The number of bodies to allocate
 * @num_entries: The maximum number of entries that a body can contain
 * @extra_size: Extra allocation provided for the bodies
 *
 * Allocate a pool of display list bodies each with enough memory to contain
 * the requested number of entries plus the @extra_size.
 *
 * Return a pointer to a pool on success or NULL if memory can't be allocated.
 */
struct vsp1_dl_body_pool *
vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
			 unsigned int num_entries, size_t extra_size)
{
	struct vsp1_dl_body_pool *pool;
	size_t body_size;
	unsigned int i;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->vsp1 = vsp1;

	/*
	 * TODO: 'extra_size' is only used by vsp1_dlm_create(), to allocate
	 * extra memory for the display list header. We need only one header
	 * per display list, not per display list body, thus this allocation
	 * is extraneous and should be reworked in the future.
	 */
	body_size = num_entries * sizeof(struct vsp1_dl_entry) + extra_size;
	pool->size = body_size * num_bodies;

	pool->bodies = kcalloc(num_bodies, sizeof(*pool->bodies), GFP_KERNEL);
	if (!pool->bodies)
		goto err_free_pool;

	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
				 GFP_KERNEL);
	if (!pool->mem)
		goto err_free_bodies;

	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free);

	/* Carve the single DMA region up into per-body slices. */
	for (i = 0; i < num_bodies; ++i) {
		struct vsp1_dl_body *dlb = &pool->bodies[i];

		dlb->pool = pool;
		dlb->max_entries = num_entries;

		dlb->dma = pool->dma + i * body_size;
		dlb->entries = pool->mem + i * body_size;

		list_add_tail(&dlb->free, &pool->free);
	}

	return pool;

err_free_bodies:
	kfree(pool->bodies);
err_free_pool:
	kfree(pool);
	return NULL;
}
/**
* vsp1_dl_body_pool_destroy - Release a body pool
* @ pool : The body pool
*
* Release all components of a pool allocation .
*/
void vsp1_dl_body_pool_destroy ( struct vsp1_dl_body_pool * pool )
{
if ( ! pool )
return ;
if ( pool - > mem )
dma_free_wc ( pool - > vsp1 - > bus_master , pool - > size , pool - > mem ,
pool - > dma ) ;
kfree ( pool - > bodies ) ;
kfree ( pool ) ;
}
/**
* vsp1_dl_body_get - Obtain a body from a pool
* @ pool : The body pool
*
* Obtain a body from the pool without blocking .
*
* Returns a display list body or NULL if there are none available .
*/
struct vsp1_dl_body * vsp1_dl_body_get ( struct vsp1_dl_body_pool * pool )
{
struct vsp1_dl_body * dlb = NULL ;
unsigned long flags ;
spin_lock_irqsave ( & pool - > lock , flags ) ;
if ( ! list_empty ( & pool - > free ) ) {
dlb = list_first_entry ( & pool - > free , struct vsp1_dl_body , free ) ;
list_del ( & dlb - > free ) ;
2018-05-18 16:42:00 -04:00
refcount_set ( & dlb - > refcnt , 1 ) ;
2018-05-18 16:41:58 -04:00
}
spin_unlock_irqrestore ( & pool - > lock , flags ) ;
return dlb ;
}
/**
* vsp1_dl_body_put - Return a body back to its pool
* @ dlb : The display list body
*
* Return a body back to the pool , and reset the num_entries to clear the list .
*/
void vsp1_dl_body_put ( struct vsp1_dl_body * dlb )
{
unsigned long flags ;
if ( ! dlb )
return ;
2018-05-18 16:42:00 -04:00
if ( ! refcount_dec_and_test ( & dlb - > refcnt ) )
return ;
2018-05-18 16:41:58 -04:00
dlb - > num_entries = 0 ;
spin_lock_irqsave ( & dlb - > pool - > lock , flags ) ;
list_add_tail ( & dlb - > free , & dlb - > pool - > free ) ;
spin_unlock_irqrestore ( & dlb - > pool - > lock , flags ) ;
}
2016-03-03 13:36:34 -03:00
/**
2018-05-18 16:41:56 -04:00
* vsp1_dl_body_write - Write a register to a display list body
* @ dlb : The body
2016-03-03 13:36:34 -03:00
* @ reg : The register address
* @ data : The register value
*
2018-05-18 16:41:56 -04:00
* Write the given register and value to the display list body . The maximum
* number of entries that can be written in a body is specified when the body is
* allocated by vsp1_dl_body_alloc ( ) .
2016-03-03 13:36:34 -03:00
*/
2018-05-18 16:41:56 -04:00
void vsp1_dl_body_write ( struct vsp1_dl_body * dlb , u32 reg , u32 data )
2016-03-03 13:36:34 -03:00
{
2018-05-18 16:41:57 -04:00
if ( WARN_ONCE ( dlb - > num_entries > = dlb - > max_entries ,
" DLB size exceeded (max %u) " , dlb - > max_entries ) )
return ;
2016-03-03 13:36:34 -03:00
dlb - > entries [ dlb - > num_entries ] . addr = reg ;
dlb - > entries [ dlb - > num_entries ] . data = data ;
dlb - > num_entries + + ;
}
2015-09-07 01:40:25 -03:00
/* -----------------------------------------------------------------------------
* Display List Transaction Management
*/
2015-11-08 20:06:57 -02:00
static struct vsp1_dl_list * vsp1_dl_list_alloc ( struct vsp1_dl_manager * dlm )
2015-09-07 01:40:25 -03:00
{
2015-11-08 20:06:57 -02:00
struct vsp1_dl_list * dl ;
2015-09-07 01:40:25 -03:00
2015-11-08 20:06:57 -02:00
dl = kzalloc ( sizeof ( * dl ) , GFP_KERNEL ) ;
if ( ! dl )
return NULL ;
2015-09-07 01:40:25 -03:00
2018-05-18 16:41:56 -04:00
INIT_LIST_HEAD ( & dl - > bodies ) ;
2015-11-08 20:06:57 -02:00
dl - > dlm = dlm ;
2018-05-18 16:41:59 -04:00
/* Get a default body for our list. */
dl - > body0 = vsp1_dl_body_get ( dlm - > pool ) ;
if ( ! dl - > body0 )
2015-11-08 20:06:57 -02:00
return NULL ;
2015-11-14 22:48:27 -02:00
if ( dlm - > mode = = VSP1_DL_MODE_HEADER ) {
2018-05-18 16:41:59 -04:00
size_t header_offset = dl - > body0 - > max_entries
* sizeof ( * dl - > body0 - > entries ) ;
2016-03-03 13:36:34 -03:00
2018-05-18 16:41:59 -04:00
dl - > header = ( ( void * ) dl - > body0 - > entries ) + header_offset ;
dl - > dma = dl - > body0 - > dma + header_offset ;
2016-03-03 13:36:34 -03:00
2015-11-14 22:48:27 -02:00
memset ( dl - > header , 0 , sizeof ( * dl - > header ) ) ;
2018-05-18 16:41:59 -04:00
dl - > header - > lists [ 0 ] . addr = dl - > body0 - > dma ;
2015-11-14 22:48:27 -02:00
}
2015-11-08 20:06:57 -02:00
return dl ;
}
2015-09-07 01:40:25 -03:00
2018-05-18 16:41:59 -04:00
static void vsp1_dl_list_bodies_put ( struct vsp1_dl_list * dl )
{
struct vsp1_dl_body * dlb , * tmp ;
list_for_each_entry_safe ( dlb , tmp , & dl - > bodies , list ) {
list_del ( & dlb - > list ) ;
vsp1_dl_body_put ( dlb ) ;
}
}
2015-11-08 20:06:57 -02:00
static void vsp1_dl_list_free ( struct vsp1_dl_list * dl )
{
2018-05-18 16:41:59 -04:00
vsp1_dl_body_put ( dl - > body0 ) ;
vsp1_dl_list_bodies_put ( dl ) ;
2015-11-08 20:06:57 -02:00
kfree ( dl ) ;
2015-09-07 01:40:25 -03:00
}
2015-11-08 20:06:57 -02:00
/**
* vsp1_dl_list_get - Get a free display list
* @ dlm : The display list manager
*
* Get a display list from the pool of free lists and return it .
*
* This function must be called without the display list manager lock held .
*/
struct vsp1_dl_list * vsp1_dl_list_get ( struct vsp1_dl_manager * dlm )
2015-09-07 01:40:25 -03:00
{
2015-11-08 20:06:57 -02:00
struct vsp1_dl_list * dl = NULL ;
2015-09-07 01:40:25 -03:00
unsigned long flags ;
2015-11-08 20:06:57 -02:00
spin_lock_irqsave ( & dlm - > lock , flags ) ;
2015-09-07 01:40:25 -03:00
2015-11-08 20:06:57 -02:00
if ( ! list_empty ( & dlm - > free ) ) {
dl = list_first_entry ( & dlm - > free , struct vsp1_dl_list , list ) ;
list_del ( & dl - > list ) ;
2016-07-12 13:49:46 -03:00
2016-09-19 15:18:01 -03:00
/*
* The display list chain must be initialised to ensure every
2016-07-12 13:49:46 -03:00
* display list can assert list_empty ( ) if it is not in a chain .
*/
INIT_LIST_HEAD ( & dl - > chain ) ;
2015-09-07 01:40:25 -03:00
}
2015-11-08 20:06:57 -02:00
spin_unlock_irqrestore ( & dlm - > lock , flags ) ;
return dl ;
}
2015-09-07 01:40:25 -03:00
2016-03-03 09:26:47 -03:00
/*
 * Return a display list (and any lists chained to it) to the manager's free
 * list. This function must be called with the display list manager lock held.
 *
 * NOTE(review): the recursion on chained lists is bounded to one level,
 * presumably because chained lists never carry chains of their own — confirm
 * against vsp1_dl_list_add_chain() usage.
 */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_next;

	if (!dl)
		return;

	/*
	 * Release any linked display-lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_next, &dl->chain, chain)
			__vsp1_dl_list_put(dl_next);
	}

	dl->has_chain = false;

	vsp1_dl_list_bodies_put(dl);

	/*
	 * body0 is reused as an optimisation as presently every display list
	 * has at least one body, thus we reinitialise the entries list.
	 */
	dl->body0->num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}
2015-11-08 20:06:57 -02:00
/**
* vsp1_dl_list_put - Release a display list
* @ dl : The display list
*
* Release the display list and return it to the pool of free lists .
*
* Passing a NULL pointer to this function is safe , in that case no operation
* will be performed .
*/
void vsp1_dl_list_put ( struct vsp1_dl_list * dl )
{
2016-03-03 09:26:47 -03:00
unsigned long flags ;
2015-11-08 20:06:57 -02:00
if ( ! dl )
return ;
2015-09-07 01:40:25 -03:00
2016-03-03 09:26:47 -03:00
spin_lock_irqsave ( & dl - > dlm - > lock , flags ) ;
__vsp1_dl_list_put ( dl ) ;
spin_unlock_irqrestore ( & dl - > dlm - > lock , flags ) ;
2015-09-07 01:40:25 -03:00
}
2016-03-03 13:36:34 -03:00
/**
2018-05-18 16:42:02 -04:00
* vsp1_dl_list_get_body0 - Obtain the default body for the display list
2016-03-03 13:36:34 -03:00
* @ dl : The display list
*
2018-05-18 16:42:02 -04:00
* Obtain a pointer to the internal display list body allowing this to be passed
* directly to configure operations .
2016-03-03 13:36:34 -03:00
*/
2018-05-18 16:42:02 -04:00
struct vsp1_dl_body * vsp1_dl_list_get_body0 ( struct vsp1_dl_list * dl )
2015-09-07 01:40:25 -03:00
{
2018-05-18 16:42:02 -04:00
return dl - > body0 ;
2016-03-03 13:36:34 -03:00
}
/**
2018-05-18 16:41:56 -04:00
* vsp1_dl_list_add_body - Add a body to the display list
2016-03-03 13:36:34 -03:00
* @ dl : The display list
2018-05-18 16:41:56 -04:00
* @ dlb : The body
2016-03-03 13:36:34 -03:00
*
2018-05-18 16:41:56 -04:00
* Add a display list body to a display list . Registers contained in bodies are
* processed after registers contained in the main display list , in the order in
* which bodies are added .
2016-03-03 13:36:34 -03:00
*
2018-05-18 16:41:56 -04:00
* Adding a body to a display list passes ownership of the body to the list . The
2018-05-18 16:42:00 -04:00
* caller retains its reference to the fragment when adding it to the display
* list , but is not allowed to add new entries to the body .
*
* The reference must be explicitly released by a call to vsp1_dl_body_put ( )
* when the body isn ' t needed anymore .
2016-03-03 13:36:34 -03:00
*
2018-05-18 16:41:56 -04:00
* Additional bodies are only usable for display lists in header mode .
* Attempting to add a body to a header - less display list will return an error .
2016-03-03 13:36:34 -03:00
*/
2018-05-18 16:41:56 -04:00
int vsp1_dl_list_add_body ( struct vsp1_dl_list * dl , struct vsp1_dl_body * dlb )
2016-03-03 13:36:34 -03:00
{
/* Multi-body lists are only available in header mode. */
if ( dl - > dlm - > mode ! = VSP1_DL_MODE_HEADER )
return - EINVAL ;
2018-05-18 16:42:00 -04:00
refcount_inc ( & dlb - > refcnt ) ;
2018-05-18 16:41:56 -04:00
list_add_tail ( & dlb - > list , & dl - > bodies ) ;
2016-03-03 13:36:34 -03:00
return 0 ;
2015-09-07 01:40:25 -03:00
}
2016-07-12 13:49:46 -03:00
/**
* vsp1_dl_list_add_chain - Add a display list to a chain
* @ head : The head display list
* @ dl : The new display list
*
* Add a display list to an existing display list chain . The chained lists
* will be automatically processed by the hardware without intervention from
* the CPU . A display list end interrupt will only complete after the last
* display list in the chain has completed processing .
*
* Adding a display list to a chain passes ownership of the display list to
* the head display list item . The chain is released when the head dl item is
* put back with __vsp1_dl_list_put ( ) .
*
* Chained display lists are only usable in header mode . Attempts to add a
* display list to a chain in header - less mode will return an error .
*/
int vsp1_dl_list_add_chain ( struct vsp1_dl_list * head ,
struct vsp1_dl_list * dl )
{
/* Chained lists are only available in header mode. */
if ( head - > dlm - > mode ! = VSP1_DL_MODE_HEADER )
return - EINVAL ;
head - > has_chain = true ;
list_add_tail ( & dl - > chain , & head - > chain ) ;
return 0 ;
}
/*
 * Fill the hardware display list header for @dl. @is_last indicates whether
 * this is the last list of a chain (or a standalone list).
 *
 * NOTE(review): body sizes are computed with sizeof(*dl->header->lists); this
 * only matches the entry size because struct vsp1_dl_header_list and struct
 * vsp1_dl_entry are both two packed u32 fields (8 bytes each).
 *
 * NOTE(review): num_lists is not checked against the 8-entry lists[] array in
 * struct vsp1_dl_header — callers presumably never attach more than 7 extra
 * bodies; confirm.
 */
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the display list bodies addresses and sizes.
	 * The address of the first body has already been filled when the
	 * display list was allocated.
	 */
	hdr->num_bytes = dl->body0->num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->bodies, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;

	if (!list_empty(&dl->chain) && !is_last) {
		/*
		 * If this display list's chain is not empty, we are on a list,
		 * and the next item is the display list that we must queue for
		 * automatic processing by the hardware.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
		dl->header->flags = VSP1_DLH_AUTO_START;
	} else if (!dlm->singleshot) {
		/*
		 * If the display list manager works in continuous mode, the
		 * VSP should loop over the display list continuously until
		 * instructed to do otherwise.
		 */
		dl->header->next_header = dl->dma;
		dl->header->flags = VSP1_DLH_INT_ENABLE | VSP1_DLH_AUTO_START;
	} else {
		/*
		 * Otherwise, in mem-to-mem mode, we work in single-shot mode
		 * and the next display list must not be started automatically.
		 */
		dl->header->flags = VSP1_DLH_INT_ENABLE;
	}
}
2017-05-29 13:41:31 +03:00
static bool vsp1_dl_list_hw_update_pending ( struct vsp1_dl_manager * dlm )
2015-09-07 01:40:25 -03:00
{
2015-11-08 20:06:57 -02:00
struct vsp1_device * vsp1 = dlm - > vsp1 ;
2016-03-03 13:36:34 -03:00
2017-05-29 13:41:31 +03:00
if ( ! dlm - > queued )
return false ;
2016-07-12 13:49:46 -03:00
2017-05-29 13:41:31 +03:00
/*
* Check whether the VSP1 has taken the update . In headerless mode the
* hardware indicates this by clearing the UPD bit in the DL_BODY_SIZE
* register , and in header mode by clearing the UPDHDR bit in the CMD
* register .
*/
if ( dlm - > mode = = VSP1_DL_MODE_HEADERLESS )
return ! ! ( vsp1_read ( vsp1 , VI6_DL_BODY_SIZE )
& VI6_DL_BODY_SIZE_UPD ) ;
else
2018-02-09 09:50:34 -05:00
return ! ! ( vsp1_read ( vsp1 , VI6_CMD ( dlm - > index ) )
& VI6_CMD_UPDHDR ) ;
2017-05-29 13:41:31 +03:00
}
2016-03-03 13:36:34 -03:00
2017-05-29 13:41:31 +03:00
static void vsp1_dl_list_hw_enqueue ( struct vsp1_dl_list * dl )
{
struct vsp1_dl_manager * dlm = dl - > dlm ;
struct vsp1_device * vsp1 = dlm - > vsp1 ;
2017-05-30 03:40:35 +03:00
2017-05-29 13:41:31 +03:00
if ( dlm - > mode = = VSP1_DL_MODE_HEADERLESS ) {
2016-09-19 15:18:01 -03:00
/*
2017-05-29 13:41:31 +03:00
* In headerless mode , program the hardware directly with the
* display list body address and size and set the UPD bit . The
* bit will be cleared by the hardware when the display list
* processing starts .
*/
2018-05-18 16:41:59 -04:00
vsp1_write ( vsp1 , VI6_DL_HDR_ADDR ( 0 ) , dl - > body0 - > dma ) ;
2017-05-29 13:41:31 +03:00
vsp1_write ( vsp1 , VI6_DL_BODY_SIZE , VI6_DL_BODY_SIZE_UPD |
2018-05-18 16:41:59 -04:00
( dl - > body0 - > num_entries * sizeof ( * dl - > header - > lists ) ) ) ;
2017-05-29 13:41:31 +03:00
} else {
/*
* In header mode , program the display list header address . If
* the hardware is idle ( single - shot mode or first frame in
* continuous mode ) it will then be started independently . If
* the hardware is operating , the VI6_DL_HDR_REF_ADDR register
* will be updated with the display list address .
2016-07-12 13:49:46 -03:00
*/
2015-11-14 22:48:27 -02:00
vsp1_write ( vsp1 , VI6_DL_HDR_ADDR ( dlm - > index ) , dl - > dma ) ;
}
2017-05-29 13:41:31 +03:00
}
/*
 * Commit a display list in continuous mode. Called with the display list
 * manager lock held.
 */
static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If a previous display list has been queued to the hardware but not
	 * processed yet, the VSP can start processing it at any time. In that
	 * case we can't replace the queued list by the new one, as we could
	 * race with the hardware. We thus mark the update as pending, it will
	 * be queued up to the hardware by the frame end interrupt handler.
	 *
	 * If a display list is already pending we simply drop it as the new
	 * display list is assumed to contain a more recent configuration. It
	 * is an error if the already pending list has the internal flag set,
	 * as there is then a process waiting for that list to complete. This
	 * shouldn't happen as the waiting process should perform proper
	 * locking, but warn just in case.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		WARN_ON(dlm->pending && dlm->pending->internal);
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Pass the new display list to the hardware and mark it as queued. It
	 * will become active when the hardware starts processing it.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}
static void vsp1_dl_list_commit_singleshot ( struct vsp1_dl_list * dl )
{
struct vsp1_dl_manager * dlm = dl - > dlm ;
/*
* When working in single - shot mode , the caller guarantees that the
* hardware is idle at this point . Just commit the head display list
* to hardware . Chained lists will be started automatically .
*/
vsp1_dl_list_hw_enqueue ( dl ) ;
dlm - > active = dl ;
}
2018-02-22 14:26:21 -05:00
/*
 * Commit a display list to the hardware, filling hardware headers first when
 * operating in header mode. @internal flags the list for internal-completion
 * notification (continuous mode only).
 */
void vsp1_dl_list_commit(struct vsp1_dl_list *dl, bool internal)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_next;
	unsigned long flags;

	if (dlm->mode == VSP1_DL_MODE_HEADER) {
		/* Fill the header for the head and chained display lists. */
		vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

		list_for_each_entry(dl_next, &dl->chain, chain) {
			bool last = list_is_last(&dl_next->chain, &dl->chain);

			vsp1_dl_list_fill_header(dl_next, last);
		}
	}

	dl->internal = internal;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}
/* -----------------------------------------------------------------------------
2015-11-08 20:06:57 -02:00
* Display List Manager
2015-09-07 01:40:25 -03:00
*/
2017-03-04 02:01:18 +00:00
/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 * @dlm: the display list manager
 *
 * Return a set of flags that indicates display list completion status.
 *
 * The VSP1_DL_FRAME_END_COMPLETED flag indicates that the previous display
 * list has completed at frame end. If the flag is not returned display list
 * completion has been delayed by one frame because the display list commit
 * raced with the frame end interrupt. The function always returns with the
 * flag set in header mode as display list processing is then not continuous
 * and races never occur.
 *
 * The VSP1_DL_FRAME_END_INTERNAL flag indicates that the previous display
 * list has completed and had been queued with the internal notification flag.
 * Internal notification is only supported for continuous mode.
 */
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	unsigned int flags = 0;

	spin_lock(&dlm->lock);

	/*
	 * The mem-to-mem pipelines work in single-shot mode. No new display
	 * list can be queued, we don't have to do anything.
	 */
	if (dlm->singleshot) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
		goto done;
	}

	/*
	 * If the commit operation raced with the interrupt and occurred after
	 * the frame end event but before interrupt processing, the hardware
	 * hasn't taken the update into account yet. We have to skip one frame
	 * and retry.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm))
		goto done;

	/*
	 * The device starts processing the queued display list right after
	 * the frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		/* Report and clear the internal flag before the rotation. */
		if (dlm->queued->internal)
			flags |= VSP1_DL_FRAME_END_INTERNAL;
		dlm->queued->internal = false;

		__vsp1_dl_list_put(dlm->active);
		dlm->active = dlm->queued;
		dlm->queued = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
	}

	/*
	 * Now that the VSP has started processing the queued display list, we
	 * can queue the pending display list to the hardware if one has been
	 * prepared.
	 */
	if (dlm->pending) {
		vsp1_dl_list_hw_enqueue(dlm->pending);
		dlm->queued = dlm->pending;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);

	return flags;
}
2015-11-08 20:06:57 -02:00
/* Hardware Setup */
void vsp1_dlm_setup ( struct vsp1_device * vsp1 )
2015-09-07 01:40:25 -03:00
{
2015-11-01 15:18:56 -02:00
u32 ctrl = ( 256 < < VI6_DL_CTRL_AR_WAIT_SHIFT )
| VI6_DL_CTRL_DC2 | VI6_DL_CTRL_DC1 | VI6_DL_CTRL_DC0
| VI6_DL_CTRL_DLE ;
2015-09-07 01:40:25 -03:00
2017-02-26 10:29:50 -03:00
/*
* The DRM pipeline operates with display lists in Continuous Frame
2015-11-01 15:18:56 -02:00
* Mode , all other pipelines use manual start .
2015-09-07 01:40:25 -03:00
*/
if ( vsp1 - > drm )
2015-11-01 15:18:56 -02:00
ctrl | = VI6_DL_CTRL_CFM0 | VI6_DL_CTRL_NH0 ;
2015-09-07 01:40:25 -03:00
vsp1_write ( vsp1 , VI6_DL_CTRL , ctrl ) ;
vsp1_write ( vsp1 , VI6_DL_SWAP , VI6_DL_SWAP_LWS ) ;
}
2015-11-08 20:06:57 -02:00
void vsp1_dlm_reset ( struct vsp1_dl_manager * dlm )
{
2016-03-03 09:26:47 -03:00
unsigned long flags ;
spin_lock_irqsave ( & dlm - > lock , flags ) ;
__vsp1_dl_list_put ( dlm - > active ) ;
__vsp1_dl_list_put ( dlm - > queued ) ;
__vsp1_dl_list_put ( dlm - > pending ) ;
spin_unlock_irqrestore ( & dlm - > lock , flags ) ;
2015-11-08 20:06:57 -02:00
dlm - > active = NULL ;
dlm - > queued = NULL ;
dlm - > pending = NULL ;
}
2015-09-07 01:40:25 -03:00
2018-05-18 16:42:03 -04:00
struct vsp1_dl_body * vsp1_dlm_dl_body_get ( struct vsp1_dl_manager * dlm )
{
return vsp1_dl_body_get ( dlm - > pool ) ;
}
2015-11-14 22:27:52 -02:00
struct vsp1_dl_manager * vsp1_dlm_create ( struct vsp1_device * vsp1 ,
2015-11-14 22:48:27 -02:00
unsigned int index ,
2015-11-14 22:27:52 -02:00
unsigned int prealloc )
2015-09-07 01:40:25 -03:00
{
2015-11-14 22:27:52 -02:00
struct vsp1_dl_manager * dlm ;
2018-05-18 16:41:59 -04:00
size_t header_size ;
2015-09-07 01:40:25 -03:00
unsigned int i ;
2015-11-14 22:27:52 -02:00
dlm = devm_kzalloc ( vsp1 - > dev , sizeof ( * dlm ) , GFP_KERNEL ) ;
if ( ! dlm )
return NULL ;
2015-11-14 22:48:27 -02:00
dlm - > index = index ;
dlm - > mode = index = = 0 & & ! vsp1 - > info - > uapi
? VSP1_DL_MODE_HEADERLESS : VSP1_DL_MODE_HEADER ;
2017-05-29 13:41:31 +03:00
dlm - > singleshot = vsp1 - > info - > uapi ;
2015-11-08 20:06:57 -02:00
dlm - > vsp1 = vsp1 ;
2015-09-07 01:40:25 -03:00
2015-11-08 20:06:57 -02:00
spin_lock_init ( & dlm - > lock ) ;
INIT_LIST_HEAD ( & dlm - > free ) ;
2018-05-18 16:41:59 -04:00
/*
* Initialize the display list body and allocate DMA memory for the body
* and the optional header . Both are allocated together to avoid memory
* fragmentation , with the header located right after the body in
2018-05-18 16:42:03 -04:00
* memory . An extra body is allocated on top of the prealloc to account
* for the cached body used by the vsp1_pipeline object .
2018-05-18 16:41:59 -04:00
*/
header_size = dlm - > mode = = VSP1_DL_MODE_HEADER
? ALIGN ( sizeof ( struct vsp1_dl_header ) , 8 )
: 0 ;
2018-05-18 16:42:03 -04:00
dlm - > pool = vsp1_dl_body_pool_create ( vsp1 , prealloc + 1 ,
2018-05-18 16:41:59 -04:00
VSP1_DL_NUM_ENTRIES , header_size ) ;
if ( ! dlm - > pool )
return NULL ;
2015-09-07 01:40:25 -03:00
2015-11-08 20:06:57 -02:00
for ( i = 0 ; i < prealloc ; + + i ) {
struct vsp1_dl_list * dl ;
2015-09-07 01:40:25 -03:00
2015-11-08 20:06:57 -02:00
dl = vsp1_dl_list_alloc ( dlm ) ;
if ( ! dl )
2015-11-14 22:27:52 -02:00
return NULL ;
2015-09-07 01:40:25 -03:00
2015-11-08 20:06:57 -02:00
list_add_tail ( & dl - > list , & dlm - > free ) ;
2015-09-07 01:40:25 -03:00
}
2015-11-14 22:27:52 -02:00
return dlm ;
2015-09-07 01:40:25 -03:00
}
2015-11-14 22:27:52 -02:00
void vsp1_dlm_destroy ( struct vsp1_dl_manager * dlm )
2015-09-07 01:40:25 -03:00
{
2015-11-08 20:06:57 -02:00
struct vsp1_dl_list * dl , * next ;
2015-11-14 22:27:52 -02:00
if ( ! dlm )
return ;
2015-11-08 20:06:57 -02:00
list_for_each_entry_safe ( dl , next , & dlm - > free , list ) {
list_del ( & dl - > list ) ;
vsp1_dl_list_free ( dl ) ;
}
2016-05-13 19:17:02 -03:00
2018-05-18 16:41:59 -04:00
vsp1_dl_body_pool_destroy ( dlm - > pool ) ;
2015-09-07 01:40:25 -03:00
}