/*
 * Copyright 2012 Red Hat Inc.
 * Parts based on xf86-video-ast
 * Copyright (c) 2005 ASPEED Technology Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors: Dave Airlie <airlied@redhat.com>
 */
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>

#include "ast_drv.h"
static void ast_cursor_fini ( struct ast_private * ast )
{
size_t i ;
struct drm_gem_vram_object * gbo ;
for ( i = 0 ; i < ARRAY_SIZE ( ast - > cursor . gbo ) ; + + i ) {
gbo = ast - > cursor . gbo [ i ] ;
2020-11-03 10:30:11 +01:00
drm_gem_vram_vunmap ( gbo , & ast - > cursor . map [ i ] ) ;
2020-07-02 13:50:25 +02:00
drm_gem_vram_unpin ( gbo ) ;
drm_gem_vram_put ( gbo ) ;
}
}
/* drmm release action; tears down the cursor BOs on device destruction. */
static void ast_cursor_release(struct drm_device *dev, void *ptr)
{
	ast_cursor_fini(to_ast_private(dev));
}
2020-07-02 13:50:16 +02:00
/*
* Allocate cursor BOs and pins them at the end of VRAM .
*/
2020-07-02 13:50:17 +02:00
int ast_cursor_init ( struct ast_private * ast )
2020-07-02 13:50:16 +02:00
{
2020-07-30 15:52:03 +02:00
struct drm_device * dev = & ast - > base ;
2020-07-02 13:50:16 +02:00
size_t size , i ;
struct drm_gem_vram_object * gbo ;
2020-11-03 10:30:11 +01:00
struct dma_buf_map map ;
2020-07-02 13:50:16 +02:00
int ret ;
size = roundup ( AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE , PAGE_SIZE ) ;
for ( i = 0 ; i < ARRAY_SIZE ( ast - > cursor . gbo ) ; + + i ) {
gbo = drm_gem_vram_create ( dev , size , 0 ) ;
if ( IS_ERR ( gbo ) ) {
ret = PTR_ERR ( gbo ) ;
goto err_drm_gem_vram_put ;
}
ret = drm_gem_vram_pin ( gbo , DRM_GEM_VRAM_PL_FLAG_VRAM |
DRM_GEM_VRAM_PL_FLAG_TOPDOWN ) ;
if ( ret ) {
drm_gem_vram_put ( gbo ) ;
goto err_drm_gem_vram_put ;
}
2020-11-03 10:30:11 +01:00
ret = drm_gem_vram_vmap ( gbo , & map ) ;
if ( ret ) {
2020-07-02 13:50:24 +02:00
drm_gem_vram_unpin ( gbo ) ;
drm_gem_vram_put ( gbo ) ;
goto err_drm_gem_vram_put ;
}
2020-07-02 13:50:16 +02:00
ast - > cursor . gbo [ i ] = gbo ;
2020-11-03 10:30:11 +01:00
ast - > cursor . map [ i ] = map ;
2020-07-02 13:50:16 +02:00
}
2020-07-02 13:50:25 +02:00
return drmm_add_action_or_reset ( dev , ast_cursor_release , NULL ) ;
2020-07-02 13:50:16 +02:00
err_drm_gem_vram_put :
while ( i ) {
- - i ;
gbo = ast - > cursor . gbo [ i ] ;
2020-11-03 10:30:11 +01:00
drm_gem_vram_vunmap ( gbo , & ast - > cursor . map [ i ] ) ;
2020-07-02 13:50:16 +02:00
drm_gem_vram_unpin ( gbo ) ;
drm_gem_vram_put ( gbo ) ;
}
return ret ;
}
2020-07-02 13:50:19 +02:00
static void update_cursor_image ( u8 __iomem * dst , const u8 * src , int width , int height )
2020-07-02 13:50:16 +02:00
{
union {
u32 ul ;
u8 b [ 4 ] ;
} srcdata32 [ 2 ] , data32 ;
union {
u16 us ;
u8 b [ 2 ] ;
} data16 ;
u32 csum = 0 ;
s32 alpha_dst_delta , last_alpha_dst_delta ;
2020-07-02 13:50:19 +02:00
u8 __iomem * dstxor ;
const u8 * srcxor ;
2020-07-02 13:50:16 +02:00
int i , j ;
u32 per_pixel_copy , two_pixel_copy ;
alpha_dst_delta = AST_MAX_HWC_WIDTH < < 1 ;
last_alpha_dst_delta = alpha_dst_delta - ( width < < 1 ) ;
srcxor = src ;
dstxor = ( u8 * ) dst + last_alpha_dst_delta + ( AST_MAX_HWC_HEIGHT - height ) * alpha_dst_delta ;
per_pixel_copy = width & 1 ;
two_pixel_copy = width > > 1 ;
for ( j = 0 ; j < height ; j + + ) {
for ( i = 0 ; i < two_pixel_copy ; i + + ) {
srcdata32 [ 0 ] . ul = * ( ( u32 * ) srcxor ) & 0xf0f0f0f0 ;
srcdata32 [ 1 ] . ul = * ( ( u32 * ) ( srcxor + 4 ) ) & 0xf0f0f0f0 ;
data32 . b [ 0 ] = srcdata32 [ 0 ] . b [ 1 ] | ( srcdata32 [ 0 ] . b [ 0 ] > > 4 ) ;
data32 . b [ 1 ] = srcdata32 [ 0 ] . b [ 3 ] | ( srcdata32 [ 0 ] . b [ 2 ] > > 4 ) ;
data32 . b [ 2 ] = srcdata32 [ 1 ] . b [ 1 ] | ( srcdata32 [ 1 ] . b [ 0 ] > > 4 ) ;
data32 . b [ 3 ] = srcdata32 [ 1 ] . b [ 3 ] | ( srcdata32 [ 1 ] . b [ 2 ] > > 4 ) ;
writel ( data32 . ul , dstxor ) ;
csum + = data32 . ul ;
dstxor + = 4 ;
srcxor + = 8 ;
}
for ( i = 0 ; i < per_pixel_copy ; i + + ) {
srcdata32 [ 0 ] . ul = * ( ( u32 * ) srcxor ) & 0xf0f0f0f0 ;
data16 . b [ 0 ] = srcdata32 [ 0 ] . b [ 1 ] | ( srcdata32 [ 0 ] . b [ 0 ] > > 4 ) ;
data16 . b [ 1 ] = srcdata32 [ 0 ] . b [ 3 ] | ( srcdata32 [ 0 ] . b [ 2 ] > > 4 ) ;
writew ( data16 . us , dstxor ) ;
csum + = ( u32 ) data16 . us ;
dstxor + = 2 ;
srcxor + = 4 ;
}
dstxor + = last_alpha_dst_delta ;
}
/* write checksum + signature */
dst + = AST_HWC_SIZE ;
writel ( csum , dst ) ;
writel ( width , dst + AST_HWC_SIGNATURE_SizeX ) ;
writel ( height , dst + AST_HWC_SIGNATURE_SizeY ) ;
writel ( 0 , dst + AST_HWC_SIGNATURE_HOTSPOTX ) ;
writel ( 0 , dst + AST_HWC_SIGNATURE_HOTSPOTY ) ;
}
2020-07-02 13:50:18 +02:00
int ast_cursor_blit ( struct ast_private * ast , struct drm_framebuffer * fb )
{
2020-07-30 15:52:03 +02:00
struct drm_device * dev = & ast - > base ;
2020-07-02 13:50:18 +02:00
struct drm_gem_vram_object * gbo ;
2020-11-03 10:30:11 +01:00
struct dma_buf_map map ;
2020-07-02 13:50:18 +02:00
int ret ;
void * src ;
2020-07-02 13:50:24 +02:00
void __iomem * dst ;
2020-07-02 13:50:18 +02:00
if ( drm_WARN_ON_ONCE ( dev , fb - > width > AST_MAX_HWC_WIDTH ) | |
drm_WARN_ON_ONCE ( dev , fb - > height > AST_MAX_HWC_HEIGHT ) )
return - EINVAL ;
gbo = drm_gem_vram_of_gem ( fb - > obj [ 0 ] ) ;
2020-11-03 10:30:11 +01:00
ret = drm_gem_vram_vmap ( gbo , & map ) ;
if ( ret )
2020-12-09 15:25:20 +01:00
return ret ;
2020-11-03 10:30:11 +01:00
src = map . vaddr ; /* TODO: Use mapping abstraction properly */
2020-07-02 13:50:18 +02:00
2020-11-03 10:30:11 +01:00
dst = ast - > cursor . map [ ast - > cursor . next_index ] . vaddr_iomem ;
2020-07-02 13:50:18 +02:00
2020-07-02 13:50:19 +02:00
/* do data transfer to cursor BO */
update_cursor_image ( dst , src , fb - > width , fb - > height ) ;
2020-07-02 13:50:18 +02:00
2020-11-03 10:30:11 +01:00
drm_gem_vram_vunmap ( gbo , & map ) ;
2020-07-02 13:50:18 +02:00
return 0 ;
}
2020-07-02 13:50:20 +02:00
static void ast_cursor_set_base ( struct ast_private * ast , u64 address )
2020-07-02 13:50:16 +02:00
{
u8 addr0 = ( address > > 3 ) & 0xff ;
u8 addr1 = ( address > > 11 ) & 0xff ;
u8 addr2 = ( address > > 19 ) & 0xff ;
ast_set_index_reg ( ast , AST_IO_CRTC_PORT , 0xc8 , addr0 ) ;
ast_set_index_reg ( ast , AST_IO_CRTC_PORT , 0xc9 , addr1 ) ;
ast_set_index_reg ( ast , AST_IO_CRTC_PORT , 0xca , addr2 ) ;
}
2020-07-02 13:50:20 +02:00
void ast_cursor_page_flip ( struct ast_private * ast )
{
2020-07-30 15:52:03 +02:00
struct drm_device * dev = & ast - > base ;
2020-07-02 13:50:20 +02:00
struct drm_gem_vram_object * gbo ;
s64 off ;
gbo = ast - > cursor . gbo [ ast - > cursor . next_index ] ;
off = drm_gem_vram_offset ( gbo ) ;
if ( drm_WARN_ON_ONCE ( dev , off < 0 ) )
return ; /* Bug: we didn't pin the cursor HW BO to VRAM. */
ast_cursor_set_base ( ast , off ) ;
+ + ast - > cursor . next_index ;
ast - > cursor . next_index % = ARRAY_SIZE ( ast - > cursor . gbo ) ;
}
2020-07-02 13:50:21 +02:00
static void ast_cursor_set_location ( struct ast_private * ast , u16 x , u16 y ,
u8 x_offset , u8 y_offset )
{
u8 x0 = ( x & 0x00ff ) ;
u8 x1 = ( x & 0x0f00 ) > > 8 ;
u8 y0 = ( y & 0x00ff ) ;
u8 y1 = ( y & 0x0700 ) > > 8 ;
ast_set_index_reg ( ast , AST_IO_CRTC_PORT , 0xc2 , x_offset ) ;
ast_set_index_reg ( ast , AST_IO_CRTC_PORT , 0xc3 , y_offset ) ;
ast_set_index_reg ( ast , AST_IO_CRTC_PORT , 0xc4 , x0 ) ;
ast_set_index_reg ( ast , AST_IO_CRTC_PORT , 0xc5 , x1 ) ;
ast_set_index_reg ( ast , AST_IO_CRTC_PORT , 0xc6 , y0 ) ;
ast_set_index_reg ( ast , AST_IO_CRTC_PORT , 0xc7 , y1 ) ;
}
2020-07-02 13:50:24 +02:00
void ast_cursor_show ( struct ast_private * ast , int x , int y ,
unsigned int offset_x , unsigned int offset_y )
2020-07-02 13:50:16 +02:00
{
2020-07-02 13:50:21 +02:00
u8 x_offset , y_offset ;
2020-08-18 13:28:11 -07:00
u8 __iomem * dst ;
u8 __iomem * sig ;
2020-07-02 13:50:16 +02:00
u8 jreg ;
2020-11-03 10:30:11 +01:00
dst = ast - > cursor . map [ ast - > cursor . next_index ] . vaddr ;
2020-07-02 13:50:16 +02:00
sig = dst + AST_HWC_SIZE ;
writel ( x , sig + AST_HWC_SIGNATURE_X ) ;
writel ( y , sig + AST_HWC_SIGNATURE_Y ) ;
if ( x < 0 ) {
2020-07-02 13:50:21 +02:00
x_offset = ( - x ) + offset_x ;
2020-07-02 13:50:16 +02:00
x = 0 ;
2020-07-02 13:50:21 +02:00
} else {
x_offset = offset_x ;
2020-07-02 13:50:16 +02:00
}
if ( y < 0 ) {
2020-07-02 13:50:21 +02:00
y_offset = ( - y ) + offset_y ;
2020-07-02 13:50:16 +02:00
y = 0 ;
2020-07-02 13:50:21 +02:00
} else {
y_offset = offset_y ;
2020-07-02 13:50:16 +02:00
}
2020-07-02 13:50:21 +02:00
ast_cursor_set_location ( ast , x , y , x_offset , y_offset ) ;
2020-07-02 13:50:16 +02:00
/* dummy write to fire HWC */
jreg = 0x02 |
0x01 ; /* enable ARGB4444 cursor */
ast_set_index_reg_mask ( ast , AST_IO_CRTC_PORT , 0xcb , 0xfc , jreg ) ;
}
/* Disable the hardware cursor by clearing the enable bits in index reg 0xcb. */
void ast_cursor_hide(struct ast_private *ast)
{
	ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00);
}