/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "drmP.h"
#include "drm_gem_cma_helper.h"

struct vc4_dev {
	struct drm_device *dev;

	struct vc4_hdmi *hdmi;
	struct vc4_hvs *hvs;
	struct vc4_crtc *crtc[3];
	struct drm_fbdev_cma *fbdev;

	/* The kernel-space BO cache.  Tracks buffers that have been
	 * unreferenced by all other users (refcounts of 0!) but not
	 * yet freed, so we can do cheap allocations.
	 */
	struct vc4_bo_cache {
		/* Array of list heads for entries in the BO cache,
		 * based on number of pages, so we can do O(1) lookups
		 * in the cache when allocating (see the lookup sketch
		 * below struct vc4_bo).
		 */
		struct list_head *size_list;
		uint32_t size_list_size;

		/* List of all BOs in the cache, ordered by age, so we
		 * can do O(1) lookups when trying to free old
		 * buffers.
		 */
		struct list_head time_list;
		struct work_struct time_work;
		struct timer_list time_timer;
	} bo_cache;

	struct vc4_bo_stats {
		u32 num_allocated;
		u32 size_allocated;
		u32 num_cached;
		u32 size_cached;
	} bo_stats;

	/* Protects bo_cache and the BO stats. */
	struct mutex bo_lock;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
	return (struct vc4_dev *)dev->dev_private;
}

struct vc4_bo {
	struct drm_gem_cma_object base;

	/* List entry for the BO's position in either
	 * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
	 */
	struct list_head unref_head;

	/* Time in jiffies when the BO was put in vc4->bo_cache. */
	unsigned long free_time;

	/* List entry for the BO's position in vc4_dev->bo_cache.size_list */
	struct list_head size_head;
};

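/* A minimal sketch, not part of the driver's API, of how the
 * size-bucketed cache in struct vc4_dev can be searched in O(1):
 * index bo_cache.size_list by the buffer's page count and reuse the
 * first entry in that bucket.  The helper name and the page-count
 * rounding are assumptions for illustration only; the real lookup
 * lives in vc4_bo.c.  Callers would hold vc4->bo_lock.
 */
static inline struct vc4_bo *
vc4_bo_cache_peek_example(struct vc4_dev *vc4, size_t size)
{
	uint32_t pages = roundup(size, PAGE_SIZE) >> PAGE_SHIFT;
	struct list_head *bucket;

	if (pages >= vc4->bo_cache.size_list_size)
		return NULL;

	bucket = &vc4->bo_cache.size_list[pages];
	if (list_empty(bucket))
		return NULL;

	/* Cached BOs are linked into their bucket via vc4_bo.size_head. */
	return list_first_entry(bucket, struct vc4_bo, size_head);
}
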
static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
	return (struct vc4_bo *)bo;
}

struct vc4_hvs {
	struct platform_device *pdev;
	void __iomem *regs;
	void __iomem *dlist;
};

struct vc4_plane {
	struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
	return (struct vc4_plane *)plane;
}

enum vc4_encoder_type {
	VC4_ENCODER_TYPE_HDMI,
	VC4_ENCODER_TYPE_VEC,
	VC4_ENCODER_TYPE_DSI0,
	VC4_ENCODER_TYPE_DSI1,
	VC4_ENCODER_TYPE_SMI,
	VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
	struct drm_encoder base;
	enum vc4_encoder_type type;
	u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
	return container_of(encoder, struct vc4_encoder, base);
}

#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)

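/* Note: these helpers assume a local "struct vc4_dev *vc4" is in scope,
 * since they expand to accesses through vc4->hvs->regs.  A usage sketch
 * (the offset and bit names below are placeholders, not real HVS
 * registers):
 *
 *	u32 dispctrl = HVS_READ(offset);
 *	HVS_WRITE(offset, dispctrl | enable_bit);
 */
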
/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts.  Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
	int ret__ = 0; \
	while (!(COND)) { \
		if (time_after(jiffies, timeout__)) { \
			if (!(COND)) \
				ret__ = -ETIMEDOUT; \
			break; \
		} \
		if (W && drm_can_sleep()) { \
			msleep(W); \
		} else { \
			cpu_relax(); \
		} \
	} \
	ret__; \
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)

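/* A usage sketch for wait_for(), for illustration only (the condition,
 * offset, and timeout below are assumptions, not taken from this
 * driver):
 *
 *	int ret = wait_for(HVS_READ(offset) & done_bit, 1000);
 *	if (ret == -ETIMEDOUT)
 *		DRM_ERROR("timed out waiting on HVS\n");
 *
 * wait_for() evaluates to 0 once COND becomes true and to -ETIMEDOUT if
 * it never does within MS milliseconds; passing W == 0 to _wait_for()
 * busy-waits with cpu_relax() instead of sleeping between checks.
 */
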
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
			     bool from_cache);
int vc4_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
				 struct drm_gem_object *obj, int flags);
void vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
void vc4_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file);
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);
void vc4_debugfs_cleanup(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
				 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(struct drm_plane_state *state);