/*
 * linux/drivers/video/omap2/dss/dsi.c
 *
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#define DSS_SUBSYS_NAME "DSI"

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/semaphore.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/pm_runtime.h>

#include <video/omapdss.h>
#include <video/mipi_display.h>
#include <plat/clock.h>

#include "dss.h"
#include "dss_features.h"

/*#define VERBOSE_IRQ*/
#define DSI_CATCH_MISSING_TE

struct dsi_reg { u16 idx; };

#define DSI_REG(idx)		((const struct dsi_reg) { idx })

#define DSI_SZ_REGS		SZ_1K
/* DSI Protocol Engine */

#define DSI_REVISION			DSI_REG(0x0000)
#define DSI_SYSCONFIG			DSI_REG(0x0010)
#define DSI_SYSSTATUS			DSI_REG(0x0014)
#define DSI_IRQSTATUS			DSI_REG(0x0018)
#define DSI_IRQENABLE			DSI_REG(0x001C)
#define DSI_CTRL			DSI_REG(0x0040)
#define DSI_GNQ				DSI_REG(0x0044)
#define DSI_COMPLEXIO_CFG1		DSI_REG(0x0048)
#define DSI_COMPLEXIO_IRQ_STATUS	DSI_REG(0x004C)
#define DSI_COMPLEXIO_IRQ_ENABLE	DSI_REG(0x0050)
#define DSI_CLK_CTRL			DSI_REG(0x0054)
#define DSI_TIMING1			DSI_REG(0x0058)
#define DSI_TIMING2			DSI_REG(0x005C)
#define DSI_VM_TIMING1			DSI_REG(0x0060)
#define DSI_VM_TIMING2			DSI_REG(0x0064)
#define DSI_VM_TIMING3			DSI_REG(0x0068)
#define DSI_CLK_TIMING			DSI_REG(0x006C)
#define DSI_TX_FIFO_VC_SIZE		DSI_REG(0x0070)
#define DSI_RX_FIFO_VC_SIZE		DSI_REG(0x0074)
#define DSI_COMPLEXIO_CFG2		DSI_REG(0x0078)
#define DSI_RX_FIFO_VC_FULLNESS		DSI_REG(0x007C)
#define DSI_VM_TIMING4			DSI_REG(0x0080)
#define DSI_TX_FIFO_VC_EMPTINESS	DSI_REG(0x0084)
#define DSI_VM_TIMING5			DSI_REG(0x0088)
#define DSI_VM_TIMING6			DSI_REG(0x008C)
#define DSI_VM_TIMING7			DSI_REG(0x0090)
#define DSI_STOPCLK_TIMING		DSI_REG(0x0094)
#define DSI_VC_CTRL(n)			DSI_REG(0x0100 + (n * 0x20))
#define DSI_VC_TE(n)			DSI_REG(0x0104 + (n * 0x20))
#define DSI_VC_LONG_PACKET_HEADER(n)	DSI_REG(0x0108 + (n * 0x20))
#define DSI_VC_LONG_PACKET_PAYLOAD(n)	DSI_REG(0x010C + (n * 0x20))
#define DSI_VC_SHORT_PACKET_HEADER(n)	DSI_REG(0x0110 + (n * 0x20))
#define DSI_VC_IRQSTATUS(n)		DSI_REG(0x0118 + (n * 0x20))
#define DSI_VC_IRQENABLE(n)		DSI_REG(0x011C + (n * 0x20))

/* DSIPHY_SCP */

#define DSI_DSIPHY_CFG0			DSI_REG(0x200 + 0x0000)
#define DSI_DSIPHY_CFG1			DSI_REG(0x200 + 0x0004)
#define DSI_DSIPHY_CFG2			DSI_REG(0x200 + 0x0008)
#define DSI_DSIPHY_CFG5			DSI_REG(0x200 + 0x0014)
#define DSI_DSIPHY_CFG10		DSI_REG(0x200 + 0x0028)

/* DSI_PLL_CTRL_SCP */

#define DSI_PLL_CONTROL			DSI_REG(0x300 + 0x0000)
#define DSI_PLL_STATUS			DSI_REG(0x300 + 0x0004)
#define DSI_PLL_GO			DSI_REG(0x300 + 0x0008)
#define DSI_PLL_CONFIGURATION1		DSI_REG(0x300 + 0x000C)
#define DSI_PLL_CONFIGURATION2		DSI_REG(0x300 + 0x0010)
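
/* Helpers to read, and to read-modify-write, a bit field of a DSI register */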
#define REG_GET(dsidev, idx, start, end) \
	FLD_GET(dsi_read_reg(dsidev, idx), start, end)

#define REG_FLD_MOD(dsidev, idx, val, start, end) \
	dsi_write_reg(dsidev, idx, FLD_MOD(dsi_read_reg(dsidev, idx), val, start, end))
/* Global interrupts */
#define DSI_IRQ_VC0		(1 << 0)
#define DSI_IRQ_VC1		(1 << 1)
#define DSI_IRQ_VC2		(1 << 2)
#define DSI_IRQ_VC3		(1 << 3)
#define DSI_IRQ_WAKEUP		(1 << 4)
#define DSI_IRQ_RESYNC		(1 << 5)
#define DSI_IRQ_PLL_LOCK	(1 << 7)
#define DSI_IRQ_PLL_UNLOCK	(1 << 8)
#define DSI_IRQ_PLL_RECALL	(1 << 9)
#define DSI_IRQ_COMPLEXIO_ERR	(1 << 10)
#define DSI_IRQ_HS_TX_TIMEOUT	(1 << 14)
#define DSI_IRQ_LP_RX_TIMEOUT	(1 << 15)
#define DSI_IRQ_TE_TRIGGER	(1 << 16)
#define DSI_IRQ_ACK_TRIGGER	(1 << 17)
#define DSI_IRQ_SYNC_LOST	(1 << 18)
#define DSI_IRQ_LDO_POWER_GOOD	(1 << 19)
#define DSI_IRQ_TA_TIMEOUT	(1 << 20)
#define DSI_IRQ_ERROR_MASK \
	(DSI_IRQ_HS_TX_TIMEOUT | DSI_IRQ_LP_RX_TIMEOUT | DSI_IRQ_SYNC_LOST | \
	DSI_IRQ_TA_TIMEOUT)
#define DSI_IRQ_CHANNEL_MASK	0xf

/* Virtual channel interrupts */
#define DSI_VC_IRQ_CS		(1 << 0)
#define DSI_VC_IRQ_ECC_CORR	(1 << 1)
#define DSI_VC_IRQ_PACKET_SENT	(1 << 2)
#define DSI_VC_IRQ_FIFO_TX_OVF	(1 << 3)
#define DSI_VC_IRQ_FIFO_RX_OVF	(1 << 4)
#define DSI_VC_IRQ_BTA		(1 << 5)
#define DSI_VC_IRQ_ECC_NO_CORR	(1 << 6)
#define DSI_VC_IRQ_FIFO_TX_UDF	(1 << 7)
#define DSI_VC_IRQ_PP_BUSY_CHANGE (1 << 8)
#define DSI_VC_IRQ_ERROR_MASK \
	(DSI_VC_IRQ_CS | DSI_VC_IRQ_ECC_CORR | DSI_VC_IRQ_FIFO_TX_OVF | \
	DSI_VC_IRQ_FIFO_RX_OVF | DSI_VC_IRQ_ECC_NO_CORR | \
	DSI_VC_IRQ_FIFO_TX_UDF)

/* ComplexIO interrupts */
#define DSI_CIO_IRQ_ERRSYNCESC1		(1 << 0)
#define DSI_CIO_IRQ_ERRSYNCESC2		(1 << 1)
#define DSI_CIO_IRQ_ERRSYNCESC3		(1 << 2)
#define DSI_CIO_IRQ_ERRSYNCESC4		(1 << 3)
#define DSI_CIO_IRQ_ERRSYNCESC5		(1 << 4)
#define DSI_CIO_IRQ_ERRESC1		(1 << 5)
#define DSI_CIO_IRQ_ERRESC2		(1 << 6)
#define DSI_CIO_IRQ_ERRESC3		(1 << 7)
#define DSI_CIO_IRQ_ERRESC4		(1 << 8)
#define DSI_CIO_IRQ_ERRESC5		(1 << 9)
#define DSI_CIO_IRQ_ERRCONTROL1		(1 << 10)
#define DSI_CIO_IRQ_ERRCONTROL2		(1 << 11)
#define DSI_CIO_IRQ_ERRCONTROL3		(1 << 12)
#define DSI_CIO_IRQ_ERRCONTROL4		(1 << 13)
#define DSI_CIO_IRQ_ERRCONTROL5		(1 << 14)
#define DSI_CIO_IRQ_STATEULPS1		(1 << 15)
#define DSI_CIO_IRQ_STATEULPS2		(1 << 16)
#define DSI_CIO_IRQ_STATEULPS3		(1 << 17)
#define DSI_CIO_IRQ_STATEULPS4		(1 << 18)
#define DSI_CIO_IRQ_STATEULPS5		(1 << 19)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_1	(1 << 20)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_1	(1 << 21)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_2	(1 << 22)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_2	(1 << 23)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_3	(1 << 24)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_3	(1 << 25)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_4	(1 << 26)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_4	(1 << 27)
#define DSI_CIO_IRQ_ERRCONTENTIONLP0_5	(1 << 28)
#define DSI_CIO_IRQ_ERRCONTENTIONLP1_5	(1 << 29)
#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL0	(1 << 30)
#define DSI_CIO_IRQ_ULPSACTIVENOT_ALL1	(1 << 31)
#define DSI_CIO_IRQ_ERROR_MASK \
	(DSI_CIO_IRQ_ERRSYNCESC1 | DSI_CIO_IRQ_ERRSYNCESC2 | \
	 DSI_CIO_IRQ_ERRSYNCESC3 | DSI_CIO_IRQ_ERRSYNCESC4 | \
	 DSI_CIO_IRQ_ERRSYNCESC5 | \
	 DSI_CIO_IRQ_ERRESC1 | DSI_CIO_IRQ_ERRESC2 | \
	 DSI_CIO_IRQ_ERRESC3 | DSI_CIO_IRQ_ERRESC4 | \
	 DSI_CIO_IRQ_ERRESC5 | \
	 DSI_CIO_IRQ_ERRCONTROL1 | DSI_CIO_IRQ_ERRCONTROL2 | \
	 DSI_CIO_IRQ_ERRCONTROL3 | DSI_CIO_IRQ_ERRCONTROL4 | \
	 DSI_CIO_IRQ_ERRCONTROL5 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_1 | DSI_CIO_IRQ_ERRCONTENTIONLP1_1 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_2 | DSI_CIO_IRQ_ERRCONTENTIONLP1_2 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_3 | DSI_CIO_IRQ_ERRCONTENTIONLP1_3 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_4 | DSI_CIO_IRQ_ERRCONTENTIONLP1_4 | \
	 DSI_CIO_IRQ_ERRCONTENTIONLP0_5 | DSI_CIO_IRQ_ERRCONTENTIONLP1_5)

typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);

#define DSI_MAX_NR_ISRS		2

struct dsi_isr_data {
	omap_dsi_isr_t	isr;
	void		*arg;
	u32		mask;
};
enum fifo_size {
	DSI_FIFO_SIZE_0		= 0,
	DSI_FIFO_SIZE_32	= 1,
	DSI_FIFO_SIZE_64	= 2,
	DSI_FIFO_SIZE_96	= 3,
	DSI_FIFO_SIZE_128	= 4,
};

enum dsi_vc_source {
	DSI_VC_SOURCE_L4 = 0,
	DSI_VC_SOURCE_VP,
};

enum dsi_lane {
	DSI_CLK_P	= 1 << 0,
	DSI_CLK_N	= 1 << 1,
	DSI_DATA1_P	= 1 << 2,
	DSI_DATA1_N	= 1 << 3,
	DSI_DATA2_P	= 1 << 4,
	DSI_DATA2_N	= 1 << 5,
	DSI_DATA3_P	= 1 << 6,
	DSI_DATA3_N	= 1 << 7,
	DSI_DATA4_P	= 1 << 8,
	DSI_DATA4_N	= 1 << 9,
};

struct dsi_update_region {
	u16 x, y, w, h;
	struct omap_dss_device *device;
};
struct dsi_irq_stats {
	unsigned long last_reset;
	unsigned irq_count;
	unsigned dsi_irqs[32];
	unsigned vc_irqs[4][32];
	unsigned cio_irqs[32];
};

struct dsi_isr_tables {
	struct dsi_isr_data isr_table[DSI_MAX_NR_ISRS];
	struct dsi_isr_data isr_table_vc[4][DSI_MAX_NR_ISRS];
	struct dsi_isr_data isr_table_cio[DSI_MAX_NR_ISRS];
};
struct dsi_data {
	struct platform_device *pdev;
	void __iomem	*base;

	int irq;

	struct clk *dss_clk;
	struct clk *sys_clk;

	int (*enable_pads)(int dsi_id, unsigned lane_mask);
	void (*disable_pads)(int dsi_id, unsigned lane_mask);

	struct dsi_clock_info current_cinfo;

	bool vdds_dsi_enabled;
	struct regulator *vdds_dsi_reg;

	struct {
		enum dsi_vc_source source;
		struct omap_dss_device *dssdev;
		enum fifo_size fifo_size;
		int vc_id;
	} vc[4];

	struct mutex lock;
	struct semaphore bus_lock;

	unsigned pll_locked;

	spinlock_t irq_lock;
	struct dsi_isr_tables isr_tables;
	/* space for a copy used by the interrupt handler */
	struct dsi_isr_tables isr_tables_copy;

	int update_channel;
	struct dsi_update_region update_region;

	bool te_enabled;
	bool ulps_enabled;

	void (*framedone_callback)(int, void *);
	void *framedone_data;

	struct delayed_work framedone_timeout_work;

#ifdef DSI_CATCH_MISSING_TE
	struct timer_list te_timer;
#endif

	unsigned long cache_req_pck;
	unsigned long cache_clk_freq;
	struct dsi_clock_info cache_cinfo;

	u32		errors;
	spinlock_t	errors_lock;
#ifdef DEBUG
	ktime_t perf_setup_time;
	ktime_t perf_start_time;
#endif
	int debug_read;
	int debug_write;

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spinlock_t irq_stats_lock;
	struct dsi_irq_stats irq_stats;
#endif
	/* DSI PLL Parameter Ranges */
	unsigned long regm_max, regn_max;
	unsigned long regm_dispc_max, regm_dsi_max;
	unsigned long fint_min, fint_max;
	unsigned long lpdiv_max;

	int num_data_lanes;

	unsigned scp_clk_refcount;
};
struct dsi_packet_sent_handler_data {
	struct platform_device *dsidev;
	struct completion *completion;
};

static struct platform_device *dsi_pdev_map[MAX_NUM_DSI];

#ifdef DEBUG
static unsigned int dsi_perf;
module_param_named(dsi_perf, dsi_perf, bool, 0644);
#endif
static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
{
	return dev_get_drvdata(&dsidev->dev);
}

static inline struct platform_device *dsi_get_dsidev_from_dssdev(struct omap_dss_device *dssdev)
{
	return dsi_pdev_map[dssdev->phy.dsi.module];
}

struct platform_device *dsi_get_dsidev_from_id(int module)
{
	return dsi_pdev_map[module];
}

static inline int dsi_get_dsidev_id(struct platform_device *dsidev)
{
	return dsidev->id;
}
static inline void dsi_write_reg(struct platform_device *dsidev,
		const struct dsi_reg idx, u32 val)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	__raw_writel(val, dsi->base + idx.idx);
}

static inline u32 dsi_read_reg(struct platform_device *dsidev,
		const struct dsi_reg idx)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return __raw_readl(dsi->base + idx.idx);
}
void dsi_bus_lock(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	down(&dsi->bus_lock);
}
EXPORT_SYMBOL(dsi_bus_lock);

void dsi_bus_unlock(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	up(&dsi->bus_lock);
}
EXPORT_SYMBOL(dsi_bus_unlock);

static bool dsi_bus_is_locked(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->bus_lock.count == 0;
}
static void dsi_completion_handler(void *data, u32 mask)
{
	complete((struct completion *)data);
}
static inline int wait_for_bit_change(struct platform_device *dsidev,
		const struct dsi_reg idx, int bitnum, int value)
{
	int t = 100000;

	while (REG_GET(dsidev, idx, bitnum, bitnum) != value) {
		if (--t == 0)
			return !value;
	}

	return value;
}
#ifdef DEBUG
static void dsi_perf_mark_setup(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_setup_time = ktime_get();
}

static void dsi_perf_mark_start(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	dsi->perf_start_time = ktime_get();
}

static void dsi_perf_show(struct platform_device *dsidev, const char *name)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	ktime_t t, setup_time, trans_time;
	u32 total_bytes;
	u32 setup_us, trans_us, total_us;

	if (!dsi_perf)
		return;

	t = ktime_get();

	setup_time = ktime_sub(dsi->perf_start_time, dsi->perf_setup_time);
	setup_us = (u32)ktime_to_us(setup_time);
	if (setup_us == 0)
		setup_us = 1;

	trans_time = ktime_sub(t, dsi->perf_start_time);
	trans_us = (u32)ktime_to_us(trans_time);
	if (trans_us == 0)
		trans_us = 1;

	total_us = setup_us + trans_us;

	total_bytes = dsi->update_region.w *
		dsi->update_region.h *
		dsi->update_region.device->ctrl.pixel_size / 8;

	printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
			"%u bytes, %u kbytes/sec\n",
			name,
			setup_us,
			trans_us,
			total_us,
			1000 * 1000 / total_us,
			total_bytes,
			total_bytes * 1000 / total_us);
}
#else
static inline void dsi_perf_mark_setup(struct platform_device *dsidev)
{
}

static inline void dsi_perf_mark_start(struct platform_device *dsidev)
{
}

static inline void dsi_perf_show(struct platform_device *dsidev,
		const char *name)
{
}
#endif
static void print_irq_status(u32 status)
{
	if (status == 0)
		return;

#ifndef VERBOSE_IRQ
	if ((status & ~DSI_IRQ_CHANNEL_MASK) == 0)
		return;
#endif
	printk(KERN_DEBUG "DSI IRQ: 0x%x: ", status);

#define PIS(x) \
	if (status & DSI_IRQ_##x) \
		printk(#x " ");
#ifdef VERBOSE_IRQ
	PIS(VC0);
	PIS(VC1);
	PIS(VC2);
	PIS(VC3);
#endif
	PIS(WAKEUP);
	PIS(RESYNC);
	PIS(PLL_LOCK);
	PIS(PLL_UNLOCK);
	PIS(PLL_RECALL);
	PIS(COMPLEXIO_ERR);
	PIS(HS_TX_TIMEOUT);
	PIS(LP_RX_TIMEOUT);
	PIS(TE_TRIGGER);
	PIS(ACK_TRIGGER);
	PIS(SYNC_LOST);
	PIS(LDO_POWER_GOOD);
	PIS(TA_TIMEOUT);
#undef PIS

	printk("\n");
}

static void print_irq_status_vc(int channel, u32 status)
{
	if (status == 0)
		return;

#ifndef VERBOSE_IRQ
	if ((status & ~DSI_VC_IRQ_PACKET_SENT) == 0)
		return;
#endif
	printk(KERN_DEBUG "DSI VC(%d) IRQ 0x%x: ", channel, status);

#define PIS(x) \
	if (status & DSI_VC_IRQ_##x) \
		printk(#x " ");
	PIS(CS);
	PIS(ECC_CORR);
#ifdef VERBOSE_IRQ
	PIS(PACKET_SENT);
#endif
	PIS(FIFO_TX_OVF);
	PIS(FIFO_RX_OVF);
	PIS(BTA);
	PIS(ECC_NO_CORR);
	PIS(FIFO_TX_UDF);
	PIS(PP_BUSY_CHANGE);
#undef PIS

	printk("\n");
}

static void print_irq_status_cio(u32 status)
{
	if (status == 0)
		return;

	printk(KERN_DEBUG "DSI CIO IRQ 0x%x: ", status);

#define PIS(x) \
	if (status & DSI_CIO_IRQ_##x) \
		printk(#x " ");
	PIS(ERRSYNCESC1);
	PIS(ERRSYNCESC2);
	PIS(ERRSYNCESC3);
	PIS(ERRESC1);
	PIS(ERRESC2);
	PIS(ERRESC3);
	PIS(ERRCONTROL1);
	PIS(ERRCONTROL2);
	PIS(ERRCONTROL3);
	PIS(STATEULPS1);
	PIS(STATEULPS2);
	PIS(STATEULPS3);
	PIS(ERRCONTENTIONLP0_1);
	PIS(ERRCONTENTIONLP1_1);
	PIS(ERRCONTENTIONLP0_2);
	PIS(ERRCONTENTIONLP1_2);
	PIS(ERRCONTENTIONLP0_3);
	PIS(ERRCONTENTIONLP1_3);
	PIS(ULPSACTIVENOT_ALL0);
	PIS(ULPSACTIVENOT_ALL1);
#undef PIS

	printk("\n");
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static void dsi_collect_irq_stats(struct platform_device *dsidev, u32 irqstatus,
		u32 *vcstatus, u32 ciostatus)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;

	spin_lock(&dsi->irq_stats_lock);

	dsi->irq_stats.irq_count++;
	dss_collect_irq_stats(irqstatus, dsi->irq_stats.dsi_irqs);

	for (i = 0; i < 4; ++i)
		dss_collect_irq_stats(vcstatus[i], dsi->irq_stats.vc_irqs[i]);

	dss_collect_irq_stats(ciostatus, dsi->irq_stats.cio_irqs);

	spin_unlock(&dsi->irq_stats_lock);
}
#else
#define dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus)
#endif
static int debug_irq;

static void dsi_handle_irq_errors(struct platform_device *dsidev, u32 irqstatus,
		u32 *vcstatus, u32 ciostatus)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;

	if (irqstatus & DSI_IRQ_ERROR_MASK) {
		DSSERR("DSI error, irqstatus %x\n", irqstatus);
		print_irq_status(irqstatus);
		spin_lock(&dsi->errors_lock);
		dsi->errors |= irqstatus & DSI_IRQ_ERROR_MASK;
		spin_unlock(&dsi->errors_lock);
	} else if (debug_irq) {
		print_irq_status(irqstatus);
	}

	for (i = 0; i < 4; ++i) {
		if (vcstatus[i] & DSI_VC_IRQ_ERROR_MASK) {
			DSSERR("DSI VC(%d) error, vc irqstatus %x\n",
					i, vcstatus[i]);
			print_irq_status_vc(i, vcstatus[i]);
		} else if (debug_irq) {
			print_irq_status_vc(i, vcstatus[i]);
		}
	}

	if (ciostatus & DSI_CIO_IRQ_ERROR_MASK) {
		DSSERR("DSI CIO error, cio irqstatus %x\n", ciostatus);
		print_irq_status_cio(ciostatus);
	} else if (debug_irq) {
		print_irq_status_cio(ciostatus);
	}
}
static void dsi_call_isrs(struct dsi_isr_data *isr_array,
		unsigned isr_array_size, u32 irqstatus)
{
	struct dsi_isr_data *isr_data;
	int i;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];
		if (isr_data->isr && isr_data->mask & irqstatus)
			isr_data->isr(isr_data->arg, irqstatus);
	}
}

static void dsi_handle_isrs(struct dsi_isr_tables *isr_tables,
		u32 irqstatus, u32 *vcstatus, u32 ciostatus)
{
	int i;

	dsi_call_isrs(isr_tables->isr_table,
			ARRAY_SIZE(isr_tables->isr_table),
			irqstatus);

	for (i = 0; i < 4; ++i) {
		if (vcstatus[i] == 0)
			continue;
		dsi_call_isrs(isr_tables->isr_table_vc[i],
				ARRAY_SIZE(isr_tables->isr_table_vc[i]),
				vcstatus[i]);
	}

	if (ciostatus != 0)
		dsi_call_isrs(isr_tables->isr_table_cio,
				ARRAY_SIZE(isr_tables->isr_table_cio),
				ciostatus);
}
static irqreturn_t omap_dsi_irq_handler(int irq, void *arg)
{
	struct platform_device *dsidev;
	struct dsi_data *dsi;
	u32 irqstatus, vcstatus[4], ciostatus;
	int i;

	dsidev = (struct platform_device *) arg;
	dsi = dsi_get_dsidrv_data(dsidev);

	spin_lock(&dsi->irq_lock);

	irqstatus = dsi_read_reg(dsidev, DSI_IRQSTATUS);

	/* IRQ is not for us */
	if (!irqstatus) {
		spin_unlock(&dsi->irq_lock);
		return IRQ_NONE;
	}

	dsi_write_reg(dsidev, DSI_IRQSTATUS, irqstatus & ~DSI_IRQ_CHANNEL_MASK);
	/* flush posted write */
	dsi_read_reg(dsidev, DSI_IRQSTATUS);

	for (i = 0; i < 4; ++i) {
		if ((irqstatus & (1 << i)) == 0) {
			vcstatus[i] = 0;
			continue;
		}

		vcstatus[i] = dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));

		dsi_write_reg(dsidev, DSI_VC_IRQSTATUS(i), vcstatus[i]);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_VC_IRQSTATUS(i));
	}

	if (irqstatus & DSI_IRQ_COMPLEXIO_ERR) {
		ciostatus = dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);

		dsi_write_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS, ciostatus);
		/* flush posted write */
		dsi_read_reg(dsidev, DSI_COMPLEXIO_IRQ_STATUS);
	} else {
		ciostatus = 0;
	}

#ifdef DSI_CATCH_MISSING_TE
	if (irqstatus & DSI_IRQ_TE_TRIGGER)
		del_timer(&dsi->te_timer);
#endif

	/* make a copy and unlock, so that isrs can unregister
	 * themselves */
	memcpy(&dsi->isr_tables_copy, &dsi->isr_tables,
		sizeof(dsi->isr_tables));

	spin_unlock(&dsi->irq_lock);

	dsi_handle_isrs(&dsi->isr_tables_copy, irqstatus, vcstatus, ciostatus);

	dsi_handle_irq_errors(dsidev, irqstatus, vcstatus, ciostatus);

	dsi_collect_irq_stats(dsidev, irqstatus, vcstatus, ciostatus);

	return IRQ_HANDLED;
}
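
/*
 * Program one IRQ enable register: combine the default error mask with the
 * masks of all registered ISRs, clear the status bits that are about to be
 * newly enabled, and write the resulting mask to the enable register.
 */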
/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_configure_irqs(struct platform_device *dsidev,
		struct dsi_isr_data *isr_array,
		unsigned isr_array_size, u32 default_mask,
		const struct dsi_reg enable_reg,
		const struct dsi_reg status_reg)
{
	struct dsi_isr_data *isr_data;
	u32 mask;
	u32 old_mask;
	int i;

	mask = default_mask;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];

		if (isr_data->isr == NULL)
			continue;

		mask |= isr_data->mask;
	}

	old_mask = dsi_read_reg(dsidev, enable_reg);
	/* clear the irqstatus for newly enabled irqs */
	dsi_write_reg(dsidev, status_reg, (mask ^ old_mask) & mask);
	dsi_write_reg(dsidev, enable_reg, mask);

	/* flush posted writes */
	dsi_read_reg(dsidev, enable_reg);
	dsi_read_reg(dsidev, status_reg);
}
/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 mask = DSI_IRQ_ERROR_MASK;
#ifdef DSI_CATCH_MISSING_TE
	mask |= DSI_IRQ_TE_TRIGGER;
#endif
	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table), mask,
			DSI_IRQENABLE, DSI_IRQSTATUS);
}

/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs_vc(struct platform_device *dsidev, int vc)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_vc[vc],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[vc]),
			DSI_VC_IRQ_ERROR_MASK,
			DSI_VC_IRQENABLE(vc), DSI_VC_IRQSTATUS(vc));
}

/* dsi->irq_lock has to be locked by the caller */
static void _omap_dsi_set_irqs_cio(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	_omap_dsi_configure_irqs(dsidev, dsi->isr_tables.isr_table_cio,
			ARRAY_SIZE(dsi->isr_tables.isr_table_cio),
			DSI_CIO_IRQ_ERROR_MASK,
			DSI_COMPLEXIO_IRQ_ENABLE, DSI_COMPLEXIO_IRQ_STATUS);
}
static void _dsi_initialize_irq(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int vc;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	memset(&dsi->isr_tables, 0, sizeof(dsi->isr_tables));

	_omap_dsi_set_irqs(dsidev);
	for (vc = 0; vc < 4; ++vc)
		_omap_dsi_set_irqs_vc(dsidev, vc);
	_omap_dsi_set_irqs_cio(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);
}
static int _dsi_register_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
		struct dsi_isr_data *isr_array, unsigned isr_array_size)
{
	struct dsi_isr_data *isr_data;
	int free_idx;
	int i;

	BUG_ON(isr == NULL);

	/* check for duplicate entry and find a free slot */

	free_idx = -1;
	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];

		if (isr_data->isr == isr && isr_data->arg == arg &&
				isr_data->mask == mask) {
			return -EINVAL;
		}

		if (isr_data->isr == NULL && free_idx == -1)
			free_idx = i;
	}

	if (free_idx == -1)
		return -EBUSY;

	isr_data = &isr_array[free_idx];
	isr_data->isr = isr;
	isr_data->arg = arg;
	isr_data->mask = mask;

	return 0;
}

static int _dsi_unregister_isr(omap_dsi_isr_t isr, void *arg, u32 mask,
		struct dsi_isr_data *isr_array, unsigned isr_array_size)
{
	struct dsi_isr_data *isr_data;
	int i;

	for (i = 0; i < isr_array_size; i++) {
		isr_data = &isr_array[i];
		if (isr_data->isr != isr || isr_data->arg != arg ||
				isr_data->mask != mask)
			continue;

		isr_data->isr = NULL;
		isr_data->arg = NULL;
		isr_data->mask = 0;

		return 0;
	}

	return -EINVAL;
}
static int dsi_register_isr(struct platform_device *dsidev, omap_dsi_isr_t isr,
		void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table));

	if (r == 0)
		_omap_dsi_set_irqs(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_unregister_isr(struct platform_device *dsidev,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table,
			ARRAY_SIZE(dsi->isr_tables.isr_table));

	if (r == 0)
		_omap_dsi_set_irqs(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_register_isr_vc(struct platform_device *dsidev, int channel,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_register_isr(isr, arg, mask,
			dsi->isr_tables.isr_table_vc[channel],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));

	if (r == 0)
		_omap_dsi_set_irqs_vc(dsidev, channel);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_unregister_isr_vc(struct platform_device *dsidev, int channel,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_unregister_isr(isr, arg, mask,
			dsi->isr_tables.isr_table_vc[channel],
			ARRAY_SIZE(dsi->isr_tables.isr_table_vc[channel]));

	if (r == 0)
		_omap_dsi_set_irqs_vc(dsidev, channel);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_register_isr_cio(struct platform_device *dsidev,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_register_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
			ARRAY_SIZE(dsi->isr_tables.isr_table_cio));

	if (r == 0)
		_omap_dsi_set_irqs_cio(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}

static int dsi_unregister_isr_cio(struct platform_device *dsidev,
		omap_dsi_isr_t isr, void *arg, u32 mask)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	int r;

	spin_lock_irqsave(&dsi->irq_lock, flags);

	r = _dsi_unregister_isr(isr, arg, mask, dsi->isr_tables.isr_table_cio,
			ARRAY_SIZE(dsi->isr_tables.isr_table_cio));

	if (r == 0)
		_omap_dsi_set_irqs_cio(dsidev);

	spin_unlock_irqrestore(&dsi->irq_lock, flags);

	return r;
}
static u32 dsi_get_errors(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	u32 e;

	spin_lock_irqsave(&dsi->errors_lock, flags);
	e = dsi->errors;
	dsi->errors = 0;
	spin_unlock_irqrestore(&dsi->errors_lock, flags);

	return e;
}
int dsi_runtime_get(struct platform_device *dsidev)
{
	int r;
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	DSSDBG("dsi_runtime_get\n");

	r = pm_runtime_get_sync(&dsi->pdev->dev);
	WARN_ON(r < 0);
	return r < 0 ? r : 0;
}

void dsi_runtime_put(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r;

	DSSDBG("dsi_runtime_put\n");

	r = pm_runtime_put(&dsi->pdev->dev);
	WARN_ON(r < 0);
}
/* source clock for DSI PLL. this could also be PCLKFREE */
static inline void dsi_enable_pll_clock(struct platform_device *dsidev,
		bool enable)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (enable)
		clk_enable(dsi->sys_clk);
	else
		clk_disable(dsi->sys_clk);

	if (enable && dsi->pll_locked) {
		if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1)
			DSSERR("cannot lock PLL when enabling clocks\n");
	}
}
#ifdef DEBUG
static void _dsi_print_reset_status(struct platform_device *dsidev)
{
	u32 l;
	int b0, b1, b2;

	if (!dss_debug)
		return;

	/* A dummy read using the SCP interface to any DSIPHY register is
	 * required after DSIPHY reset to complete the reset of the DSI complex
	 * I/O. */
	l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);

	printk(KERN_DEBUG "DSI resets: ");

	l = dsi_read_reg(dsidev, DSI_PLL_STATUS);
	printk("PLL (%d) ", FLD_GET(l, 0, 0));

	l = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
	printk("CIO (%d) ", FLD_GET(l, 29, 29));

	if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
		b0 = 28;
		b1 = 27;
		b2 = 26;
	} else {
		b0 = 24;
		b1 = 25;
		b2 = 26;
	}

	l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
	printk("PHY (%x%x%x, %d, %d, %d)\n",
			FLD_GET(l, b0, b0),
			FLD_GET(l, b1, b1),
			FLD_GET(l, b2, b2),
			FLD_GET(l, 29, 29),
			FLD_GET(l, 30, 30),
			FLD_GET(l, 31, 31));
}
#else
#define _dsi_print_reset_status(x)
#endif
static inline int dsi_if_enable(struct platform_device *dsidev, bool enable)
{
	DSSDBG("dsi_if_enable(%d)\n", enable);

	enable = enable ? 1 : 0;
	REG_FLD_MOD(dsidev, DSI_CTRL, enable, 0, 0); /* IF_EN */

	if (wait_for_bit_change(dsidev, DSI_CTRL, 0, enable) != enable) {
		DSSERR("Failed to set dsi_if_enable to %d\n", enable);
		return -EIO;
	}

	return 0;
}
unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk;
}

static unsigned long dsi_get_pll_hsdiv_dsi_rate(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->current_cinfo.dsi_pll_hsdiv_dsi_clk;
}

static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	return dsi->current_cinfo.clkin4ddr / 16;
}
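
/*
 * The DSI functional clock is sourced either from the DSS functional clock or
 * from the DSI PLL HSDIV DSI output, depending on the currently selected
 * clock source.
 */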
static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
{
	unsigned long r;
	int dsi_module = dsi_get_dsidev_id(dsidev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) {
		/* DSI FCLK source is DSS_CLK_FCK */
		r = clk_get_rate(dsi->dss_clk);
	} else {
		/* DSI FCLK source is dsi_pll_hsdiv_dsi_clk */
		r = dsi_get_pll_hsdiv_dsi_rate(dsidev);
	}

	return r;
}
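
/* The LP clock is derived from the DSI functional clock: LP_CLK = DSI_FCLK / 2 / LP_CLK_DIV */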
static int dsi_set_lp_clk_divisor(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long dsi_fclk;
	unsigned lp_clk_div;
	unsigned long lp_clk;

	lp_clk_div = dssdev->clocks.dsi.lp_clk_div;

	if (lp_clk_div == 0 || lp_clk_div > dsi->lpdiv_max)
		return -EINVAL;

	dsi_fclk = dsi_fclk_rate(dsidev);

	lp_clk = dsi_fclk / 2 / lp_clk_div;

	DSSDBG("LP_CLK_DIV %u, LP_CLK %lu\n", lp_clk_div, lp_clk);
	dsi->current_cinfo.lp_clk = lp_clk;
	dsi->current_cinfo.lp_clk_div = lp_clk_div;

	/* LP_CLK_DIVISOR */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, lp_clk_div, 12, 0);

	/* LP_RX_SYNCHRO_ENABLE */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, dsi_fclk > 30000000 ? 1 : 0, 21, 21);

	return 0;
}
static void dsi_enable_scp_clk(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->scp_clk_refcount++ == 0)
		REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 14, 14); /* CIO_CLK_ICG */
}

static void dsi_disable_scp_clk(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	WARN_ON(dsi->scp_clk_refcount == 0);
	if (--dsi->scp_clk_refcount == 0)
		REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 14, 14); /* CIO_CLK_ICG */
}
enum dsi_pll_power_state {
	DSI_PLL_POWER_OFF	= 0x0,
	DSI_PLL_POWER_ON_HSCLK	= 0x1,
	DSI_PLL_POWER_ON_ALL	= 0x2,
	DSI_PLL_POWER_ON_DIV	= 0x3,
};
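
/*
 * Request a PLL power state via PLL_PWR_CMD and poll PLL_PWR_STATUS until the
 * hardware reports that state, giving up after about a millisecond.
 */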
static int dsi_pll_power(struct platform_device *dsidev,
		enum dsi_pll_power_state state)
{
	int t = 0;

	/* DSI-PLL power command 0x3 is not working */
	if (dss_has_feature(FEAT_DSI_PLL_PWR_BUG) &&
			state == DSI_PLL_POWER_ON_DIV)
		state = DSI_PLL_POWER_ON_ALL;

	/* PLL_PWR_CMD */
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, state, 31, 30);

	/* PLL_PWR_STATUS */
	while (FLD_GET(dsi_read_reg(dsidev, DSI_CLK_CTRL), 29, 28) != state) {
		if (++t > 1000) {
			DSSERR("Failed to set DSI PLL power mode to %d\n",
					state);
			return -ENODEV;
		}
		udelay(1);
	}

	return 0;
}
/* calculate clock rates using dividers in cinfo */
static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
		struct dsi_clock_info *cinfo)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
		return -EINVAL;

	if (cinfo->regm == 0 || cinfo->regm > dsi->regm_max)
		return -EINVAL;

	if (cinfo->regm_dispc > dsi->regm_dispc_max)
		return -EINVAL;

	if (cinfo->regm_dsi > dsi->regm_dsi_max)
		return -EINVAL;

	if (cinfo->use_sys_clk) {
		cinfo->clkin = clk_get_rate(dsi->sys_clk);
		/* XXX it is unclear if highfreq should be used
		 * with DSS_SYS_CLK source also */
		cinfo->highfreq = 0;
	} else {
		cinfo->clkin = dispc_mgr_pclk_rate(dssdev->manager->id);

		if (cinfo->clkin < 32000000)
			cinfo->highfreq = 0;
		else
			cinfo->highfreq = 1;
	}

	cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));

	if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
		return -EINVAL;

	cinfo->clkin4ddr = 2 * cinfo->regm * cinfo->fint;

	if (cinfo->clkin4ddr > 1800 * 1000 * 1000)
		return -EINVAL;

	if (cinfo->regm_dispc > 0)
		cinfo->dsi_pll_hsdiv_dispc_clk =
			cinfo->clkin4ddr / cinfo->regm_dispc;
	else
		cinfo->dsi_pll_hsdiv_dispc_clk = 0;

	if (cinfo->regm_dsi > 0)
		cinfo->dsi_pll_hsdiv_dsi_clk =
			cinfo->clkin4ddr / cinfo->regm_dsi;
	else
		cinfo->dsi_pll_hsdiv_dsi_clk = 0;

	return 0;
}
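
/*
 * Search the regn/regm/regm_dispc divider space for the combination whose
 * resulting DISPC functional clock lets dispc_find_clk_divs() get closest to
 * the requested pixel clock; the result is cached for repeated requests.
 */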
int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft,
		unsigned long req_pck, struct dsi_clock_info *dsi_cinfo,
		struct dispc_clock_info *dispc_cinfo)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dsi_clock_info cur, best;
	struct dispc_clock_info best_dispc;
	int min_fck_per_pck;
	int match = 0;
	unsigned long dss_sys_clk, max_dss_fck;

	dss_sys_clk = clk_get_rate(dsi->sys_clk);

	max_dss_fck = dss_feat_get_param_max(FEAT_PARAM_DSS_FCK);

	if (req_pck == dsi->cache_req_pck &&
			dsi->cache_cinfo.clkin == dss_sys_clk) {
		DSSDBG("DSI clock info found from cache\n");
		*dsi_cinfo = dsi->cache_cinfo;
		dispc_find_clk_divs(is_tft, req_pck,
			dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo);
		return 0;
	}

	min_fck_per_pck = CONFIG_OMAP2_DSS_MIN_FCK_PER_PCK;

	if (min_fck_per_pck &&
		req_pck * min_fck_per_pck > max_dss_fck) {
		DSSERR("Requested pixel clock not possible with the current "
				"OMAP2_DSS_MIN_FCK_PER_PCK setting. Turning "
				"the constraint off.\n");
		min_fck_per_pck = 0;
	}

	DSSDBG("dsi_pll_calc\n");

retry:
	memset(&best, 0, sizeof(best));
	memset(&best_dispc, 0, sizeof(best_dispc));

	memset(&cur, 0, sizeof(cur));
	cur.clkin = dss_sys_clk;
	cur.use_sys_clk = 1;
	cur.highfreq = 0;

	/* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */
	/* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
	/* To reduce PLL lock time, keep Fint high (around 2 MHz) */
	for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
		if (cur.highfreq == 0)
			cur.fint = cur.clkin / cur.regn;
		else
			cur.fint = cur.clkin / (2 * cur.regn);

		if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
			continue;

		/* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */
		for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
			unsigned long a, b;

			a = 2 * cur.regm * (cur.clkin / 1000);
			b = cur.regn * (cur.highfreq + 1);
			cur.clkin4ddr = a / b * 1000;

			if (cur.clkin4ddr > 1800 * 1000 * 1000)
				break;

			/* dsi_pll_hsdiv_dispc_clk(MHz) =
			 * DSIPHY(MHz) / regm_dispc < 173MHz/186MHz */
			for (cur.regm_dispc = 1; cur.regm_dispc <
					dsi->regm_dispc_max; ++cur.regm_dispc) {
				struct dispc_clock_info cur_dispc;
				cur.dsi_pll_hsdiv_dispc_clk =
					cur.clkin4ddr / cur.regm_dispc;

				/* this will narrow down the search a bit,
				 * but still give pixclocks below what was
				 * requested */
				if (cur.dsi_pll_hsdiv_dispc_clk < req_pck)
					break;

				if (cur.dsi_pll_hsdiv_dispc_clk > max_dss_fck)
					continue;

				if (min_fck_per_pck &&
					cur.dsi_pll_hsdiv_dispc_clk <
						req_pck * min_fck_per_pck)
					continue;

				match = 1;

				dispc_find_clk_divs(is_tft, req_pck,
						cur.dsi_pll_hsdiv_dispc_clk,
						&cur_dispc);

				if (abs(cur_dispc.pck - req_pck) <
						abs(best_dispc.pck - req_pck)) {
					best = cur;
					best_dispc = cur_dispc;

					if (cur_dispc.pck == req_pck)
						goto found;
				}
			}
		}
	}
found:
	if (!match) {
		if (min_fck_per_pck) {
			DSSERR("Could not find suitable clock settings.\n"
					"Turning FCK/PCK constraint off and "
					"trying again.\n");
			min_fck_per_pck = 0;
			goto retry;
		}

		DSSERR("Could not find suitable clock settings.\n");

		return -EINVAL;
	}

	/* dsi_pll_hsdiv_dsi_clk (regm_dsi) is not used */
	best.regm_dsi = 0;
	best.dsi_pll_hsdiv_dsi_clk = 0;

	if (dsi_cinfo)
		*dsi_cinfo = best;
	if (dispc_cinfo)
		*dispc_cinfo = best_dispc;

	dsi->cache_req_pck = req_pck;
	dsi->cache_clk_freq = 0;
	dsi->cache_cinfo = best;

	return 0;
}
2011-05-12 17:26:26 +05:30
int dsi_pll_set_clock_div ( struct platform_device * dsidev ,
struct dsi_clock_info * cinfo )
2009-10-28 11:59:56 +02:00
{
2011-05-12 17:26:27 +05:30
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2009-10-28 11:59:56 +02:00
int r = 0 ;
u32 l ;
2011-03-22 06:33:36 -05:00
int f = 0 ;
2011-03-14 23:28:23 -05:00
u8 regn_start , regn_end , regm_start , regm_end ;
u8 regm_dispc_start , regm_dispc_end , regm_dsi_start , regm_dsi_end ;
2009-10-28 11:59:56 +02:00
DSSDBGF ( ) ;
2011-05-12 17:26:27 +05:30
dsi - > current_cinfo . use_sys_clk = cinfo - > use_sys_clk ;
dsi - > current_cinfo . highfreq = cinfo - > highfreq ;
2011-04-07 15:28:47 +03:00
2011-05-12 17:26:27 +05:30
dsi - > current_cinfo . fint = cinfo - > fint ;
dsi - > current_cinfo . clkin4ddr = cinfo - > clkin4ddr ;
dsi - > current_cinfo . dsi_pll_hsdiv_dispc_clk =
2011-02-24 14:17:30 +05:30
cinfo - > dsi_pll_hsdiv_dispc_clk ;
2011-05-12 17:26:27 +05:30
dsi - > current_cinfo . dsi_pll_hsdiv_dsi_clk =
2011-02-24 14:17:30 +05:30
cinfo - > dsi_pll_hsdiv_dsi_clk ;
2009-10-28 11:59:56 +02:00
	dsi->current_cinfo.regn = cinfo->regn;
	dsi->current_cinfo.regm = cinfo->regm;
	dsi->current_cinfo.regm_dispc = cinfo->regm_dispc;
	dsi->current_cinfo.regm_dsi = cinfo->regm_dsi;

	DSSDBG("DSI Fint %ld\n", cinfo->fint);

	DSSDBG("clkin (%s) rate %ld, highfreq %d\n",
			cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
			cinfo->clkin,
			cinfo->highfreq);

	/* DSIPHY == CLKIN4DDR */
	DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n",
			cinfo->regm,
			cinfo->regn,
			cinfo->clkin,
			cinfo->highfreq + 1,
			cinfo->clkin4ddr);

	DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
			cinfo->clkin4ddr / 1000 / 1000 / 2);

	DSSDBG("Clock lane freq %ld Hz\n", cinfo->clkin4ddr / 4);

	DSSDBG("regm_dispc = %d, %s (%s) = %lu\n", cinfo->regm_dispc,
		dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
		dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC),
		cinfo->dsi_pll_hsdiv_dispc_clk);
	DSSDBG("regm_dsi = %d, %s (%s) = %lu\n", cinfo->regm_dsi,
		dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
		dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI),
		cinfo->dsi_pll_hsdiv_dsi_clk);

	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGN, &regn_start, &regn_end);
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM, &regm_start, &regm_end);
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DISPC, &regm_dispc_start,
			&regm_dispc_end);
	dss_feat_get_reg_field(FEAT_REG_DSIPLL_REGM_DSI, &regm_dsi_start,
			&regm_dsi_end);

	/* DSI_PLL_AUTOMODE = manual */
	REG_FLD_MOD(dsidev, DSI_PLL_CONTROL, 0, 0, 0);

	l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION1);
	l = FLD_MOD(l, 1, 0, 0);		/* DSI_PLL_STOPMODE */
	/* DSI_PLL_REGN */
	l = FLD_MOD(l, cinfo->regn - 1, regn_start, regn_end);
	/* DSI_PLL_REGM */
	l = FLD_MOD(l, cinfo->regm, regm_start, regm_end);
	/* DSI_CLOCK_DIV */
	l = FLD_MOD(l, cinfo->regm_dispc > 0 ? cinfo->regm_dispc - 1 : 0,
			regm_dispc_start, regm_dispc_end);
	/* DSIPROTO_CLOCK_DIV */
	l = FLD_MOD(l, cinfo->regm_dsi > 0 ? cinfo->regm_dsi - 1 : 0,
			regm_dsi_start, regm_dsi_end);
	dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION1, l);

	BUG_ON(cinfo->fint < dsi->fint_min || cinfo->fint > dsi->fint_max);

	if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) {
		f = cinfo->fint < 1000000 ? 0x3 :
			cinfo->fint < 1250000 ? 0x4 :
			cinfo->fint < 1500000 ? 0x5 :
			cinfo->fint < 1750000 ? 0x6 :
			0x7;
	}

	l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);

	if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
		l = FLD_MOD(l, f, 4, 1);	/* DSI_PLL_FREQSEL */
	l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
			11, 11);		/* DSI_PLL_CLKSEL */
	l = FLD_MOD(l, cinfo->highfreq,
			12, 12);		/* DSI_PLL_HIGHFREQ */
	l = FLD_MOD(l, 1, 13, 13);		/* DSI_PLL_REFEN */
	l = FLD_MOD(l, 0, 14, 14);		/* DSIPHY_CLKINEN */
	l = FLD_MOD(l, 1, 20, 20);		/* DSI_HSDIVBYPASS */
	dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);

	REG_FLD_MOD(dsidev, DSI_PLL_GO, 1, 0, 0);	/* DSI_PLL_GO */

	if (wait_for_bit_change(dsidev, DSI_PLL_GO, 0, 0) != 0) {
		DSSERR("dsi pll go bit not going down.\n");
		r = -EIO;
		goto err;
	}

	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) {
		DSSERR("cannot lock PLL\n");
		r = -EIO;
		goto err;
	}

	dsi->pll_locked = 1;

	l = dsi_read_reg(dsidev, DSI_PLL_CONFIGURATION2);
	l = FLD_MOD(l, 0, 0, 0);	/* DSI_PLL_IDLE */
	l = FLD_MOD(l, 0, 5, 5);	/* DSI_PLL_PLLLPMODE */
	l = FLD_MOD(l, 0, 6, 6);	/* DSI_PLL_LOWCURRSTBY */
	l = FLD_MOD(l, 0, 7, 7);	/* DSI_PLL_TIGHTPHASELOCK */
	l = FLD_MOD(l, 0, 8, 8);	/* DSI_PLL_DRIFTGUARDEN */
	l = FLD_MOD(l, 0, 10, 9);	/* DSI_PLL_LOCKSEL */
	l = FLD_MOD(l, 1, 13, 13);	/* DSI_PLL_REFEN */
	l = FLD_MOD(l, 1, 14, 14);	/* DSIPHY_CLKINEN */
	l = FLD_MOD(l, 0, 15, 15);	/* DSI_BYPASSEN */
	l = FLD_MOD(l, 1, 16, 16);	/* DSS_CLOCK_EN */
	l = FLD_MOD(l, 0, 17, 17);	/* DSS_CLOCK_PWDN */
	l = FLD_MOD(l, 1, 18, 18);	/* DSI_PROTO_CLOCK_EN */
	l = FLD_MOD(l, 0, 19, 19);	/* DSI_PROTO_CLOCK_PWDN */
	l = FLD_MOD(l, 0, 20, 20);	/* DSI_HSDIVBYPASS */
	dsi_write_reg(dsidev, DSI_PLL_CONFIGURATION2, l);

	DSSDBG("PLL config done\n");
err:
	return r;
}

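/*
 * Enable the resources needed for DSI PLL use: the vdds_dsi regulator,
 * the PLL functional clock and the SCP interface clock (required on
 * OMAP4), then bring the PLL out of reset and power up the requested
 * HSCLK/HSDIV outputs. On failure everything taken so far is released.
 */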
int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk,
		bool enable_hsdiv)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r = 0;
	enum dsi_pll_power_state pwstate;

	DSSDBG("PLL init\n");

	if (dsi->vdds_dsi_reg == NULL) {
		struct regulator *vdds_dsi;

		vdds_dsi = regulator_get(&dsi->pdev->dev, "vdds_dsi");

		if (IS_ERR(vdds_dsi)) {
			DSSERR("can't get VDDS_DSI regulator\n");
			return PTR_ERR(vdds_dsi);
		}

		dsi->vdds_dsi_reg = vdds_dsi;
	}

	dsi_enable_pll_clock(dsidev, 1);
	/*
	 * Note: SCP CLK is not required on OMAP3, but it is required on OMAP4.
	 */
	dsi_enable_scp_clk(dsidev);

	if (!dsi->vdds_dsi_enabled) {
		r = regulator_enable(dsi->vdds_dsi_reg);
		if (r)
			goto err0;
		dsi->vdds_dsi_enabled = true;
	}

	/* XXX PLL does not come out of reset without this... */
	dispc_pck_free_enable(1);

	if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 0, 1) != 1) {
		DSSERR("PLL not coming out of reset.\n");
		r = -ENODEV;
		dispc_pck_free_enable(0);
		goto err1;
	}

	/* XXX ... but if left on, we get problems when planes do not
	 * fill the whole display. No idea about this */
	dispc_pck_free_enable(0);

	if (enable_hsclk && enable_hsdiv)
		pwstate = DSI_PLL_POWER_ON_ALL;
	else if (enable_hsclk)
		pwstate = DSI_PLL_POWER_ON_HSCLK;
	else if (enable_hsdiv)
		pwstate = DSI_PLL_POWER_ON_DIV;
	else
		pwstate = DSI_PLL_POWER_OFF;

	r = dsi_pll_power(dsidev, pwstate);
	if (r)
		goto err1;

	DSSDBG("PLL init done\n");

	return 0;
err1:
	if (dsi->vdds_dsi_enabled) {
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}
err0:
	dsi_disable_scp_clk(dsidev);
	dsi_enable_pll_clock(dsidev, 0);
	return r;
}

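/*
 * Power the PLL down and release the clocks taken in dsi_pll_init().
 * If disconnect_lanes is set, the vdds_dsi regulator is disabled as well.
 */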
void dsi_pll_uninit(struct platform_device *dsidev, bool disconnect_lanes)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	dsi->pll_locked = 0;
	dsi_pll_power(dsidev, DSI_PLL_POWER_OFF);
	if (disconnect_lanes) {
		WARN_ON(!dsi->vdds_dsi_enabled);
		regulator_disable(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_enabled = false;
	}

	dsi_disable_scp_clk(dsidev);
	dsi_enable_pll_clock(dsidev, 0);

	DSSDBG("PLL uninit done\n");
}

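/*
 * debugfs helper: print the current PLL dividers and the derived clock
 * rates (CLKIN4DDR, DSI_FCLK, TxByteClkHS, LP_CLK) for one DSI module.
 */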
static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct dsi_clock_info *cinfo = &dsi->current_cinfo;
	enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
	int dsi_module = dsi_get_dsidev_id(dsidev);

	dispc_clk_src = dss_get_dispc_clk_source();
	dsi_clk_src = dss_get_dsi_clk_source(dsi_module);

	if (dsi_runtime_get(dsidev))
		return;

	seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);

	seq_printf(s, "dsi pll source = %s\n",
			cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");

	seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);

	seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
			cinfo->clkin4ddr, cinfo->regm);

	seq_printf(s, "%s (%s)\t%-16luregm_dispc %u\t(%s)\n",
			dss_get_generic_clk_source_name(dispc_clk_src),
			dss_feat_get_clk_source_name(dispc_clk_src),
			cinfo->dsi_pll_hsdiv_dispc_clk,
			cinfo->regm_dispc,
			dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n",
			dss_get_generic_clk_source_name(dsi_clk_src),
			dss_feat_get_clk_source_name(dsi_clk_src),
			cinfo->dsi_pll_hsdiv_dsi_clk,
			cinfo->regm_dsi,
			dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
			"off" : "on");

	seq_printf(s, "- DSI%d -\n", dsi_module + 1);

	seq_printf(s, "dsi fclk source = %s (%s)\n",
			dss_get_generic_clk_source_name(dsi_clk_src),
			dss_feat_get_clk_source_name(dsi_clk_src));

	seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));

	seq_printf(s, "DDR_CLK\t\t%lu\n",
			cinfo->clkin4ddr / 4);

	seq_printf(s, "TxByteClkHS\t%lu\n", dsi_get_txbyteclkhs(dsidev));

	seq_printf(s, "LP_CLK\t\t%lu\n", cinfo->lp_clk);

	dsi_runtime_put(dsidev);
}

void dsi_dump_clocks(struct seq_file *s)
{
	struct platform_device *dsidev;
	int i;

	for (i = 0; i < MAX_NUM_DSI; i++) {
		dsidev = dsi_get_dsidev_from_id(i);
		if (dsidev)
			dsi_dump_dsidev_clocks(dsidev, s);
	}
}

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
		struct seq_file *s)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	unsigned long flags;
	struct dsi_irq_stats stats;
	int dsi_module = dsi_get_dsidev_id(dsidev);

	spin_lock_irqsave(&dsi->irq_stats_lock, flags);

	stats = dsi->irq_stats;
	memset(&dsi->irq_stats, 0, sizeof(dsi->irq_stats));
	dsi->irq_stats.last_reset = jiffies;

	spin_unlock_irqrestore(&dsi->irq_stats_lock, flags);

	seq_printf(s, "period %u ms\n",
			jiffies_to_msecs(jiffies - stats.last_reset));

	seq_printf(s, "irqs %d\n", stats.irq_count);
#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);

	seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1);
	PIS(VC0);
	PIS(VC1);
	PIS(VC2);
	PIS(VC3);
	PIS(WAKEUP);
	PIS(RESYNC);
	PIS(PLL_LOCK);
	PIS(PLL_UNLOCK);
	PIS(PLL_RECALL);
	PIS(COMPLEXIO_ERR);
	PIS(HS_TX_TIMEOUT);
	PIS(LP_RX_TIMEOUT);
	PIS(TE_TRIGGER);
	PIS(ACK_TRIGGER);
	PIS(SYNC_LOST);
	PIS(LDO_POWER_GOOD);
	PIS(TA_TIMEOUT);
#undef PIS

#define PIS(x) \
	seq_printf(s, "%-20s %10d %10d %10d %10d\n", #x, \
			stats.vc_irqs[0][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[1][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[2][ffs(DSI_VC_IRQ_##x)-1], \
			stats.vc_irqs[3][ffs(DSI_VC_IRQ_##x)-1]);

	seq_printf(s, "-- VC interrupts --\n");
	PIS(CS);
	PIS(ECC_CORR);
	PIS(PACKET_SENT);
	PIS(FIFO_TX_OVF);
	PIS(FIFO_RX_OVF);
	PIS(BTA);
	PIS(ECC_NO_CORR);
	PIS(FIFO_TX_UDF);
	PIS(PP_BUSY_CHANGE);
#undef PIS

#define PIS(x) \
	seq_printf(s, "%-20s %10d\n", #x, \
			stats.cio_irqs[ffs(DSI_CIO_IRQ_##x)-1]);

	seq_printf(s, "-- CIO interrupts --\n");
	PIS(ERRSYNCESC1);
	PIS(ERRSYNCESC2);
	PIS(ERRSYNCESC3);
	PIS(ERRESC1);
	PIS(ERRESC2);
	PIS(ERRESC3);
	PIS(ERRCONTROL1);
	PIS(ERRCONTROL2);
	PIS(ERRCONTROL3);
	PIS(STATEULPS1);
	PIS(STATEULPS2);
	PIS(STATEULPS3);
	PIS(ERRCONTENTIONLP0_1);
	PIS(ERRCONTENTIONLP1_1);
	PIS(ERRCONTENTIONLP0_2);
	PIS(ERRCONTENTIONLP1_2);
	PIS(ERRCONTENTIONLP0_3);
	PIS(ERRCONTENTIONLP1_3);
	PIS(ULPSACTIVENOT_ALL0);
	PIS(ULPSACTIVENOT_ALL1);
#undef PIS
}

static void dsi1_dump_irqs(struct seq_file *s)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_id(0);

	dsi_dump_dsidev_irqs(dsidev, s);
}

static void dsi2_dump_irqs(struct seq_file *s)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_id(1);

	dsi_dump_dsidev_irqs(dsidev, s);
}

void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
		const struct file_operations *debug_fops)
{
	struct platform_device *dsidev;

	dsidev = dsi_get_dsidev_from_id(0);
	if (dsidev)
		debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
			&dsi1_dump_irqs, debug_fops);

	dsidev = dsi_get_dsidev_from_id(1);
	if (dsidev)
		debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
			&dsi2_dump_irqs, debug_fops);
}
#endif

static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
		struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsidev, r))

	if (dsi_runtime_get(dsidev))
		return;
	dsi_enable_scp_clk(dsidev);

	DUMPREG(DSI_REVISION);
	DUMPREG(DSI_SYSCONFIG);
	DUMPREG(DSI_SYSSTATUS);
	DUMPREG(DSI_IRQSTATUS);
	DUMPREG(DSI_IRQENABLE);
	DUMPREG(DSI_CTRL);
	DUMPREG(DSI_COMPLEXIO_CFG1);
	DUMPREG(DSI_COMPLEXIO_IRQ_STATUS);
	DUMPREG(DSI_COMPLEXIO_IRQ_ENABLE);
	DUMPREG(DSI_CLK_CTRL);
	DUMPREG(DSI_TIMING1);
	DUMPREG(DSI_TIMING2);
	DUMPREG(DSI_VM_TIMING1);
	DUMPREG(DSI_VM_TIMING2);
	DUMPREG(DSI_VM_TIMING3);
	DUMPREG(DSI_CLK_TIMING);
	DUMPREG(DSI_TX_FIFO_VC_SIZE);
	DUMPREG(DSI_RX_FIFO_VC_SIZE);
	DUMPREG(DSI_COMPLEXIO_CFG2);
	DUMPREG(DSI_RX_FIFO_VC_FULLNESS);
	DUMPREG(DSI_VM_TIMING4);
	DUMPREG(DSI_TX_FIFO_VC_EMPTINESS);
	DUMPREG(DSI_VM_TIMING5);
	DUMPREG(DSI_VM_TIMING6);
	DUMPREG(DSI_VM_TIMING7);
	DUMPREG(DSI_STOPCLK_TIMING);

	DUMPREG(DSI_VC_CTRL(0));
	DUMPREG(DSI_VC_TE(0));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(0));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(0));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(0));
	DUMPREG(DSI_VC_IRQSTATUS(0));
	DUMPREG(DSI_VC_IRQENABLE(0));

	DUMPREG(DSI_VC_CTRL(1));
	DUMPREG(DSI_VC_TE(1));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(1));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(1));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(1));
	DUMPREG(DSI_VC_IRQSTATUS(1));
	DUMPREG(DSI_VC_IRQENABLE(1));

	DUMPREG(DSI_VC_CTRL(2));
	DUMPREG(DSI_VC_TE(2));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(2));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(2));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(2));
	DUMPREG(DSI_VC_IRQSTATUS(2));
	DUMPREG(DSI_VC_IRQENABLE(2));

	DUMPREG(DSI_VC_CTRL(3));
	DUMPREG(DSI_VC_TE(3));
	DUMPREG(DSI_VC_LONG_PACKET_HEADER(3));
	DUMPREG(DSI_VC_LONG_PACKET_PAYLOAD(3));
	DUMPREG(DSI_VC_SHORT_PACKET_HEADER(3));
	DUMPREG(DSI_VC_IRQSTATUS(3));
	DUMPREG(DSI_VC_IRQENABLE(3));

	DUMPREG(DSI_DSIPHY_CFG0);
	DUMPREG(DSI_DSIPHY_CFG1);
	DUMPREG(DSI_DSIPHY_CFG2);
	DUMPREG(DSI_DSIPHY_CFG5);

	DUMPREG(DSI_PLL_CONTROL);
	DUMPREG(DSI_PLL_STATUS);
	DUMPREG(DSI_PLL_GO);
	DUMPREG(DSI_PLL_CONFIGURATION1);
	DUMPREG(DSI_PLL_CONFIGURATION2);

	dsi_disable_scp_clk(dsidev);
	dsi_runtime_put(dsidev);
#undef DUMPREG
}

static void dsi1_dump_regs(struct seq_file *s)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_id(0);

	dsi_dump_dsidev_regs(dsidev, s);
}

static void dsi2_dump_regs(struct seq_file *s)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_id(1);

	dsi_dump_dsidev_regs(dsidev, s);
}

void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
		const struct file_operations *debug_fops)
{
	struct platform_device *dsidev;

	dsidev = dsi_get_dsidev_from_id(0);
	if (dsidev)
		debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
			&dsi1_dump_regs, debug_fops);

	dsidev = dsi_get_dsidev_from_id(1);
	if (dsidev)
		debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
			&dsi2_dump_regs, debug_fops);
}

enum dsi_cio_power_state {
	DSI_COMPLEXIO_POWER_OFF		= 0x0,
	DSI_COMPLEXIO_POWER_ON		= 0x1,
	DSI_COMPLEXIO_POWER_ULPS	= 0x2,
};

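/*
 * Request a new power state for the DSI complex I/O (PWR_CMD in
 * DSI_COMPLEXIO_CFG1) and busy-wait up to 1000 us for PWR_STATUS to
 * report that the transition has completed.
 */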
static int dsi_cio_power(struct platform_device *dsidev,
		enum dsi_cio_power_state state)
{
	int t = 0;

	/* PWR_CMD */
	REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG1, state, 28, 27);

	/* PWR_STATUS */
	while (FLD_GET(dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1),
			26, 25) != state) {
		if (++t > 1000) {
			DSSERR("failed to set complexio power state to "
					"%d\n", state);
			return -ENODEV;
		}
		udelay(1);
	}

	return 0;
}

/* Number of data lanes present on DSI interface */
static inline int dsi_get_num_data_lanes(struct platform_device *dsidev)
{
	/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
	 * of data lanes as 2 by default */
	if (dss_has_feature(FEAT_DSI_GNQ))
		return REG_GET(dsidev, DSI_GNQ, 11, 9);	/* NB_DATA_LANES */
	else
		return 2;
}

/* Number of data lanes used by the dss device */
static inline int dsi_get_num_data_lanes_dssdev(struct omap_dss_device *dssdev)
{
	int num_data_lanes = 0;

	if (dssdev->phy.dsi.data1_lane != 0)
		num_data_lanes++;
	if (dssdev->phy.dsi.data2_lane != 0)
		num_data_lanes++;
	if (dssdev->phy.dsi.data3_lane != 0)
		num_data_lanes++;
	if (dssdev->phy.dsi.data4_lane != 0)
		num_data_lanes++;

	return num_data_lanes;
}

static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
{
	int val;

	/* line buffer on OMAP3 is 1024 x 24bits */
	/* XXX: for some reason using full buffer size causes
	 * considerable TX slowdown with update sizes that fill the
	 * whole buffer */
	if (!dss_has_feature(FEAT_DSI_GNQ))
		return 1023 * 3;

	val = REG_GET(dsidev, DSI_GNQ, 14, 12);	/* VP1_LINE_BUFFER_SIZE */

	switch (val) {
	case 1:
		return 512 * 3;		/* 512x24 bits */
	case 2:
		return 682 * 3;		/* 682x24 bits */
	case 3:
		return 853 * 3;		/* 853x24 bits */
	case 4:
		return 1024 * 3;	/* 1024x24 bits */
	case 5:
		return 1194 * 3;	/* 1194x24 bits */
	case 6:
		return 1365 * 3;	/* 1365x24 bits */
	default:
		BUG();
	}
}

static void dsi_set_lane_config(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	u32 r;
	int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);

	int clk_lane   = dssdev->phy.dsi.clk_lane;
	int data1_lane = dssdev->phy.dsi.data1_lane;
	int data2_lane = dssdev->phy.dsi.data2_lane;
	int clk_pol    = dssdev->phy.dsi.clk_pol;
	int data1_pol  = dssdev->phy.dsi.data1_pol;
	int data2_pol  = dssdev->phy.dsi.data2_pol;

	r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
	r = FLD_MOD(r, clk_lane, 2, 0);
	r = FLD_MOD(r, clk_pol, 3, 3);
	r = FLD_MOD(r, data1_lane, 6, 4);
	r = FLD_MOD(r, data1_pol, 7, 7);
	r = FLD_MOD(r, data2_lane, 10, 8);
	r = FLD_MOD(r, data2_pol, 11, 11);

	if (num_data_lanes_dssdev > 2) {
		int data3_lane = dssdev->phy.dsi.data3_lane;
		int data3_pol  = dssdev->phy.dsi.data3_pol;

		r = FLD_MOD(r, data3_lane, 14, 12);
		r = FLD_MOD(r, data3_pol, 15, 15);
	}

	if (num_data_lanes_dssdev > 3) {
		int data4_lane = dssdev->phy.dsi.data4_lane;
		int data4_pol  = dssdev->phy.dsi.data4_pol;

		r = FLD_MOD(r, data4_lane, 18, 16);
		r = FLD_MOD(r, data4_pol, 19, 19);
	}
	dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);

	/* The configuration of the DSI complex I/O (number of data lanes,
	   position, differential order) should not be changed while
	   DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
	   the hardware to take into account a new configuration of the complex
	   I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
	   follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
	   then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
	   DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
	   DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
	   DSI complex I/O configuration is unknown. */

	/*
	REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
	REG_FLD_MOD(dsidev, DSI_CTRL, 0, 0, 0);
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20);
	REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
	*/
}

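/*
 * Conversion helpers between nanoseconds and DDR clock ticks. The DDR
 * clock is CLKIN4DDR / 4, so one tick lasts 4 / clkin4ddr seconds;
 * ns2ddr() rounds up so that programmed D-PHY timings never fall below
 * the requested duration.
 */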
static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	/* convert time in ns to ddr ticks, rounding up */
	unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
	return (ns * (ddr_clk / 1000 / 1000) + 999) / 1000;
}

static inline unsigned ddr2ns(struct platform_device *dsidev, unsigned ddr)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	unsigned long ddr_clk = dsi->current_cinfo.clkin4ddr / 4;
	return ddr * 1000 * 1000 / (ddr_clk / 1000);
}

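/*
 * Program the D-PHY timing parameters (THS-PREPARE, THS-TRAIL, TCLK-ZERO,
 * and friends) into DSI_DSIPHY_CFG0..2. The values are derived from the
 * D-PHY minimum/maximum limits quoted in the comments below, converted
 * to DDR clock ticks with ns2ddr().
 */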
static void dsi_cio_timings(struct platform_device *dsidev)
{
	u32 r;
	u32 ths_prepare, ths_prepare_ths_zero, ths_trail, ths_exit;
	u32 tlpx_half, tclk_trail, tclk_zero;
	u32 tclk_prepare;

	/* calculate timings */

	/* 1 * DDR_CLK = 2 * UI */

	/* min 40ns + 4*UI	max 85ns + 6*UI */
	ths_prepare = ns2ddr(dsidev, 70) + 2;

	/* min 145ns + 10*UI */
	ths_prepare_ths_zero = ns2ddr(dsidev, 175) + 2;

	/* min max(8*UI, 60ns+4*UI) */
	ths_trail = ns2ddr(dsidev, 60) + 5;

	/* min 100ns */
	ths_exit = ns2ddr(dsidev, 145);

	/* tlpx min 50n */
	tlpx_half = ns2ddr(dsidev, 25);

	/* min 60ns */
	tclk_trail = ns2ddr(dsidev, 60) + 2;

	/* min 38ns, max 95ns */
	tclk_prepare = ns2ddr(dsidev, 65);

	/* min tclk-prepare + tclk-zero = 300ns */
	tclk_zero = ns2ddr(dsidev, 260);

	DSSDBG("ths_prepare %u (%uns), ths_prepare_ths_zero %u (%uns)\n",
		ths_prepare, ddr2ns(dsidev, ths_prepare),
		ths_prepare_ths_zero, ddr2ns(dsidev, ths_prepare_ths_zero));
	DSSDBG("ths_trail %u (%uns), ths_exit %u (%uns)\n",
			ths_trail, ddr2ns(dsidev, ths_trail),
			ths_exit, ddr2ns(dsidev, ths_exit));
	DSSDBG("tlpx_half %u (%uns), tclk_trail %u (%uns), "
			"tclk_zero %u (%uns)\n",
			tlpx_half, ddr2ns(dsidev, tlpx_half),
			tclk_trail, ddr2ns(dsidev, tclk_trail),
			tclk_zero, ddr2ns(dsidev, tclk_zero));
	DSSDBG("tclk_prepare %u (%uns)\n",
			tclk_prepare, ddr2ns(dsidev, tclk_prepare));

	/* program timings */

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
	r = FLD_MOD(r, ths_prepare, 31, 24);
	r = FLD_MOD(r, ths_prepare_ths_zero, 23, 16);
	r = FLD_MOD(r, ths_trail, 15, 8);
	r = FLD_MOD(r, ths_exit, 7, 0);
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG0, r);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
	r = FLD_MOD(r, tlpx_half, 22, 16);
	r = FLD_MOD(r, tclk_trail, 15, 8);
	r = FLD_MOD(r, tclk_zero, 7, 0);
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG1, r);

	r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG2);
	r = FLD_MOD(r, tclk_prepare, 7, 0);
	dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
}

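/*
 * Force the selected lane pins to a fixed level through DSI_DSIPHY_CFG10
 * (REGLPTXSCPDAT4TO0DXDY). This is used to drive the Mark-1 state by hand
 * when exiting ULPS, since after a reset the DSS cannot generate the
 * normal ULPS exit sequence itself.
 */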
static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev,
		enum dsi_lane lanes)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int clk_lane   = dssdev->phy.dsi.clk_lane;
	int data1_lane = dssdev->phy.dsi.data1_lane;
	int data2_lane = dssdev->phy.dsi.data2_lane;
	int data3_lane = dssdev->phy.dsi.data3_lane;
	int data4_lane = dssdev->phy.dsi.data4_lane;
	int clk_pol    = dssdev->phy.dsi.clk_pol;
	int data1_pol  = dssdev->phy.dsi.data1_pol;
	int data2_pol  = dssdev->phy.dsi.data2_pol;
	int data3_pol  = dssdev->phy.dsi.data3_pol;
	int data4_pol  = dssdev->phy.dsi.data4_pol;

	u32 l = 0;
	u8 lptxscp_start = dsi->num_data_lanes == 2 ? 22 : 26;

	if (lanes & DSI_CLK_P)
		l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 0 : 1));
	if (lanes & DSI_CLK_N)
		l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 1 : 0));

	if (lanes & DSI_DATA1_P)
		l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 0 : 1));
	if (lanes & DSI_DATA1_N)
		l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 1 : 0));

	if (lanes & DSI_DATA2_P)
		l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 0 : 1));
	if (lanes & DSI_DATA2_N)
		l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 1 : 0));

	if (lanes & DSI_DATA3_P)
		l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 0 : 1));
	if (lanes & DSI_DATA3_N)
		l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 1 : 0));

	if (lanes & DSI_DATA4_P)
		l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 0 : 1));
	if (lanes & DSI_DATA4_N)
		l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 1 : 0));

	/*
	 * Bits in REGLPTXSCPDAT4TO0DXDY:
	 * 17: DY0 18: DX0
	 * 19: DY1 20: DX1
	 * 21: DY2 22: DX2
	 * 23: DY3 24: DX3
	 * 25: DY4 26: DX4
	 */

	/* Set the lane override configuration */

	/* REGLPTXSCPDAT4TO0DXDY */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, l, lptxscp_start, 17);

	/* Enable lane override */

	/* ENLPTXSCPDAT */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 1, 27, 27);
}

static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
{
	/* Disable lane override */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 27, 27); /* ENLPTXSCPDAT */
	/* Reset the lane override configuration */
	/* REGLPTXSCPDAT4TO0DXDY */
	REG_FLD_MOD(dsidev, DSI_DSIPHY_CFG10, 0, 22, 17);
}

static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	int t;
	int bits[3];
	bool in_use[3];

	if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
		bits[0] = 28;
		bits[1] = 27;
		bits[2] = 26;
	} else {
		bits[0] = 24;
		bits[1] = 25;
		bits[2] = 26;
	}

	in_use[0] = false;
	in_use[1] = false;
	in_use[2] = false;

	if (dssdev->phy.dsi.clk_lane != 0)
		in_use[dssdev->phy.dsi.clk_lane - 1] = true;
	if (dssdev->phy.dsi.data1_lane != 0)
		in_use[dssdev->phy.dsi.data1_lane - 1] = true;
	if (dssdev->phy.dsi.data2_lane != 0)
		in_use[dssdev->phy.dsi.data2_lane - 1] = true;

	t = 100000;
	while (true) {
		u32 l;
		int i;
		int ok;

		l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);

		ok = 0;
		for (i = 0; i < 3; ++i) {
			if (!in_use[i] || (l & (1 << bits[i])))
				ok++;
		}

		if (ok == 3)
			break;

		if (--t == 0) {
			for (i = 0; i < 3; ++i) {
				if (!in_use[i] || (l & (1 << bits[i])))
					continue;

				DSSERR("CIO TXCLKESC%d domain not coming " \
						"out of reset\n", i);
			}
			return -EIO;
		}
	}

	return 0;
}

static unsigned dsi_get_lane_mask(struct omap_dss_device *dssdev)
{
	unsigned lanes = 0;

	if (dssdev->phy.dsi.clk_lane != 0)
		lanes |= 1 << (dssdev->phy.dsi.clk_lane - 1);
	if (dssdev->phy.dsi.data1_lane != 0)
		lanes |= 1 << (dssdev->phy.dsi.data1_lane - 1);
	if (dssdev->phy.dsi.data2_lane != 0)
		lanes |= 1 << (dssdev->phy.dsi.data2_lane - 1);
	if (dssdev->phy.dsi.data3_lane != 0)
		lanes |= 1 << (dssdev->phy.dsi.data3_lane - 1);
	if (dssdev->phy.dsi.data4_lane != 0)
		lanes |= 1 << (dssdev->phy.dsi.data4_lane - 1);

	return lanes;
}

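/*
 * Bring up the DSI complex I/O: request the pads, enable the SCP clock,
 * wait for the PHY to leave reset, program the lane configuration and
 * D-PHY timings, and perform a manual ULPS exit if the lanes were left
 * in ULPS. On failure every step taken so far is undone.
 */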
static int dsi_cio_init(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int r;
	int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
	u32 l;

	DSSDBGF();

	r = dsi->enable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
	if (r)
		return r;

	dsi_enable_scp_clk(dsidev);

	/* A dummy read using the SCP interface to any DSIPHY register is
	 * required after DSIPHY reset to complete the reset of the DSI complex
	 * I/O. */
	dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);

	if (wait_for_bit_change(dsidev, DSI_DSIPHY_CFG5, 30, 1) != 1) {
		DSSERR("CIO SCP Clock domain not coming out of reset.\n");
		r = -EIO;
		goto err_scp_clk_dom;
	}

	dsi_set_lane_config(dssdev);

	/* set TX STOP MODE timer to maximum for this operation */
	l = dsi_read_reg(dsidev, DSI_TIMING1);
	l = FLD_MOD(l, 1, 15, 15);	/* FORCE_TX_STOP_MODE_IO */
	l = FLD_MOD(l, 1, 14, 14);	/* STOP_STATE_X16_IO */
	l = FLD_MOD(l, 1, 13, 13);	/* STOP_STATE_X4_IO */
	l = FLD_MOD(l, 0x1fff, 12, 0);	/* STOP_STATE_COUNTER_IO */
	dsi_write_reg(dsidev, DSI_TIMING1, l);

	if (dsi->ulps_enabled) {
		u32 lane_mask = DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P;

		DSSDBG("manual ulps exit\n");

		/* ULPS is exited by Mark-1 state for 1ms, followed by
		 * stop state. DSS HW cannot do this via the normal
		 * ULPS exit sequence, as after reset the DSS HW thinks
		 * that we are not in ULPS mode, and refuses to send the
		 * sequence. So we need to send the ULPS exit sequence
		 * manually.
		 */

		if (num_data_lanes_dssdev > 2)
			lane_mask |= DSI_DATA3_P;

		if (num_data_lanes_dssdev > 3)
			lane_mask |= DSI_DATA4_P;

		dsi_cio_enable_lane_override(dssdev, lane_mask);
	}

	r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
	if (r)
		goto err_cio_pwr;

	if (wait_for_bit_change(dsidev, DSI_COMPLEXIO_CFG1, 29, 1) != 1) {
		DSSERR("CIO PWR clock domain not coming out of reset.\n");
		r = -ENODEV;
		goto err_cio_pwr_dom;
	}

	dsi_if_enable(dsidev, true);
	dsi_if_enable(dsidev, false);
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20); /* LP_CLK_ENABLE */

	r = dsi_cio_wait_tx_clk_esc_reset(dssdev);
	if (r)
		goto err_tx_clk_esc_rst;

	if (dsi->ulps_enabled) {
		/* Keep Mark-1 state for 1ms (as per DSI spec) */
		ktime_t wait = ns_to_ktime(1000 * 1000);
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&wait, HRTIMER_MODE_REL);

		/* Disable the override. The lanes should be set to Mark-11
		 * state by the HW */
		dsi_cio_disable_lane_override(dsidev);
	}

	/* FORCE_TX_STOP_MODE_IO */
	REG_FLD_MOD(dsidev, DSI_TIMING1, 0, 15, 15);

	dsi_cio_timings(dsidev);

	dsi->ulps_enabled = false;

	DSSDBG("CIO init done\n");

	return 0;

err_tx_clk_esc_rst:
	REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 20, 20); /* LP_CLK_ENABLE */
err_cio_pwr_dom:
	dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
err_cio_pwr:
	if (dsi->ulps_enabled)
		dsi_cio_disable_lane_override(dsidev);
err_scp_clk_dom:
	dsi_disable_scp_clk(dsidev);
	dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
	return r;
}

static void dsi_cio_uninit(struct omap_dss_device *dssdev)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
	dsi_disable_scp_clk(dsidev);
	dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev));
}

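/*
 * Split the TX (or RX) FIFO between the four virtual channels. Each size
 * argument is given in units of 32 x 4 bytes; the per-VC start address is
 * the running sum of the preceding sizes, and the total must not exceed
 * the 4 available units.
 */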
static void dsi_config_tx_fifo(struct platform_device *dsidev,
		enum fifo_size size1, enum fifo_size size2,
		enum fifo_size size3, enum fifo_size size4)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 r = 0;
	int add = 0;
	int i;

	dsi->vc[0].fifo_size = size1;
	dsi->vc[1].fifo_size = size2;
	dsi->vc[2].fifo_size = size3;
	dsi->vc[3].fifo_size = size4;

	for (i = 0; i < 4; i++) {
		u8 v;
		int size = dsi->vc[i].fifo_size;

		if (add + size > 4) {
			DSSERR("Illegal FIFO configuration\n");
			BUG();
		}

		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
		r |= v << (8 * i);
		/*DSSDBG("TX FIFO vc %d: size %d, add %d\n", i, size, add); */
		add += size;
	}

	dsi_write_reg(dsidev, DSI_TX_FIFO_VC_SIZE, r);
}

static void dsi_config_rx_fifo(struct platform_device *dsidev,
		enum fifo_size size1, enum fifo_size size2,
		enum fifo_size size3, enum fifo_size size4)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 r = 0;
	int add = 0;
	int i;

	dsi->vc[0].fifo_size = size1;
	dsi->vc[1].fifo_size = size2;
	dsi->vc[2].fifo_size = size3;
	dsi->vc[3].fifo_size = size4;

	for (i = 0; i < 4; i++) {
		u8 v;
		int size = dsi->vc[i].fifo_size;

		if (add + size > 4) {
			DSSERR("Illegal FIFO configuration\n");
			BUG();
		}

		v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
		r |= v << (8 * i);
		/*DSSDBG("RX FIFO vc %d: size %d, add %d\n", i, size, add); */
		add += size;
	}

	dsi_write_reg(dsidev, DSI_RX_FIFO_VC_SIZE, r);
}

static int dsi_force_tx_stop_mode_io(struct platform_device *dsidev)
{
	u32 r;

	r = dsi_read_reg(dsidev, DSI_TIMING1);
	r = FLD_MOD(r, 1, 15, 15);	/* FORCE_TX_STOP_MODE_IO */
	dsi_write_reg(dsidev, DSI_TIMING1, r);

	if (wait_for_bit_change(dsidev, DSI_TIMING1, 15, 0) != 0) {
		DSSERR("TX_STOP bit not going down\n");
		return -EIO;
	}

	return 0;
}

static bool dsi_vc_is_enabled(struct platform_device *dsidev, int channel)
{
	return REG_GET(dsidev, DSI_VC_CTRL(channel), 0, 0);
}

static void dsi_packet_sent_handler_vp(void *data, u32 mask)
{
	struct dsi_packet_sent_handler_data *vp_data =
		(struct dsi_packet_sent_handler_data *) data;
	struct dsi_data *dsi = dsi_get_dsidrv_data(vp_data->dsidev);
	const int channel = dsi->update_channel;
	u8 bit = dsi->te_enabled ? 30 : 31;

	if (REG_GET(vp_data->dsidev, DSI_VC_TE(channel), bit, bit) == 0)
		complete(vp_data->completion);
}

static int dsi_sync_vc_vp(struct platform_device *dsidev, int channel)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	DECLARE_COMPLETION_ONSTACK(completion);
	struct dsi_packet_sent_handler_data vp_data = { dsidev, &completion };
	int r = 0;
	u8 bit;

	bit = dsi->te_enabled ? 30 : 31;

	r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
		&vp_data, DSI_VC_IRQ_PACKET_SENT);
	if (r)
		goto err0;

	/* Wait for completion only if TE_EN/TE_START is still set */
	if (REG_GET(dsidev, DSI_VC_TE(channel), bit, bit)) {
		if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(10)) == 0) {
			DSSERR("Failed to complete previous frame transfer\n");
			r = -EIO;
			goto err1;
		}
	}

	dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
		&vp_data, DSI_VC_IRQ_PACKET_SENT);

	return 0;
err1:
	dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_vp,
		&vp_data, DSI_VC_IRQ_PACKET_SENT);
err0:
	return r;
}

static void dsi_packet_sent_handler_l4(void *data, u32 mask)
{
	struct dsi_packet_sent_handler_data *l4_data =
		(struct dsi_packet_sent_handler_data *) data;
	struct dsi_data *dsi = dsi_get_dsidrv_data(l4_data->dsidev);
	const int channel = dsi->update_channel;

	if (REG_GET(l4_data->dsidev, DSI_VC_CTRL(channel), 5, 5) == 0)
		complete(l4_data->completion);
}

static int dsi_sync_vc_l4(struct platform_device *dsidev, int channel)
{
	DECLARE_COMPLETION_ONSTACK(completion);
	struct dsi_packet_sent_handler_data l4_data = { dsidev, &completion };
	int r = 0;

	r = dsi_register_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
		&l4_data, DSI_VC_IRQ_PACKET_SENT);
	if (r)
		goto err0;

	/* Wait for completion only if TX_FIFO_NOT_EMPTY is still set */
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 5, 5)) {
		if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(10)) == 0) {
			DSSERR("Failed to complete previous l4 transfer\n");
			r = -EIO;
			goto err1;
		}
	}

	dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
		&l4_data, DSI_VC_IRQ_PACKET_SENT);

	return 0;
err1:
	dsi_unregister_isr_vc(dsidev, channel, dsi_packet_sent_handler_l4,
		&l4_data, DSI_VC_IRQ_PACKET_SENT);
err0:
	return r;
}

static int dsi_sync_vc(struct platform_device *dsidev, int channel)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	WARN_ON(!dsi_bus_is_locked(dsidev));
	WARN_ON(in_interrupt());

	if (!dsi_vc_is_enabled(dsidev, channel))
		return 0;

	switch (dsi->vc[channel].source) {
	case DSI_VC_SOURCE_VP:
		return dsi_sync_vc_vp(dsidev, channel);
	case DSI_VC_SOURCE_L4:
		return dsi_sync_vc_l4(dsidev, channel);
	default:
		BUG();
	}
}

static int dsi_vc_enable(struct platform_device *dsidev, int channel,
		bool enable)
{
	DSSDBG("dsi_vc_enable channel %d, enable %d\n",
			channel, enable);

	enable = enable ? 1 : 0;

	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 0, 0);

	if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel),
		0, enable) != enable) {
		DSSERR("Failed to set dsi_vc_enable to %d\n", enable);
		return -EIO;
	}

	return 0;
}

static void dsi_vc_initial_config(struct platform_device *dsidev, int channel)
{
	u32 r;

	DSSDBGF("%d", channel);

	r = dsi_read_reg(dsidev, DSI_VC_CTRL(channel));

	if (FLD_GET(r, 15, 15)) /* VC_BUSY */
		DSSERR("VC(%d) busy when trying to configure it!\n",
				channel);

	r = FLD_MOD(r, 0, 1, 1); /* SOURCE, 0 = L4 */
	r = FLD_MOD(r, 0, 2, 2); /* BTA_SHORT_EN */
	r = FLD_MOD(r, 0, 3, 3); /* BTA_LONG_EN */
	r = FLD_MOD(r, 0, 4, 4); /* MODE, 0 = command */
	r = FLD_MOD(r, 1, 7, 7); /* CS_TX_EN */
	r = FLD_MOD(r, 1, 8, 8); /* ECC_TX_EN */
	r = FLD_MOD(r, 0, 9, 9); /* MODE_SPEED, high speed on/off */
	if (dss_has_feature(FEAT_DSI_VC_OCP_WIDTH))
		r = FLD_MOD(r, 3, 11, 10);	/* OCP_WIDTH = 32 bit */

	r = FLD_MOD(r, 4, 29, 27); /* DMA_RX_REQ_NB = no dma */
	r = FLD_MOD(r, 4, 23, 21); /* DMA_TX_REQ_NB = no dma */

	dsi_write_reg(dsidev, DSI_VC_CTRL(channel), r);
}

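/*
 * Switch a virtual channel between the L4 slave port and the video port.
 * The VC must be idle for the change to take effect, so any pending
 * transfer is synced first, the VC is disabled, and VC_BUSY is polled
 * before the SOURCE bit is rewritten and the VC re-enabled.
 */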
static int dsi_vc_config_source(struct platform_device *dsidev, int channel,
		enum dsi_vc_source source)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->vc[channel].source == source)
		return 0;

	DSSDBGF("%d", channel);

	dsi_sync_vc(dsidev, channel);

	dsi_vc_enable(dsidev, channel, 0);

	/* VC_BUSY */
	if (wait_for_bit_change(dsidev, DSI_VC_CTRL(channel), 15, 0) != 0) {
		DSSERR("vc(%d) busy when trying to config for VP\n", channel);
		return -EIO;
	}

	/* SOURCE, 0 = L4, 1 = video port */
	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), source, 1, 1);

	/* DCS_CMD_ENABLE */
	if (dss_has_feature(FEAT_DSI_DCS_CMD_CONFIG_VC)) {
		bool enable = source == DSI_VC_SOURCE_VP;
		REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 30, 30);
	}

	dsi_vc_enable(dsidev, channel, 1);

	dsi->vc[channel].source = source;

	return 0;
}

void omapdss_dsi_vc_enable_hs(struct omap_dss_device *dssdev, int channel,
		bool enable)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);

	DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);

	WARN_ON(!dsi_bus_is_locked(dsidev));

	dsi_vc_enable(dsidev, channel, 0);
	dsi_if_enable(dsidev, 0);

	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), enable, 9, 9);

	dsi_vc_enable(dsidev, channel, 1);
	dsi_if_enable(dsidev, 1);

	dsi_force_tx_stop_mode_io(dsidev);
}
EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs);

static void dsi_vc_flush_long_data(struct platform_device *dsidev, int channel)
{
	while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
		u32 val;
		val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
		DSSDBG("\t\tb1 %#02x b2 %#02x b3 %#02x b4 %#02x\n",
				(val >> 0) & 0xff,
				(val >> 8) & 0xff,
				(val >> 16) & 0xff,
				(val >> 24) & 0xff);
	}
}

static void dsi_show_rx_ack_with_err(u16 err)
{
	DSSERR("\tACK with ERROR (%#x):\n", err);
	if (err & (1 << 0))
		DSSERR("\t\tSoT Error\n");
	if (err & (1 << 1))
		DSSERR("\t\tSoT Sync Error\n");
	if (err & (1 << 2))
		DSSERR("\t\tEoT Sync Error\n");
	if (err & (1 << 3))
		DSSERR("\t\tEscape Mode Entry Command Error\n");
	if (err & (1 << 4))
		DSSERR("\t\tLP Transmit Sync Error\n");
	if (err & (1 << 5))
		DSSERR("\t\tHS Receive Timeout Error\n");
	if (err & (1 << 6))
		DSSERR("\t\tFalse Control Error\n");
	if (err & (1 << 7))
		DSSERR("\t\t(reserved7)\n");
	if (err & (1 << 8))
		DSSERR("\t\tECC Error, single-bit (corrected)\n");
	if (err & (1 << 9))
		DSSERR("\t\tECC Error, multi-bit (not corrected)\n");
	if (err & (1 << 10))
		DSSERR("\t\tChecksum Error\n");
	if (err & (1 << 11))
		DSSERR("\t\tData type not recognized\n");
	if (err & (1 << 12))
		DSSERR("\t\tInvalid VC ID\n");
	if (err & (1 << 13))
		DSSERR("\t\tInvalid Transmission Length\n");
	if (err & (1 << 14))
		DSSERR("\t\t(reserved14)\n");
	if (err & (1 << 15))
		DSSERR("\t\tDSI Protocol Violation\n");
}

static u16 dsi_vc_flush_receive_data(struct platform_device *dsidev,
		int channel)
{
	/* RX_FIFO_NOT_EMPTY */
	while (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
		u32 val;
		u8 dt;
		val = dsi_read_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel));
		DSSERR("\trawval %#08x\n", val);
		dt = FLD_GET(val, 5, 0);
		if (dt == MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT) {
			u16 err = FLD_GET(val, 23, 8);
			dsi_show_rx_ack_with_err(err);
		} else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE) {
			DSSERR("\tDCS short response, 1 byte: %#x\n",
					FLD_GET(val, 23, 8));
		} else if (dt == MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE) {
			DSSERR("\tDCS short response, 2 byte: %#x\n",
					FLD_GET(val, 23, 8));
		} else if (dt == MIPI_DSI_RX_DCS_LONG_READ_RESPONSE) {
			DSSERR("\tDCS long response, len %d\n",
					FLD_GET(val, 23, 8));
			dsi_vc_flush_long_data(dsidev, channel);
		} else {
			DSSERR("\tunknown datatype 0x%02x\n", dt);
		}
	}
	return 0;
}

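/*
 * Trigger a bus turn-around on the given virtual channel by setting
 * BTA_EN. Any stale data left in the RX FIFO is dumped and discarded
 * first, since it would otherwise be mixed with the response.
 */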
static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->debug_write || dsi->debug_read)
		DSSDBG("dsi_vc_send_bta %d\n", channel);

	WARN_ON(!dsi_bus_is_locked(dsidev));

	/* RX_FIFO_NOT_EMPTY */
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
		DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
		dsi_vc_flush_receive_data(dsidev, channel);
	}

	REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */

	return 0;
}

int dsi_vc_send_bta_sync(struct omap_dss_device *dssdev, int channel)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int r = 0;
	u32 err;

	r = dsi_register_isr_vc(dsidev, channel, dsi_completion_handler,
			&completion, DSI_VC_IRQ_BTA);
	if (r)
		goto err0;

	r = dsi_register_isr(dsidev, dsi_completion_handler, &completion,
			DSI_IRQ_ERROR_MASK);
	if (r)
		goto err1;

	r = dsi_vc_send_bta(dsidev, channel);
	if (r)
		goto err2;

	if (wait_for_completion_timeout(&completion,
				msecs_to_jiffies(500)) == 0) {
		DSSERR("Failed to receive BTA\n");
		r = -EIO;
		goto err2;
	}

	err = dsi_get_errors(dsidev);
	if (err) {
		DSSERR("Error while sending BTA: %x\n", err);
		r = -EIO;
		goto err2;
	}
err2:
	dsi_unregister_isr(dsidev, dsi_completion_handler, &completion,
			DSI_IRQ_ERROR_MASK);
err1:
	dsi_unregister_isr_vc(dsidev, channel, dsi_completion_handler,
			&completion, DSI_VC_IRQ_BTA);
err0:
	return r;
}
EXPORT_SYMBOL(dsi_vc_send_bta_sync);

static inline void dsi_vc_write_long_header(struct platform_device *dsidev,
		int channel, u8 data_type, u16 len, u8 ecc)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 val;
	u8 data_id;

	WARN_ON(!dsi_bus_is_locked(dsidev));

	data_id = data_type | dsi->vc[channel].vc_id << 6;

	val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
		FLD_VAL(ecc, 31, 24);

	dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_HEADER(channel), val);
}

static inline void dsi_vc_write_long_payload(struct platform_device *dsidev,
		int channel, u8 b1, u8 b2, u8 b3, u8 b4)
{
	u32 val;

	val = b4 << 24 | b3 << 16 | b2 << 8 | b1 << 0;

/*	DSSDBG("\twriting %02x, %02x, %02x, %02x (%#010x)\n",
			b1, b2, b3, b4, val); */

	dsi_write_reg(dsidev, DSI_VC_LONG_PACKET_PAYLOAD(channel), val);
}

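/*
 * Send a DSI long packet over the L4 interface: a 32-bit header (data ID,
 * 16-bit word count, ECC) followed by the payload pushed into the TX FIFO
 * four bytes at a time, with the trailing partial word zero-padded. The
 * packet (payload plus 4-byte header) must fit in the VC's TX FIFO share.
 */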
static int dsi_vc_send_long(struct platform_device *dsidev, int channel,
		u8 data_type, u8 *data, u16 len, u8 ecc)
{
	/*u32 val; */
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	int i;
	u8 *p;
	int r = 0;
	u8 b1, b2, b3, b4;

	if (dsi->debug_write)
		DSSDBG("dsi_vc_send_long, %d bytes\n", len);

	/* len + header */
	if (dsi->vc[channel].fifo_size * 32 * 4 < len + 4) {
		DSSERR("unable to send long packet: packet too long.\n");
		return -EINVAL;
	}

	dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);

	dsi_vc_write_long_header(dsidev, channel, data_type, len, ecc);

	p = data;
	for (i = 0; i < len >> 2; i++) {
		if (dsi->debug_write)
			DSSDBG("\tsending full packet %d\n", i);

		b1 = *p++;
		b2 = *p++;
		b3 = *p++;
		b4 = *p++;

		dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, b4);
	}

	i = len % 4;
	if (i) {
		b1 = 0; b2 = 0; b3 = 0;

		if (dsi->debug_write)
			DSSDBG("\tsending remainder bytes %d\n", i);

		switch (i) {
		case 3:
			b1 = *p++;
			b2 = *p++;
			b3 = *p++;
			break;
		case 2:
			b1 = *p++;
			b2 = *p++;
			break;
		case 1:
			b1 = *p++;
			break;
		}

		dsi_vc_write_long_payload(dsidev, channel, b1, b2, b3, 0);
	}

	return r;
}

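/*
 * Send a DSI short packet: the data ID, the two parameter bytes and the
 * ECC byte are written as a single word to the short packet header
 * register. The transfer is aborted if the TX FIFO is already full.
 */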
static int dsi_vc_send_short(struct platform_device *dsidev, int channel,
		u8 data_type, u16 data, u8 ecc)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u32 r;
	u8 data_id;

	WARN_ON(!dsi_bus_is_locked(dsidev));

	if (dsi->debug_write)
		DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
				channel,
				data_type, data & 0xff, (data >> 8) & 0xff);

	dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_L4);

	if (FLD_GET(dsi_read_reg(dsidev, DSI_VC_CTRL(channel)), 16, 16)) {
		DSSERR("ERROR FIFO FULL, aborting transfer\n");
		return -EINVAL;
	}

	data_id = data_type | dsi->vc[channel].vc_id << 6;

	r = (data_id << 0) | (data << 8) | (ecc << 24);

	dsi_write_reg(dsidev, DSI_VC_SHORT_PACKET_HEADER(channel), r);

	return 0;
}

2011-05-12 17:26:24 +05:30
int dsi_vc_send_null(struct omap_dss_device *dssdev, int channel)
2009-10-28 11:59:56 +02:00
{
2011-05-12 17:26:26 +05:30
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2009-10-28 11:59:56 +02:00
	u8 nullpkg[] = {0, 0, 0, 0};
2011-05-12 17:26:26 +05:30
2011-08-25 18:25:03 +05:30
	return dsi_vc_send_long(dsidev, channel, MIPI_DSI_NULL_PACKET, nullpkg,
2011-05-12 17:26:26 +05:30
		4, 0);
2009-10-28 11:59:56 +02:00
}
EXPORT_SYMBOL ( dsi_vc_send_null ) ;
2011-08-25 18:35:58 +05:30
static int dsi_vc_write_nosync_common(struct omap_dss_device *dssdev,
		int channel, u8 *data, int len, enum dss_dsi_content_type type)
2009-10-28 11:59:56 +02:00
{
2011-05-12 17:26:26 +05:30
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2009-10-28 11:59:56 +02:00
	int r;
2011-08-25 18:35:58 +05:30
	if (len == 0) {
		BUG_ON(type == DSS_DSI_CONTENT_DCS);
2011-08-25 18:25:03 +05:30
		r = dsi_vc_send_short(dsidev, channel,
2011-08-25 18:35:58 +05:30
				MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM, 0, 0);
	} else if (len == 1) {
		r = dsi_vc_send_short(dsidev, channel,
				type == DSS_DSI_CONTENT_GENERIC ?
				MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM :
2011-08-25 18:25:03 +05:30
				MIPI_DSI_DCS_SHORT_WRITE, data[0], 0);
2009-10-28 11:59:56 +02:00
	} else if (len == 2) {
2011-08-25 18:25:03 +05:30
		r = dsi_vc_send_short(dsidev, channel,
2011-08-25 18:35:58 +05:30
				type == DSS_DSI_CONTENT_GENERIC ?
				MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM :
2011-08-25 18:25:03 +05:30
				MIPI_DSI_DCS_SHORT_WRITE_PARAM,
2009-10-28 11:59:56 +02:00
				data[0] | (data[1] << 8), 0);
	} else {
2011-08-25 18:35:58 +05:30
		r = dsi_vc_send_long(dsidev, channel,
				type == DSS_DSI_CONTENT_GENERIC ?
				MIPI_DSI_GENERIC_LONG_WRITE :
				MIPI_DSI_DCS_LONG_WRITE, data, len, 0);
2009-10-28 11:59:56 +02:00
	}

	return r;
}
2011-08-25 18:35:58 +05:30
int dsi_vc_dcs_write_nosync(struct omap_dss_device *dssdev, int channel,
		u8 *data, int len)
{
	return dsi_vc_write_nosync_common(dssdev, channel, data, len,
			DSS_DSI_CONTENT_DCS);
}
2009-10-28 11:59:56 +02:00
EXPORT_SYMBOL ( dsi_vc_dcs_write_nosync ) ;
2011-08-25 18:35:58 +05:30
int dsi_vc_generic_write_nosync(struct omap_dss_device *dssdev, int channel,
		u8 *data, int len)
{
	return dsi_vc_write_nosync_common(dssdev, channel, data, len,
			DSS_DSI_CONTENT_GENERIC);
}
EXPORT_SYMBOL(dsi_vc_generic_write_nosync);

static int dsi_vc_write_common(struct omap_dss_device *dssdev, int channel,
		u8 *data, int len, enum dss_dsi_content_type type)
2009-10-28 11:59:56 +02:00
{
2011-05-12 17:26:26 +05:30
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2009-10-28 11:59:56 +02:00
	int r;
2011-08-25 18:35:58 +05:30
	r = dsi_vc_write_nosync_common(dssdev, channel, data, len, type);
2009-10-28 11:59:56 +02:00
	if (r)
2010-02-26 11:32:56 +02:00
		goto err;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:24 +05:30
	r = dsi_vc_send_bta_sync(dssdev, channel);
2010-02-26 11:32:56 +02:00
	if (r)
		goto err;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
	/* RX_FIFO_NOT_EMPTY */
	if (REG_GET(dsidev, DSI_VC_CTRL(channel), 20, 20)) {
2010-04-09 13:20:57 +03:00
		DSSERR("rx fifo not empty after write, dumping data:\n");
2011-05-12 17:26:26 +05:30
		dsi_vc_flush_receive_data(dsidev, channel);
2010-04-09 13:20:57 +03:00
		r = -EIO;
		goto err;
	}
2010-02-26 11:32:56 +02:00
	return 0;
err:
2011-08-25 18:35:58 +05:30
	DSSERR("dsi_vc_write_common(ch %d, cmd 0x%02x, len %d) failed\n",
2010-02-26 11:32:56 +02:00
			channel, len ? data[0] : 0, len);
2009-10-28 11:59:56 +02:00
	return r;
}
2011-08-25 18:35:58 +05:30
int dsi_vc_dcs_write(struct omap_dss_device *dssdev, int channel, u8 *data,
		int len)
{
	return dsi_vc_write_common(dssdev, channel, data, len,
			DSS_DSI_CONTENT_DCS);
}
2009-10-28 11:59:56 +02:00
EXPORT_SYMBOL(dsi_vc_dcs_write);
2011-08-25 18:35:58 +05:30
int dsi_vc_generic_write(struct omap_dss_device *dssdev, int channel, u8 *data,
		int len)
{
	return dsi_vc_write_common(dssdev, channel, data, len,
			DSS_DSI_CONTENT_GENERIC);
}
EXPORT_SYMBOL(dsi_vc_generic_write);
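/*
 * Illustrative sketch (not part of the driver): how a command-mode panel
 * driver might use the write helpers in this file to run a short init
 * sequence. The command bytes and payload are made-up values and the
 * example_* name is hypothetical; the DSI bus lock is assumed to be held
 * by the caller, as the WARN_ON(!dsi_bus_is_locked()) checks above require.
 */
#if 0
static int example_panel_init_sequence(struct omap_dss_device *dssdev,
		int channel)
{
	/* buf[0] is the DCS command byte, the rest are its parameters */
	u8 buf[4] = { 0xb0, 0x01, 0x02, 0x03 };
	int r;

	/* no-parameter DCS command: exit_sleep_mode (0x11) */
	r = dsi_vc_dcs_write_0(dssdev, channel, 0x11);
	if (r)
		return r;

	/* one-parameter DCS command: set_tear_on (0x35), vblank only */
	r = dsi_vc_dcs_write_1(dssdev, channel, 0x35, 0x00);
	if (r)
		return r;

	/* longer payloads go out as a DCS long write */
	return dsi_vc_dcs_write(dssdev, channel, buf, sizeof(buf));
}
#endif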
2011-05-12 17:26:24 +05:30
int dsi_vc_dcs_write_0 ( struct omap_dss_device * dssdev , int channel , u8 dcs_cmd )
2009-12-16 14:53:15 +02:00
{
2011-05-12 17:26:24 +05:30
return dsi_vc_dcs_write ( dssdev , channel , & dcs_cmd , 1 ) ;
2009-12-16 14:53:15 +02:00
}
EXPORT_SYMBOL ( dsi_vc_dcs_write_0 ) ;
2011-08-25 18:35:58 +05:30
int dsi_vc_generic_write_0 ( struct omap_dss_device * dssdev , int channel )
{
return dsi_vc_generic_write ( dssdev , channel , NULL , 0 ) ;
}
EXPORT_SYMBOL ( dsi_vc_generic_write_0 ) ;
2011-05-12 17:26:24 +05:30
int dsi_vc_dcs_write_1 ( struct omap_dss_device * dssdev , int channel , u8 dcs_cmd ,
u8 param )
2009-12-16 14:53:15 +02:00
{
u8 buf [ 2 ] ;
buf [ 0 ] = dcs_cmd ;
buf [ 1 ] = param ;
2011-05-12 17:26:24 +05:30
return dsi_vc_dcs_write ( dssdev , channel , buf , 2 ) ;
2009-12-16 14:53:15 +02:00
}
EXPORT_SYMBOL ( dsi_vc_dcs_write_1 ) ;
2011-08-25 18:35:58 +05:30
int dsi_vc_generic_write_1 ( struct omap_dss_device * dssdev , int channel ,
u8 param )
{
return dsi_vc_generic_write ( dssdev , channel , & param , 1 ) ;
}
EXPORT_SYMBOL ( dsi_vc_generic_write_1 ) ;
int dsi_vc_generic_write_2 ( struct omap_dss_device * dssdev , int channel ,
u8 param1 , u8 param2 )
{
u8 buf [ 2 ] ;
buf [ 0 ] = param1 ;
buf [ 1 ] = param2 ;
return dsi_vc_generic_write ( dssdev , channel , buf , 2 ) ;
}
EXPORT_SYMBOL ( dsi_vc_generic_write_2 ) ;
2011-08-30 15:48:23 +05:30
static int dsi_vc_dcs_send_read_request(struct omap_dss_device *dssdev,
		int channel, u8 dcs_cmd)
2009-10-28 11:59:56 +02:00
{
2011-05-12 17:26:26 +05:30
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2011-05-12 17:26:27 +05:30
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
2009-10-28 11:59:56 +02:00
	int r;
2011-05-12 17:26:27 +05:30
	if (dsi->debug_read)
2011-08-30 15:48:23 +05:30
		DSSDBG("dsi_vc_dcs_send_read_request(ch%d, dcs_cmd %x)\n",
			channel, dcs_cmd);
2009-10-28 11:59:56 +02:00
2011-08-25 18:25:03 +05:30
	r = dsi_vc_send_short(dsidev, channel, MIPI_DSI_DCS_READ, dcs_cmd, 0);
2011-08-30 15:48:23 +05:30
	if (r) {
		DSSERR("dsi_vc_dcs_send_read_request(ch %d, cmd 0x%02x)"
			" failed\n", channel, dcs_cmd);
		return r;
	}
2009-10-28 11:59:56 +02:00
2011-08-30 15:48:23 +05:30
	return 0;
}
2011-08-30 16:07:39 +05:30
static int dsi_vc_generic_send_read_request(struct omap_dss_device *dssdev,
		int channel, u8 *reqdata, int reqlen)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	u16 data;
	u8 data_type;
	int r;

	if (dsi->debug_read)
		DSSDBG("dsi_vc_generic_send_read_request(ch %d, reqlen %d)\n",
			channel, reqlen);

	if (reqlen == 0) {
		data_type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
		data = 0;
	} else if (reqlen == 1) {
		data_type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
		data = reqdata[0];
	} else if (reqlen == 2) {
		data_type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
		data = reqdata[0] | (reqdata[1] << 8);
	} else {
		BUG();
	}

	r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
	if (r) {
		DSSERR("dsi_vc_generic_send_read_request(ch %d, reqlen %d)"
			" failed\n", channel, reqlen);
		return r;
	}

	return 0;
}
static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
		u8 *buf, int buflen, enum dss_dsi_content_type type)
2011-08-30 15:48:23 +05:30
{
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
u32 val ;
u8 dt ;
int r ;
2009-10-28 11:59:56 +02:00
/* RX_FIFO_NOT_EMPTY */
2011-05-12 17:26:26 +05:30
if ( REG_GET ( dsidev , DSI_VC_CTRL ( channel ) , 20 , 20 ) = = 0 ) {
2009-10-28 11:59:56 +02:00
DSSERR ( " RX fifo empty when trying to read. \n " ) ;
2010-02-26 11:32:56 +02:00
r = - EIO ;
goto err ;
2009-10-28 11:59:56 +02:00
}
2011-05-12 17:26:26 +05:30
val = dsi_read_reg ( dsidev , DSI_VC_SHORT_PACKET_HEADER ( channel ) ) ;
2011-05-12 17:26:27 +05:30
if ( dsi - > debug_read )
2009-10-28 11:59:56 +02:00
DSSDBG ( " \t header: %08x \n " , val ) ;
dt = FLD_GET ( val , 5 , 0 ) ;
2011-08-25 18:25:03 +05:30
if ( dt = = MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT ) {
2009-10-28 11:59:56 +02:00
u16 err = FLD_GET ( val , 23 , 8 ) ;
dsi_show_rx_ack_with_err ( err ) ;
2010-02-26 11:32:56 +02:00
r = - EIO ;
goto err ;
2009-10-28 11:59:56 +02:00
2011-08-30 16:07:39 +05:30
} else if ( dt = = ( type = = DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE :
MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE ) ) {
2009-10-28 11:59:56 +02:00
u8 data = FLD_GET ( val , 15 , 8 ) ;
2011-05-12 17:26:27 +05:30
if ( dsi - > debug_read )
2011-08-30 16:07:39 +05:30
DSSDBG ( " \t %s short response, 1 byte: %02x \n " ,
type = = DSS_DSI_CONTENT_GENERIC ? " GENERIC " :
" DCS " , data ) ;
2009-10-28 11:59:56 +02:00
2010-02-26 11:32:56 +02:00
if ( buflen < 1 ) {
r = - EIO ;
goto err ;
}
2009-10-28 11:59:56 +02:00
buf [ 0 ] = data ;
return 1 ;
2011-08-30 16:07:39 +05:30
} else if ( dt = = ( type = = DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_2BYTE :
MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE ) ) {
2009-10-28 11:59:56 +02:00
u16 data = FLD_GET ( val , 23 , 8 ) ;
2011-05-12 17:26:27 +05:30
if ( dsi - > debug_read )
2011-08-30 16:07:39 +05:30
DSSDBG ( " \t %s short response, 2 byte: %04x \n " ,
type = = DSS_DSI_CONTENT_GENERIC ? " GENERIC " :
" DCS " , data ) ;
2009-10-28 11:59:56 +02:00
2010-02-26 11:32:56 +02:00
if ( buflen < 2 ) {
r = - EIO ;
goto err ;
}
2009-10-28 11:59:56 +02:00
buf [ 0 ] = data & 0xff ;
buf [ 1 ] = ( data > > 8 ) & 0xff ;
return 2 ;
2011-08-30 16:07:39 +05:30
} else if ( dt = = ( type = = DSS_DSI_CONTENT_GENERIC ?
MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE :
MIPI_DSI_RX_DCS_LONG_READ_RESPONSE ) ) {
2009-10-28 11:59:56 +02:00
int w ;
int len = FLD_GET ( val , 23 , 8 ) ;
2011-05-12 17:26:27 +05:30
if ( dsi - > debug_read )
2011-08-30 16:07:39 +05:30
DSSDBG ( " \t %s long response, len %d \n " ,
type = = DSS_DSI_CONTENT_GENERIC ? " GENERIC " :
" DCS " , len ) ;
2009-10-28 11:59:56 +02:00
2010-02-26 11:32:56 +02:00
if ( len > buflen ) {
r = - EIO ;
goto err ;
}
2009-10-28 11:59:56 +02:00
		/* two byte checksum ends the packet, not included in len */
		for (w = 0; w < len + 2;) {
			int b;
2011-05-12 17:26:26 +05:30
			val = dsi_read_reg(dsidev,
				DSI_VC_SHORT_PACKET_HEADER(channel));
2011-05-12 17:26:27 +05:30
			if (dsi->debug_read)
2009-10-28 11:59:56 +02:00
				DSSDBG("\t\t%02x %02x %02x %02x\n",
						(val >> 0) & 0xff,
						(val >> 8) & 0xff,
						(val >> 16) & 0xff,
						(val >> 24) & 0xff);

			for (b = 0; b < 4; ++b) {
				if (w < len)
					buf[w] = (val >> (b * 8)) & 0xff;
				/* we discard the 2 byte checksum */
				++w;
			}
		}

		return len;
} else {
DSSERR ( " \t unknown datatype 0x%02x \n " , dt ) ;
2010-02-26 11:32:56 +02:00
r = - EIO ;
goto err ;
2009-10-28 11:59:56 +02:00
}
2010-02-26 11:32:56 +02:00
BUG ( ) ;
err :
2011-08-30 16:07:39 +05:30
DSSERR ( " dsi_vc_read_rx_fifo(ch %d type %s) failed \n " , channel ,
type = = DSS_DSI_CONTENT_GENERIC ? " GENERIC " : " DCS " ) ;
2011-08-30 15:48:23 +05:30
2010-02-26 11:32:56 +02:00
return r ;
2011-08-30 15:48:23 +05:30
}
int dsi_vc_dcs_read(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
		u8 *buf, int buflen)
{
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	int r;

	r = dsi_vc_dcs_send_read_request(dssdev, channel, dcs_cmd);
	if (r)
		goto err;
2010-02-26 11:32:56 +02:00
2011-08-30 15:48:23 +05:30
	r = dsi_vc_send_bta_sync(dssdev, channel);
	if (r)
		goto err;
2011-08-30 16:07:39 +05:30
	r = dsi_vc_read_rx_fifo(dsidev, channel, buf, buflen,
		DSS_DSI_CONTENT_DCS);
2011-08-30 15:48:23 +05:30
	if (r < 0)
		goto err;

	if (r != buflen) {
		r = -EIO;
		goto err;
	}

	return 0;
err:
	DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n", channel, dcs_cmd);
	return r;
2009-10-28 11:59:56 +02:00
}
EXPORT_SYMBOL(dsi_vc_dcs_read);
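/*
 * Illustrative sketch (not part of the driver): reading the DCS
 * get_power_mode register (0x0a) from a panel. The example_* name is
 * hypothetical; dsi_vc_set_max_rx_packet_size() (defined below) is used
 * first so the peripheral returns exactly one byte, and the DSI bus lock
 * is assumed to be held by the caller.
 */
#if 0
static int example_panel_get_power_mode(struct omap_dss_device *dssdev,
		int channel, u8 *mode)
{
	int r;

	r = dsi_vc_set_max_rx_packet_size(dssdev, channel, 1);
	if (r)
		return r;

	return dsi_vc_dcs_read(dssdev, channel, 0x0a, mode, 1);
}
#endif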
2011-08-30 16:07:39 +05:30
static int dsi_vc_generic_read ( struct omap_dss_device * dssdev , int channel ,
u8 * reqdata , int reqlen , u8 * buf , int buflen )
{
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
int r ;
r = dsi_vc_generic_send_read_request ( dssdev , channel , reqdata , reqlen ) ;
if ( r )
return r ;
r = dsi_vc_send_bta_sync ( dssdev , channel ) ;
if ( r )
return r ;
r = dsi_vc_read_rx_fifo ( dsidev , channel , buf , buflen ,
DSS_DSI_CONTENT_GENERIC ) ;
if ( r < 0 )
return r ;
if ( r ! = buflen ) {
r = - EIO ;
return r ;
}
return 0 ;
}
int dsi_vc_generic_read_0 ( struct omap_dss_device * dssdev , int channel , u8 * buf ,
int buflen )
{
int r ;
r = dsi_vc_generic_read ( dssdev , channel , NULL , 0 , buf , buflen ) ;
if ( r ) {
DSSERR ( " dsi_vc_generic_read_0(ch %d) failed \n " , channel ) ;
return r ;
}
return 0 ;
}
EXPORT_SYMBOL ( dsi_vc_generic_read_0 ) ;
int dsi_vc_generic_read_1 ( struct omap_dss_device * dssdev , int channel , u8 param ,
u8 * buf , int buflen )
{
int r ;
r = dsi_vc_generic_read ( dssdev , channel , & param , 1 , buf , buflen ) ;
if ( r ) {
DSSERR ( " dsi_vc_generic_read_1(ch %d) failed \n " , channel ) ;
return r ;
}
return 0 ;
}
EXPORT_SYMBOL ( dsi_vc_generic_read_1 ) ;
int dsi_vc_generic_read_2 ( struct omap_dss_device * dssdev , int channel ,
u8 param1 , u8 param2 , u8 * buf , int buflen )
{
int r ;
u8 reqdata [ 2 ] ;
reqdata [ 0 ] = param1 ;
reqdata [ 1 ] = param2 ;
r = dsi_vc_generic_read ( dssdev , channel , reqdata , 2 , buf , buflen ) ;
if ( r ) {
DSSERR ( " dsi_vc_generic_read_2(ch %d) failed \n " , channel ) ;
return r ;
}
return 0 ;
}
EXPORT_SYMBOL ( dsi_vc_generic_read_2 ) ;
2011-05-12 17:26:24 +05:30
int dsi_vc_set_max_rx_packet_size(struct omap_dss_device *dssdev, int channel,
		u16 len)
2009-10-28 11:59:56 +02:00
{
2011-05-12 17:26:26 +05:30
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
2011-08-25 18:25:03 +05:30
	return dsi_vc_send_short(dsidev, channel,
			MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE, len, 0);
2009-10-28 11:59:56 +02:00
}
EXPORT_SYMBOL ( dsi_vc_set_max_rx_packet_size ) ;
2011-05-12 17:26:26 +05:30
static int dsi_enter_ulps ( struct platform_device * dsidev )
2010-07-28 15:53:38 +03:00
{
2011-05-12 17:26:27 +05:30
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2010-07-28 15:53:38 +03:00
DECLARE_COMPLETION_ONSTACK ( completion ) ;
int r ;
DSSDBGF ( ) ;
2011-05-12 17:26:26 +05:30
WARN_ON ( ! dsi_bus_is_locked ( dsidev ) ) ;
2010-07-28 15:53:38 +03:00
2011-05-12 17:26:27 +05:30
WARN_ON ( dsi - > ulps_enabled ) ;
2010-07-28 15:53:38 +03:00
2011-05-12 17:26:27 +05:30
if ( dsi - > ulps_enabled )
2010-07-28 15:53:38 +03:00
return 0 ;
2011-05-12 17:26:26 +05:30
if ( REG_GET ( dsidev , DSI_CLK_CTRL , 13 , 13 ) ) {
2010-07-28 15:53:38 +03:00
DSSERR ( " DDR_CLK_ALWAYS_ON enabled when entering ULPS \n " ) ;
return - EIO ;
}
2011-05-12 17:26:26 +05:30
dsi_sync_vc ( dsidev , 0 ) ;
dsi_sync_vc ( dsidev , 1 ) ;
dsi_sync_vc ( dsidev , 2 ) ;
dsi_sync_vc ( dsidev , 3 ) ;
2010-07-28 15:53:38 +03:00
2011-05-12 17:26:26 +05:30
dsi_force_tx_stop_mode_io ( dsidev ) ;
2010-07-28 15:53:38 +03:00
2011-05-12 17:26:26 +05:30
dsi_vc_enable ( dsidev , 0 , false ) ;
dsi_vc_enable ( dsidev , 1 , false ) ;
dsi_vc_enable ( dsidev , 2 , false ) ;
dsi_vc_enable ( dsidev , 3 , false ) ;
2010-07-28 15:53:38 +03:00
2011-05-12 17:26:26 +05:30
if ( REG_GET ( dsidev , DSI_COMPLEXIO_CFG2 , 16 , 16 ) ) { /* HS_BUSY */
2010-07-28 15:53:38 +03:00
DSSERR ( " HS busy when enabling ULPS \n " ) ;
return - EIO ;
}
2011-05-12 17:26:26 +05:30
if ( REG_GET ( dsidev , DSI_COMPLEXIO_CFG2 , 17 , 17 ) ) { /* LP_BUSY */
2010-07-28 15:53:38 +03:00
DSSERR ( " LP busy when enabling ULPS \n " ) ;
return - EIO ;
}
2011-05-12 17:26:26 +05:30
r = dsi_register_isr_cio ( dsidev , dsi_completion_handler , & completion ,
2010-07-28 15:53:38 +03:00
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 ) ;
if ( r )
return r ;
/* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
/* LANEx_ULPS_SIG2 */
2011-05-12 17:26:26 +05:30
REG_FLD_MOD ( dsidev , DSI_COMPLEXIO_CFG2 , ( 1 < < 0 ) | ( 1 < < 1 ) | ( 1 < < 2 ) ,
7 , 5 ) ;
2010-07-28 15:53:38 +03:00
if ( wait_for_completion_timeout ( & completion ,
msecs_to_jiffies ( 1000 ) ) = = 0 ) {
DSSERR ( " ULPS enable timeout \n " ) ;
r = - EIO ;
goto err ;
}
2011-05-12 17:26:26 +05:30
dsi_unregister_isr_cio ( dsidev , dsi_completion_handler , & completion ,
2010-07-28 15:53:38 +03:00
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 ) ;
2011-05-31 16:55:47 +03:00
/* Reset LANEx_ULPS_SIG2 */
REG_FLD_MOD ( dsidev , DSI_COMPLEXIO_CFG2 , ( 0 < < 0 ) | ( 0 < < 1 ) | ( 0 < < 2 ) ,
7 , 5 ) ;
2011-05-12 17:26:26 +05:30
dsi_cio_power ( dsidev , DSI_COMPLEXIO_POWER_ULPS ) ;
2010-07-28 15:53:38 +03:00
2011-05-12 17:26:26 +05:30
dsi_if_enable ( dsidev , false ) ;
2010-07-28 15:53:38 +03:00
2011-05-12 17:26:27 +05:30
dsi - > ulps_enabled = true ;
2010-07-28 15:53:38 +03:00
return 0 ;
err :
2011-05-12 17:26:26 +05:30
dsi_unregister_isr_cio ( dsidev , dsi_completion_handler , & completion ,
2010-07-28 15:53:38 +03:00
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0 ) ;
return r ;
}
2011-05-12 17:26:26 +05:30
static void dsi_set_lp_rx_timeout(struct platform_device *dsidev,
		unsigned ticks, bool x4, bool x16)
2009-10-28 11:59:56 +02:00
{
	unsigned long fck;
2010-04-12 10:40:12 +03:00
	unsigned long total_ticks;
	u32 r;
2009-10-28 11:59:56 +02:00
2010-04-12 10:40:12 +03:00
	BUG_ON(ticks > 0x1fff);
2009-10-28 11:59:56 +02:00
2010-04-12 10:40:12 +03:00
	/* ticks in DSI_FCK */
2011-05-12 17:26:26 +05:30
	fck = dsi_fclk_rate(dsidev);
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
	r = dsi_read_reg(dsidev, DSI_TIMING2);
2009-10-28 11:59:56 +02:00
	r = FLD_MOD(r, 1, 15, 15);	/* LP_RX_TO */
2010-04-12 10:40:12 +03:00
	r = FLD_MOD(r, x16 ? 1 : 0, 14, 14);	/* LP_RX_TO_X16 */
	r = FLD_MOD(r, x4 ? 1 : 0, 13, 13);	/* LP_RX_TO_X4 */
2009-10-28 11:59:56 +02:00
	r = FLD_MOD(r, ticks, 12, 0);	/* LP_RX_COUNTER */
2011-05-12 17:26:26 +05:30
	dsi_write_reg(dsidev, DSI_TIMING2, r);
2009-10-28 11:59:56 +02:00
2010-04-12 10:40:12 +03:00
	total_ticks = ticks * (x16 ? 16 : 1) * (x4 ? 4 : 1);

	DSSDBG("LP_RX_TO %lu ticks (%#x%s%s) = %lu ns\n",
			total_ticks,
			ticks, x4 ? " x4" : "", x16 ? " x16" : "",
			(total_ticks * 1000) / (fck / 1000 / 1000));
2009-10-28 11:59:56 +02:00
}
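/*
 * Worked example of the conversion above (the 173 MHz DSI functional clock
 * is only an assumed figure, used for illustration): with the ticks = 0x1fff,
 * x4 = true, x16 = true values that dsi_proto_config() passes below,
 *
 *	total_ticks = 0x1fff * 16 * 4 = 524224
 *	timeout     = 524224 * 1000 / (173000000 / 1000 / 1000) ns
 *	            = ~3.0 ms
 *
 * so the LP receive timeout is roughly 3 ms at that clock rate. The other
 * three timeout setters below follow the same ticks-times-prescaler scheme.
 */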
2011-05-12 17:26:26 +05:30
static void dsi_set_ta_timeout ( struct platform_device * dsidev , unsigned ticks ,
bool x8 , bool x16 )
2009-10-28 11:59:56 +02:00
{
unsigned long fck ;
2010-04-12 10:40:12 +03:00
unsigned long total_ticks ;
u32 r ;
BUG_ON ( ticks > 0x1fff ) ;
2009-10-28 11:59:56 +02:00
/* ticks in DSI_FCK */
2011-05-12 17:26:26 +05:30
fck = dsi_fclk_rate ( dsidev ) ;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
r = dsi_read_reg ( dsidev , DSI_TIMING1 ) ;
2009-10-28 11:59:56 +02:00
r = FLD_MOD ( r , 1 , 31 , 31 ) ; /* TA_TO */
2010-04-12 10:40:12 +03:00
r = FLD_MOD ( r , x16 ? 1 : 0 , 30 , 30 ) ; /* TA_TO_X16 */
r = FLD_MOD ( r , x8 ? 1 : 0 , 29 , 29 ) ; /* TA_TO_X8 */
2009-10-28 11:59:56 +02:00
r = FLD_MOD ( r , ticks , 28 , 16 ) ; /* TA_TO_COUNTER */
2011-05-12 17:26:26 +05:30
dsi_write_reg ( dsidev , DSI_TIMING1 , r ) ;
2009-10-28 11:59:56 +02:00
2010-04-12 10:40:12 +03:00
total_ticks = ticks * ( x16 ? 16 : 1 ) * ( x8 ? 8 : 1 ) ;
DSSDBG ( " TA_TO %lu ticks (%#x%s%s) = %lu ns \n " ,
total_ticks ,
ticks , x8 ? " x8 " : " " , x16 ? " x16 " : " " ,
( total_ticks * 1000 ) / ( fck / 1000 / 1000 ) ) ;
2009-10-28 11:59:56 +02:00
}
2011-05-12 17:26:26 +05:30
static void dsi_set_stop_state_counter ( struct platform_device * dsidev ,
unsigned ticks , bool x4 , bool x16 )
2009-10-28 11:59:56 +02:00
{
unsigned long fck ;
2010-04-12 10:40:12 +03:00
unsigned long total_ticks ;
u32 r ;
2009-10-28 11:59:56 +02:00
2010-04-12 10:40:12 +03:00
BUG_ON ( ticks > 0x1fff ) ;
2009-10-28 11:59:56 +02:00
2010-04-12 10:40:12 +03:00
/* ticks in DSI_FCK */
2011-05-12 17:26:26 +05:30
fck = dsi_fclk_rate ( dsidev ) ;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
r = dsi_read_reg ( dsidev , DSI_TIMING1 ) ;
2009-10-28 11:59:56 +02:00
r = FLD_MOD ( r , 1 , 15 , 15 ) ; /* FORCE_TX_STOP_MODE_IO */
2010-04-12 10:40:12 +03:00
r = FLD_MOD ( r , x16 ? 1 : 0 , 14 , 14 ) ; /* STOP_STATE_X16_IO */
r = FLD_MOD ( r , x4 ? 1 : 0 , 13 , 13 ) ; /* STOP_STATE_X4_IO */
2009-10-28 11:59:56 +02:00
r = FLD_MOD ( r , ticks , 12 , 0 ) ; /* STOP_STATE_COUNTER_IO */
2011-05-12 17:26:26 +05:30
dsi_write_reg ( dsidev , DSI_TIMING1 , r ) ;
2009-10-28 11:59:56 +02:00
2010-04-12 10:40:12 +03:00
total_ticks = ticks * ( x16 ? 16 : 1 ) * ( x4 ? 4 : 1 ) ;
DSSDBG ( " STOP_STATE_COUNTER %lu ticks (%#x%s%s) = %lu ns \n " ,
total_ticks ,
ticks , x4 ? " x4 " : " " , x16 ? " x16 " : " " ,
( total_ticks * 1000 ) / ( fck / 1000 / 1000 ) ) ;
2009-10-28 11:59:56 +02:00
}
2011-05-12 17:26:26 +05:30
static void dsi_set_hs_tx_timeout ( struct platform_device * dsidev ,
unsigned ticks , bool x4 , bool x16 )
2009-10-28 11:59:56 +02:00
{
unsigned long fck ;
2010-04-12 10:40:12 +03:00
unsigned long total_ticks ;
u32 r ;
2009-10-28 11:59:56 +02:00
2010-04-12 10:40:12 +03:00
BUG_ON ( ticks > 0x1fff ) ;
2009-10-28 11:59:56 +02:00
2010-04-12 10:40:12 +03:00
/* ticks in TxByteClkHS */
2011-05-12 17:26:26 +05:30
fck = dsi_get_txbyteclkhs ( dsidev ) ;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
r = dsi_read_reg ( dsidev , DSI_TIMING2 ) ;
2009-10-28 11:59:56 +02:00
r = FLD_MOD ( r , 1 , 31 , 31 ) ; /* HS_TX_TO */
2010-04-12 10:40:12 +03:00
r = FLD_MOD ( r , x16 ? 1 : 0 , 30 , 30 ) ; /* HS_TX_TO_X16 */
r = FLD_MOD ( r , x4 ? 1 : 0 , 29 , 29 ) ; /* HS_TX_TO_X8 (4 really) */
2009-10-28 11:59:56 +02:00
r = FLD_MOD ( r , ticks , 28 , 16 ) ; /* HS_TX_TO_COUNTER */
2011-05-12 17:26:26 +05:30
dsi_write_reg ( dsidev , DSI_TIMING2 , r ) ;
2009-10-28 11:59:56 +02:00
2010-04-12 10:40:12 +03:00
total_ticks = ticks * ( x16 ? 16 : 1 ) * ( x4 ? 4 : 1 ) ;
DSSDBG ( " HS_TX_TO %lu ticks (%#x%s%s) = %lu ns \n " ,
total_ticks ,
ticks , x4 ? " x4 " : " " , x16 ? " x16 " : " " ,
( total_ticks * 1000 ) / ( fck / 1000 / 1000 ) ) ;
2009-10-28 11:59:56 +02:00
}
static int dsi_proto_config ( struct omap_dss_device * dssdev )
{
2011-05-12 17:26:26 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
2009-10-28 11:59:56 +02:00
u32 r ;
int buswidth = 0 ;
2011-05-12 17:26:26 +05:30
dsi_config_tx_fifo ( dsidev , DSI_FIFO_SIZE_32 ,
2009-12-16 16:49:03 +02:00
DSI_FIFO_SIZE_32 ,
DSI_FIFO_SIZE_32 ,
DSI_FIFO_SIZE_32 ) ;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
dsi_config_rx_fifo ( dsidev , DSI_FIFO_SIZE_32 ,
2009-12-16 16:49:03 +02:00
DSI_FIFO_SIZE_32 ,
DSI_FIFO_SIZE_32 ,
DSI_FIFO_SIZE_32 ) ;
2009-10-28 11:59:56 +02:00
/* XXX what values for the timeouts? */
2011-05-12 17:26:26 +05:30
dsi_set_stop_state_counter ( dsidev , 0x1000 , false , false ) ;
dsi_set_ta_timeout ( dsidev , 0x1fff , true , true ) ;
dsi_set_lp_rx_timeout ( dsidev , 0x1fff , true , true ) ;
dsi_set_hs_tx_timeout ( dsidev , 0x1fff , true , true ) ;
2009-10-28 11:59:56 +02:00
switch ( dssdev - > ctrl . pixel_size ) {
case 16 :
buswidth = 0 ;
break ;
case 18 :
buswidth = 1 ;
break ;
case 24 :
buswidth = 2 ;
break ;
default :
BUG ( ) ;
}
2011-05-12 17:26:26 +05:30
r = dsi_read_reg ( dsidev , DSI_CTRL ) ;
2009-10-28 11:59:56 +02:00
r = FLD_MOD ( r , 1 , 1 , 1 ) ; /* CS_RX_EN */
r = FLD_MOD ( r , 1 , 2 , 2 ) ; /* ECC_RX_EN */
r = FLD_MOD ( r , 1 , 3 , 3 ) ; /* TX_FIFO_ARBITRATION */
r = FLD_MOD ( r , 1 , 4 , 4 ) ; /* VP_CLK_RATIO, always 1, see errata*/
r = FLD_MOD ( r , buswidth , 7 , 6 ) ; /* VP_DATA_BUS_WIDTH */
r = FLD_MOD ( r , 0 , 8 , 8 ) ; /* VP_CLK_POL */
r = FLD_MOD ( r , 2 , 13 , 12 ) ; /* LINE_BUFFER, 2 lines */
r = FLD_MOD ( r , 1 , 14 , 14 ) ; /* TRIGGER_RESET_MODE */
r = FLD_MOD ( r , 1 , 19 , 19 ) ; /* EOT_ENABLE */
2011-03-22 06:33:36 -05:00
if ( ! dss_has_feature ( FEAT_DSI_DCS_CMD_CONFIG_VC ) ) {
r = FLD_MOD ( r , 1 , 24 , 24 ) ; /* DCS_CMD_ENABLE */
/* DCS_CMD_CODE, 1=start, 0=continue */
r = FLD_MOD ( r , 0 , 25 , 25 ) ;
}
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
dsi_write_reg ( dsidev , DSI_CTRL , r ) ;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
dsi_vc_initial_config ( dsidev , 0 ) ;
dsi_vc_initial_config ( dsidev , 1 ) ;
dsi_vc_initial_config ( dsidev , 2 ) ;
dsi_vc_initial_config ( dsidev , 3 ) ;
2009-10-28 11:59:56 +02:00
return 0 ;
}
static void dsi_proto_timings ( struct omap_dss_device * dssdev )
{
2011-05-12 17:26:26 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
2009-10-28 11:59:56 +02:00
unsigned tlpx , tclk_zero , tclk_prepare , tclk_trail ;
unsigned tclk_pre , tclk_post ;
unsigned ths_prepare , ths_prepare_ths_zero , ths_zero ;
unsigned ths_trail , ths_exit ;
unsigned ddr_clk_pre , ddr_clk_post ;
unsigned enter_hs_mode_lat , exit_hs_mode_lat ;
unsigned ths_eot ;
u32 r ;
2011-05-12 17:26:26 +05:30
r = dsi_read_reg ( dsidev , DSI_DSIPHY_CFG0 ) ;
2009-10-28 11:59:56 +02:00
ths_prepare = FLD_GET ( r , 31 , 24 ) ;
ths_prepare_ths_zero = FLD_GET ( r , 23 , 16 ) ;
ths_zero = ths_prepare_ths_zero - ths_prepare ;
ths_trail = FLD_GET ( r , 15 , 8 ) ;
ths_exit = FLD_GET ( r , 7 , 0 ) ;
2011-05-12 17:26:26 +05:30
r = dsi_read_reg ( dsidev , DSI_DSIPHY_CFG1 ) ;
2009-10-28 11:59:56 +02:00
tlpx = FLD_GET ( r , 22 , 16 ) * 2 ;
tclk_trail = FLD_GET ( r , 15 , 8 ) ;
tclk_zero = FLD_GET ( r , 7 , 0 ) ;
2011-05-12 17:26:26 +05:30
r = dsi_read_reg ( dsidev , DSI_DSIPHY_CFG2 ) ;
2009-10-28 11:59:56 +02:00
tclk_prepare = FLD_GET ( r , 7 , 0 ) ;
/* min 8*UI */
tclk_pre = 20 ;
/* min 60ns + 52*UI */
2011-05-12 17:26:26 +05:30
tclk_post = ns2ddr ( dsidev , 60 ) + 26 ;
2009-10-28 11:59:56 +02:00
2011-05-16 15:17:08 +05:30
ths_eot = DIV_ROUND_UP ( 4 , dsi_get_num_data_lanes_dssdev ( dssdev ) ) ;
2009-10-28 11:59:56 +02:00
ddr_clk_pre = DIV_ROUND_UP ( tclk_pre + tlpx + tclk_zero + tclk_prepare ,
4 ) ;
ddr_clk_post = DIV_ROUND_UP ( tclk_post + ths_trail , 4 ) + ths_eot ;
BUG_ON ( ddr_clk_pre = = 0 | | ddr_clk_pre > 255 ) ;
BUG_ON ( ddr_clk_post = = 0 | | ddr_clk_post > 255 ) ;
2011-05-12 17:26:26 +05:30
r = dsi_read_reg ( dsidev , DSI_CLK_TIMING ) ;
2009-10-28 11:59:56 +02:00
r = FLD_MOD ( r , ddr_clk_pre , 15 , 8 ) ;
r = FLD_MOD ( r , ddr_clk_post , 7 , 0 ) ;
2011-05-12 17:26:26 +05:30
dsi_write_reg ( dsidev , DSI_CLK_TIMING , r ) ;
2009-10-28 11:59:56 +02:00
DSSDBG ( " ddr_clk_pre %u, ddr_clk_post %u \n " ,
ddr_clk_pre ,
ddr_clk_post ) ;
enter_hs_mode_lat = 1 + DIV_ROUND_UP ( tlpx , 4 ) +
DIV_ROUND_UP ( ths_prepare , 4 ) +
DIV_ROUND_UP ( ths_zero + 3 , 4 ) ;
exit_hs_mode_lat = DIV_ROUND_UP ( ths_trail + ths_exit , 4 ) + 1 + ths_eot ;
r = FLD_VAL ( enter_hs_mode_lat , 31 , 16 ) |
FLD_VAL ( exit_hs_mode_lat , 15 , 0 ) ;
2011-05-12 17:26:26 +05:30
dsi_write_reg ( dsidev , DSI_VM_TIMING7 , r ) ;
2009-10-28 11:59:56 +02:00
DSSDBG ( " enter_hs_mode_lat %u, exit_hs_mode_lat %u \n " ,
enter_hs_mode_lat , exit_hs_mode_lat ) ;
}
static void dsi_update_screen_dispc ( struct omap_dss_device * dssdev ,
u16 x , u16 y , u16 w , u16 h )
{
2011-05-12 17:26:26 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
2011-05-12 17:26:27 +05:30
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2009-10-28 11:59:56 +02:00
unsigned bytespp ;
unsigned bytespl ;
unsigned bytespf ;
unsigned total_len ;
unsigned packet_payload ;
unsigned packet_len ;
u32 l ;
OMAP: DSS2: DSI: use a private workqueue
Using the shared workqueue led to to a deadlock in the case where the
display was unblanked via keyboard.
What happens is something like this:
- User presses a key
context 1:
- drivers/char/keyboard.c calls schedule_console_callback()
- fb_unblank takes the console semaphore
- dsi bus lock is taken, and frame transfer is started (dsi bus lock is
left on)
- Unblank code tries to set the panel backlight, which tries to take dsi
bus lock, but is blocked while the frame transfer is going on
context 2, shared workqueue, console_callback in drivers/char/vt.c:
- Tries to take console semaphore
- Blocks, as console semaphore is being held by context 1
- No other shared workqueue work can be run
context 3, HW irq, caused by FRAMEDONE interrupt:
- Interrupt handler schedules framedone-work in shared workqueue
- Framedone-work is never ran, as the shared workqueue is blocked. This
means that the unblank thread stays blocked, which means that context 2
stays blocked.
While I think the real problem is in keyboard/virtual terminal code, using
a private workqueue in the DSI driver is perhaps safer and more robust
than using the shared one. The DSI works should not be delayed more than a
millisecond or so, and even if the private workqueue gives us no hard
promise of doing so, it's still safer bet than the shared workqueue.
Signed-off-by: Tomi Valkeinen <tomi.valkeinen@nokia.com>
2010-04-12 09:57:19 +03:00
int r ;
2011-05-12 17:26:27 +05:30
const unsigned channel = dsi - > update_channel ;
2011-05-16 15:17:09 +05:30
const unsigned line_buf_size = dsi_get_line_buf_size ( dsidev ) ;
2009-10-28 11:59:56 +02:00
2010-01-11 16:12:31 +02:00
DSSDBG ( " dsi_update_screen_dispc(%d,%d %dx%d) \n " ,
x , y , w , h ) ;
2009-10-28 11:59:56 +02:00
2011-08-22 11:58:08 +05:30
dsi_vc_config_source ( dsidev , channel , DSI_VC_SOURCE_VP ) ;
2010-01-12 14:16:41 +02:00
2009-10-28 11:59:56 +02:00
	bytespp	= dssdev->ctrl.pixel_size / 8;
	bytespl = w * bytespp;
	bytespf = bytespl * h;

	/* NOTE: packet_payload has to be equal to N * bytespl, where N is
	 * number of lines in a packet.  See errata about VP_CLK_RATIO */

	if (bytespf < line_buf_size)
		packet_payload = bytespf;
	else
		packet_payload = (line_buf_size) / bytespl * bytespl;

	packet_len = packet_payload + 1;	/* 1 byte for DCS cmd */
	total_len = (bytespf / packet_payload) * packet_len;

	if (bytespf % packet_payload)
		total_len += (bytespf % packet_payload) + 1;

	l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
2011-05-12 17:26:26 +05:30
dsi_write_reg ( dsidev , DSI_VC_TE ( channel ) , l ) ;
2009-10-28 11:59:56 +02:00
2011-08-25 18:25:03 +05:30
dsi_vc_write_long_header ( dsidev , channel , MIPI_DSI_DCS_LONG_WRITE ,
2011-05-12 17:26:26 +05:30
packet_len , 0 ) ;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:27 +05:30
if ( dsi - > te_enabled )
2009-10-28 11:59:56 +02:00
l = FLD_MOD ( l , 1 , 30 , 30 ) ; /* TE_EN */
else
l = FLD_MOD ( l , 1 , 31 , 31 ) ; /* TE_START */
2011-05-12 17:26:26 +05:30
dsi_write_reg ( dsidev , DSI_VC_TE ( channel ) , l ) ;
2009-10-28 11:59:56 +02:00
/* We put SIDLEMODE to no-idle for the duration of the transfer,
* because DSS interrupts are not capable of waking up the CPU and the
* framedone interrupt could be delayed for quite a long time . I think
* the same goes for any DSS interrupts , but for some reason I have not
* seen the problem anywhere else than here .
*/
dispc_disable_sidle ( ) ;
2011-05-12 17:26:26 +05:30
dsi_perf_mark_start ( dsidev ) ;
2010-01-12 14:16:41 +02:00
2011-05-16 15:17:07 +05:30
r = schedule_delayed_work ( & dsi - > framedone_timeout_work ,
msecs_to_jiffies ( 250 ) ) ;
2010-04-12 09:57:19 +03:00
BUG_ON ( r = = 0 ) ;
2010-01-12 14:16:41 +02:00
2009-10-28 11:59:56 +02:00
dss_start_update ( dssdev ) ;
2011-05-12 17:26:27 +05:30
if ( dsi - > te_enabled ) {
2009-10-28 11:59:56 +02:00
/* disable LP_RX_TO, so that we can receive TE. Time to wait
* for TE is longer than the timer allows */
2011-05-12 17:26:26 +05:30
REG_FLD_MOD ( dsidev , DSI_TIMING2 , 0 , 15 , 15 ) ; /* LP_RX_TO */
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
dsi_vc_send_bta ( dsidev , channel ) ;
2009-10-28 11:59:56 +02:00
# ifdef DSI_CATCH_MISSING_TE
2011-05-12 17:26:27 +05:30
mod_timer ( & dsi - > te_timer , jiffies + msecs_to_jiffies ( 250 ) ) ;
2009-10-28 11:59:56 +02:00
# endif
}
}
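/*
 * Worked example of the TE_SIZE arithmetic in dsi_update_screen_dispc()
 * above (the frame geometry and the 4096 byte line buffer are assumed
 * values, chosen only so the numbers come out even):
 *
 *	w = 480, h = 800, 24 bpp  ->  bytespl = 1440, bytespf = 1152000
 *	packet_payload = 4096 / 1440 * 1440 = 2880   (2 lines per packet)
 *	packet_len     = 2881                        (+1 byte of DCS cmd)
 *	total_len      = 1152000 / 2880 * 2881 = 1152400, no remainder
 *
 * total_len is the value programmed into TE_SIZE.
 */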
# ifdef DSI_CATCH_MISSING_TE
static void dsi_te_timeout(unsigned long arg)
{
	DSSERR("TE not received for 250ms!\n");
}
# endif
2011-05-12 17:26:26 +05:30
static void dsi_handle_framedone ( struct platform_device * dsidev , int error )
2009-10-28 11:59:56 +02:00
{
2011-05-12 17:26:27 +05:30
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2009-10-28 11:59:56 +02:00
/* SIDLEMODE back to smart-idle */
dispc_enable_sidle ( ) ;
2011-05-12 17:26:27 +05:30
if ( dsi - > te_enabled ) {
2010-01-12 14:16:41 +02:00
/* enable LP_RX_TO again after the TE */
2011-05-12 17:26:26 +05:30
REG_FLD_MOD ( dsidev , DSI_TIMING2 , 1 , 15 , 15 ) ; /* LP_RX_TO */
2009-10-28 11:59:56 +02:00
}
2011-05-12 17:26:27 +05:30
dsi - > framedone_callback ( error , dsi - > framedone_data ) ;
2010-06-09 15:31:01 +03:00
if ( ! error )
2011-05-12 17:26:26 +05:30
dsi_perf_show ( dsidev , " DISPC " ) ;
2010-01-12 14:16:41 +02:00
}
2009-10-28 11:59:56 +02:00
2010-06-09 15:31:01 +03:00
static void dsi_framedone_timeout_work_callback ( struct work_struct * work )
2010-01-12 14:16:41 +02:00
{
2011-05-12 17:26:27 +05:30
struct dsi_data * dsi = container_of ( work , struct dsi_data ,
framedone_timeout_work . work ) ;
2010-06-09 15:31:01 +03:00
/* XXX While extremely unlikely, we could get FRAMEDONE interrupt after
* 250 ms which would conflict with this timeout work . What should be
* done is first cancel the transfer on the HW , and then cancel the
* possibly scheduled framedone work . However , cancelling the transfer
* on the HW is buggy , and would probably require resetting the whole
* DSI */
2010-01-12 14:16:41 +02:00
2010-06-09 15:31:01 +03:00
DSSERR ( " Framedone not received for 250ms! \n " ) ;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:27 +05:30
dsi_handle_framedone ( dsi - > pdev , - ETIMEDOUT ) ;
2009-10-28 11:59:56 +02:00
}
2010-06-09 15:31:01 +03:00
static void dsi_framedone_irq_callback ( void * data , u32 mask )
2009-10-28 11:59:56 +02:00
{
2011-05-12 17:26:26 +05:30
struct omap_dss_device * dssdev = ( struct omap_dss_device * ) data ;
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
2011-05-12 17:26:27 +05:30
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2010-06-09 15:31:01 +03:00
/* Note: We get FRAMEDONE when DISPC has finished sending pixels and
* turns itself off . However , DSI still has the pixels in its buffers ,
* and is sending the data .
*/
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:27 +05:30
__cancel_delayed_work ( & dsi - > framedone_timeout_work ) ;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
dsi_handle_framedone ( dsidev , 0 ) ;
2009-10-28 11:59:56 +02:00
2011-03-23 09:59:34 +00:00
# ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
dispc_fake_vsync_irq ( ) ;
# endif
2010-01-12 14:16:41 +02:00
}
2009-10-28 11:59:56 +02:00
2010-01-12 14:16:41 +02:00
int omap_dsi_prepare_update ( struct omap_dss_device * dssdev ,
2010-06-09 15:31:34 +03:00
u16 * x , u16 * y , u16 * w , u16 * h ,
bool enlarge_update_area )
2010-01-12 14:16:41 +02:00
{
2011-05-12 17:26:26 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
2010-01-12 14:16:41 +02:00
u16 dw , dh ;
2009-10-28 11:59:56 +02:00
2010-01-12 14:16:41 +02:00
dssdev - > driver - > get_resolution ( dssdev , & dw , & dh ) ;
2009-10-28 11:59:56 +02:00
2010-01-12 14:16:41 +02:00
	if (*x > dw || *y > dh)
		return -EINVAL;
2009-10-28 11:59:56 +02:00
2010-01-12 14:16:41 +02:00
if ( * x + * w > dw )
return - EINVAL ;
2009-10-28 11:59:56 +02:00
2010-01-12 14:16:41 +02:00
if ( * y + * h > dh )
return - EINVAL ;
2009-10-28 11:59:56 +02:00
2010-01-12 14:16:41 +02:00
	if (*w == 1)
		return -EINVAL;
2009-10-28 11:59:56 +02:00
2010-01-12 14:16:41 +02:00
	if (*w == 0 || *h == 0)
		return -EINVAL;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
dsi_perf_mark_setup ( dsidev ) ;
2009-10-28 11:59:56 +02:00
2011-08-15 11:22:21 +03:00
dss_setup_partial_planes ( dssdev , x , y , w , h ,
enlarge_update_area ) ;
2011-08-16 13:45:15 +03:00
dispc_mgr_set_lcd_size ( dssdev - > manager - > id , * w , * h ) ;
2009-10-28 11:59:56 +02:00
2010-01-12 14:16:41 +02:00
return 0 ;
}
EXPORT_SYMBOL ( omap_dsi_prepare_update ) ;
2009-10-28 11:59:56 +02:00
2010-01-12 14:16:41 +02:00
int omap_dsi_update ( struct omap_dss_device * dssdev ,
int channel ,
u16 x , u16 y , u16 w , u16 h ,
void ( * callback ) ( int , void * ) , void * data )
{
2011-05-12 17:26:26 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
2011-05-12 17:26:27 +05:30
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2011-05-12 17:26:26 +05:30
2011-05-12 17:26:27 +05:30
dsi - > update_channel = channel ;
2009-10-28 11:59:56 +02:00
2010-05-25 17:01:28 +03:00
	/* OMAP DSS cannot send updates of odd widths.
	 * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON
	 * here to make sure we catch erroneous updates. Otherwise we'll only
	 * see rather obscure HW error happening, as DSS halts. */
	BUG_ON(x % 2 == 1);
2011-08-15 11:22:21 +03:00
dsi - > framedone_callback = callback ;
dsi - > framedone_data = data ;
2010-07-14 14:11:50 +02:00
2011-08-15 11:22:21 +03:00
dsi - > update_region . x = x ;
dsi - > update_region . y = y ;
dsi - > update_region . w = w ;
dsi - > update_region . h = h ;
dsi - > update_region . device = dssdev ;
2010-07-14 14:11:50 +02:00
2011-08-15 11:22:21 +03:00
dsi_update_screen_dispc ( dssdev , x , y , w , h ) ;
2009-10-28 11:59:56 +02:00
return 0 ;
}
2010-01-12 14:16:41 +02:00
EXPORT_SYMBOL(omap_dsi_update);
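/*
 * Illustrative sketch (not part of the driver): a manual-update panel
 * driver pushing a full-screen refresh through the two exported calls
 * above. The example_* names, the completion-based wait and the way the
 * caller serializes against other DSI traffic (the DSI bus lock) are
 * assumptions made only for the example.
 */
#if 0
static void example_framedone_cb(int err, void *data)
{
	struct completion *compl = data;

	if (err)
		pr_err("frame transfer failed: %d\n", err);

	complete(compl);
}

static int example_update_full_screen(struct omap_dss_device *dssdev,
		int channel)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	u16 x = 0, y = 0, w, h;
	int r;

	dssdev->driver->get_resolution(dssdev, &w, &h);

	r = omap_dsi_prepare_update(dssdev, &x, &y, &w, &h, false);
	if (r)
		return r;

	r = omap_dsi_update(dssdev, channel, x, y, w, h,
			example_framedone_cb, &compl);
	if (r)
		return r;

	/* the callback runs once DISPC has pushed the frame towards DSI */
	wait_for_completion(&compl);

	return 0;
}
#endif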
2009-10-28 11:59:56 +02:00
/* Display funcs */
static int dsi_display_init_dispc ( struct omap_dss_device * dssdev )
{
int r ;
2011-05-12 17:26:29 +05:30
u32 irq ;
irq = dssdev - > manager - > id = = OMAP_DSS_CHANNEL_LCD ?
DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2 ;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
r = omap_dispc_register_isr ( dsi_framedone_irq_callback , ( void * ) dssdev ,
2011-05-12 17:26:29 +05:30
irq ) ;
2009-10-28 11:59:56 +02:00
if ( r ) {
DSSERR ( " can't get FRAMEDONE irq \n " ) ;
return r ;
}
2011-08-16 13:45:15 +03:00
dispc_mgr_set_lcd_display_type ( dssdev - > manager - > id ,
2010-12-02 11:27:10 +00:00
OMAP_DSS_LCD_DISPLAY_TFT ) ;
2009-10-28 11:59:56 +02:00
2011-08-22 17:41:57 +05:30
dispc_mgr_enable_stallmode ( dssdev - > manager - > id , true ) ;
2011-08-16 13:45:15 +03:00
dispc_mgr_enable_fifohandcheck ( dssdev - > manager - > id , 1 ) ;
2009-10-28 11:59:56 +02:00
2011-08-16 13:45:15 +03:00
dispc_mgr_set_tft_data_lines ( dssdev - > manager - > id ,
dssdev - > ctrl . pixel_size ) ;
2009-10-28 11:59:56 +02:00
{
struct omap_video_timings timings = {
. hsw = 1 ,
. hfp = 1 ,
. hbp = 1 ,
. vsw = 1 ,
. vfp = 0 ,
. vbp = 0 ,
} ;
2011-08-16 13:45:15 +03:00
dispc_mgr_set_lcd_timings ( dssdev - > manager - > id , & timings ) ;
2009-10-28 11:59:56 +02:00
}
return 0 ;
}
static void dsi_display_uninit_dispc ( struct omap_dss_device * dssdev )
{
2011-05-12 17:26:29 +05:30
u32 irq ;
irq = dssdev - > manager - > id = = OMAP_DSS_CHANNEL_LCD ?
DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2 ;
2011-05-12 17:26:26 +05:30
omap_dispc_unregister_isr ( dsi_framedone_irq_callback , ( void * ) dssdev ,
2011-05-12 17:26:29 +05:30
irq ) ;
2009-10-28 11:59:56 +02:00
}
static int dsi_configure_dsi_clocks ( struct omap_dss_device * dssdev )
{
2011-05-12 17:26:26 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
2009-10-28 11:59:56 +02:00
struct dsi_clock_info cinfo ;
int r ;
2011-02-24 14:17:30 +05:30
/* we always use DSS_CLK_SYSCK as input clock */
cinfo . use_sys_clk = true ;
2011-02-22 13:36:10 +02:00
cinfo . regn = dssdev - > clocks . dsi . regn ;
cinfo . regm = dssdev - > clocks . dsi . regm ;
cinfo . regm_dispc = dssdev - > clocks . dsi . regm_dispc ;
cinfo . regm_dsi = dssdev - > clocks . dsi . regm_dsi ;
2010-12-02 11:27:11 +00:00
r = dsi_calc_clock_rates ( dssdev , & cinfo ) ;
2010-04-22 22:50:05 +02:00
if ( r ) {
DSSERR ( " Failed to calc dsi clocks \n " ) ;
2009-10-28 11:59:56 +02:00
return r ;
2010-04-22 22:50:05 +02:00
}
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:26 +05:30
r = dsi_pll_set_clock_div ( dsidev , & cinfo ) ;
2009-10-28 11:59:56 +02:00
if ( r ) {
DSSERR ( " Failed to set dsi clocks \n " ) ;
return r ;
}
return 0 ;
}
static int dsi_configure_dispc_clocks ( struct omap_dss_device * dssdev )
{
2011-05-12 17:26:26 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
2009-10-28 11:59:56 +02:00
struct dispc_clock_info dispc_cinfo ;
int r ;
unsigned long long fck ;
2011-05-12 17:26:26 +05:30
fck = dsi_get_pll_hsdiv_dispc_rate ( dsidev ) ;
2009-10-28 11:59:56 +02:00
2011-04-12 13:52:24 +05:30
dispc_cinfo . lck_div = dssdev - > clocks . dispc . channel . lck_div ;
dispc_cinfo . pck_div = dssdev - > clocks . dispc . channel . pck_div ;
2009-10-28 11:59:56 +02:00
r = dispc_calc_clock_rates ( fck , & dispc_cinfo ) ;
if ( r ) {
DSSERR ( " Failed to calc dispc clocks \n " ) ;
return r ;
}
2011-08-16 13:45:15 +03:00
r = dispc_mgr_set_clock_div ( dssdev - > manager - > id , & dispc_cinfo ) ;
2009-10-28 11:59:56 +02:00
if ( r ) {
DSSERR ( " Failed to set dispc clocks \n " ) ;
return r ;
}
return 0 ;
}
static int dsi_display_init_dsi ( struct omap_dss_device * dssdev )
{
2011-05-12 17:26:26 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
2011-05-12 17:26:29 +05:30
int dsi_module = dsi_get_dsidev_id ( dsidev ) ;
2009-10-28 11:59:56 +02:00
int r ;
2011-05-12 17:26:26 +05:30
r = dsi_pll_init ( dsidev , true , true ) ;
2009-10-28 11:59:56 +02:00
if ( r )
goto err0 ;
r = dsi_configure_dsi_clocks ( dssdev ) ;
if ( r )
goto err1 ;
2011-04-12 13:52:24 +05:30
dss_select_dispc_clk_source ( dssdev - > clocks . dispc . dispc_fclk_src ) ;
2011-05-12 17:26:29 +05:30
dss_select_dsi_clk_source ( dsi_module , dssdev - > clocks . dsi . dsi_fclk_src ) ;
2011-03-22 06:33:36 -05:00
dss_select_lcd_clk_source ( dssdev - > manager - > id ,
2011-04-12 13:52:24 +05:30
dssdev - > clocks . dispc . channel . lcd_clk_src ) ;
2009-10-28 11:59:56 +02:00
DSSDBG ( " PLL OK \n " ) ;
r = dsi_configure_dispc_clocks ( dssdev ) ;
if ( r )
goto err2 ;
2010-10-06 15:18:13 +03:00
r = dsi_cio_init ( dssdev ) ;
2009-10-28 11:59:56 +02:00
if ( r )
goto err2 ;
2011-05-12 17:26:26 +05:30
_dsi_print_reset_status ( dsidev ) ;
2009-10-28 11:59:56 +02:00
dsi_proto_timings ( dssdev ) ;
dsi_set_lp_clk_divisor ( dssdev ) ;
if ( 1 )
2011-05-12 17:26:26 +05:30
_dsi_print_reset_status ( dsidev ) ;
2009-10-28 11:59:56 +02:00
r = dsi_proto_config ( dssdev ) ;
if ( r )
goto err3 ;
/* enable interface */
2011-05-12 17:26:26 +05:30
dsi_vc_enable ( dsidev , 0 , 1 ) ;
dsi_vc_enable ( dsidev , 1 , 1 ) ;
dsi_vc_enable ( dsidev , 2 , 1 ) ;
dsi_vc_enable ( dsidev , 3 , 1 ) ;
dsi_if_enable ( dsidev , 1 ) ;
dsi_force_tx_stop_mode_io ( dsidev ) ;
2009-10-28 11:59:56 +02:00
return 0 ;
err3 :
2011-06-15 15:21:12 +03:00
dsi_cio_uninit ( dssdev ) ;
2009-10-28 11:59:56 +02:00
err2 :
2011-04-12 13:52:23 +05:30
dss_select_dispc_clk_source ( OMAP_DSS_CLK_SRC_FCK ) ;
2011-05-12 17:26:29 +05:30
dss_select_dsi_clk_source ( dsi_module , OMAP_DSS_CLK_SRC_FCK ) ;
2011-08-10 11:25:36 +03:00
dss_select_lcd_clk_source ( dssdev - > manager - > id , OMAP_DSS_CLK_SRC_FCK ) ;
2009-10-28 11:59:56 +02:00
err1 :
2011-05-12 17:26:26 +05:30
dsi_pll_uninit ( dsidev , true ) ;
2009-10-28 11:59:56 +02:00
err0 :
return r ;
}
2010-07-30 12:39:34 +03:00
static void dsi_display_uninit_dsi ( struct omap_dss_device * dssdev ,
2010-10-11 11:33:30 +03:00
bool disconnect_lanes , bool enter_ulps )
2009-10-28 11:59:56 +02:00
{
2011-05-12 17:26:26 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
2011-05-12 17:26:27 +05:30
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2011-05-12 17:26:29 +05:30
int dsi_module = dsi_get_dsidev_id ( dsidev ) ;
2011-05-12 17:26:26 +05:30
2011-05-12 17:26:27 +05:30
	if (enter_ulps && !dsi->ulps_enabled)
2011-05-12 17:26:26 +05:30
		dsi_enter_ulps(dsidev);
2010-07-28 15:53:38 +03:00
2010-04-22 22:50:09 +02:00
/* disable interface */
2011-05-12 17:26:26 +05:30
dsi_if_enable ( dsidev , 0 ) ;
dsi_vc_enable ( dsidev , 0 , 0 ) ;
dsi_vc_enable ( dsidev , 1 , 0 ) ;
dsi_vc_enable ( dsidev , 2 , 0 ) ;
dsi_vc_enable ( dsidev , 3 , 0 ) ;
2010-04-22 22:50:09 +02:00
2011-04-12 13:52:23 +05:30
dss_select_dispc_clk_source ( OMAP_DSS_CLK_SRC_FCK ) ;
2011-05-12 17:26:29 +05:30
dss_select_dsi_clk_source ( dsi_module , OMAP_DSS_CLK_SRC_FCK ) ;
2011-08-10 11:25:36 +03:00
dss_select_lcd_clk_source ( dssdev - > manager - > id , OMAP_DSS_CLK_SRC_FCK ) ;
2011-06-15 15:21:12 +03:00
dsi_cio_uninit ( dssdev ) ;
2011-05-12 17:26:26 +05:30
dsi_pll_uninit ( dsidev , disconnect_lanes ) ;
2009-10-28 11:59:56 +02:00
}
2010-01-12 15:12:07 +02:00
int omapdss_dsi_display_enable ( struct omap_dss_device * dssdev )
2009-10-28 11:59:56 +02:00
{
2011-05-12 17:26:26 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
2011-05-12 17:26:27 +05:30
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2009-10-28 11:59:56 +02:00
int r = 0 ;
DSSDBG ( " dsi_display_enable \n " ) ;
2011-05-12 17:26:26 +05:30
WARN_ON ( ! dsi_bus_is_locked ( dsidev ) ) ;
2010-01-12 15:12:07 +02:00
2011-05-12 17:26:27 +05:30
mutex_lock ( & dsi - > lock ) ;
2009-10-28 11:59:56 +02:00
2011-06-23 16:38:21 +03:00
	if (dssdev->manager == NULL) {
		DSSERR("failed to enable display: no manager\n");
		r = -ENODEV;
		goto err_start_dev;
	}
2009-10-28 11:59:56 +02:00
r = omap_dss_start_device ( dssdev ) ;
if ( r ) {
DSSERR ( " failed to start device \n " ) ;
2011-05-27 10:52:19 +03:00
goto err_start_dev ;
2009-10-28 11:59:56 +02:00
}
2011-05-27 10:52:19 +03:00
r = dsi_runtime_get ( dsidev ) ;
2009-10-28 11:59:56 +02:00
if ( r )
2011-05-27 10:52:19 +03:00
goto err_get_dsi ;
dsi_enable_pll_clock ( dsidev , 1 ) ;
2009-10-28 11:59:56 +02:00
2011-05-27 10:52:19 +03:00
_dsi_initialize_irq ( dsidev ) ;
2009-10-28 11:59:56 +02:00
r = dsi_display_init_dispc ( dssdev ) ;
if ( r )
2011-05-27 10:52:19 +03:00
goto err_init_dispc ;
2009-10-28 11:59:56 +02:00
r = dsi_display_init_dsi ( dssdev ) ;
if ( r )
2011-05-27 10:52:19 +03:00
goto err_init_dsi ;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:27 +05:30
mutex_unlock ( & dsi - > lock ) ;
2009-10-28 11:59:56 +02:00
return 0 ;
2011-05-27 10:52:19 +03:00
err_init_dsi :
2010-01-12 15:12:07 +02:00
dsi_display_uninit_dispc ( dssdev ) ;
2011-05-27 10:52:19 +03:00
err_init_dispc :
2011-05-12 17:26:26 +05:30
dsi_enable_pll_clock ( dsidev , 0 ) ;
2011-05-27 10:52:19 +03:00
dsi_runtime_put ( dsidev ) ;
err_get_dsi :
2009-10-28 11:59:56 +02:00
omap_dss_stop_device ( dssdev ) ;
2011-05-27 10:52:19 +03:00
err_start_dev :
2011-05-12 17:26:27 +05:30
mutex_unlock ( & dsi - > lock ) ;
2009-10-28 11:59:56 +02:00
DSSDBG ( " dsi_display_enable FAILED \n " ) ;
return r ;
}
2010-01-12 15:12:07 +02:00
EXPORT_SYMBOL ( omapdss_dsi_display_enable ) ;
2009-10-28 11:59:56 +02:00
2010-07-30 12:39:34 +03:00
void omapdss_dsi_display_disable ( struct omap_dss_device * dssdev ,
2010-10-11 11:33:30 +03:00
bool disconnect_lanes , bool enter_ulps )
2009-10-28 11:59:56 +02:00
{
2011-05-12 17:26:26 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
2011-05-12 17:26:27 +05:30
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2011-05-12 17:26:26 +05:30
2009-10-28 11:59:56 +02:00
DSSDBG ( " dsi_display_disable \n " ) ;
2011-05-12 17:26:26 +05:30
WARN_ON ( ! dsi_bus_is_locked ( dsidev ) ) ;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:27 +05:30
mutex_lock ( & dsi - > lock ) ;
2009-10-28 11:59:56 +02:00
2011-06-16 14:34:06 +03:00
dsi_sync_vc ( dsidev , 0 ) ;
dsi_sync_vc ( dsidev , 1 ) ;
dsi_sync_vc ( dsidev , 2 ) ;
dsi_sync_vc ( dsidev , 3 ) ;
2009-10-28 11:59:56 +02:00
dsi_display_uninit_dispc ( dssdev ) ;
2010-10-11 11:33:30 +03:00
dsi_display_uninit_dsi ( dssdev , disconnect_lanes , enter_ulps ) ;
2009-10-28 11:59:56 +02:00
2011-05-27 10:52:19 +03:00
dsi_runtime_put ( dsidev ) ;
2011-05-12 17:26:26 +05:30
dsi_enable_pll_clock ( dsidev , 0 ) ;
2009-10-28 11:59:56 +02:00
2010-01-12 15:12:07 +02:00
omap_dss_stop_device ( dssdev ) ;
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:27 +05:30
mutex_unlock ( & dsi - > lock ) ;
2009-10-28 11:59:56 +02:00
}
2010-01-12 15:12:07 +02:00
EXPORT_SYMBOL(omapdss_dsi_display_disable);
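/*
 * Illustrative sketch (not part of the driver): the enable/disable pairing
 * a panel driver is expected to follow around the two exported calls above.
 * dsi_bus_lock()/dsi_bus_unlock() are assumed here to be the bus-lock
 * helpers this file exports (the WARN_ON(!dsi_bus_is_locked()) checks above
 * require the lock to be held); the panel-specific configuration step is
 * left out, and the example_* names are hypothetical.
 */
#if 0
static int example_panel_power_on(struct omap_dss_device *dssdev)
{
	int r;

	dsi_bus_lock(dssdev);

	r = omapdss_dsi_display_enable(dssdev);
	if (r) {
		dsi_bus_unlock(dssdev);
		return r;
	}

	/* ... panel specific init sequence would go here ... */

	dsi_bus_unlock(dssdev);
	return 0;
}

static void example_panel_power_off(struct omap_dss_device *dssdev)
{
	dsi_bus_lock(dssdev);
	/* disconnect the lanes, do not bother entering ULPS first */
	omapdss_dsi_display_disable(dssdev, true, false);
	dsi_bus_unlock(dssdev);
}
#endif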
2009-10-28 11:59:56 +02:00
2010-01-11 15:11:01 +02:00
int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
2009-10-28 11:59:56 +02:00
{
2011-05-12 17:26:27 +05:30
	struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	dsi->te_enabled = enable;
2010-01-11 15:11:01 +02:00
	return 0;
2009-10-28 11:59:56 +02:00
}
2010-01-11 15:11:01 +02:00
EXPORT_SYMBOL ( omapdss_dsi_enable_te ) ;
2009-10-28 11:59:56 +02:00
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
2011-06-21 09:35:36 +03:00
		u32 fifo_size, u32 burst_size,
2009-10-28 11:59:56 +02:00
		u32 *fifo_low, u32 *fifo_high)
{
2011-06-21 09:35:36 +03:00
	*fifo_high = fifo_size - burst_size;
	*fifo_low = fifo_size - burst_size * 2;
2009-10-28 11:59:56 +02:00
}
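/*
 * Worked example (the numbers are assumed, picked only for illustration):
 * with a FIFO of 1024 units and a burst size of 64 units, the thresholds
 * come out as
 *
 *	fifo_high = 1024 - 64     = 960
 *	fifo_low  = 1024 - 64 * 2 = 896
 *
 * i.e. the high threshold sits one burst below the top of the FIFO and the
 * low threshold one further burst down.
 */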
int dsi_init_display ( struct omap_dss_device * dssdev )
{
2011-05-12 17:26:27 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2011-05-16 15:17:08 +05:30
int dsi_module = dsi_get_dsidev_id ( dsidev ) ;
2011-05-12 17:26:27 +05:30
2009-10-28 11:59:56 +02:00
DSSDBG ( " DSI init \n " ) ;
2011-07-22 12:45:04 +05:30
if ( dssdev - > panel . dsi_mode = = OMAP_DSS_DSI_CMD_MODE ) {
dssdev - > caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM ;
}
2009-10-28 11:59:56 +02:00
2011-05-12 17:26:27 +05:30
if ( dsi - > vdds_dsi_reg = = NULL ) {
2011-02-22 15:53:46 +02:00
struct regulator * vdds_dsi ;
2011-05-12 17:26:27 +05:30
vdds_dsi = regulator_get ( & dsi - > pdev - > dev , " vdds_dsi " ) ;
2011-02-22 15:53:46 +02:00
if ( IS_ERR ( vdds_dsi ) ) {
DSSERR ( " can't get VDDS_DSI regulator \n " ) ;
return PTR_ERR ( vdds_dsi ) ;
}
2011-05-12 17:26:27 +05:30
dsi - > vdds_dsi_reg = vdds_dsi ;
2011-02-22 15:53:46 +02:00
}
2011-05-16 15:17:08 +05:30
if ( dsi_get_num_data_lanes_dssdev ( dssdev ) > dsi - > num_data_lanes ) {
DSSERR ( " DSI%d can't support more than %d data lanes \n " ,
dsi_module + 1 , dsi - > num_data_lanes ) ;
return - EINVAL ;
}
2009-10-28 11:59:56 +02:00
return 0 ;
}
2011-03-02 12:35:53 +05:30
int omap_dsi_request_vc ( struct omap_dss_device * dssdev , int * channel )
{
2011-05-12 17:26:27 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2011-03-02 12:35:53 +05:30
	int i;
2011-05-12 17:26:27 +05:30
	for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
		if (!dsi->vc[i].dssdev) {
			dsi->vc[i].dssdev = dssdev;
2011-03-02 12:35:53 +05:30
			*channel = i;
			return 0;
		}
	}

	DSSERR("cannot get VC for display %s", dssdev->name);
	return -ENOSPC;
}
EXPORT_SYMBOL ( omap_dsi_request_vc ) ;
int omap_dsi_set_vc_id ( struct omap_dss_device * dssdev , int channel , int vc_id )
{
2011-05-12 17:26:27 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2011-03-02 12:35:53 +05:30
	if (vc_id < 0 || vc_id > 3) {
		DSSERR("VC ID out of range\n");
		return -EINVAL;
	}

	if (channel < 0 || channel > 3) {
		DSSERR("Virtual Channel out of range\n");
		return -EINVAL;
	}
2011-05-12 17:26:27 +05:30
if ( dsi - > vc [ channel ] . dssdev ! = dssdev ) {
2011-03-02 12:35:53 +05:30
DSSERR ( " Virtual Channel not allocated to display %s \n " ,
dssdev - > name ) ;
return - EINVAL ;
}
2011-05-12 17:26:27 +05:30
dsi - > vc [ channel ] . vc_id = vc_id ;
2011-03-02 12:35:53 +05:30
return 0 ;
}
EXPORT_SYMBOL ( omap_dsi_set_vc_id ) ;
void omap_dsi_release_vc ( struct omap_dss_device * dssdev , int channel )
{
2011-05-12 17:26:27 +05:30
struct platform_device * dsidev = dsi_get_dsidev_from_dssdev ( dssdev ) ;
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
2011-03-02 12:35:53 +05:30
	if ((channel >= 0 && channel <= 3) &&
2011-05-12 17:26:27 +05:30
		dsi->vc[channel].dssdev == dssdev) {
		dsi->vc[channel].dssdev = NULL;
		dsi->vc[channel].vc_id = 0;
2011-03-02 12:35:53 +05:30
	}
}
EXPORT_SYMBOL(omap_dsi_release_vc);
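/*
 * Illustrative sketch (not part of the driver): how a panel driver probe
 * would typically claim and configure a virtual channel with the three
 * helpers above. Using VC ID 0 and the example_* name are assumptions made
 * for the example.
 */
#if 0
static int example_panel_probe_vc(struct omap_dss_device *dssdev, int *out_ch)
{
	int channel;
	int r;

	r = omap_dsi_request_vc(dssdev, &channel);
	if (r) {
		DSSERR("failed to get virtual channel\n");
		return r;
	}

	r = omap_dsi_set_vc_id(dssdev, channel, 0);
	if (r) {
		DSSERR("failed to set VC_ID\n");
		omap_dsi_release_vc(dssdev, channel);
		return r;
	}

	*out_ch = channel;
	return 0;
}
#endif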
2011-05-12 17:26:26 +05:30
void dsi_wait_pll_hsdiv_dispc_active ( struct platform_device * dsidev )
2010-06-09 15:28:12 +03:00
{
2011-05-12 17:26:26 +05:30
if ( wait_for_bit_change ( dsidev , DSI_PLL_STATUS , 7 , 1 ) ! = 1 )
2011-03-02 11:57:25 +05:30
DSSERR ( " %s (%s) not active \n " ,
2011-04-12 13:52:23 +05:30
dss_get_generic_clk_source_name ( OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ) ,
dss_feat_get_clk_source_name ( OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC ) ) ;
2010-06-09 15:28:12 +03:00
}
2011-05-12 17:26:26 +05:30
void dsi_wait_pll_hsdiv_dsi_active ( struct platform_device * dsidev )
2010-06-09 15:28:12 +03:00
{
2011-05-12 17:26:26 +05:30
if ( wait_for_bit_change ( dsidev , DSI_PLL_STATUS , 8 , 1 ) ! = 1 )
2011-03-02 11:57:25 +05:30
DSSERR ( " %s (%s) not active \n " ,
2011-04-12 13:52:23 +05:30
dss_get_generic_clk_source_name ( OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI ) ,
dss_feat_get_clk_source_name ( OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI ) ) ;
2010-06-09 15:28:12 +03:00
}
2011-05-12 17:26:26 +05:30
static void dsi_calc_clock_param_ranges ( struct platform_device * dsidev )
2011-03-14 23:28:23 -05:00
{
2011-05-12 17:26:27 +05:30
struct dsi_data * dsi = dsi_get_dsidrv_data ( dsidev ) ;
dsi - > regn_max = dss_feat_get_param_max ( FEAT_PARAM_DSIPLL_REGN ) ;
dsi - > regm_max = dss_feat_get_param_max ( FEAT_PARAM_DSIPLL_REGM ) ;
dsi - > regm_dispc_max =
dss_feat_get_param_max ( FEAT_PARAM_DSIPLL_REGM_DISPC ) ;
dsi - > regm_dsi_max = dss_feat_get_param_max ( FEAT_PARAM_DSIPLL_REGM_DSI ) ;
dsi - > fint_min = dss_feat_get_param_min ( FEAT_PARAM_DSIPLL_FINT ) ;
dsi - > fint_max = dss_feat_get_param_max ( FEAT_PARAM_DSIPLL_FINT ) ;
dsi - > lpdiv_max = dss_feat_get_param_max ( FEAT_PARAM_DSIPLL_LPDIV ) ;
2011-03-14 23:28:23 -05:00
}

static int dsi_get_clocks(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
	struct clk *clk;

	clk = clk_get(&dsidev->dev, "fck");
	if (IS_ERR(clk)) {
		DSSERR("can't get fck\n");
		return PTR_ERR(clk);
	}

	dsi->dss_clk = clk;

	clk = clk_get(&dsidev->dev, "sys_clk");
	if (IS_ERR(clk)) {
		DSSERR("can't get sys_clk\n");
		clk_put(dsi->dss_clk);
		dsi->dss_clk = NULL;
		return PTR_ERR(clk);
	}

	dsi->sys_clk = clk;

	return 0;
}

static void dsi_put_clocks(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	if (dsi->dss_clk)
		clk_put(dsi->dss_clk);
	if (dsi->sys_clk)
		clk_put(dsi->sys_clk);
}
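
/*
 * dsi_get_clocks()/dsi_put_clocks() only take and drop references via
 * clk_get()/clk_put(); no clk_enable() happens here. Gating of the
 * functional clock is left to runtime PM, and the handles are presumably
 * used elsewhere in this file mainly for clk_get_rate() queries (e.g. the
 * sys_clk rate feeding the DSI PLL calculations).
 */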

/* DSI1 HW IP initialisation */
static int omap_dsihw_probe(struct platform_device *dsidev)
{
	struct omap_display_platform_data *dss_plat_data;
	struct omap_dss_board_info *board_info;
	u32 rev;
	int r, i, dsi_module = dsi_get_dsidev_id(dsidev);
	struct resource *dsi_mem;
	struct dsi_data *dsi;

	dsi = kzalloc(sizeof(*dsi), GFP_KERNEL);
	if (!dsi) {
		r = -ENOMEM;
		goto err_alloc;
	}

	dsi->pdev = dsidev;
	dsi_pdev_map[dsi_module] = dsidev;
	dev_set_drvdata(&dsidev->dev, dsi);

	dss_plat_data = dsidev->dev.platform_data;
	board_info = dss_plat_data->board_data;
	dsi->enable_pads = board_info->dsi_enable_pads;
	dsi->disable_pads = board_info->dsi_disable_pads;

	spin_lock_init(&dsi->irq_lock);
	spin_lock_init(&dsi->errors_lock);
	dsi->errors = 0;

#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
	spin_lock_init(&dsi->irq_stats_lock);
	dsi->irq_stats.last_reset = jiffies;
#endif

	mutex_init(&dsi->lock);
	sema_init(&dsi->bus_lock, 1);

	r = dsi_get_clocks(dsidev);
	if (r)
		goto err_get_clk;

	pm_runtime_enable(&dsidev->dev);

	INIT_DELAYED_WORK_DEFERRABLE(&dsi->framedone_timeout_work,
			dsi_framedone_timeout_work_callback);

#ifdef DSI_CATCH_MISSING_TE
	init_timer(&dsi->te_timer);
	dsi->te_timer.function = dsi_te_timeout;
	dsi->te_timer.data = 0;
#endif

	dsi_mem = platform_get_resource(dsi->pdev, IORESOURCE_MEM, 0);
	if (!dsi_mem) {
		DSSERR("can't get IORESOURCE_MEM DSI\n");
		r = -EINVAL;
		goto err_ioremap;
	}

	dsi->base = ioremap(dsi_mem->start, resource_size(dsi_mem));
	if (!dsi->base) {
		DSSERR("can't ioremap DSI\n");
		r = -ENOMEM;
		goto err_ioremap;
	}

	dsi->irq = platform_get_irq(dsi->pdev, 0);
	if (dsi->irq < 0) {
		DSSERR("platform_get_irq failed\n");
		r = -ENODEV;
		goto err_get_irq;
	}

	r = request_irq(dsi->irq, omap_dsi_irq_handler, IRQF_SHARED,
		dev_name(&dsidev->dev), dsi->pdev);
	if (r < 0) {
		DSSERR("request_irq failed\n");
		goto err_get_irq;
	}

	/* DSI VCs initialization */
	for (i = 0; i < ARRAY_SIZE(dsi->vc); i++) {
		dsi->vc[i].source = DSI_VC_SOURCE_L4;
		dsi->vc[i].dssdev = NULL;
		dsi->vc[i].vc_id = 0;
	}

	dsi_calc_clock_param_ranges(dsidev);

	r = dsi_runtime_get(dsidev);
	if (r)
		goto err_get_dsi;

	rev = dsi_read_reg(dsidev, DSI_REVISION);
	dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
	       FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));

	dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);

	dsi_runtime_put(dsidev);

	return 0;

err_get_dsi:
	free_irq(dsi->irq, dsi->pdev);
err_get_irq:
	iounmap(dsi->base);
err_ioremap:
	pm_runtime_disable(&dsidev->dev);
	/* release the clock references taken in dsi_get_clocks() */
	dsi_put_clocks(dsidev);
err_get_clk:
	kfree(dsi);
err_alloc:
	return r;
}
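
/*
 * dsi_get_dsidrv_data(), used throughout this file, is presumably a thin
 * wrapper around dev_get_drvdata(), matching the dev_set_drvdata() call in
 * the probe above. A sketch of what it is assumed to look like:
 *
 *	static inline struct dsi_data *dsi_get_dsidrv_data(
 *			struct platform_device *dsidev)
 *	{
 *		return dev_get_drvdata(&dsidev->dev);
 *	}
 */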

static int omap_dsihw_remove(struct platform_device *dsidev)
{
	struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);

	WARN_ON(dsi->scp_clk_refcount > 0);

	pm_runtime_disable(&dsidev->dev);

	dsi_put_clocks(dsidev);

	if (dsi->vdds_dsi_reg != NULL) {
		if (dsi->vdds_dsi_enabled) {
			regulator_disable(dsi->vdds_dsi_reg);
			dsi->vdds_dsi_enabled = false;
		}

		regulator_put(dsi->vdds_dsi_reg);
		dsi->vdds_dsi_reg = NULL;
	}

	free_irq(dsi->irq, dsi->pdev);
	iounmap(dsi->base);

	kfree(dsi);

	return 0;
}

static int dsi_runtime_suspend(struct device *dev)
{
	dispc_runtime_put();
	dss_runtime_put();

	return 0;
}

static int dsi_runtime_resume(struct device *dev)
{
	int r;

	r = dss_runtime_get();
	if (r)
		goto err_get_dss;

	r = dispc_runtime_get();
	if (r)
		goto err_get_dispc;

	return 0;

err_get_dispc:
	dss_runtime_put();
err_get_dss:
	return r;
}

static const struct dev_pm_ops dsi_pm_ops = {
	.runtime_suspend = dsi_runtime_suspend,
	.runtime_resume = dsi_runtime_resume,
};
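
/*
 * The runtime PM callbacks above chain the DSI module to its
 * prerequisites: resuming DSI first resumes the DSS core and then DISPC
 * (dss_runtime_get()/dispc_runtime_get()), and suspending releases them in
 * the reverse order. dsi_runtime_get()/dsi_runtime_put(), used in the
 * probe above, are presumably thin wrappers around
 * pm_runtime_get_sync()/pm_runtime_put() on the DSI platform device.
 */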

static struct platform_driver omap_dsihw_driver = {
	.probe		= omap_dsihw_probe,
	.remove		= omap_dsihw_remove,
	.driver		= {
		.name	= "omapdss_dsi",
		.owner	= THIS_MODULE,
		.pm	= &dsi_pm_ops,
	},
};

int dsi_init_platform_driver(void)
{
	return platform_driver_register(&omap_dsihw_driver);
}

void dsi_uninit_platform_driver(void)
{
	platform_driver_unregister(&omap_dsihw_driver);
}
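
/*
 * These two entry points are expected to be called from the omapdss core
 * driver during its own initialisation and teardown. A hypothetical call
 * site (not code from this file) would look roughly like:
 *
 *	r = dsi_init_platform_driver();
 *	if (r)
 *		goto err_dsi;
 *	...
 *	dsi_uninit_platform_driver();
 */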