// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Maxime Coquelin 2015
 * Copyright (C) STMicroelectronics SA 2017
 * Authors:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
 *	     Gerald Baeza <gerald.baeza@foss.st.com>
 *	     Erwan Le Ray <erwan.leray@foss.st.com>
 *
 * Inspired by st-asc.c from STMicroelectronics (c)
 */

#include <linux/clk.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/dma-direction.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/spinlock.h>
#include <linux/sysrq.h>
#include <linux/tty_flip.h>
#include <linux/tty.h>

#include "serial_mctrl_gpio.h"
#include "stm32-usart.h"
static void stm32_usart_stop_tx(struct uart_port *port);
static void stm32_usart_transmit_chars(struct uart_port *port);

static inline struct stm32_port *to_stm32_port(struct uart_port *port)
{
	return container_of(port, struct stm32_port, port);
}

static void stm32_usart_set_bits(struct uart_port *port, u32 reg, u32 bits)
{
	u32 val;

	val = readl_relaxed(port->membase + reg);
	val |= bits;
	writel_relaxed(val, port->membase + reg);
}

static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
{
	u32 val;

	val = readl_relaxed(port->membase + reg);
	val &= ~bits;
	writel_relaxed(val, port->membase + reg);
}
static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
					 u32 delay_DDE, u32 baud)
{
	u32 rs485_deat_dedt;
	u32 rs485_deat_dedt_max = (USART_CR1_DEAT_MASK >> USART_CR1_DEAT_SHIFT);
	bool over8;

	*cr3 |= USART_CR3_DEM;
	over8 = *cr1 & USART_CR1_OVER8;

	if (over8)
		rs485_deat_dedt = delay_ADE * baud * 8;
	else
		rs485_deat_dedt = delay_ADE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEAT_SHIFT) &
			   USART_CR1_DEAT_MASK;
	*cr1 |= rs485_deat_dedt;

	if (over8)
		rs485_deat_dedt = delay_DDE * baud * 8;
	else
		rs485_deat_dedt = delay_DDE * baud * 16;

	rs485_deat_dedt = DIV_ROUND_CLOSEST(rs485_deat_dedt, 1000);
	rs485_deat_dedt = rs485_deat_dedt > rs485_deat_dedt_max ?
			  rs485_deat_dedt_max : rs485_deat_dedt;
	rs485_deat_dedt = (rs485_deat_dedt << USART_CR1_DEDT_SHIFT) &
			   USART_CR1_DEDT_MASK;
	*cr1 |= rs485_deat_dedt;
}
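/*
 * Illustrative example (assumed values, not taken from a reference manual):
 * with 16x oversampling (OVER8 cleared), baud = 115200 and a 1 ms
 * delay_rts_before_send, the assertion time computed above is
 * DIV_ROUND_CLOSEST(1 * 115200 * 16, 1000) = 1843 sample times, which is
 * then clamped to rs485_deat_dedt_max (31 for a 5-bit DEAT field) before
 * being shifted into CR1.
 */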
static int stm32_usart_config_rs485(struct uart_port *port,
				    struct serial_rs485 *rs485conf)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 usartdiv, baud, cr1, cr3;
	bool over8;

	stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	port->rs485 = *rs485conf;

	rs485conf->flags |= SER_RS485_RX_DURING_TX;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		cr1 = readl_relaxed(port->membase + ofs->cr1);
		cr3 = readl_relaxed(port->membase + ofs->cr3);
		usartdiv = readl_relaxed(port->membase + ofs->brr);
		usartdiv = usartdiv & GENMASK(15, 0);
		over8 = cr1 & USART_CR1_OVER8;

		if (over8)
			usartdiv = usartdiv | (usartdiv & GENMASK(4, 0))
				   << USART_BRR_04_R_SHIFT;

		baud = DIV_ROUND_CLOSEST(port->uartclk, usartdiv);
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);

		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}

		writel_relaxed(cr3, port->membase + ofs->cr3);
		writel_relaxed(cr1, port->membase + ofs->cr1);
	} else {
		stm32_usart_clr_bits(port, ofs->cr3,
				     USART_CR3_DEM | USART_CR3_DEP);
		stm32_usart_clr_bits(port, ofs->cr1,
				     USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));

	return 0;
}
static int stm32_usart_init_rs485(struct uart_port *port,
				  struct platform_device *pdev)
{
	struct serial_rs485 *rs485conf = &port->rs485;

	rs485conf->flags = 0;
	rs485conf->delay_rts_before_send = 0;
	rs485conf->delay_rts_after_send = 0;

	if (!pdev->dev.of_node)
		return -ENODEV;

	return uart_get_rs485_mode(port);
}
static int stm32_usart_pending_rx(struct uart_port *port, u32 *sr,
				  int *last_res, bool threaded)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	enum dma_status status;
	struct dma_tx_state state;

	*sr = readl_relaxed(port->membase + ofs->isr);

	if (threaded && stm32_port->rx_ch) {
		status = dmaengine_tx_status(stm32_port->rx_ch,
					     stm32_port->rx_ch->cookie,
					     &state);
		if (status == DMA_IN_PROGRESS && (*last_res != state.residue))
			return 1;
		else
			return 0;
	} else if (*sr & USART_SR_RXNE) {
		return 1;
	}

	return 0;
}
static unsigned long stm32_usart_get_char(struct uart_port *port, u32 *sr,
					  int *last_res)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;

	if (stm32_port->rx_ch) {
		c = stm32_port->rx_buf[RX_BUF_L - (*last_res)--];
		if ((*last_res) == 0)
			*last_res = RX_BUF_L;
	} else {
		c = readl_relaxed(port->membase + ofs->rdr);
		/* Apply RDR data mask */
		c &= stm32_port->rdr_mask;
	}

	return c;
}
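/*
 * Reading note (illustrative, based on the indexing above): last_res tracks
 * the DMA residue left in the cyclic RX buffer. When last_res == RX_BUF_L the
 * next character comes from rx_buf[0] and last_res drops to RX_BUF_L - 1;
 * once it reaches 0 it wraps back to RX_BUF_L for the next DMA cycle.
 */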
static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
{
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long c;
	u32 sr;
	char flag;

	spin_lock(&port->lock);

	while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
				      threaded)) {
		sr |= USART_SR_DUMMY_RX;
		flag = TTY_NORMAL;

		/*
		 * Status bits have to be cleared before reading the RDR:
		 * in FIFO mode, reading the RDR pops the next data (if any)
		 * along with its status bits into the SR. Not doing so leads
		 * to a misalignment between RDR and SR, and clears the status
		 * bits of the next RX data.
		 *
		 * Clear error flags for stm32f7 and stm32h7 compatible
		 * devices. On stm32f4 compatible devices, the error bit is
		 * cleared by the sequence [read SR - read DR].
		 */
		if ((sr & USART_SR_ERR_MASK) && ofs->icr != UNDEF_REG)
			writel_relaxed(sr & USART_SR_ERR_MASK,
				       port->membase + ofs->icr);

		c = stm32_usart_get_char(port, &sr, &stm32_port->last_res);
		port->icount.rx++;
		if (sr & USART_SR_ERR_MASK) {
			if (sr & USART_SR_ORE) {
				port->icount.overrun++;
			} else if (sr & USART_SR_PE) {
				port->icount.parity++;
			} else if (sr & USART_SR_FE) {
				/* Break detection if the character is null */
				if (!c) {
					port->icount.brk++;
					if (uart_handle_break(port))
						continue;
				} else {
					port->icount.frame++;
				}
			}

			sr &= port->read_status_mask;

			if (sr & USART_SR_PE) {
				flag = TTY_PARITY;
			} else if (sr & USART_SR_FE) {
				if (!c)
					flag = TTY_BREAK;
				else
					flag = TTY_FRAME;
			}
		}

		if (uart_prepare_sysrq_char(port, c))
			continue;

		uart_insert_char(port, sr, USART_SR_ORE, c, flag);
	}

	uart_unlock_and_check_sysrq(port);

	tty_flip_buffer_push(tport);
}
static void stm32_usart_tx_dma_complete(void *arg)
{
	struct uart_port *port = arg;
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	unsigned long flags;

	dmaengine_terminate_async(stm32port->tx_ch);
	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
	stm32port->tx_dma_busy = false;

	/* Let's see if we have pending data to send */
	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_transmit_chars(port);
	spin_unlock_irqrestore(&port->lock, flags);
}
static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	/*
	 * Enables the TX FIFO threshold irq when the FIFO is enabled,
	 * or the TX empty irq when the FIFO is disabled
	 */
	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
}

static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (stm32_port->fifoen && stm32_port->txftcfg >= 0)
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_TXFTIE);
	else
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
}
static void stm32_usart_transmit_chars_pio(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	if (stm32_port->tx_dma_busy) {
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
		stm32_port->tx_dma_busy = false;
	}

	while (!uart_circ_empty(xmit)) {
		/* Check that TDR is empty before filling FIFO */
		if (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
			break;
		writel_relaxed(xmit->buf[xmit->tail], port->membase + ofs->tdr);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		port->icount.tx++;
	}

	/* Rely on TXE irq (mask or unmask) for sending remaining data */
	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
	else
		stm32_usart_tx_interrupt_enable(port);
}
static void stm32_usart_transmit_chars_dma(struct uart_port *port)
{
	struct stm32_port *stm32port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_async_tx_descriptor *desc = NULL;
	unsigned int count, i;

	if (stm32port->tx_dma_busy)
		return;

	stm32port->tx_dma_busy = true;

	count = uart_circ_chars_pending(xmit);

	if (count > TX_BUF_L)
		count = TX_BUF_L;

	if (xmit->tail < xmit->head) {
		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], count);
	} else {
		size_t one = UART_XMIT_SIZE - xmit->tail;
		size_t two;

		if (one > count)
			one = count;
		two = count - one;

		memcpy(&stm32port->tx_buf[0], &xmit->buf[xmit->tail], one);
		if (two)
			memcpy(&stm32port->tx_buf[one], &xmit->buf[0], two);
	}
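	/*
	 * Illustrative example (assumes UART_XMIT_SIZE = 4096): with
	 * tail = 4090 and count = 10, the copy above takes one = 6 bytes from
	 * buf[4090..4095] and two = 4 bytes from buf[0..3], linearising the
	 * wrapped circular buffer into tx_buf before the DMA transfer.
	 */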
	desc = dmaengine_prep_slave_single(stm32port->tx_ch,
					   stm32port->tx_dma_buf,
					   count,
					   DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);

	if (!desc)
		goto fallback_err;

	desc->callback = stm32_usart_tx_dma_complete;
	desc->callback_param = port;

	/* Push current DMA TX transaction in the pending queue */
	if (dma_submit_error(dmaengine_submit(desc))) {
		/* DMA not yet started, safe to free resources */
		dmaengine_terminate_async(stm32port->tx_ch);
		goto fallback_err;
	}

	/* Issue pending DMA TX requests */
	dma_async_issue_pending(stm32port->tx_ch);

	stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);

	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;
	return;

fallback_err:
	for (i = count; i > 0; i--)
		stm32_usart_transmit_chars_pio(port);
}
static void stm32_usart_transmit_chars(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	struct circ_buf *xmit = &port->state->xmit;

	if (port->x_char) {
		if (stm32_port->tx_dma_busy)
			stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
		writel_relaxed(port->x_char, port->membase + ofs->tdr);
		port->x_char = 0;
		port->icount.tx++;
		if (stm32_port->tx_dma_busy)
			stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAT);
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
		stm32_usart_tx_interrupt_disable(port);
		return;
	}

	if (ofs->icr == UNDEF_REG)
		stm32_usart_clr_bits(port, ofs->isr, USART_SR_TC);
	else
		writel_relaxed(USART_ICR_TCCF, port->membase + ofs->icr);

	if (stm32_port->tx_ch)
		stm32_usart_transmit_chars_dma(port);
	else
		stm32_usart_transmit_chars_pio(port);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	if (uart_circ_empty(xmit))
		stm32_usart_tx_interrupt_disable(port);
}
static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct tty_port *tport = &port->state->port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	u32 sr;

	sr = readl_relaxed(port->membase + ofs->isr);

	if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
		writel_relaxed(USART_ICR_RTOCF,
			       port->membase + ofs->icr);

	if ((sr & USART_SR_WUF) && ofs->icr != UNDEF_REG) {
		/* Clear wake up flag and disable wake up interrupt */
		writel_relaxed(USART_ICR_WUCF,
			       port->membase + ofs->icr);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
		if (irqd_is_wakeup_set(irq_get_irq_data(port->irq)))
			pm_wakeup_event(tport->tty->dev, 0);
	}

	if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
		stm32_usart_receive_chars(port, false);

	if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
		spin_lock(&port->lock);
		stm32_usart_transmit_chars(port);
		spin_unlock(&port->lock);
	}

	if (stm32_port->rx_ch)
		return IRQ_WAKE_THREAD;
	else
		return IRQ_HANDLED;
}
static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
{
	struct uart_port *port = ptr;
	struct stm32_port *stm32_port = to_stm32_port(port);

	if (stm32_port->rx_ch)
		stm32_usart_receive_chars(port, true);

	return IRQ_HANDLED;
}
static unsigned int stm32_usart_tx_empty(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
		return TIOCSER_TEMT;

	return 0;
}
static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if ((mctrl & TIOCM_RTS) && (port->status & UPSTAT_AUTORTS))
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_RTSE);
	else
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_RTSE);

	mctrl_gpio_set(stm32_port->gpios, mctrl);
}

static unsigned int stm32_usart_get_mctrl(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	unsigned int ret;

	/* This routine is used to get signals of: DCD, DSR, RI and CTS */
	ret = TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;

	return mctrl_gpio_get(stm32_port->gpios, &ret);
}

static void stm32_usart_enable_ms(struct uart_port *port)
{
	mctrl_gpio_enable_ms(to_stm32_port(port)->gpios);
}

static void stm32_usart_disable_ms(struct uart_port *port)
{
	mctrl_gpio_disable_ms(to_stm32_port(port)->gpios);
}
/* Transmit stop */
static void stm32_usart_stop_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;

	stm32_usart_tx_interrupt_disable(port);

	if (rs485conf->flags & SER_RS485_ENABLED) {
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			mctrl_gpio_set(stm32_port->gpios,
				       stm32_port->port.mctrl & ~TIOCM_RTS);
		} else {
			mctrl_gpio_set(stm32_port->gpios,
				       stm32_port->port.mctrl | TIOCM_RTS);
		}
	}
}

/* There are probably characters waiting to be transmitted. */
static void stm32_usart_start_tx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	struct serial_rs485 *rs485conf = &port->rs485;
	struct circ_buf *xmit = &port->state->xmit;

	if (uart_circ_empty(xmit))
		return;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			mctrl_gpio_set(stm32_port->gpios,
				       stm32_port->port.mctrl | TIOCM_RTS);
		} else {
			mctrl_gpio_set(stm32_port->gpios,
				       stm32_port->port.mctrl & ~TIOCM_RTS);
		}
	}

	stm32_usart_transmit_chars(port);
}
/* Flush the transmit buffer. */
static void stm32_usart_flush_buffer(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (stm32_port->tx_ch) {
		dmaengine_terminate_async(stm32_port->tx_ch);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
		stm32_port->tx_dma_busy = false;
	}
}
/* Throttle the remote when input buffer is about to overflow. */
static void stm32_usart_throttle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);

	spin_unlock_irqrestore(&port->lock, flags);
}

/* Unthrottle the remote, the input buffer can now accept data. */
static void stm32_usart_unthrottle(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	unsigned long flags;

	spin_lock_irqsave(&port->lock, flags);
	stm32_usart_set_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_set_bits(port, ofs->cr3, stm32_port->cr3_irq);

	spin_unlock_irqrestore(&port->lock, flags);
}

/* Receive stop */
static void stm32_usart_stop_rx(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	stm32_usart_clr_bits(port, ofs->cr1, stm32_port->cr1_irq);
	if (stm32_port->cr3_irq)
		stm32_usart_clr_bits(port, ofs->cr3, stm32_port->cr3_irq);
}

/* Handle breaks - ignored by us */
static void stm32_usart_break_ctl(struct uart_port *port, int break_state)
{
}
static int stm32_usart_startup(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	const char *name = to_platform_device(port->dev)->name;
	u32 val;
	int ret;

	ret = request_threaded_irq(port->irq, stm32_usart_interrupt,
				   stm32_usart_threaded_interrupt,
				   IRQF_ONESHOT | IRQF_NO_SUSPEND,
				   name, port);
	if (ret)
		return ret;

	if (stm32_port->swap) {
		val = readl_relaxed(port->membase + ofs->cr2);
		val |= USART_CR2_SWAP;
		writel_relaxed(val, port->membase + ofs->cr2);
	}

	/* RX FIFO Flush */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_RXFRQ, port->membase + ofs->rqr);

	/* RX enabling */
	val = stm32_port->cr1_irq | USART_CR1_RE | BIT(cfg->uart_enable_bit);
	stm32_usart_set_bits(port, ofs->cr1, val);

	return 0;
}
static void stm32_usart_shutdown(struct uart_port *port)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	u32 val, isr;
	int ret;

	/* Disable modem control interrupts */
	stm32_usart_disable_ms(port);

	val = USART_CR1_TXEIE | USART_CR1_TE;
	val |= stm32_port->cr1_irq | USART_CR1_RE;
	val |= BIT(cfg->uart_enable_bit);
	if (stm32_port->fifoen)
		val |= USART_CR1_FIFOEN;

	ret = readl_relaxed_poll_timeout(port->membase + ofs->isr,
					 isr, (isr & USART_SR_TC),
					 10, 100000);

	/* Send the TC error message only when ISR_TC is not set */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	stm32_usart_clr_bits(port, ofs->cr1, val);

	free_irq(port->irq, port);
}
static void stm32_usart_set_termios(struct uart_port *port,
				    struct ktermios *termios,
				    struct ktermios *old)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	struct serial_rs485 *rs485conf = &port->rs485;
	unsigned int baud, bits;
	u32 usartdiv, mantissa, fraction, oversampling;
	tcflag_t cflag = termios->c_cflag;
	u32 cr1, cr2, cr3, isr;
	unsigned long flags;
	int ret;

	if (!stm32_port->hw_flow_control)
		cflag &= ~CRTSCTS;

	baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 8);

	spin_lock_irqsave(&port->lock, flags);

	ret = readl_relaxed_poll_timeout_atomic(port->membase + ofs->isr,
						isr,
						(isr & USART_SR_TC),
						10, 100000);

	/* Send the TC error message only when ISR_TC is not set. */
	if (ret)
		dev_err(port->dev, "Transmission is not complete\n");

	/* Stop serial port and reset value */
	writel_relaxed(0, port->membase + ofs->cr1);

	/* Flush RX & TX FIFO */
	if (ofs->rqr != UNDEF_REG)
		writel_relaxed(USART_RQR_TXFRQ | USART_RQR_RXFRQ,
			       port->membase + ofs->rqr);

	cr1 = USART_CR1_TE | USART_CR1_RE;
	if (stm32_port->fifoen)
		cr1 |= USART_CR1_FIFOEN;
	cr2 = stm32_port->swap ? USART_CR2_SWAP : 0;

	/* TX and RX FIFO configuration */
	cr3 = readl_relaxed(port->membase + ofs->cr3);
	cr3 &= USART_CR3_TXFTIE | USART_CR3_RXFTIE;
	if (stm32_port->fifoen) {
		if (stm32_port->txftcfg >= 0)
			cr3 |= stm32_port->txftcfg << USART_CR3_TXFTCFG_SHIFT;
		if (stm32_port->rxftcfg >= 0)
			cr3 |= stm32_port->rxftcfg << USART_CR3_RXFTCFG_SHIFT;
	}

	if (cflag & CSTOPB)
		cr2 |= USART_CR2_STOP_2B;

	bits = tty_get_char_size(cflag);
	stm32_port->rdr_mask = (BIT(bits) - 1);

	if (cflag & PARENB) {
		bits++;
		cr1 |= USART_CR1_PCE;
	}

	/*
	 * Word length configuration:
	 * CS8 + parity, 9 bits word aka [M1:M0] = 0b01
	 * CS7 or (CS6 + parity), 7 bits word aka [M1:M0] = 0b10
	 * CS8 or (CS7 + parity), 8 bits word aka [M1:M0] = 0b00
	 * M0 and M1 already cleared by cr1 initialization.
	 */
	if (bits == 9)
		cr1 |= USART_CR1_M0;
	else if ((bits == 7) && cfg->has_7bits_data)
		cr1 |= USART_CR1_M1;
	else if (bits != 8)
		dev_dbg(port->dev, "Unsupported data bits config: %u bits\n",
			bits);

	if (ofs->rtor != UNDEF_REG && (stm32_port->rx_ch ||
				       (stm32_port->fifoen &&
					stm32_port->rxftcfg >= 0))) {
		if (cflag & CSTOPB)
			bits = bits + 3; /* 1 start bit + 2 stop bits */
		else
			bits = bits + 2; /* 1 start bit + 1 stop bit */

		/* RX timeout irq to occur after last stop bit + bits */
		stm32_port->cr1_irq = USART_CR1_RTOIE;
		writel_relaxed(bits, port->membase + ofs->rtor);
		cr2 |= USART_CR2_RTOEN;
		/* Not using dma, enable fifo threshold irq */
		if (!stm32_port->rx_ch)
			stm32_port->cr3_irq = USART_CR3_RXFTIE;
	}

	cr1 |= stm32_port->cr1_irq;
	cr3 |= stm32_port->cr3_irq;

	if (cflag & PARODD)
		cr1 |= USART_CR1_PS;

	port->status &= ~(UPSTAT_AUTOCTS | UPSTAT_AUTORTS);
	if (cflag & CRTSCTS) {
		port->status |= UPSTAT_AUTOCTS | UPSTAT_AUTORTS;
		cr3 |= USART_CR3_CTSE | USART_CR3_RTSE;
	}

	usartdiv = DIV_ROUND_CLOSEST(port->uartclk, baud);

	/*
	 * The USART supports 16 or 8 times oversampling.
	 * By default we prefer 16 times oversampling, so that the receiver
	 * has a better tolerance to clock deviations.
	 * 8 times oversampling is only used to achieve higher speeds.
	 */
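	/*
	 * Illustrative example (assumed clock): with uartclk = 48 MHz and
	 * baud = 115200, usartdiv = DIV_ROUND_CLOSEST(48000000, 115200) = 417,
	 * so 16x oversampling is kept and BRR is programmed with
	 * mantissa = 26 << USART_BRR_DIV_M_SHIFT and fraction = 1.
	 */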
	if (usartdiv < 16) {
		oversampling = 8;
		cr1 |= USART_CR1_OVER8;
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_OVER8);
	} else {
		oversampling = 16;
		cr1 &= ~USART_CR1_OVER8;
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_OVER8);
	}

	mantissa = (usartdiv / oversampling) << USART_BRR_DIV_M_SHIFT;
	fraction = usartdiv % oversampling;
	writel_relaxed(mantissa | fraction, port->membase + ofs->brr);

	uart_update_timeout(port, cflag, baud);

	port->read_status_mask = USART_SR_ORE;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
		port->read_status_mask |= USART_SR_FE;

	/* Characters to ignore */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask = USART_SR_PE | USART_SR_FE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= USART_SR_FE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= USART_SR_ORE;
	}

	/* Ignore all characters if CREAD is not set */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= USART_SR_DUMMY_RX;

	if (stm32_port->rx_ch)
		cr3 |= USART_CR3_DMAR;

	if (rs485conf->flags & SER_RS485_ENABLED) {
		stm32_usart_config_reg_rs485(&cr1, &cr3,
					     rs485conf->delay_rts_before_send,
					     rs485conf->delay_rts_after_send,
					     baud);
		if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
			cr3 &= ~USART_CR3_DEP;
			rs485conf->flags &= ~SER_RS485_RTS_AFTER_SEND;
		} else {
			cr3 |= USART_CR3_DEP;
			rs485conf->flags |= SER_RS485_RTS_AFTER_SEND;
		}
	} else {
		cr3 &= ~(USART_CR3_DEM | USART_CR3_DEP);
		cr1 &= ~(USART_CR1_DEDT_MASK | USART_CR1_DEAT_MASK);
	}

	/* Configure wake up from low power on start bit detection */
	if (stm32_port->wakeup_src) {
		cr3 &= ~USART_CR3_WUS_MASK;
		cr3 |= USART_CR3_WUS_START_BIT;
	}

	writel_relaxed(cr3, port->membase + ofs->cr3);
	writel_relaxed(cr2, port->membase + ofs->cr2);
	writel_relaxed(cr1, port->membase + ofs->cr1);

	stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
	spin_unlock_irqrestore(&port->lock, flags);

	/* Handle modem control interrupts */
	if (UART_ENABLE_MS(port, termios->c_cflag))
		stm32_usart_enable_ms(port);
	else
		stm32_usart_disable_ms(port);
}
static const char *stm32_usart_type(struct uart_port *port)
{
	return (port->type == PORT_STM32) ? DRIVER_NAME : NULL;
}

static void stm32_usart_release_port(struct uart_port *port)
{
}

static int stm32_usart_request_port(struct uart_port *port)
{
	return 0;
}

static void stm32_usart_config_port(struct uart_port *port, int flags)
{
	if (flags & UART_CONFIG_TYPE)
		port->type = PORT_STM32;
}

static int
stm32_usart_verify_port(struct uart_port *port, struct serial_struct *ser)
{
	/* No user changeable parameters */
	return -EINVAL;
}
static void stm32_usart_pm(struct uart_port *port, unsigned int state,
			   unsigned int oldstate)
{
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32port->info->cfg;
	unsigned long flags;

	switch (state) {
	case UART_PM_STATE_ON:
		pm_runtime_get_sync(port->dev);
		break;
	case UART_PM_STATE_OFF:
		spin_lock_irqsave(&port->lock, flags);
		stm32_usart_clr_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
		spin_unlock_irqrestore(&port->lock, flags);
		pm_runtime_put_sync(port->dev);
		break;
	}
}

static const struct uart_ops stm32_uart_ops = {
	.tx_empty	= stm32_usart_tx_empty,
	.set_mctrl	= stm32_usart_set_mctrl,
	.get_mctrl	= stm32_usart_get_mctrl,
	.stop_tx	= stm32_usart_stop_tx,
	.start_tx	= stm32_usart_start_tx,
	.throttle	= stm32_usart_throttle,
	.unthrottle	= stm32_usart_unthrottle,
	.stop_rx	= stm32_usart_stop_rx,
	.enable_ms	= stm32_usart_enable_ms,
	.break_ctl	= stm32_usart_break_ctl,
	.startup	= stm32_usart_startup,
	.shutdown	= stm32_usart_shutdown,
	.flush_buffer	= stm32_usart_flush_buffer,
	.set_termios	= stm32_usart_set_termios,
	.pm		= stm32_usart_pm,
	.type		= stm32_usart_type,
	.release_port	= stm32_usart_release_port,
	.request_port	= stm32_usart_request_port,
	.config_port	= stm32_usart_config_port,
	.verify_port	= stm32_usart_verify_port,
};
/*
 * STM32H7 RX & TX FIFO threshold configuration (CR3 RXFTCFG / TXFTCFG)
 * Note: a 1-byte threshold isn't a valid RXFTCFG / TXFTCFG value. In that
 * case, RXNEIE / TXEIE can be used instead of the threshold irqs
 * RXFTIE / TXFTIE. So the array index is the RXFTCFG / TXFTCFG bitfield
 * value + 1.
 */
static const u32 stm32h7_usart_fifo_thresh_cfg[] = { 1, 2, 4, 8, 12, 14, 16 };

static void stm32_usart_get_ftcfg(struct platform_device *pdev, const char *p,
				  int *ftcfg)
{
	u32 bytes, i;

	/* DT option to get RX & TX FIFO threshold (default to 8 bytes) */
	if (of_property_read_u32(pdev->dev.of_node, p, &bytes))
		bytes = 8;

	for (i = 0; i < ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg); i++)
		if (stm32h7_usart_fifo_thresh_cfg[i] >= bytes)
			break;
	if (i >= ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg))
		i = ARRAY_SIZE(stm32h7_usart_fifo_thresh_cfg) - 1;

	dev_dbg(&pdev->dev, "%s set to %d bytes\n", p,
		stm32h7_usart_fifo_thresh_cfg[i]);

	/* Provide FIFO threshold ftcfg (1 is invalid: threshold irq unused) */
	if (i)
		*ftcfg = i - 1;
	else
		*ftcfg = -EINVAL;
}
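/*
 * Illustrative example (assumed DT values): a "rx-threshold" of 8 bytes
 * matches stm32h7_usart_fifo_thresh_cfg[3], so *ftcfg is set to 2; asking
 * for 1 byte matches index 0 and yields -EINVAL, i.e. the threshold irq is
 * left unused and RXNEIE is used instead.
 */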
static void stm32_usart_deinit_port(struct stm32_port *stm32port)
{
	clk_disable_unprepare(stm32port->clk);
}
static int stm32_usart_init_port(struct stm32_port *stm32port,
				 struct platform_device *pdev)
{
	struct uart_port *port = &stm32port->port;
	struct resource *res;
	int ret, irq;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq ? : -ENODEV;

	port->iotype	= UPIO_MEM;
	port->flags	= UPF_BOOT_AUTOCONF;
	port->ops	= &stm32_uart_ops;
	port->dev	= &pdev->dev;
	port->fifosize	= stm32port->info->cfg.fifosize;
	port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_STM32_CONSOLE);
	port->irq = irq;
	port->rs485_config = stm32_usart_config_rs485;

	ret = stm32_usart_init_rs485(port, pdev);
	if (ret)
		return ret;

	stm32port->wakeup_src = stm32port->info->cfg.has_wakeup &&
		of_property_read_bool(pdev->dev.of_node, "wakeup-source");

	stm32port->swap = stm32port->info->cfg.has_swap &&
		of_property_read_bool(pdev->dev.of_node, "rx-tx-swap");

	stm32port->fifoen = stm32port->info->cfg.has_fifo;
	if (stm32port->fifoen) {
		stm32_usart_get_ftcfg(pdev, "rx-threshold",
				      &stm32port->rxftcfg);
		stm32_usart_get_ftcfg(pdev, "tx-threshold",
				      &stm32port->txftcfg);
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	port->membase = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(port->membase))
		return PTR_ERR(port->membase);
	port->mapbase = res->start;

	spin_lock_init(&port->lock);

	stm32port->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(stm32port->clk))
		return PTR_ERR(stm32port->clk);

	/* Ensure that clk rate is correct by enabling the clk */
	ret = clk_prepare_enable(stm32port->clk);
	if (ret)
		return ret;

	stm32port->port.uartclk = clk_get_rate(stm32port->clk);
	if (!stm32port->port.uartclk) {
		ret = -EINVAL;
		goto err_clk;
	}

	stm32port->gpios = mctrl_gpio_init(&stm32port->port, 0);
	if (IS_ERR(stm32port->gpios)) {
		ret = PTR_ERR(stm32port->gpios);
		goto err_clk;
	}

	/*
	 * Both the CTS/RTS gpios and the "st,hw-flow-ctrl" (deprecated) or
	 * "uart-has-rtscts" properties must not be specified at the same time.
	 */
	if (stm32port->hw_flow_control) {
		if (mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_CTS) ||
		    mctrl_gpio_to_gpiod(stm32port->gpios, UART_GPIO_RTS)) {
			dev_err(&pdev->dev, "Conflicting RTS/CTS config\n");
			ret = -EINVAL;
			goto err_clk;
		}
	}

	return ret;

err_clk:
	clk_disable_unprepare(stm32port->clk);

	return ret;
}
static struct stm32_port *stm32_usart_of_get_port(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int id;

	if (!np)
		return NULL;

	id = of_alias_get_id(np, "serial");
	if (id < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", id);
		return NULL;
	}

	if (WARN_ON(id >= STM32_MAX_PORTS))
		return NULL;

	stm32_ports[id].hw_flow_control =
		of_property_read_bool(np, "st,hw-flow-ctrl") /*deprecated*/ ||
		of_property_read_bool(np, "uart-has-rtscts");
	stm32_ports[id].port.line = id;
	stm32_ports[id].cr1_irq = USART_CR1_RXNEIE;
	stm32_ports[id].cr3_irq = 0;
	stm32_ports[id].last_res = RX_BUF_L;

	return &stm32_ports[id];
}

#ifdef CONFIG_OF
static const struct of_device_id stm32_match[] = {
	{ .compatible = "st,stm32-uart", .data = &stm32f4_info},
	{ .compatible = "st,stm32f7-uart", .data = &stm32f7_info},
	{ .compatible = "st,stm32h7-uart", .data = &stm32h7_info},
	{},
};

MODULE_DEVICE_TABLE(of, stm32_match);
#endif
static void stm32_usart_of_dma_rx_remove(struct stm32_port *stm32port,
					 struct platform_device *pdev)
{
	if (stm32port->rx_buf)
		dma_free_coherent(&pdev->dev, RX_BUF_L, stm32port->rx_buf,
				  stm32port->rx_dma_buf);
}
static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	struct dma_async_tx_descriptor *desc = NULL;
	int ret;

	/*
	 * Using DMA and threaded handler for the console could lead to
	 * deadlocks.
	 */
	if (uart_console(port))
		return -ENODEV;

	stm32port->rx_buf = dma_alloc_coherent(&pdev->dev, RX_BUF_L,
					       &stm32port->rx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->rx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.src_addr = port->mapbase + ofs->rdr;
	config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->rx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "rx dma channel config failed\n");
		stm32_usart_of_dma_rx_remove(stm32port, pdev);
		return ret;
	}

	/* Prepare a DMA cyclic transaction */
	desc = dmaengine_prep_dma_cyclic(stm32port->rx_ch,
					 stm32port->rx_dma_buf,
					 RX_BUF_L, RX_BUF_P, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc) {
		dev_err(dev, "rx dma prep cyclic failed\n");
		stm32_usart_of_dma_rx_remove(stm32port, pdev);
		return -ENODEV;
	}

	/* No callback as dma buffer is drained on usart interrupt */
	desc->callback = NULL;
	desc->callback_param = NULL;

	/* Push current DMA transaction in the pending queue */
	ret = dma_submit_error(dmaengine_submit(desc));
	if (ret) {
		dmaengine_terminate_sync(stm32port->rx_ch);
		stm32_usart_of_dma_rx_remove(stm32port, pdev);
		return ret;
	}

	/* Issue pending DMA requests */
	dma_async_issue_pending(stm32port->rx_ch);

	return 0;
}

static void stm32_usart_of_dma_tx_remove(struct stm32_port *stm32port,
					 struct platform_device *pdev)
{
	if (stm32port->tx_buf)
		dma_free_coherent(&pdev->dev, TX_BUF_L, stm32port->tx_buf,
				  stm32port->tx_dma_buf);
}
static int stm32_usart_of_dma_tx_probe(struct stm32_port *stm32port,
				       struct platform_device *pdev)
{
	const struct stm32_usart_offsets *ofs = &stm32port->info->ofs;
	struct uart_port *port = &stm32port->port;
	struct device *dev = &pdev->dev;
	struct dma_slave_config config;
	int ret;

	stm32port->tx_dma_busy = false;

	stm32port->tx_buf = dma_alloc_coherent(&pdev->dev, TX_BUF_L,
					       &stm32port->tx_dma_buf,
					       GFP_KERNEL);
	if (!stm32port->tx_buf)
		return -ENOMEM;

	/* Configure DMA channel */
	memset(&config, 0, sizeof(config));
	config.dst_addr = port->mapbase + ofs->tdr;
	config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	ret = dmaengine_slave_config(stm32port->tx_ch, &config);
	if (ret < 0) {
		dev_err(dev, "tx dma channel config failed\n");
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
		return ret;
	}

	return 0;
}
static int stm32_usart_serial_probe(struct platform_device *pdev)
{
	struct stm32_port *stm32port;
	int ret;

	stm32port = stm32_usart_of_get_port(pdev);
	if (!stm32port)
		return -ENODEV;

	stm32port->info = of_device_get_match_data(&pdev->dev);
	if (!stm32port->info)
		return -EINVAL;

	ret = stm32_usart_init_port(stm32port, pdev);
	if (ret)
		return ret;

	if (stm32port->wakeup_src) {
		device_set_wakeup_capable(&pdev->dev, true);
		ret = dev_pm_set_wake_irq(&pdev->dev, stm32port->port.irq);
		if (ret)
			goto err_deinit_port;
	}

	stm32port->rx_ch = dma_request_chan(&pdev->dev, "rx");
	if (PTR_ERR(stm32port->rx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_wakeirq;
	}
	/* Fall back to interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->rx_ch))
		stm32port->rx_ch = NULL;

	stm32port->tx_ch = dma_request_chan(&pdev->dev, "tx");
	if (PTR_ERR(stm32port->tx_ch) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto err_dma_rx;
	}
	/* Fall back to interrupt mode for any non-deferral error */
	if (IS_ERR(stm32port->tx_ch))
		stm32port->tx_ch = NULL;

	if (stm32port->rx_ch && stm32_usart_of_dma_rx_probe(stm32port, pdev)) {
		/* Fall back to interrupt mode */
		dma_release_channel(stm32port->rx_ch);
		stm32port->rx_ch = NULL;
	}

	if (stm32port->tx_ch && stm32_usart_of_dma_tx_probe(stm32port, pdev)) {
		/* Fall back to interrupt mode */
		dma_release_channel(stm32port->tx_ch);
		stm32port->tx_ch = NULL;
	}

	if (!stm32port->rx_ch)
		dev_info(&pdev->dev, "interrupt mode for rx (no dma)\n");
	if (!stm32port->tx_ch)
		dev_info(&pdev->dev, "interrupt mode for tx (no dma)\n");

	platform_set_drvdata(pdev, &stm32port->port);

	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = uart_add_one_port(&stm32_usart_driver, &stm32port->port);
	if (ret)
		goto err_port;

	pm_runtime_put_sync(&pdev->dev);

	return 0;

err_port:
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	if (stm32port->tx_ch) {
		stm32_usart_of_dma_tx_remove(stm32port, pdev);
		dma_release_channel(stm32port->tx_ch);
	}

	if (stm32port->rx_ch)
		stm32_usart_of_dma_rx_remove(stm32port, pdev);

err_dma_rx:
	if (stm32port->rx_ch)
		dma_release_channel(stm32port->rx_ch);

err_wakeirq:
	if (stm32port->wakeup_src)
		dev_pm_clear_wake_irq(&pdev->dev);

err_deinit_port:
	if (stm32port->wakeup_src)
		device_set_wakeup_capable(&pdev->dev, false);

	stm32_usart_deinit_port(stm32port);

	return ret;
}
static int stm32_usart_serial_remove(struct platform_device *pdev)
{
	struct uart_port *port = platform_get_drvdata(pdev);
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	int err;

	pm_runtime_get_sync(&pdev->dev);
	err = uart_remove_one_port(&stm32_usart_driver, port);
	if (err)
		return err;

	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);

	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);

	if (stm32_port->tx_ch) {
		dmaengine_terminate_async(stm32_port->tx_ch);
		stm32_usart_of_dma_tx_remove(stm32_port, pdev);
		dma_release_channel(stm32_port->tx_ch);
	}

	if (stm32_port->rx_ch) {
		dmaengine_terminate_async(stm32_port->rx_ch);
		stm32_usart_of_dma_rx_remove(stm32_port, pdev);
		dma_release_channel(stm32_port->rx_ch);
	}

	stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);

	if (stm32_port->wakeup_src) {
		dev_pm_clear_wake_irq(&pdev->dev);
		device_init_wakeup(&pdev->dev, false);
	}

	stm32_usart_deinit_port(stm32_port);

	return 0;
}
# ifdef CONFIG_SERIAL_STM32_CONSOLE
static void stm32_usart_console_putchar(struct uart_port *port, int ch)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	while (!(readl_relaxed(port->membase + ofs->isr) & USART_SR_TXE))
		cpu_relax();

	writel_relaxed(ch, port->membase + ofs->tdr);
}
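
/*
 * Console write path: take the port lock (only try-lock when an oops is in
 * progress so we never deadlock), mask the USART interrupt enable bits in CR1
 * while forcing the transmitter and USART enable bits on, emit the buffer
 * character by character, then restore the original CR1 value.
 */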
static void stm32_usart_console_write(struct console *co, const char *s,
				      unsigned int cnt)
{
	struct uart_port *port = &stm32_ports[co->index].port;
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
	const struct stm32_usart_config *cfg = &stm32_port->info->cfg;
	unsigned long flags;
	u32 old_cr1, new_cr1;
	int locked = 1;

	if (oops_in_progress)
		locked = spin_trylock_irqsave(&port->lock, flags);
	else
		spin_lock_irqsave(&port->lock, flags);

	/* Save and disable interrupts, enable the transmitter */
	old_cr1 = readl_relaxed(port->membase + ofs->cr1);
	new_cr1 = old_cr1 & ~USART_CR1_IE_MASK;
	new_cr1 |= USART_CR1_TE | BIT(cfg->uart_enable_bit);
	writel_relaxed(new_cr1, port->membase + ofs->cr1);

	uart_console_write(port, s, cnt, stm32_usart_console_putchar);

	/* Restore interrupt state */
	writel_relaxed(old_cr1, port->membase + ofs->cr1);

	if (locked)
		spin_unlock_irqrestore(&port->lock, flags);
}
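
/*
 * Console setup: parse any "console=" options and apply them to the port.
 * Typical usage on the kernel command line (assuming STM32_SERIAL_NAME is
 * "ttySTM", as registered by this driver): console=ttySTM0,115200n8
 */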
static int stm32_usart_console_setup(struct console *co, char *options)
{
	struct stm32_port *stm32port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';

	if (co->index >= STM32_MAX_PORTS)
		return -ENODEV;

	stm32port = &stm32_ports[co->index];

	/*
	 * This driver does not support early console initialization
	 * (use ARM early printk support instead), so we only expect
	 * this to be called during the uart port registration when the
	 * driver gets probed and the port should be mapped at that point.
	 */
	if (stm32port->port.mapbase == 0 || !stm32port->port.membase)
		return -ENXIO;

	if (options)
		uart_parse_options(options, &baud, &parity, &bits, &flow);

	return uart_set_options(&stm32port->port, co, baud, parity, bits, flow);
}

static struct console stm32_console = {
	.name		= STM32_SERIAL_NAME,
	.device		= uart_console_device,
	.write		= stm32_usart_console_write,
	.setup		= stm32_usart_console_setup,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.data		= &stm32_usart_driver,
};

#define STM32_SERIAL_CONSOLE (&stm32_console)

#else
#define STM32_SERIAL_CONSOLE NULL
#endif /* CONFIG_SERIAL_STM32_CONSOLE */

static struct uart_driver stm32_usart_driver = {
	.driver_name	= DRIVER_NAME,
	.dev_name	= STM32_SERIAL_NAME,
	.major		= 0,
	.minor		= 0,
	.nr		= STM32_MAX_PORTS,
	.cons		= STM32_SERIAL_CONSOLE,
};
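
/*
 * Configure wake-up from low-power mode.  UESM keeps the USART able to wake
 * the system while the SoC is in Stop mode and WUFIE enables the wake-up flag
 * interrupt (bit names per the STM32 reference manuals); both are only
 * touched when the port has been declared as a wake-up source.
 */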
static void __maybe_unused stm32_usart_serial_en_wakeup(struct uart_port *port,
							bool enable)
{
	struct stm32_port *stm32_port = to_stm32_port(port);
	const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;

	if (!stm32_port->wakeup_src)
		return;

	/*
	 * Enable low-power wake-up and wake-up irq if argument is set to
	 * "enable", disable low-power wake-up and wake-up irq otherwise
	 */
	if (enable) {
		stm32_usart_set_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_set_bits(port, ofs->cr3, USART_CR3_WUFIE);
	} else {
		stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_UESM);
		stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_WUFIE);
	}
}
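
/*
 * System suspend: let the serial core quiesce the port, arm the USART as a
 * wake-up source when allowed, then pick the pin state.  With
 * "no_console_suspend" the console pins are left in their default state and
 * the bootloader is expected to restore them on resume; otherwise the idle
 * (wake-up capable) or sleep pinctrl state is applied.
 */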
static int __maybe_unused stm32_usart_serial_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);

	uart_suspend_port(&stm32_usart_driver, port);

	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		stm32_usart_serial_en_wakeup(port, true);

	/*
	 * When "no_console_suspend" is enabled, keep the pinctrl default state
	 * and rely on bootloader stage to restore this state upon resume.
	 * Otherwise, apply the idle or sleep states depending on wakeup
	 * capabilities.
	 */
	if (console_suspend_enabled || !uart_console(port)) {
		if (device_may_wakeup(dev) || device_wakeup_path(dev))
			pinctrl_pm_select_idle_state(dev);
		else
			pinctrl_pm_select_sleep_state(dev);
	}

	return 0;
}
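
/* Restore the default pin state, disarm wake-up and resume the port. */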
static int __maybe_unused stm32_usart_serial_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);

	pinctrl_pm_select_default_state(dev);

	if (device_may_wakeup(dev) || device_wakeup_path(dev))
		stm32_usart_serial_en_wakeup(port, false);

	return uart_resume_port(&stm32_usart_driver, port);
}
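
/*
 * Runtime PM simply gates the USART kernel clock while the port is unused and
 * re-enables it on demand.
 */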
static int __maybe_unused stm32_usart_runtime_suspend(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	clk_disable_unprepare(stm32port->clk);

	return 0;
}

static int __maybe_unused stm32_usart_runtime_resume(struct device *dev)
{
	struct uart_port *port = dev_get_drvdata(dev);
	struct stm32_port *stm32port = container_of(port,
			struct stm32_port, port);

	return clk_prepare_enable(stm32port->clk);
}

static const struct dev_pm_ops stm32_serial_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_usart_runtime_suspend,
			   stm32_usart_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_usart_serial_suspend,
				stm32_usart_serial_resume)
};

static struct platform_driver stm32_serial_driver = {
	.probe		= stm32_usart_serial_probe,
	.remove		= stm32_usart_serial_remove,
	.driver	= {
		.name	= DRIVER_NAME,
		.pm	= &stm32_serial_pm_ops,
		.of_match_table = of_match_ptr(stm32_match),
	},
};
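
/*
 * Module init: register the uart_driver with the serial core first, then the
 * platform driver; if platform driver registration fails, the uart_driver is
 * unregistered again so nothing is left behind.
 */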
static int __init stm32_usart_init(void)
{
	static char banner[] __initdata = "STM32 USART driver initialized";
	int ret;

	pr_info("%s\n", banner);

	ret = uart_register_driver(&stm32_usart_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&stm32_serial_driver);
	if (ret)
		uart_unregister_driver(&stm32_usart_driver);

	return ret;
}

static void __exit stm32_usart_exit(void)
{
	platform_driver_unregister(&stm32_serial_driver);
	uart_unregister_driver(&stm32_usart_driver);
}

module_init(stm32_usart_init);
module_exit(stm32_usart_exit);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_DESCRIPTION("STMicroelectronics STM32 serial port driver");
MODULE_LICENSE("GPL v2");