// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2016, NVIDIA CORPORATION. All rights reserved.
 */

#include <soc/tegra/ivc.h>

#define TEGRA_IVC_ALIGN 64
/*
 * IVC channel reset protocol.
 *
 * Each end uses its tx_channel.state to indicate its synchronization state.
 */
enum tegra_ivc_state {
	/*
	 * This value is zero for backwards compatibility with services that
	 * assume channels to be initially zeroed. Such channels are in an
	 * initially valid state, but cannot be asynchronously reset, and must
	 * maintain a valid state at all times.
	 *
	 * The transmitting end can enter the established state from the sync
	 * or ack state when it observes the receiving endpoint in the ack or
	 * established state, indicating that it has cleared the counters in
	 * our rx_channel.
	 */
	TEGRA_IVC_STATE_ESTABLISHED = 0,

	/*
	 * If an endpoint is observed in the sync state, the remote endpoint
	 * is allowed to clear the counters it owns asynchronously with
	 * respect to the current endpoint. Therefore, the current endpoint
	 * is no longer allowed to communicate.
	 */
	TEGRA_IVC_STATE_SYNC,

	/*
	 * When the transmitting end observes the receiving end in the sync
	 * state, it can clear the w_count and r_count and transition to the
	 * ack state. If the remote endpoint observes us in the ack state,
	 * it can return to the established state once it has cleared its
	 * counters.
	 */
	TEGRA_IVC_STATE_ACK
};
/*
 * This structure is divided into two cache-aligned parts: the first is only
 * written through the tx.channel pointer, while the second is only written
 * through the rx.channel pointer. This delineates ownership of the cache
 * lines, which is critical to performance and necessary in non-cache-coherent
 * implementations.
 */
struct tegra_ivc_header {
	union {
		struct {
			/* fields owned by the transmitting end */
			u32 count;
			u32 state;
		};

		u8 pad[TEGRA_IVC_ALIGN];
	} tx;

	union {
		/* fields owned by the receiving end */
		u32 count;

		u8 pad[TEGRA_IVC_ALIGN];
	} rx;
};
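
/*
 * Illustrative layout (a sketch derived from the definitions above, with
 * TEGRA_IVC_ALIGN == 64; the authoritative checks are the BUILD_BUG_ON()s
 * in tegra_ivc_check_params()):
 *
 *	offsetof(struct tegra_ivc_header, tx.count) ==   0
 *	offsetof(struct tegra_ivc_header, tx.state) ==   4
 *	offsetof(struct tegra_ivc_header, rx.count) ==  64
 *	sizeof(struct tegra_ivc_header)             == 128
 *
 * Each endpoint therefore only ever writes within its own 64-byte cache
 * line.
 */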
#define tegra_ivc_header_read_field(hdr, field) \
	iosys_map_rd_field(hdr, 0, struct tegra_ivc_header, field)

#define tegra_ivc_header_write_field(hdr, field, value) \
	iosys_map_wr_field(hdr, 0, struct tegra_ivc_header, field, value)
static inline void tegra_ivc_invalidate(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
				DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush(struct tegra_ivc *ivc, dma_addr_t phys)
{
	if (!ivc->peer)
		return;

	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
				   DMA_TO_DEVICE);
}
static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, struct iosys_map *map)
{
	/*
	 * This function performs multiple checks on the same values with
	 * security implications, so create snapshots with READ_ONCE() to
	 * ensure that these checks use the same values.
	 */
	u32 tx = tegra_ivc_header_read_field(map, tx.count);
	u32 rx = tegra_ivc_header_read_field(map, rx.count);

	/*
	 * Perform an over-full check to prevent denial of service attacks
	 * where a server could be easily fooled into believing that there's
	 * an extremely large number of frames ready, since receivers are not
	 * expected to check for full or over-full conditions.
	 *
	 * Although the channel isn't empty, this is an invalid case caused by
	 * a potentially malicious peer, so returning empty is safer, because
	 * it gives the impression that the channel has gone silent.
	 */
	if (tx - rx > ivc->num_frames)
		return true;

	return tx == rx;
}
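
/*
 * Worked example of the over-full check (illustrative values): the counters
 * are free-running u32 values, so tx - rx is computed modulo 2^32. With
 * num_frames == 4:
 *
 *	tx == 2, rx == 0xfffffffe  =>  tx - rx == 4 (full, but not empty)
 *	tx == 2, rx == 2           =>  tx - rx == 0 (empty)
 *	tx == 7, rx == 2           =>  tx - rx == 5 > 4 (invalid: report empty)
 *
 * The last case can only be produced by a misbehaving peer; reporting it as
 * empty silences the channel instead of "receiving" bogus frames.
 */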
static inline bool tegra_ivc_full(struct tegra_ivc *ivc, struct iosys_map *map)
{
	u32 tx = tegra_ivc_header_read_field(map, tx.count);
	u32 rx = tegra_ivc_header_read_field(map, rx.count);

	/*
	 * Invalid cases where the counters indicate that the queue is over
	 * capacity also appear full.
	 */
	return tx - rx >= ivc->num_frames;
}
static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, struct iosys_map *map)
{
	u32 tx = tegra_ivc_header_read_field(map, tx.count);
	u32 rx = tegra_ivc_header_read_field(map, rx.count);

	/*
	 * This function isn't expected to be used in scenarios where an
	 * over-full situation can lead to denial of service attacks. See the
	 * comment in tegra_ivc_empty() for an explanation about special
	 * over-full considerations.
	 */
	return tx - rx;
}
static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc)
{
	unsigned int count = tegra_ivc_header_read_field(&ivc->tx.map, tx.count);

	tegra_ivc_header_write_field(&ivc->tx.map, tx.count, count + 1);

	if (ivc->tx.position == ivc->num_frames - 1)
		ivc->tx.position = 0;
	else
		ivc->tx.position++;
}
static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc)
{
	unsigned int count = tegra_ivc_header_read_field(&ivc->rx.map, rx.count);

	tegra_ivc_header_write_field(&ivc->rx.map, rx.count, count + 1);

	if (ivc->rx.position == ivc->num_frames - 1)
		ivc->rx.position = 0;
	else
		ivc->rx.position++;
}
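
/*
 * Note the relationship between the free-running counter and the wrapped
 * position (illustrative): with num_frames == 4, after five advances
 * tx.count == 5 while ivc->tx.position has wrapped around to 1, i.e.
 * position == count % num_frames as long as both are advanced together.
 */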
static inline int tegra_ivc_check_read(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int state;

	/*
	 * tx.channel->state is set locally, so it is not synchronized with
	 * state from the remote peer. The remote peer cannot reset its
	 * transmit counters until we've acknowledged its synchronization
	 * request, so no additional synchronization is required because an
	 * asynchronous transition of rx.channel->state to
	 * TEGRA_IVC_STATE_ACK is not allowed.
	 */
	state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);

	if (state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	/*
	 * Avoid unnecessary invalidations when performing repeated accesses
	 * to an IVC channel by checking the old queue pointers first.
	 *
	 * Synchronization is only necessary when these pointers indicate
	 * empty or full.
	 */
	if (!tegra_ivc_empty(ivc, &ivc->rx.map))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);

	if (tegra_ivc_empty(ivc, &ivc->rx.map))
		return -ENOSPC;

	return 0;
}
static inline int tegra_ivc_check_write(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int state;

	state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);

	if (state != TEGRA_IVC_STATE_ESTABLISHED)
		return -ECONNRESET;

	if (!tegra_ivc_full(ivc, &ivc->tx.map))
		return 0;

	tegra_ivc_invalidate(ivc, ivc->tx.phys + offset);

	if (tegra_ivc_full(ivc, &ivc->tx.map))
		return -ENOSPC;

	return 0;
}
static int tegra_ivc_frame_virt(struct tegra_ivc *ivc, const struct iosys_map *header,
				unsigned int frame, struct iosys_map *map)
{
	size_t offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	if (WARN_ON(frame >= ivc->num_frames))
		return -EINVAL;

	*map = IOSYS_MAP_INIT_OFFSET(header, offset);

	return 0;
}
static inline dma_addr_t tegra_ivc_frame_phys(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame)
{
	unsigned long offset;

	offset = sizeof(struct tegra_ivc_header) + ivc->frame_size * frame;

	return phys + offset;
}
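
/*
 * Example offset computation (assuming frame_size == 128 for illustration):
 * frame 2 starts at
 *
 *	phys + sizeof(struct tegra_ivc_header) + 128 * 2
 *	     == phys + 128 + 256 == phys + 384
 *
 * i.e. frames are packed immediately after the two-cache-line header.
 */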
static inline void tegra_ivc_invalidate_frame(struct tegra_ivc *ivc,
					      dma_addr_t phys,
					      unsigned int frame,
					      unsigned int offset,
					      size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
}

static inline void tegra_ivc_flush_frame(struct tegra_ivc *ivc,
					 dma_addr_t phys,
					 unsigned int frame,
					 unsigned int offset,
					 size_t size)
{
	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
		return;

	phys = tegra_ivc_frame_phys(ivc, phys, frame) + offset;

	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
}
/* directly peek at the next frame rx'ed */
int tegra_ivc_read_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map)
{
	int err;

	if (WARN_ON(ivc == NULL))
		return -EINVAL;

	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	/*
	 * Order observation of ivc->rx.position potentially indicating new
	 * data before data read.
	 */
	smp_rmb();

	tegra_ivc_invalidate_frame(ivc, ivc->rx.phys, ivc->rx.position, 0,
				   ivc->frame_size);

	return tegra_ivc_frame_virt(ivc, &ivc->rx.map, ivc->rx.position, map);
}
EXPORT_SYMBOL(tegra_ivc_read_get_next_frame);
int tegra_ivc_read_advance(struct tegra_ivc *ivc)
{
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	int err;

	/*
	 * No read barriers or synchronization here: the caller is expected to
	 * have already observed the channel non-empty. This check is just to
	 * catch programming errors.
	 */
	err = tegra_ivc_check_read(ivc);
	if (err < 0)
		return err;

	tegra_ivc_advance_rx(ivc);

	tegra_ivc_flush(ivc, ivc->rx.phys + rx);

	/*
	 * Ensure our write to ivc->rx.position occurs before our read from
	 * ivc->tx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from full to non-full. The available
	 * count can only asynchronously increase, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + tx);

	if (tegra_ivc_available(ivc, &ivc->rx.map) == ivc->num_frames - 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_read_advance);
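
/*
 * Typical receive sequence (an illustrative sketch; buf is a caller-provided
 * buffer and error handling is elided):
 *
 *	struct iosys_map frame;
 *
 *	if (tegra_ivc_read_get_next_frame(ivc, &frame) == 0) {
 *		iosys_map_memcpy_from(buf, &frame, 0, ivc->frame_size);
 *		tegra_ivc_read_advance(ivc);
 *	}
 */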
/* directly poke at the next frame to be tx'ed */
int tegra_ivc_write_get_next_frame(struct tegra_ivc *ivc, struct iosys_map *map)
{
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	return tegra_ivc_frame_virt(ivc, &ivc->tx.map, ivc->tx.position, map);
}
EXPORT_SYMBOL(tegra_ivc_write_get_next_frame);
/* advance the tx buffer */
int tegra_ivc_write_advance(struct tegra_ivc *ivc)
{
	unsigned int tx = offsetof(struct tegra_ivc_header, tx.count);
	unsigned int rx = offsetof(struct tegra_ivc_header, rx.count);
	int err;

	err = tegra_ivc_check_write(ivc);
	if (err < 0)
		return err;

	tegra_ivc_flush_frame(ivc, ivc->tx.phys, ivc->tx.position, 0,
			      ivc->frame_size);

	/*
	 * Order any possible stores to the frame before update of
	 * ivc->tx.position.
	 */
	smp_wmb();

	tegra_ivc_advance_tx(ivc);

	tegra_ivc_flush(ivc, ivc->tx.phys + tx);

	/*
	 * Ensure our write to ivc->tx.position occurs before our read from
	 * ivc->rx.position.
	 */
	smp_mb();

	/*
	 * Notify only upon transition from empty to non-empty. The available
	 * count can only asynchronously decrease, so the worst possible
	 * side-effect will be a spurious notification.
	 */
	tegra_ivc_invalidate(ivc, ivc->tx.phys + rx);

	if (tegra_ivc_available(ivc, &ivc->tx.map) == 1)
		ivc->notify(ivc, ivc->notify_data);

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_write_advance);
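
/*
 * Typical transmit sequence, mirroring the read path (an illustrative
 * sketch; buf is a caller-provided buffer):
 *
 *	struct iosys_map frame;
 *
 *	if (tegra_ivc_write_get_next_frame(ivc, &frame) == 0) {
 *		iosys_map_memcpy_to(&frame, 0, buf, ivc->frame_size);
 *		tegra_ivc_write_advance(ivc);
 *	}
 */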
void tegra_ivc_reset(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);

	tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_SYNC);
	tegra_ivc_flush(ivc, ivc->tx.phys + offset);
	ivc->notify(ivc, ivc->notify_data);
}
EXPORT_SYMBOL(tegra_ivc_reset);
/*
 * =======================================================
 *            IVC State Transition Table - see tegra_ivc_notified()
 * =======================================================
 *
 *	local	remote		action
 *	-----	------		-----------------------------------
 *	SYNC	EST		<none>
 *	SYNC	ACK		reset counters; move to EST; notify
 *	SYNC	SYNC		reset counters; move to ACK; notify
 *	ACK	EST		move to EST; notify
 *	ACK	ACK		move to EST; notify
 *	ACK	SYNC		reset counters; move to ACK; notify
 *	EST	EST		<none>
 *	EST	ACK		<none>
 *	EST	SYNC		reset counters; move to ACK; notify
 *
 * ===============================================================
 */
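
/*
 * Example handshake after endpoint A calls tegra_ivc_reset(), as a worked
 * trace of the table above (local/remote as seen by each endpoint):
 *
 *	A: tx.state = SYNC; notify B
 *	B: remote SYNC seen        -> reset counters; tx.state = ACK; notify A
 *	A: local SYNC, remote ACK  -> reset counters; tx.state = EST; notify B
 *	B: local ACK, remote EST   -> tx.state = EST; notify A
 *	A: EST/EST                 -> <none>; channel established on both ends
 */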
int tegra_ivc_notified(struct tegra_ivc *ivc)
{
	unsigned int offset = offsetof(struct tegra_ivc_header, tx.count);
	enum tegra_ivc_state rx_state, tx_state;

	/* Copy the receiver's state out of shared memory. */
	tegra_ivc_invalidate(ivc, ivc->rx.phys + offset);
	rx_state = tegra_ivc_header_read_field(&ivc->rx.map, tx.state);
	tx_state = tegra_ivc_header_read_field(&ivc->tx.map, tx.state);

	if (rx_state == TEGRA_IVC_STATE_SYNC) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of TEGRA_IVC_STATE_SYNC before stores
		 * clearing tx.channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the SYNC
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0);
		tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0);

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ACK state. We have just cleared our counters, so it
		 * is now safe for the remote end to start using these values.
		 */
		tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ACK);
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (tx_state == TEGRA_IVC_STATE_SYNC &&
		   rx_state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * Order observation of ivc_state_sync before stores clearing
		 * tx_channel.
		 */
		smp_rmb();

		/*
		 * Reset tx.channel counters. The remote end is in the ACK
		 * state and won't make progress until we change our state,
		 * so the counters are not in use at this time.
		 */
		tegra_ivc_header_write_field(&ivc->tx.map, tx.count, 0);
		tegra_ivc_header_write_field(&ivc->rx.map, rx.count, 0);

		ivc->tx.position = 0;
		ivc->rx.position = 0;

		/*
		 * Ensure that counters appear cleared before new state can be
		 * observed.
		 */
		smp_wmb();

		/*
		 * Move to ESTABLISHED state. We know that the remote end has
		 * already cleared its counters, so it is safe to start
		 * writing/reading on this channel.
		 */
		tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED);
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);

	} else if (tx_state == TEGRA_IVC_STATE_ACK) {
		offset = offsetof(struct tegra_ivc_header, tx.count);

		/*
		 * At this point, we have observed the peer to be in either
		 * the ACK or ESTABLISHED state. Next, order observation of
		 * peer state before storing to tx.channel.
		 */
		smp_rmb();

		/*
		 * Move to ESTABLISHED state. We know that we have previously
		 * cleared our counters, and we know that the remote end has
		 * cleared its counters, so it is safe to start writing/reading
		 * on this channel.
		 */
		tegra_ivc_header_write_field(&ivc->tx.map, tx.state, TEGRA_IVC_STATE_ESTABLISHED);
		tegra_ivc_flush(ivc, ivc->tx.phys + offset);

		/*
		 * Notify remote end to observe state transition.
		 */
		ivc->notify(ivc, ivc->notify_data);
	} else {
		/*
		 * There is no need to handle any further action. Either the
		 * channel is already fully established, or we are waiting for
		 * the remote end to catch up with our current state. Refer
		 * to the diagram in "IVC State Transition Table" above.
		 */
	}

	if (tx_state != TEGRA_IVC_STATE_ESTABLISHED)
		return -EAGAIN;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_notified);
size_t tegra_ivc_align(size_t size)
{
	return ALIGN(size, TEGRA_IVC_ALIGN);
}
EXPORT_SYMBOL(tegra_ivc_align);
unsigned tegra_ivc_total_queue_size(unsigned queue_size)
{
	if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {
		pr_err("%s: queue_size (%u) must be %u-byte aligned\n",
		       __func__, queue_size, TEGRA_IVC_ALIGN);
		return 0;
	}

	return queue_size + sizeof(struct tegra_ivc_header);
}
EXPORT_SYMBOL(tegra_ivc_total_queue_size);
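
/*
 * Example (illustrative values): for num_frames == 16 and frame_size == 128,
 * the queue is 16 * 128 == 2048 bytes, so each of the two shared-memory
 * regions must be tegra_ivc_total_queue_size(2048) == 2048 + 128 == 2176
 * bytes.
 */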
static int tegra_ivc_check_params(unsigned long rx, unsigned long tx,
				  unsigned int num_frames, size_t frame_size)
{
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, tx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct tegra_ivc_header, rx.count),
				 TEGRA_IVC_ALIGN));
	BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct tegra_ivc_header),
				 TEGRA_IVC_ALIGN));

	if ((uint64_t)num_frames * (uint64_t)frame_size >= 0x100000000UL) {
		pr_err("num_frames * frame_size overflows\n");
		return -EINVAL;
	}

	if (!IS_ALIGNED(frame_size, TEGRA_IVC_ALIGN)) {
		pr_err("frame size not adequately aligned: %zu\n", frame_size);
		return -EINVAL;
	}

	/*
	 * The headers must at least be aligned enough for counters
	 * to be accessed atomically.
	 */
	if (!IS_ALIGNED(rx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", rx);
		return -EINVAL;
	}

	if (!IS_ALIGNED(tx, TEGRA_IVC_ALIGN)) {
		pr_err("IVC channel start not aligned: %#lx\n", tx);
		return -EINVAL;
	}

	if (rx < tx) {
		if (rx + frame_size * num_frames > tx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       rx, frame_size * num_frames, tx);
			return -EINVAL;
		}
	} else {
		if (tx + frame_size * num_frames > rx) {
			pr_err("queue regions overlap: %#lx + %zx > %#lx\n",
			       tx, frame_size * num_frames, rx);
			return -EINVAL;
		}
	}

	return 0;
}
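
/*
 * Example of the overlap check above (illustrative values): with
 * rx == 0x1000, tx == 0x1800 and frame_size * num_frames == 0x1000,
 * 0x1000 + 0x1000 > 0x1800 holds, so the queues would overlap and -EINVAL
 * is returned.
 */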
static inline void iosys_map_copy(struct iosys_map *dst, const struct iosys_map *src)
{
	*dst = *src;
}

static inline unsigned long iosys_map_get_address(const struct iosys_map *map)
{
	if (map->is_iomem)
		return (unsigned long)map->vaddr_iomem;

	return (unsigned long)map->vaddr;
}

static inline void *iosys_map_get_vaddr(const struct iosys_map *map)
{
	if (WARN_ON(map->is_iomem))
		return NULL;

	return map->vaddr;
}
int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, const struct iosys_map *rx,
		   dma_addr_t rx_phys, const struct iosys_map *tx, dma_addr_t tx_phys,
		   unsigned int num_frames, size_t frame_size,
		   void (*notify)(struct tegra_ivc *ivc, void *data),
		   void *data)
{
	size_t queue_size;
	int err;

	if (WARN_ON(!ivc || !notify))
		return -EINVAL;

	/*
	 * All sizes that can be returned by communication functions should
	 * fit in an int.
	 */
	if (frame_size > INT_MAX)
		return -E2BIG;

	err = tegra_ivc_check_params(iosys_map_get_address(rx), iosys_map_get_address(tx),
				     num_frames, frame_size);
	if (err < 0)
		return err;

	queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);

	if (peer) {
		ivc->rx.phys = dma_map_single(peer, iosys_map_get_vaddr(rx), queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->rx.phys))
			return -ENOMEM;

		ivc->tx.phys = dma_map_single(peer, iosys_map_get_vaddr(tx), queue_size,
					      DMA_BIDIRECTIONAL);
		if (dma_mapping_error(peer, ivc->tx.phys)) {
			dma_unmap_single(peer, ivc->rx.phys, queue_size,
					 DMA_BIDIRECTIONAL);
			return -ENOMEM;
		}
	} else {
		ivc->rx.phys = rx_phys;
		ivc->tx.phys = tx_phys;
	}

	iosys_map_copy(&ivc->rx.map, rx);
	iosys_map_copy(&ivc->tx.map, tx);
	ivc->peer = peer;
	ivc->notify = notify;
	ivc->notify_data = data;
	ivc->frame_size = frame_size;
	ivc->num_frames = num_frames;

	/*
	 * These values aren't necessarily correct until the channel has been
	 * reset.
	 */
	ivc->tx.position = 0;
	ivc->rx.position = 0;

	return 0;
}
EXPORT_SYMBOL(tegra_ivc_init);
void tegra_ivc_cleanup(struct tegra_ivc *ivc)
{
	if (ivc->peer) {
		size_t size = tegra_ivc_total_queue_size(ivc->num_frames *
							 ivc->frame_size);

		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
				 DMA_BIDIRECTIONAL);
		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
				 DMA_BIDIRECTIONAL);
	}
}
EXPORT_SYMBOL(tegra_ivc_cleanup);
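
/*
 * Putting it together (an illustrative sketch only; my_notify, the
 * ring_doorbell() helper, the buffer pointers and the physical addresses are
 * assumptions of the example, not part of this driver):
 *
 *	static void my_notify(struct tegra_ivc *ivc, void *data)
 *	{
 *		ring_doorbell(data);	(hypothetical: kick the remote CPU)
 *	}
 *
 *	struct iosys_map rx, tx;
 *	int err;
 *
 *	iosys_map_set_vaddr(&rx, rx_base);
 *	iosys_map_set_vaddr(&tx, tx_base);
 *
 *	err = tegra_ivc_init(ivc, NULL, &rx, rx_phys, &tx, tx_phys,
 *			     16, 128, my_notify, NULL);
 *	if (!err)
 *		tegra_ivc_reset(ivc);
 *
 * After the reset, each incoming notification should be answered with a call
 * to tegra_ivc_notified() until it returns 0, at which point the channel is
 * established and frames can be exchanged.
 */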