/* blame: 2019-06-04 11:11:37 +03:00 */
// SPDX-License-Identifier: GPL-2.0-only
/* blame: 2006-02-01 14:05:16 +03:00 */
/*
* Copyright 2006 PathScale , Inc . All Rights Reserved .
*/
/* blame: 2011-11-17 06:29:17 +04:00 */
# include <linux/export.h>
/* blame: 2006-02-03 10:06:42 +03:00 */
# include <linux/io.h>
/* blame: 2006-02-01 14:05:16 +03:00 */
/**
 * __iowrite32_copy - copy data to MMIO space, in 32-bit units
 * @to: destination, in MMIO space (must be 32-bit aligned)
 * @from: source (must be 32-bit aligned)
 * @count: number of 32-bit quantities to copy
 *
 * Copy data from kernel space to MMIO space, in units of 32 bits at a
 * time.  Order of access is not guaranteed, nor is a memory barrier
 * performed afterwards.
 */
void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
					    const void *from,
					    size_t count)
{
	u32 __iomem *dst = to;
	const u32 *src = from;
	size_t i;

	/* One raw 32-bit MMIO write per element; no barriers implied. */
	for (i = 0; i < count; i++)
		__raw_writel(src[i], dst + i);
}
EXPORT_SYMBOL_GPL(__iowrite32_copy);
/* blame: 2006-06-21 07:03:02 +04:00 */
/* blame: 2016-01-21 01:58:35 +03:00 */
/**
 * __ioread32_copy - copy data from MMIO space, in 32-bit units
 * @to: destination (must be 32-bit aligned)
 * @from: source, in MMIO space (must be 32-bit aligned)
 * @count: number of 32-bit quantities to copy
 *
 * Copy data from MMIO space to kernel space, in units of 32 bits at a
 * time.  Order of access is not guaranteed, nor is a memory barrier
 * performed afterwards.
 */
void __ioread32_copy(void *to, const void __iomem *from, size_t count)
{
	u32 *dst = to;
	const u32 __iomem *src = from;
	size_t i;

	/* One raw 32-bit MMIO read per element; no barriers implied. */
	for (i = 0; i < count; i++)
		dst[i] = __raw_readl(src + i);
}
EXPORT_SYMBOL_GPL(__ioread32_copy);
/* blame: 2006-06-21 07:03:02 +04:00 */
/**
 * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units
 * @to: destination, in MMIO space (must be 64-bit aligned)
 * @from: source (must be 64-bit aligned)
 * @count: number of 64-bit quantities to copy
 *
 * Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a
 * time.  Order of access is not guaranteed, nor is a memory barrier
 * performed afterwards.
 */
void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
					    const void *from,
					    size_t count)
{
#ifdef CONFIG_64BIT
	u64 __iomem *dst = to;
	const u64 *src = from;
	size_t i;

	/* One raw 64-bit MMIO write per element; no barriers implied. */
	for (i = 0; i < count; i++)
		__raw_writeq(src[i], dst + i);
#else
	/*
	 * No 64-bit MMIO accessor on 32-bit kernels: emit twice as many
	 * 32-bit writes instead.
	 */
	__iowrite32_copy(to, from, count * 2);
#endif
}
EXPORT_SYMBOL_GPL(__iowrite64_copy);