// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005-2008, PA Semi, Inc
 *
 * Maintained by: Olof Johansson <olof@lixom.net>
 */

#undef DEBUG

#include <linux/memblock.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/firmware.h>

#include "pasemi.h"
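
/*
 * The IOB translates DMA addresses in 4 KB pages; IOB_BASE/IOB_SIZE
 * below describe the memory-mapped register window used to program it.
 */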
#define IOBMAP_PAGE_SHIFT	12
#define IOBMAP_PAGE_SIZE	(1 << IOBMAP_PAGE_SHIFT)
#define IOBMAP_PAGE_MASK	(IOBMAP_PAGE_SIZE - 1)

#define IOB_BASE		0xe0000000
#define IOB_SIZE		0x3000

/* Configuration registers */
#define IOBCAP_REG		0x40
#define IOBCOM_REG		0x100
/* Enable IOB address translation */
#define IOBCOM_ATEN		0x00000100

/* Address decode configuration register */
#define IOB_AD_REG		0x14c
/* IOB_AD_REG fields */
#define IOB_AD_VGPRT		0x00000e00
#define IOB_AD_VGAEN		0x00000100
/* Direct mapping settings */
#define IOB_AD_MPSEL_MASK	0x00000030
#define IOB_AD_MPSEL_B38	0x00000000
#define IOB_AD_MPSEL_B40	0x00000010
#define IOB_AD_MPSEL_B42	0x00000020
/* Translation window size / enable */
#define IOB_AD_TRNG_MASK	0x00000003
#define IOB_AD_TRNG_256M	0x00000000
#define IOB_AD_TRNG_2G		0x00000001
#define IOB_AD_TRNG_128G	0x00000003

#define IOB_TABLEBASE_REG	0x154

/* Base of the 64 4-byte L1 registers */
#define IOB_XLT_L1_REGBASE	0x2b00

/* Register to invalidate TLB entries */
#define IOB_AT_INVAL_TLB_REG	0x2d00

/* The top two bits of a level 1 entry contain the valid and type flags */
#define IOBMAP_L1E_V		0x40000000
#define IOBMAP_L1E_V_B		0x80000000

/* For big page entries, the bottom two bits contain flags */
#define IOBMAP_L1E_BIG_CACHED	0x00000002
#define IOBMAP_L1E_BIG_PRIORITY	0x00000001

/* For regular level 2 entries, the top two bits contain valid and cache flags */
#define IOBMAP_L2E_V		0x80000000
#define IOBMAP_L2E_V_CACHED	0xc0000000
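
/*
 * Translation is a two-level walk: 64 L1 registers each point at an L2
 * table of 8K 4-byte entries, and each L2 entry maps one 4 KB page, so
 * one L1 entry covers 32 MB and the full window covers 2 GB.  A sketch
 * of the hardware's view of a bus address B (Linux itself just treats
 * the contiguous L2 tables as one flat array indexed by B >> 12):
 *
 *   l1_idx = B >> 25;               64 entries of 32 MB each
 *   l2_idx = (B >> 12) & 0x1fff;    8K entries of 4 KB each
 *   phys   = ((l2[l2_idx] & ~0xc0000000UL) << 12) | (B & 0xfff);
 */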
static void __iomem *iob;
static u32 iob_l1_emptyval;
static u32 iob_l2_emptyval;
static u32 *iob_l2_base;

static struct iommu_table iommu_table_iobmap;
static int iommu_table_iobmap_inited;
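
/*
 * iommu_table_ops "set" hook: write one L2 entry per IOMMU page and
 * invalidate the IOB TLB for each bus address that was remapped.
 */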
static int iobmap_build(struct iommu_table *tbl, long index,
			long npages, unsigned long uaddr,
			enum dma_data_direction direction,
			unsigned long attrs)
{
	u32 *ip;
	u32 rpn;
	unsigned long bus_addr;

	pr_debug("iobmap: build at: %lx, %lx, addr: %lx\n", index, npages, uaddr);

	bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;

	ip = ((u32 *)tbl->it_base) + index;

	while (npages--) {
		rpn = __pa(uaddr) >> IOBMAP_PAGE_SHIFT;

		*(ip++) = IOBMAP_L2E_V | rpn;

		/* invalidate tlb, can be optimized more */
		out_le32(iob + IOB_AT_INVAL_TLB_REG, bus_addr >> 14);

		uaddr += IOBMAP_PAGE_SIZE;
		bus_addr += IOBMAP_PAGE_SIZE;
	}
	return 0;
}
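
/*
 * iommu_table_ops "clear" hook: point the freed entries back at the
 * dummy page and invalidate the matching IOB TLB entries.
 */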
static void iobmap_free(struct iommu_table *tbl, long index,
			long npages)
{
	u32 *ip;
	unsigned long bus_addr;

	pr_debug("iobmap: free at: %lx, %lx\n", index, npages);

	bus_addr = (tbl->it_offset + index) << IOBMAP_PAGE_SHIFT;

	ip = ((u32 *)tbl->it_base) + index;

	while (npages--) {
		*(ip++) = iob_l2_emptyval;
		/* invalidate tlb, can be optimized more */
		out_le32(iob + IOB_AT_INVAL_TLB_REG, bus_addr >> 14);
		bus_addr += IOBMAP_PAGE_SIZE;
	}
}
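
/* Hooks used by the generic powerpc IOMMU code to map and unmap pages. */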
static struct iommu_table_ops iommu_table_iobmap_ops = {
	.set = iobmap_build,
	.clear = iobmap_free
};
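
/*
 * Describe the 2 GB DMA window (bus address 0, 4 KB pages) backed by the
 * flat L2 table at iob_l2_base, and register it with the common IOMMU code.
 */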
static void iommu_table_iobmap_setup(void)
{
	pr_debug(" -> %s\n", __func__);
	iommu_table_iobmap.it_busno = 0;
	iommu_table_iobmap.it_offset = 0;
	iommu_table_iobmap.it_page_shift = IOBMAP_PAGE_SHIFT;

	/* it_size is in number of entries */
	iommu_table_iobmap.it_size =
		0x80000000 >> iommu_table_iobmap.it_page_shift;

	/* Initialize the common IOMMU code */
	iommu_table_iobmap.it_base = (unsigned long)iob_l2_base;
	iommu_table_iobmap.it_index = 0;
	/* XXXOJN tune this to avoid IOB cache invals.
	 * Should probably be 8 (64 bytes)
	 */
	iommu_table_iobmap.it_blocksize = 4;
	iommu_table_iobmap.it_ops = &iommu_table_iobmap_ops;
	iommu_init_table(&iommu_table_iobmap, 0, 0, 0);
	pr_debug(" <- %s\n", __func__);
}

static void pci_dma_bus_setup_pasemi(struct pci_bus *bus)
{
	pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self);

	if (!iommu_table_iobmap_inited) {
		iommu_table_iobmap_inited = 1;
		iommu_table_iobmap_setup();
	}
}
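
/*
 * Per-device setup: route everything through the shared IOBMAP table,
 * except (optionally) the on-chip DMA engine matched below by vendor
 * 0x1959 (PA Semi) and device 0xa007.
 */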
static void pci_dma_dev_setup_pasemi(struct pci_dev *dev)
{
	pr_debug("pci_dma_dev_setup, dev %p (%s)\n", dev, pci_name(dev));

#if !defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE)
	/* In a non-LPAR environment, don't translate anything for the DMA
	 * engine. The exception to this is if the user has enabled
	 * CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE at build time.
	 */
	if (dev->vendor == 0x1959 && dev->device == 0xa007 &&
	    !firmware_has_feature(FW_FEATURE_LPAR)) {
		dev->dev.dma_ops = NULL;
		/*
		 * Set the coherent DMA mask to prevent the iommu
		 * being used unnecessarily
		 */
		dev->dev.coherent_dma_mask = DMA_BIT_MASK(44);
		return;
	}
#endif

	set_iommu_table_base(&dev->dev, &iommu_table_iobmap);
}
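
/*
 * One-time hardware init: allocate the L2 tables and a dummy page for
 * unmapped entries, point the 64 L1 registers at the L2 tables, then
 * enable a 2 GB translation window based at bus address 0.
 */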
static int __init iob_init(struct device_node *dn)
{
	unsigned long tmp;
	u32 regword;
	int i;

	pr_debug(" -> %s\n", __func__);

	/* For 2G space, 8x64 pages (2^21 bytes) is max total l2 size */
	iob_l2_base = memblock_alloc_try_nid_raw(1UL << 21, 1UL << 21,
					MEMBLOCK_LOW_LIMIT, 0x80000000,
					NUMA_NO_NODE);
	if (!iob_l2_base)
		panic("%s: Failed to allocate %lu bytes align=0x%lx max_addr=%x\n",
		      __func__, 1UL << 21, 1UL << 21, 0x80000000);

	pr_info("IOBMAP L2 allocated at: %p\n", iob_l2_base);

/* Allocate a spare page to map all invalid IOTLB pages. */
	tmp = memblock_phys_alloc(IOBMAP_PAGE_SIZE, IOBMAP_PAGE_SIZE);
	if (!tmp)
		panic("IOBMAP: Cannot allocate spare page!");

	/* Empty l1 is marked invalid */
	iob_l1_emptyval = 0;
	/* Empty l2 is mapped to dummy page */
	iob_l2_emptyval = IOBMAP_L2E_V | (tmp >> IOBMAP_PAGE_SHIFT);

	iob = ioremap(IOB_BASE, IOB_SIZE);
	if (!iob)
		panic("IOBMAP: Cannot map registers!");

	/* setup direct mapping of the L1 entries */
	for (i = 0; i < 64; i++) {
		/* Each L1 covers 32MB, i.e. 8K entries = 32K of ram */
		regword = IOBMAP_L1E_V | (__pa(iob_l2_base + i * 0x2000) >> 12);
		out_le32(iob + IOB_XLT_L1_REGBASE + i * 4, regword);
	}

	/* set 2GB translation window, based at 0 */
	regword = in_le32(iob + IOB_AD_REG);
	regword &= ~IOB_AD_TRNG_MASK;
	regword |= IOB_AD_TRNG_2G;
	out_le32(iob + IOB_AD_REG, regword);

	/* Enable translation */
	regword = in_le32(iob + IOBCOM_REG);
	regword |= IOBCOM_ATEN;
	out_le32(iob + IOBCOM_REG, regword);

	pr_debug(" <- %s\n", __func__);

	return 0;
}

/* These are called very early. */
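/*
 * iommu_init_early_pasemi() honours CONFIG_PPC_PASEMI_IOMMU and the
 * "linux,iommu-off" chosen property; otherwise it programs the IOB and
 * routes PCI DMA through dma_iommu_ops.
 */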
void __init iommu_init_early_pasemi(void)
{
	int iommu_off;

#ifndef CONFIG_PPC_PASEMI_IOMMU
	iommu_off = 1;
#else
	iommu_off = of_chosen &&
			of_get_property(of_chosen, "linux,iommu-off", NULL);
#endif
	if (iommu_off)
		return;

	iob_init(NULL);

	pasemi_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pasemi;
	pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi;
	set_pci_dma_ops(&dma_iommu_ops);
}