/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;

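/* Refresh the device TLB for the complete DMA address range of @zdev. */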
static int zpci_refresh_global(struct zpci_dev *zdev)
{
        return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
                                  zdev->iommu_pages * PAGE_SIZE);
}
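/*
 * Allocate a region/segment table (ZPCI_TABLE_ENTRIES entries) with all
 * entries marked invalid and protected.
 */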
static unsigned long *dma_alloc_cpu_table(void)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
                *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
        return table;
}

static void dma_free_cpu_table(void *table)
{
        kmem_cache_free(dma_region_table_cache, table);
}
static unsigned long *dma_alloc_page_table(void)
{
        unsigned long *table, *entry;

        table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
        if (!table)
                return NULL;

        for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
                *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
        return table;
}

static void dma_free_page_table(void *table)
{
        kmem_cache_free(dma_page_table_cache, table);
}
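
/*
 * The dma_get_*_table_origin() helpers return the next-level table referenced
 * by an entry, allocating and linking a new table if the entry is still
 * invalid.
 */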
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
        unsigned long *sto;

        if (reg_entry_isvalid(*entry))
                sto = get_rt_sto(*entry);
        else {
                sto = dma_alloc_cpu_table();
                if (!sto)
                        return NULL;

                set_rt_sto(entry, sto);
                validate_rt_entry(entry);
                entry_clr_protected(entry);
        }
        return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
        unsigned long *pto;

        if (reg_entry_isvalid(*entry))
                pto = get_st_pto(*entry);
        else {
                pto = dma_alloc_page_table();
                if (!pto)
                        return NULL;
                set_st_pto(entry, pto);
                validate_st_entry(entry);
                entry_clr_protected(entry);
        }
        return pto;
}
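
/*
 * Walk the CPU-side translation tables for @dma_addr: region-table index ->
 * segment-table index -> page-table index. Returns a pointer to the page
 * table entry, or NULL if a lower-level table could not be allocated.
 */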
static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
        unsigned long *sto, *pto;
        unsigned int rtx, sx, px;

        rtx = calc_rtx(dma_addr);
        sto = dma_get_seg_table_origin(&rto[rtx]);
        if (!sto)
                return NULL;

        sx = calc_sx(dma_addr);
        pto = dma_get_page_table_origin(&sto[sx]);
        if (!pto)
                return NULL;

        px = calc_px(dma_addr);
        return &pto[px];
}
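
/*
 * Update a single page table entry: either invalidate it, or point it at
 * @page_addr and set the protection bit according to @flags.
 */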
static void dma_update_cpu_trans(struct zpci_dev *zdev, void *page_addr,
                                 dma_addr_t dma_addr, int flags)
{
        unsigned long *entry;

        entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
        if (!entry) {
                WARN_ON_ONCE(1);
                return;
        }

        if (flags & ZPCI_PTE_INVALID) {
                invalidate_pt_entry(entry);
                return;
        } else {
                set_pt_pfaa(entry, page_addr);
                validate_pt_entry(entry);
        }

        if (flags & ZPCI_TABLE_PROTECTED)
                entry_set_protected(entry);
        else
                entry_clr_protected(entry);
}
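
/*
 * Update the translation for a range of @size bytes starting at @pa/@dma_addr
 * and, if required, refresh the device TLB for that range.
 */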
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
                            dma_addr_t dma_addr, size_t size, int flags)
{
        unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        u8 *page_addr = (u8 *) (pa & PAGE_MASK);
        dma_addr_t start_dma_addr = dma_addr;
        unsigned long irq_flags;
        int i, rc = 0;

        if (!nr_pages)
                return -EINVAL;

        spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
        if (!zdev->dma_table)
                goto no_refresh;

        for (i = 0; i < nr_pages; i++) {
                dma_update_cpu_trans(zdev, page_addr, dma_addr, flags);
                page_addr += PAGE_SIZE;
                dma_addr += PAGE_SIZE;
        }

        /*
         * With zdev->tlb_refresh == 0, rpcit is not required to establish new
         * translations when previously invalid translation-table entries are
         * validated. With lazy unmap, it also is skipped for previously valid
         * entries, but a global rpcit is then required before any address can
         * be re-used, i.e. after each iommu bitmap wrap-around.
         */
        if (!zdev->tlb_refresh &&
            (!s390_iommu_strict ||
             ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
                goto no_refresh;

        rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
                                nr_pages * PAGE_SIZE);

no_refresh:
        spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
        return rc;
}
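
/*
 * Free a segment table including all page tables it references, and tear
 * down the complete translation-table hierarchy of a device.
 */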
static void dma_free_seg_table(unsigned long entry)
{
        unsigned long *sto = get_rt_sto(entry);
        int sx;

        for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
                if (reg_entry_isvalid(sto[sx]))
                        dma_free_page_table(get_st_pto(sto[sx]));

        dma_free_cpu_table(sto);
}

static void dma_cleanup_tables(struct zpci_dev *zdev)
{
        unsigned long *table;
        int rtx;

        if (!zdev || !zdev->dma_table)
                return;

        table = zdev->dma_table;
        for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
                if (reg_entry_isvalid(table[rtx]))
                        dma_free_seg_table(table[rtx]);

        dma_free_cpu_table(table);
        zdev->dma_table = NULL;
}
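
/*
 * Allocate @size contiguous pages from the iommu bitmap, starting the search
 * at @start and honouring the device's DMA segment boundary.
 */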
static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
                                       unsigned long start, int size)
{
        unsigned long boundary_size;

        boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
                              PAGE_SIZE) >> PAGE_SHIFT;
        return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
                                start, size, 0, boundary_size, 0);
}
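
/*
 * Allocate an iommu address range, retrying from the start of the bitmap on
 * wrap-around. With lazy unmap a wrap-around triggers a global TLB refresh
 * so that previously unmapped addresses may be re-used.
 */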
static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
{
        unsigned long offset, flags;
        int wrap = 0;

        spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
        offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
        if (offset == -1) {
                /* wrap-around */
                offset = __dma_alloc_iommu(zdev, 0, size);
                wrap = 1;
        }

        if (offset != -1) {
                zdev->next_bit = offset + size;
                if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
                        /* global flush after wrap-around with lazy unmap */
                        zpci_refresh_global(zdev);
        }
        spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
        return offset;
}
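
/* Release an iommu address range back to the bitmap. */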
static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size)
{
        unsigned long flags;

        spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
        if (!zdev->iommu_bitmap)
                goto out;
        bitmap_clear(zdev->iommu_bitmap, offset, size);
        /*
         * Lazy flush for unmap: need to move next_bit to avoid address re-use
         * until wrap-around.
         */
        if (!s390_iommu_strict && offset >= zdev->next_bit)
                zdev->next_bit = offset + size;
out:
        spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL_GPL(dma_set_mask);
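
/*
 * Map @size bytes of @page (starting at @offset) for DMA: reserve an iommu
 * address range, set up the translation-table entries and return the
 * resulting bus address (or DMA_ERROR_CODE on failure).
 */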
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction direction,
                                     struct dma_attrs *attrs)
{
        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
        unsigned long nr_pages, iommu_page_index;
        unsigned long pa = page_to_phys(page) + offset;
        int flags = ZPCI_PTE_VALID;
        dma_addr_t dma_addr;

        /* This rounds up number of pages based on size and offset */
        nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
        iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
        if (iommu_page_index == -1)
                goto out_err;

        /* Use rounded up size */
        size = nr_pages * PAGE_SIZE;

        dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
        if (dma_addr + size > zdev->end_dma)
                goto out_free;

        if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
                flags |= ZPCI_TABLE_PROTECTED;

        if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
                atomic64_add(nr_pages, &zdev->fmb->mapped_pages);
                return dma_addr + (offset & ~PAGE_MASK);
        }

out_free:
        dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
        zpci_err("map error:\n");
        zpci_err_hex(&pa, sizeof(pa));
        return DMA_ERROR_CODE;
}
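
/*
 * Undo a previous s390_dma_map_pages(): invalidate the translation-table
 * entries and release the iommu address range.
 */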
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
                                 size_t size, enum dma_data_direction direction,
                                 struct dma_attrs *attrs)
{
        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
        unsigned long iommu_page_index;
        int npages;

        npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
        dma_addr = dma_addr & PAGE_MASK;
        if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
                             ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
                zpci_err("unmap error:\n");
                zpci_err_hex(&dma_addr, sizeof(dma_addr));
        }

        atomic64_add(npages, &zdev->fmb->unmapped_pages);
        iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
        dma_free_iommu(zdev, iommu_page_index, npages);
}
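
/*
 * Allocate coherent memory: get pages, zero them and map them
 * bidirectionally; the kernel virtual address is returned, the bus address
 * via @dma_handle.
 */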
static void *s390_dma_alloc(struct device *dev, size_t size,
                            dma_addr_t *dma_handle, gfp_t flag,
                            struct dma_attrs *attrs)
{
        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
        struct page *page;
        unsigned long pa;
        dma_addr_t map;

        size = PAGE_ALIGN(size);
        page = alloc_pages(flag, get_order(size));
        if (!page)
                return NULL;

        pa = page_to_phys(page);
        memset((void *) pa, 0, size);

        map = s390_dma_map_pages(dev, page, pa % PAGE_SIZE,
                                 size, DMA_BIDIRECTIONAL, NULL);
        if (dma_mapping_error(dev, map)) {
                free_pages(pa, get_order(size));
                return NULL;
        }

        atomic64_add(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
        if (dma_handle)
                *dma_handle = map;
        return (void *) pa;
}
static void s390_dma_free(struct device *dev, size_t size,
                          void *pa, dma_addr_t dma_handle,
                          struct dma_attrs *attrs)
{
        struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));

        size = PAGE_ALIGN(size);
        atomic64_sub(size / PAGE_SIZE, &zdev->fmb->allocated_pages);
        s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
        free_pages((unsigned long) pa, get_order(size));
}
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
                           int nr_elements, enum dma_data_direction dir,
                           struct dma_attrs *attrs)
{
        int mapped_elements = 0;
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nr_elements, i) {
                struct page *page = sg_page(s);
                s->dma_address = s390_dma_map_pages(dev, page, s->offset,
                                                    s->length, dir, NULL);
                if (!dma_mapping_error(dev, s->dma_address)) {
                        s->dma_length = s->length;
                        mapped_elements++;
                } else
                        goto unmap;
        }
out:
        return mapped_elements;

unmap:
        for_each_sg(sg, s, mapped_elements, i) {
                if (s->dma_address)
                        s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
                                             dir, NULL);
                s->dma_address = 0;
                s->dma_length = 0;
        }
        mapped_elements = 0;
        goto out;
}
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                              int nr_elements, enum dma_data_direction dir,
                              struct dma_attrs *attrs)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nr_elements, i) {
                s390_dma_unmap_pages(dev, s->dma_address, s->dma_length, dir, NULL);
                s->dma_address = 0;
                s->dma_length = 0;
        }
}
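
/*
 * Set up the DMA translation for a device: allocate the root translation
 * table and the iommu bitmap and register the translation table (IOAT) with
 * the device.
 */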
int zpci_dma_init_device(struct zpci_dev *zdev)
{
        int rc;

        spin_lock_init(&zdev->iommu_bitmap_lock);
        spin_lock_init(&zdev->dma_table_lock);

        zdev->dma_table = dma_alloc_cpu_table();
        if (!zdev->dma_table) {
                rc = -ENOMEM;
                goto out_clean;
        }

        zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET;
        zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
        zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
        if (!zdev->iommu_bitmap) {
                rc = -ENOMEM;
                goto out_reg;
        }

        rc = zpci_register_ioat(zdev,
                                0,
                                zdev->start_dma + PAGE_OFFSET,
                                zdev->start_dma + zdev->iommu_size - 1,
                                (u64) zdev->dma_table);
        if (rc)
                goto out_reg;
        return 0;

out_reg:
        dma_free_cpu_table(zdev->dma_table);
out_clean:
        return rc;
}
void zpci_dma_exit_device(struct zpci_dev *zdev)
{
        zpci_unregister_ioat(zdev, 0);
        dma_cleanup_tables(zdev);
        vfree(zdev->iommu_bitmap);
        zdev->iommu_bitmap = NULL;
        zdev->next_bit = 0;
}
static int __init dma_alloc_cpu_table_caches(void)
{
        dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
                                        ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
                                        0, NULL);
        if (!dma_region_table_cache)
                return -ENOMEM;

        dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
                                        ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
                                        0, NULL);
        if (!dma_page_table_cache) {
                kmem_cache_destroy(dma_region_table_cache);
                return -ENOMEM;
        }
        return 0;
}
int __init zpci_dma_init(void)
{
        return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
        kmem_cache_destroy(dma_page_table_cache);
        kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);
struct dma_map_ops s390_dma_ops = {
        .alloc          = s390_dma_alloc,
        .free           = s390_dma_free,
        .map_sg         = s390_dma_map_sg,
        .unmap_sg       = s390_dma_unmap_sg,
        .map_page       = s390_dma_map_pages,
        .unmap_page     = s390_dma_unmap_pages,
        /* if we support direct DMA this must be conditional */
        .is_phys        = 0,
        /* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_dma_ops);
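
/* "s390_iommu=strict" on the kernel command line disables lazy unmapping. */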
static int __init s390_iommu_setup(char *str)
{
        if (!strncmp(str, "strict", 6))
                s390_iommu_strict = 1;

        return 0;
}
__setup("s390_iommu=", s390_iommu_setup);