// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

#define S390_MAPPING_ERROR		(~(dma_addr_t) 0x0)
static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;

static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}
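
/* Allocate a region/segment table and mark all of its entries invalid. */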
unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}
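
/* Allocate a page table and mark all of its entries invalid. */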
static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}
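
/*
 * Return the segment table referenced by a region table entry; allocate and
 * validate a new segment table if the entry is still invalid.
 */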
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}
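
/*
 * Return the page table referenced by a segment table entry; allocate and
 * validate a new page table if the entry is still invalid.
 */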
static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}
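
/*
 * Walk the translation tables for dma_addr, starting at the region table rto,
 * and return a pointer to the page table entry, creating missing intermediate
 * tables on the way. Returns NULL if a table allocation fails.
 */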
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}
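
/*
 * Set or invalidate a single page table entry and update its protection bit
 * according to flags.
 */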
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
{
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}
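
/*
 * Write the translation table entries for a mapping of nr_pages pages.
 * If establishing a new mapping fails part-way, the entries written so far
 * are invalidated again before the error is returned.
 */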
static int __dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			      dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	unsigned long irq_flags;
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		rc = -EINVAL;
		goto out_unlock;
	}

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}
static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
			   size_t size, int flags)
{
	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, rpcit is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
		if (!zdev->tlb_refresh)
			return 0;
	} else {
		if (!s390_iommu_strict)
			return 0;
	}

	return zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
				  PAGE_ALIGN(size));
}
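
/*
 * Update the translation tables for a mapping and purge the device TLB if
 * required. If the TLB flush for a new mapping fails, the table entries are
 * torn down again.
 */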
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	int rc;

	rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (rc)
		return rc;

	rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		__dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);

	return rc;
}
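
/*
 * Free all page tables referenced by the segment table behind a region table
 * entry, then free the segment table itself.
 */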
void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}
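
/* Free a complete translation table hierarchy, starting at the region table. */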
void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}
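
/*
 * Find a range of 'size' free pages in the iommu bitmap, starting the search
 * at bit 'start' and respecting the device's DMA segment boundary.
 */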
static unsigned long __dma_alloc_iommu(struct device *dev,
				       unsigned long start, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long boundary_size;

	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
			      PAGE_SIZE) >> PAGE_SHIFT;
	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, zdev->start_dma >> PAGE_SHIFT,
				boundary_size, 0);
}
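
/*
 * Allocate a DMA address range of 'size' pages. On a wrap-around of the
 * iommu bitmap in lazy mode, issue a global TLB flush and return the lazily
 * freed addresses to the allocator first.
 */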
static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
	if (offset == -1) {
		if (!s390_iommu_strict) {
			/* global flush before DMA addresses are reused */
			if (zpci_refresh_global(zdev))
				goto out_error;

			bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
				      zdev->lazy_bitmap, zdev->iommu_pages);
			bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		}
		/* wrap-around */
		offset = __dma_alloc_iommu(dev, 0, size);
		if (offset == -1)
			goto out_error;
	}
	zdev->next_bit = offset + size;
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);

	return zdev->start_dma + offset * PAGE_SIZE;

out_error:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return S390_MAPPING_ERROR;
}
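
/*
 * Release a DMA address range: clear it in the iommu bitmap in strict mode,
 * or just record it in the lazy bitmap until the next wrap-around flush.
 */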
static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long flags, offset;

	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;

	if (s390_iommu_strict)
		bitmap_clear(zdev->iommu_bitmap, offset, size);
	else
		bitmap_set(zdev->lazy_bitmap, offset, size);

out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}
static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}
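
/*
 * Map a single page range: allocate a DMA address and install the
 * corresponding translation table entries.
 */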
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	unsigned long nr_pages;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	dma_addr = dma_alloc_address(dev, nr_pages);
	if (dma_addr == S390_MAPPING_ERROR) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_address(dev, dma_addr, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return S390_MAPPING_ERROR;
}
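
/*
 * Unmap a page range: invalidate the translation table entries and release
 * the DMA address range.
 */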
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	dma_free_address(dev, dma_addr, npages);
}
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}
static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long) pa, get_order(size));
}
/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     size_t size, dma_addr_t *handle,
			     enum dma_data_direction dir)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	dma_addr_t dma_addr_base, dma_addr;
	int flags = ZPCI_PTE_VALID;
	struct scatterlist *s;
	unsigned long pa = 0;
	int ret;

	dma_addr_base = dma_alloc_address(dev, nr_pages);
	if (dma_addr_base == S390_MAPPING_ERROR)
		return -ENOMEM;

	dma_addr = dma_addr_base;
	if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
		pa = page_to_phys(sg_page(s));
		ret = __dma_update_trans(zdev, pa, dma_addr,
					 s->offset + s->length, flags);
		if (ret)
			goto unmap;

		dma_addr += s->offset + s->length;
	}
	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
	if (ret)
		goto unmap;

	*handle = dma_addr_base;
	atomic64_add(nr_pages, &zdev->mapped_pages);

	return ret;

unmap:
	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
			 ZPCI_PTE_INVALID);
	dma_free_address(dev, dma_addr_base, nr_pages);
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return ret;
}
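
/*
 * Map a scatterlist, merging consecutive elements into contiguous DMA ranges
 * as long as page alignment and the device's maximum segment size allow it.
 */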
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *s = sg, *start = sg, *dma = sg;
	unsigned int max = dma_get_max_seg_size(dev);
	unsigned int size = s->offset + s->length;
	unsigned int offset = s->offset;
	int count = 0, i;

	for (i = 1; i < nr_elements; i++) {
		s = sg_next(s);

		s->dma_address = S390_MAPPING_ERROR;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) ||
		    size + s->length > max) {
			if (__s390_dma_map_sg(dev, start, size,
					      &dma->dma_address, dir))
				goto unmap;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count++;
		}
		size += s->length;
	}
	if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
		goto unmap;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;
unmap:
	for_each_sg(sg, s, count, i)
		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
				     dir, attrs);

	return 0;
}
static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		if (s->dma_length)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, attrs);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}
static int s390_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == S390_MAPPING_ERROR;
}
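
/*
 * Set up DMA translation for a PCI function: allocate the root translation
 * table and the iommu bitmap(s) and register the table with the device.
 */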
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - main memory size
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3((u64) high_memory,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}
	if (!s390_iommu_strict) {
		zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
		if (!zdev->lazy_bitmap) {
			rc = -ENOMEM;
			goto free_bitmap;
		}
	}

	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				(u64) zdev->dma_table);
	if (rc)
		goto free_bitmap;

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}
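
/* Tear down DMA translation for a PCI function and free all related resources. */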
void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	if (zpci_unregister_ioat(zdev, 0))
		return;

	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
	zdev->next_bit = 0;
}
static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

#define PREALLOC_DMA_DEBUG_ENTRIES	(1 << 16)

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);
const struct dma_map_ops s390_pci_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	.mapping_error	= s390_mapping_error,
	/* if we support direct DMA this must be conditional */
	.is_phys	= 0,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);

static int __init s390_iommu_setup(char *str)
{
	if (!strncmp(str, "strict", 6))
		s390_iommu_strict = 1;
	return 0;
}

__setup("s390_iommu=", s390_iommu_setup);