/*
 * DMA region bookkeeping routines
 *
 * Copyright (C) 2002 Maas Digital LLC
 *
 * This code is licensed under the GPL.  See the file COPYING in the root
 * directory of the kernel sources for details.
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/scatterlist.h>

#include "dma.h"

/* dma_prog_region */

void dma_prog_region_init(struct dma_prog_region *prog)
{
        prog->kvirt = NULL;
        prog->dev = NULL;
        prog->n_pages = 0;
        prog->bus_addr = 0;
}

int dma_prog_region_alloc(struct dma_prog_region *prog, unsigned long n_bytes,
                          struct pci_dev *dev)
{
        /* round up to page size */
        n_bytes = PAGE_ALIGN(n_bytes);

        prog->n_pages = n_bytes >> PAGE_SHIFT;

        prog->kvirt = pci_alloc_consistent(dev, n_bytes, &prog->bus_addr);
        if (!prog->kvirt) {
                printk(KERN_ERR
                       "dma_prog_region_alloc: pci_alloc_consistent() failed\n");
                dma_prog_region_free(prog);
                return -ENOMEM;
        }

        prog->dev = dev;

        return 0;
}

void dma_prog_region_free(struct dma_prog_region *prog)
{
        if (prog->kvirt) {
                pci_free_consistent(prog->dev, prog->n_pages << PAGE_SHIFT,
                                    prog->kvirt, prog->bus_addr);
        }

        prog->kvirt = NULL;
        prog->dev = NULL;
        prog->n_pages = 0;
        prog->bus_addr = 0;
}
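
/*
 * Illustrative usage (not part of the original file): a driver typically
 * allocates one program region, points the controller at prog.bus_addr, and
 * frees it on teardown.  "PROG_BYTES" and "pdev" are hypothetical names.
 *
 *      struct dma_prog_region prog;
 *
 *      dma_prog_region_init(&prog);
 *      if (dma_prog_region_alloc(&prog, PROG_BYTES, pdev))
 *              return -ENOMEM;
 *      ... build the DMA program at prog.kvirt, hand prog.bus_addr to hardware ...
 *      dma_prog_region_free(&prog);
 */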

/* dma_region */

/**
 * dma_region_init - clear out all fields but do not allocate anything
 */
void dma_region_init(struct dma_region *dma)
{
        dma->kvirt = NULL;
        dma->dev = NULL;
        dma->n_pages = 0;
        dma->n_dma_pages = 0;
        dma->sglist = NULL;
}

/**
 * dma_region_alloc - allocate the buffer and map it to the IOMMU
 */
int dma_region_alloc(struct dma_region *dma, unsigned long n_bytes,
                     struct pci_dev *dev, int direction)
{
        unsigned int i;

        /* round up to page size */
        n_bytes = PAGE_ALIGN(n_bytes);

        dma->n_pages = n_bytes >> PAGE_SHIFT;

        dma->kvirt = vmalloc_32(n_bytes);
        if (!dma->kvirt) {
                printk(KERN_ERR "dma_region_alloc: vmalloc_32() failed\n");
                goto err;
        }

        /* Clear the ram out, no junk to the user */
        memset(dma->kvirt, 0, n_bytes);

        /* allocate scatter/gather list */
        dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
        if (!dma->sglist) {
                printk(KERN_ERR "dma_region_alloc: vmalloc(sglist) failed\n");
                goto err;
        }

        sg_init_table(dma->sglist, dma->n_pages);

        /* fill scatter/gather list with pages */
        for (i = 0; i < dma->n_pages; i++) {
                unsigned long va =
                    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

                sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va),
                            PAGE_SIZE, 0);
        }

        /* map sglist to the IOMMU */
        dma->n_dma_pages =
            pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

        if (dma->n_dma_pages == 0) {
                printk(KERN_ERR "dma_region_alloc: pci_map_sg() failed\n");
                goto err;
        }

        dma->dev = dev;
        dma->direction = direction;

        return 0;

      err:
        dma_region_free(dma);
        return -ENOMEM;
}

/**
 * dma_region_free - unmap and free the buffer
 */
void dma_region_free(struct dma_region *dma)
{
        if (dma->n_dma_pages) {
                pci_unmap_sg(dma->dev, dma->sglist, dma->n_pages,
                             dma->direction);
                dma->n_dma_pages = 0;
                dma->dev = NULL;
        }

        vfree(dma->sglist);
        dma->sglist = NULL;

        vfree(dma->kvirt);
        dma->kvirt = NULL;

        dma->n_pages = 0;
}

/* find the scatterlist index and remaining offset corresponding to a
   given offset from the beginning of the buffer */
static inline int dma_region_find(struct dma_region *dma, unsigned long offset,
                                  unsigned int start, unsigned long *rem)
{
        int i;
        unsigned long off = offset;

        for (i = start; i < dma->n_dma_pages; i++) {
                if (off < sg_dma_len(&dma->sglist[i])) {
                        *rem = off;
                        break;
                }

                off -= sg_dma_len(&dma->sglist[i]);
        }

        BUG_ON(i >= dma->n_dma_pages);

        return i;
}
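
/*
 * Worked example (illustrative only): if the first two scatterlist entries
 * each map 4096 bytes, dma_region_find(dma, 5000, 0, &rem) skips entry 0
 * (5000 >= 4096), finds 5000 - 4096 = 904 < 4096 in entry 1, and returns 1
 * with rem = 904.
 */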

/**
 * dma_region_offset_to_bus - get bus address of an offset within a DMA region
 *
 * Returns the DMA bus address of the byte with the given @offset relative to
 * the beginning of the @dma.
 */
dma_addr_t dma_region_offset_to_bus(struct dma_region *dma,
                                    unsigned long offset)
{
        unsigned long rem = 0;

        struct scatterlist *sg =
            &dma->sglist[dma_region_find(dma, offset, 0, &rem)];
        return sg_dma_address(sg) + rem;
}
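
/*
 * Illustrative use (not part of the original file): a caller filling a DMA
 * descriptor that must point at byte "frame_off" of the buffer could do
 *
 *      dma_addr_t bus = dma_region_offset_to_bus(&ctx->buf, frame_off);
 *
 * where "ctx->buf" and "frame_off" are hypothetical names.
 */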

/**
 * dma_region_sync_for_cpu - sync the CPU's view of the buffer
 */
void dma_region_sync_for_cpu(struct dma_region *dma, unsigned long offset,
                             unsigned long len)
{
        int first, last;
        unsigned long rem = 0;

        if (!len)
                len = 1;

        first = dma_region_find(dma, offset, 0, &rem);
        last = dma_region_find(dma, rem + len - 1, first, &rem);

        pci_dma_sync_sg_for_cpu(dma->dev, &dma->sglist[first], last - first + 1,
                                dma->direction);
}

/**
 * dma_region_sync_for_device - sync the IO bus' view of the buffer
 */
void dma_region_sync_for_device(struct dma_region *dma, unsigned long offset,
                                unsigned long len)
{
        int first, last;
        unsigned long rem = 0;

        if (!len)
                len = 1;

        first = dma_region_find(dma, offset, 0, &rem);
        last = dma_region_find(dma, rem + len - 1, first, &rem);

        pci_dma_sync_sg_for_device(dma->dev, &dma->sglist[first],
                                   last - first + 1, dma->direction);
}
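
/*
 * Illustrative pattern (not part of the original file): a caller that reads
 * data the device just wrote, then hands the range back, would bracket the
 * access like
 *
 *      dma_region_sync_for_cpu(&ctx->buf, off, len);
 *      ... CPU touches ctx->buf.kvirt + off ...
 *      dma_region_sync_for_device(&ctx->buf, off, len);
 *
 * "ctx->buf", "off" and "len" are hypothetical names.
 */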

#ifdef CONFIG_MMU

static int dma_region_pagefault(struct vm_area_struct *vma,
                                struct vm_fault *vmf)
{
        struct dma_region *dma = (struct dma_region *)vma->vm_private_data;

        if (!dma->kvirt)
                return VM_FAULT_SIGBUS;

        if (vmf->pgoff >= dma->n_pages)
                return VM_FAULT_SIGBUS;

        vmf->page = vmalloc_to_page(dma->kvirt + (vmf->pgoff << PAGE_SHIFT));
        get_page(vmf->page);
        return 0;
}

static const struct vm_operations_struct dma_region_vm_ops = {
        .fault = dma_region_pagefault,
};

/**
 * dma_region_mmap - map the buffer into a user space process
 */
int dma_region_mmap(struct dma_region *dma, struct file *file,
                    struct vm_area_struct *vma)
{
        unsigned long size;

        if (!dma->kvirt)
                return -EINVAL;

        /* must be page-aligned (XXX: comment is wrong, we could allow pgoff) */
        if (vma->vm_pgoff != 0)
                return -EINVAL;

        /* check the length */
        size = vma->vm_end - vma->vm_start;
        if (size > (dma->n_pages << PAGE_SHIFT))
                return -EINVAL;

        vma->vm_ops = &dma_region_vm_ops;
        vma->vm_private_data = dma;
        vma->vm_file = file;
        vma->vm_flags |= VM_RESERVED | VM_ALWAYSDUMP;

        return 0;
}
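
/*
 * Illustrative wiring (not part of the original file): a character device
 * built on this helper could implement its mmap file operation as
 *
 *      static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct my_ctx *ctx = file->private_data;
 *
 *              return dma_region_mmap(&ctx->buf, file, vma);
 *      }
 *
 * "my_mmap", "my_ctx" and its "buf" member are made-up names.
 */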

#else                           /* CONFIG_MMU */

int dma_region_mmap(struct dma_region *dma, struct file *file,
                    struct vm_area_struct *vma)
{
        return -EINVAL;
}

#endif                          /* CONFIG_MMU */