/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
# include "drmP.h"
# if defined(__ia64__)
# include <linux/efi.h>
# endif
2005-07-07 15:03:38 +04:00
static void drm_vm_open ( struct vm_area_struct * vma ) ;
static void drm_vm_close ( struct vm_area_struct * vma ) ;
2005-04-17 02:20:36 +04:00

/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	struct list_head *list;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_nopage_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_nopage_error;

	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		map = r_list->map;
		if (!map)
			continue;
		if (r_list->user_token == (vma->vm_pgoff << PAGE_SHIFT))
			break;
	}

	if (map && map->type == _DRM_AGP) {
		unsigned long offset = address - vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (!agpmem)
			goto vm_nopage_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);

		DRM_DEBUG
		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
		     baddr, __va(agpmem->memory->memory[offset]), offset,
		     page_count(page));

		return page;
	}
      vm_nopage_error:
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	return NOPAGE_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
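
/*
 * Worked example for the AGP path above (hypothetical numbers, purely for
 * illustration): suppose the matching map has map->offset = 0xf0000000 and
 * the faulting address is vma->vm_start + 0x3000.  Then baddr = 0xf0003000;
 * the agpmem entry whose [bound, bound + pages * PAGE_SIZE) range contains
 * baddr is selected, and with agpmem->bound = 0xf0000000 the page index is
 * (baddr - agpmem->bound) >> PAGE_SHIFT = 3, so the fourth page of that AGP
 * allocation is what gets returned to the fault handler.
 */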

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!map)
		return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = (map->type == _DRM_CONSISTENT) ?
	    virt_to_page((void *)i) : vmalloc_to_page((void *)i);
	if (!page)
		return NOPAGE_OOM;
	get_page(page);

	DRM_DEBUG("shm_nopage 0x%lx\n", address);
	return page;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev, *next;
	drm_map_t *map;
	drm_map_list_t *r_list;
	struct list_head *list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
		next = pt->next;
		if (pt->vma->vm_private_data == map)
			found_maps++;

		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		} else {
			prev = pt;
		}
	}
	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist, if we are not, then
		 * we delete this mapping's information.
		 */
		found_maps = 0;
		list = &dev->maplist->head;
		list_for_each(list, &dev->maplist->head) {
			r_list = list_entry(list, drm_map_list_t, head);
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				drm_ioremapfree(map->handle, map->size, dev);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!dma->pagelist)
		return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);

	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
	return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
						   unsigned long address)
{
	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_sg_mem_t *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!entry->pagelist)
		return NOPAGE_OOM;	/* Nothing allocated */

	offset = address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);

	return page;
}

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
				     unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_sg_nopage(vma, address);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.nopage = drm_vm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.nopage = drm_vm_shm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.nopage = drm_vm_dma_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.nopage = drm_vm_sg_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/**
 * \c open method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		mutex_lock(&dev->struct_mutex);
		vma_entry->vma = vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid = current->pid;
		dev->vmalist = vma_entry;
		mutex_unlock(&dev->struct_mutex);
	}
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	mutex_lock(&dev->struct_mutex);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_device_dma_t *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	lock_kernel();
	dev = priv->head->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}
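
/*
 * Illustrative note: the DMA path is taken by drm_mmap() below when the mmap
 * offset is zero, so from user space the whole DMA buffer set is typically
 * mapped in a single call along the lines of
 *
 *	mmap(NULL, dma_size, PROT_READ | PROT_WRITE, MAP_SHARED, drm_fd, 0);
 *
 * where dma_size must equal dma->page_count << PAGE_SHIFT, or the length
 * check above rejects the mapping with -EINVAL.
 */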

unsigned long drm_core_get_map_ofs(drm_map_t *map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);
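
/*
 * These two exported helpers are the stock offset callbacks: a driver with
 * no bus-specific translation typically points drm_driver::get_map_ofs and
 * drm_driver::get_reg_ofs at them, and drm_mmap() below consults
 * dev->driver->get_reg_ofs() when remapping register and frame buffer
 * ranges (only Alpha returns a non-zero offset here).
 */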

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	unsigned long offset = 0;
	struct list_head *list;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!(vma->vm_pgoff << PAGE_SHIFT)
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	/* A sequential search of a linked list is
	   fine here because: 1) there will only be
	   about 5-10 entries in the list and, 2) a
	   DRI client only has to do this mapping
	   once, so it doesn't have to be optimized
	   for performance, even if the list was a
	   bit longer. */
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		map = r_list->map;
		if (!map)
			continue;
		if (r_list->user_token == vma->vm_pgoff << PAGE_SHIFT)
			break;
	}

	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in nopage()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
#if defined(__i386__) || defined(__x86_64__)
		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
		}
#elif defined(__powerpc__)
		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
		if (map->type == _DRM_REGISTERS)
			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
#endif
		vma->vm_flags |= VM_IO;	/* not in core dump */
#if defined(__ia64__)
		if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
			vma->vm_page_prot =
			    pgprot_writecombine(vma->vm_page_prot);
		else
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		offset = dev->driver->get_reg_ofs(dev);
#ifdef __sparc__
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#else
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#endif
			return -EAGAIN;
		DRM_DEBUG("Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_SHM:
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. It's only
		 * allocated in a different way. */
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}

EXPORT_SYMBOL(drm_mmap);
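
/*
 * Rough sketch of how a driver of this era wires the entry point above into
 * its drm_driver structure (names such as "example_driver" are placeholders,
 * not part of this file; the feature flags and other callbacks are omitted):
 *
 *	static struct drm_driver example_driver = {
 *		...
 *		.get_map_ofs = drm_core_get_map_ofs,
 *		.get_reg_ofs = drm_core_get_reg_ofs,
 *		.fops = {
 *			.owner = THIS_MODULE,
 *			.open = drm_open,
 *			.release = drm_release,
 *			.ioctl = drm_ioctl,
 *			.mmap = drm_mmap,
 *			.poll = drm_poll,
 *			.fasync = drm_fasync,
 *		},
 *		...
 *	};
 *
 * With .mmap pointing at drm_mmap(), every mapping a DRI client creates on
 * the device node is routed through the type dispatch above.
 */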