/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
# ifndef __NOUVEAU_VM_H__
# define __NOUVEAU_VM_H__
# include "drmP.h"
# include "nouveau_drv.h"
# include "nouveau_mm.h"
struct nouveau_vm_pgt {
2010-12-15 04:04:39 +03:00
struct nouveau_gpuobj * obj [ 2 ] ;
u32 refcount [ 2 ] ;
2010-08-27 04:00:25 +04:00
} ;
/*
 * Association between an address space and one page-directory object.
 * Instances are presumably linked into nouveau_vm.pgd_list (a VM can be
 * referenced with multiple PDs via nouveau_vm_ref()).
 */
struct nouveau_vm_pgd {
	struct list_head head;		/* entry in nouveau_vm.pgd_list */
	struct nouveau_gpuobj *obj;	/* the page-directory gpuobj itself */
};
struct nouveau_vma {
2011-06-06 08:07:04 +04:00
struct list_head head ;
2011-06-07 09:25:12 +04:00
int refcount ;
2010-08-27 04:00:25 +04:00
struct nouveau_vm * vm ;
struct nouveau_mm_node * node ;
u64 offset ;
u32 access ;
} ;
struct nouveau_vm {
struct drm_device * dev ;
2011-06-24 04:14:07 +04:00
struct nouveau_mm mm ;
2010-08-27 04:00:25 +04:00
int refcount ;
struct list_head pgd_list ;
2011-03-31 09:40:43 +04:00
atomic_t engref [ 16 ] ;
2010-08-27 04:00:25 +04:00
struct nouveau_vm_pgt * pgt ;
u32 fpde ;
u32 lpde ;
u32 pgt_bits ;
u8 spg_shift ;
u8 lpg_shift ;
2010-12-15 04:04:39 +03:00
void ( * map_pgt ) ( struct nouveau_gpuobj * pgd , u32 pde ,
struct nouveau_gpuobj * pgt [ 2 ] ) ;
2010-08-27 04:00:25 +04:00
void ( * map ) ( struct nouveau_vma * , struct nouveau_gpuobj * ,
2011-02-14 02:57:35 +03:00
struct nouveau_mem * , u32 pte , u32 cnt ,
u64 phys , u64 delta ) ;
2010-08-27 04:00:25 +04:00
void ( * map_sg ) ( struct nouveau_vma * , struct nouveau_gpuobj * ,
2011-02-10 05:59:51 +03:00
struct nouveau_mem * , u32 pte , u32 cnt , dma_addr_t * ) ;
2010-08-27 04:00:25 +04:00
void ( * unmap ) ( struct nouveau_gpuobj * pgt , u32 pte , u32 cnt ) ;
void ( * flush ) ( struct nouveau_vm * ) ;
} ;
/* nouveau_vm.c -- core address-space management */
int  nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
		    struct nouveau_vm **);
int  nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
		    struct nouveau_gpuobj *pgd);
int  nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
		    u32 access, struct nouveau_vma *);
void nouveau_vm_put(struct nouveau_vma *);
void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
		       struct nouveau_mem *);
/* nv50_vm.c */
2010-12-15 04:04:39 +03:00
void nv50_vm_map_pgt ( struct nouveau_gpuobj * pgd , u32 pde ,
struct nouveau_gpuobj * pgt [ 2 ] ) ;
2010-08-27 04:00:25 +04:00
void nv50_vm_map ( struct nouveau_vma * , struct nouveau_gpuobj * ,
2011-02-14 02:57:35 +03:00
struct nouveau_mem * , u32 pte , u32 cnt , u64 phys , u64 delta ) ;
2010-08-27 04:00:25 +04:00
void nv50_vm_map_sg ( struct nouveau_vma * , struct nouveau_gpuobj * ,
2011-02-10 05:59:51 +03:00
struct nouveau_mem * , u32 pte , u32 cnt , dma_addr_t * ) ;
2010-08-27 04:00:25 +04:00
void nv50_vm_unmap ( struct nouveau_gpuobj * , u32 pte , u32 cnt ) ;
void nv50_vm_flush ( struct nouveau_vm * ) ;
void nv50_vm_flush_engine ( struct drm_device * , int engine ) ;
2010-11-10 07:10:04 +03:00
/* nvc0_vm.c */
void nvc0_vm_map_pgt ( struct nouveau_gpuobj * pgd , u32 pde ,
struct nouveau_gpuobj * pgt [ 2 ] ) ;
void nvc0_vm_map ( struct nouveau_vma * , struct nouveau_gpuobj * ,
2011-02-14 02:57:35 +03:00
struct nouveau_mem * , u32 pte , u32 cnt , u64 phys , u64 delta ) ;
2010-11-10 07:10:04 +03:00
void nvc0_vm_map_sg ( struct nouveau_vma * , struct nouveau_gpuobj * ,
2011-02-10 05:59:51 +03:00
struct nouveau_mem * , u32 pte , u32 cnt , dma_addr_t * ) ;
2010-11-10 07:10:04 +03:00
void nvc0_vm_unmap ( struct nouveau_gpuobj * , u32 pte , u32 cnt ) ;
void nvc0_vm_flush ( struct nouveau_vm * ) ;
2010-08-27 04:00:25 +04:00
# endif