/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"
#include "nv50_display.h"
static int
2013-02-14 09:37:35 +10:00
nv84_fence_emit32 ( struct nouveau_channel * chan , u64 virtual , u32 sequence )
2012-04-30 13:55:29 +10:00
{
2013-02-14 09:37:35 +10:00
int ret = RING_SPACE ( chan , 8 ) ;
2012-04-30 13:55:29 +10:00
if ( ret = = 0 ) {
BEGIN_NV04 ( chan , 0 , NV11_SUBCHAN_DMA_SEMAPHORE , 1 ) ;
2014-08-10 04:10:22 +10:00
OUT_RING ( chan , chan - > vram . handle ) ;
2013-01-31 14:57:33 +10:00
BEGIN_NV04 ( chan , 0 , NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH , 5 ) ;
2013-02-14 09:37:35 +10:00
OUT_RING ( chan , upper_32_bits ( virtual ) ) ;
OUT_RING ( chan , lower_32_bits ( virtual ) ) ;
OUT_RING ( chan , sequence ) ;
2012-04-30 13:55:29 +10:00
OUT_RING ( chan , NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG ) ;
2013-01-31 14:57:33 +10:00
OUT_RING ( chan , 0x00000000 ) ;
2012-04-30 13:55:29 +10:00
FIRE_RING ( chan ) ;
}
return ret ;
}
static int
2013-02-14 09:37:35 +10:00
nv84_fence_sync32 ( struct nouveau_channel * chan , u64 virtual , u32 sequence )
2012-04-30 13:55:29 +10:00
{
2013-02-14 09:37:35 +10:00
int ret = RING_SPACE ( chan , 7 ) ;
2012-04-30 13:55:29 +10:00
if ( ret = = 0 ) {
BEGIN_NV04 ( chan , 0 , NV11_SUBCHAN_DMA_SEMAPHORE , 1 ) ;
2014-08-10 04:10:22 +10:00
OUT_RING ( chan , chan - > vram . handle ) ;
2012-04-30 13:55:29 +10:00
BEGIN_NV04 ( chan , 0 , NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH , 4 ) ;
2013-02-14 09:37:35 +10:00
OUT_RING ( chan , upper_32_bits ( virtual ) ) ;
OUT_RING ( chan , lower_32_bits ( virtual ) ) ;
OUT_RING ( chan , sequence ) ;
2012-04-30 13:55:29 +10:00
OUT_RING ( chan , NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL ) ;
FIRE_RING ( chan ) ;
}
return ret ;
}
2013-02-14 13:43:21 +10:00
static int
2013-02-14 09:37:35 +10:00
nv84_fence_emit ( struct nouveau_fence * fence )
{
struct nouveau_channel * chan = fence - > channel ;
struct nv84_fence_chan * fctx = chan - > fence ;
2014-08-10 04:10:25 +10:00
u64 addr = chan - > chid * 16 ;
2013-02-14 13:43:21 +10:00
if ( fence - > sysmem )
addr + = fctx - > vma_gart . offset ;
else
addr + = fctx - > vma . offset ;
2014-01-09 11:03:11 +01:00
return fctx - > base . emit32 ( chan , addr , fence - > base . seqno ) ;
2013-02-14 09:37:35 +10:00
}
2013-02-14 13:43:21 +10:00
static int
2013-02-14 09:37:35 +10:00
nv84_fence_sync ( struct nouveau_fence * fence ,
struct nouveau_channel * prev , struct nouveau_channel * chan )
{
struct nv84_fence_chan * fctx = chan - > fence ;
2014-08-10 04:10:25 +10:00
u64 addr = prev - > chid * 16 ;
2013-02-14 13:43:21 +10:00
if ( fence - > sysmem )
addr + = fctx - > vma_gart . offset ;
else
addr + = fctx - > vma . offset ;
2014-01-09 11:03:11 +01:00
return fctx - > base . sync32 ( chan , addr , fence - > base . seqno ) ;
2013-02-14 09:37:35 +10:00
}
2013-02-14 13:43:21 +10:00
static u32
2012-04-30 13:55:29 +10:00
nv84_fence_read ( struct nouveau_channel * chan )
{
2012-07-20 08:17:34 +10:00
struct nv84_fence_priv * priv = chan - > drm - > fence ;
2014-08-10 04:10:25 +10:00
return nouveau_bo_rd32 ( priv - > bo , chan - > chid * 16 / 4 ) ;
2012-04-30 13:55:29 +10:00
}
2013-02-14 13:43:21 +10:00
static void
2012-07-19 10:51:42 +10:00
nv84_fence_context_del ( struct nouveau_channel * chan )
2012-04-30 13:55:29 +10:00
{
2013-02-14 09:28:37 +10:00
struct nv84_fence_priv * priv = chan - > drm - > fence ;
2012-07-19 10:51:42 +10:00
struct nv84_fence_chan * fctx = chan - > fence ;
2013-02-14 09:28:37 +10:00
2014-09-22 11:08:48 +02:00
nouveau_bo_wr32 ( priv - > bo , chan - > chid * 16 / 4 , fctx - > base . sequence ) ;
2016-12-14 09:52:39 +10:00
mutex_lock ( & priv - > mutex ) ;
2013-02-14 13:43:21 +10:00
nouveau_bo_vma_del ( priv - > bo , & fctx - > vma_gart ) ;
2013-02-14 09:28:37 +10:00
nouveau_bo_vma_del ( priv - > bo , & fctx - > vma ) ;
2016-12-14 09:52:39 +10:00
mutex_unlock ( & priv - > mutex ) ;
2012-04-30 13:55:29 +10:00
nouveau_fence_context_del ( & fctx - > base ) ;
2012-07-19 10:51:42 +10:00
chan - > fence = NULL ;
2014-09-29 10:06:18 +02:00
nouveau_fence_context_free ( & fctx - > base ) ;
2012-04-30 13:55:29 +10:00
}
2013-02-14 09:28:37 +10:00
int
2012-07-19 10:51:42 +10:00
nv84_fence_context_new ( struct nouveau_channel * chan )
2012-04-30 13:55:29 +10:00
{
2015-08-20 14:54:15 +10:00
struct nouveau_cli * cli = ( void * ) chan - > user . client ;
2012-07-20 08:17:34 +10:00
struct nv84_fence_priv * priv = chan - > drm - > fence ;
2012-04-30 13:55:29 +10:00
struct nv84_fence_chan * fctx ;
2016-11-04 17:20:36 +10:00
int ret ;
2012-04-30 13:55:29 +10:00
2012-07-19 10:51:42 +10:00
fctx = chan - > fence = kzalloc ( sizeof ( * fctx ) , GFP_KERNEL ) ;
2012-04-30 13:55:29 +10:00
if ( ! fctx )
return - ENOMEM ;
2014-01-09 11:03:11 +01:00
nouveau_fence_context_new ( chan , & fctx - > base ) ;
2013-02-14 13:20:17 +10:00
fctx - > base . emit = nv84_fence_emit ;
fctx - > base . sync = nv84_fence_sync ;
fctx - > base . read = nv84_fence_read ;
fctx - > base . emit32 = nv84_fence_emit32 ;
fctx - > base . sync32 = nv84_fence_sync32 ;
2014-01-09 11:03:11 +01:00
fctx - > base . sequence = nv84_fence_read ( chan ) ;
2012-04-30 13:55:29 +10:00
2016-12-14 09:52:39 +10:00
mutex_lock ( & priv - > mutex ) ;
2014-08-10 04:10:20 +10:00
ret = nouveau_bo_vma_add ( priv - > bo , cli - > vm , & fctx - > vma ) ;
2013-02-14 13:43:21 +10:00
if ( ret = = 0 ) {
2014-08-10 04:10:20 +10:00
ret = nouveau_bo_vma_add ( priv - > bo_gart , cli - > vm ,
2013-02-14 13:43:21 +10:00
& fctx - > vma_gart ) ;
}
2016-12-14 09:52:39 +10:00
mutex_unlock ( & priv - > mutex ) ;
2012-07-20 08:17:34 +10:00
2013-02-14 13:43:21 +10:00
if ( ret )
nv84_fence_context_del ( chan ) ;
2012-04-30 13:55:29 +10:00
return ret ;
}
2013-02-14 13:43:21 +10:00
static bool
2013-02-14 09:28:37 +10:00
nv84_fence_suspend ( struct nouveau_drm * drm )
{
struct nv84_fence_priv * priv = drm - > fence ;
int i ;
2014-01-09 11:03:11 +01:00
priv - > suspend = vmalloc ( priv - > base . contexts * sizeof ( u32 ) ) ;
2013-02-14 09:28:37 +10:00
if ( priv - > suspend ) {
2014-01-09 11:03:11 +01:00
for ( i = 0 ; i < priv - > base . contexts ; i + + )
2013-02-14 09:28:37 +10:00
priv - > suspend [ i ] = nouveau_bo_rd32 ( priv - > bo , i * 4 ) ;
}
return priv - > suspend ! = NULL ;
}
2013-02-14 13:43:21 +10:00
static void
2013-02-14 09:28:37 +10:00
nv84_fence_resume ( struct nouveau_drm * drm )
{
struct nv84_fence_priv * priv = drm - > fence ;
int i ;
if ( priv - > suspend ) {
2014-01-09 11:03:11 +01:00
for ( i = 0 ; i < priv - > base . contexts ; i + + )
2013-02-14 09:28:37 +10:00
nouveau_bo_wr32 ( priv - > bo , i * 4 , priv - > suspend [ i ] ) ;
vfree ( priv - > suspend ) ;
priv - > suspend = NULL ;
}
}
2013-02-14 13:43:21 +10:00
static void
2012-07-20 08:17:34 +10:00
nv84_fence_destroy ( struct nouveau_drm * drm )
2012-04-30 13:55:29 +10:00
{
2012-07-20 08:17:34 +10:00
struct nv84_fence_priv * priv = drm - > fence ;
2013-02-14 13:43:21 +10:00
nouveau_bo_unmap ( priv - > bo_gart ) ;
if ( priv - > bo_gart )
nouveau_bo_unpin ( priv - > bo_gart ) ;
nouveau_bo_ref ( NULL , & priv - > bo_gart ) ;
2013-02-14 09:28:37 +10:00
nouveau_bo_unmap ( priv - > bo ) ;
if ( priv - > bo )
nouveau_bo_unpin ( priv - > bo ) ;
nouveau_bo_ref ( NULL , & priv - > bo ) ;
2012-07-20 08:17:34 +10:00
drm - > fence = NULL ;
2012-04-30 13:55:29 +10:00
kfree ( priv ) ;
}
int
2012-07-20 08:17:34 +10:00
nv84_fence_create ( struct nouveau_drm * drm )
2012-04-30 13:55:29 +10:00
{
2016-05-18 13:57:42 +10:00
struct nvkm_fifo * fifo = nvxx_fifo ( & drm - > client . device ) ;
2012-04-30 13:55:29 +10:00
struct nv84_fence_priv * priv ;
2015-02-20 18:22:59 +09:00
u32 domain ;
2012-04-30 13:55:29 +10:00
int ret ;
2012-07-20 08:17:34 +10:00
priv = drm - > fence = kzalloc ( sizeof ( * priv ) , GFP_KERNEL ) ;
2012-04-30 13:55:29 +10:00
if ( ! priv )
return - ENOMEM ;
2012-07-19 10:51:42 +10:00
priv - > base . dtor = nv84_fence_destroy ;
2013-02-14 09:28:37 +10:00
priv - > base . suspend = nv84_fence_suspend ;
priv - > base . resume = nv84_fence_resume ;
2012-07-19 10:51:42 +10:00
priv - > base . context_new = nv84_fence_context_new ;
priv - > base . context_del = nv84_fence_context_del ;
2012-04-30 13:55:29 +10:00
2015-08-20 14:54:19 +10:00
priv - > base . contexts = fifo - > nr ;
2016-10-25 13:00:45 +01:00
priv - > base . context_base = dma_fence_context_alloc ( priv - > base . contexts ) ;
2013-01-31 14:57:33 +10:00
priv - > base . uevent = true ;
2016-12-14 09:52:39 +10:00
mutex_init ( & priv - > mutex ) ;
2015-02-20 18:22:59 +09:00
/* Use VRAM if there is any ; otherwise fallback to system memory */
2016-05-18 13:57:42 +10:00
domain = drm - > client . device . info . ram_size ! = 0 ? TTM_PL_FLAG_VRAM :
2015-02-20 18:22:59 +09:00
/*
* fences created in sysmem must be non - cached or we
* will lose CPU / GPU coherency !
*/
TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED ;
2016-05-24 17:26:48 +10:00
ret = nouveau_bo_new ( & drm - > client , 16 * priv - > base . contexts , 0 ,
domain , 0 , 0 , NULL , NULL , & priv - > bo ) ;
2013-02-14 09:28:37 +10:00
if ( ret = = 0 ) {
2015-02-20 18:22:59 +09:00
ret = nouveau_bo_pin ( priv - > bo , domain , false ) ;
2013-02-14 09:28:37 +10:00
if ( ret = = 0 ) {
ret = nouveau_bo_map ( priv - > bo ) ;
if ( ret )
nouveau_bo_unpin ( priv - > bo ) ;
}
if ( ret )
nouveau_bo_ref ( NULL , & priv - > bo ) ;
}
2013-02-14 13:43:21 +10:00
if ( ret = = 0 )
2016-05-24 17:26:48 +10:00
ret = nouveau_bo_new ( & drm - > client , 16 * priv - > base . contexts , 0 ,
2014-10-27 18:49:18 +09:00
TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED , 0 ,
0 , NULL , NULL , & priv - > bo_gart ) ;
2013-02-14 13:43:21 +10:00
if ( ret = = 0 ) {
2014-11-10 11:24:27 +10:00
ret = nouveau_bo_pin ( priv - > bo_gart , TTM_PL_FLAG_TT , false ) ;
2013-02-14 13:43:21 +10:00
if ( ret = = 0 ) {
ret = nouveau_bo_map ( priv - > bo_gart ) ;
if ( ret )
nouveau_bo_unpin ( priv - > bo_gart ) ;
}
if ( ret )
nouveau_bo_ref ( NULL , & priv - > bo_gart ) ;
}
2012-04-30 13:55:29 +10:00
if ( ret )
2012-07-20 08:17:34 +10:00
nv84_fence_destroy ( drm ) ;
2012-04-30 13:55:29 +10:00
return ret ;
}