2012-04-30 13:55:29 +10:00
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
2012-07-20 08:17:34 +10:00
# include "nouveau_drm.h"
# include "nouveau_dma.h"
2012-04-30 13:55:29 +10:00
# include "nouveau_fence.h"
2012-07-31 16:16:21 +10:00
# include "nv50_display.h"
2013-02-14 09:28:37 +10:00
u64
nv84_fence_crtc ( struct nouveau_channel * chan , int crtc )
{
struct nv84_fence_chan * fctx = chan - > fence ;
return fctx - > dispc_vma [ crtc ] . offset ;
}
2012-04-30 13:55:29 +10:00
static int
2013-02-14 09:37:35 +10:00
nv84_fence_emit32 ( struct nouveau_channel * chan , u64 virtual , u32 sequence )
2012-04-30 13:55:29 +10:00
{
2013-02-14 09:37:35 +10:00
int ret = RING_SPACE ( chan , 8 ) ;
2012-04-30 13:55:29 +10:00
if ( ret = = 0 ) {
BEGIN_NV04 ( chan , 0 , NV11_SUBCHAN_DMA_SEMAPHORE , 1 ) ;
2014-08-10 04:10:22 +10:00
OUT_RING ( chan , chan - > vram . handle ) ;
2013-01-31 14:57:33 +10:00
BEGIN_NV04 ( chan , 0 , NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH , 5 ) ;
2013-02-14 09:37:35 +10:00
OUT_RING ( chan , upper_32_bits ( virtual ) ) ;
OUT_RING ( chan , lower_32_bits ( virtual ) ) ;
OUT_RING ( chan , sequence ) ;
2012-04-30 13:55:29 +10:00
OUT_RING ( chan , NV84_SUBCHAN_SEMAPHORE_TRIGGER_WRITE_LONG ) ;
2013-01-31 14:57:33 +10:00
OUT_RING ( chan , 0x00000000 ) ;
2012-04-30 13:55:29 +10:00
FIRE_RING ( chan ) ;
}
return ret ;
}
static int
2013-02-14 09:37:35 +10:00
nv84_fence_sync32 ( struct nouveau_channel * chan , u64 virtual , u32 sequence )
2012-04-30 13:55:29 +10:00
{
2013-02-14 09:37:35 +10:00
int ret = RING_SPACE ( chan , 7 ) ;
2012-04-30 13:55:29 +10:00
if ( ret = = 0 ) {
BEGIN_NV04 ( chan , 0 , NV11_SUBCHAN_DMA_SEMAPHORE , 1 ) ;
2014-08-10 04:10:22 +10:00
OUT_RING ( chan , chan - > vram . handle ) ;
2012-04-30 13:55:29 +10:00
BEGIN_NV04 ( chan , 0 , NV84_SUBCHAN_SEMAPHORE_ADDRESS_HIGH , 4 ) ;
2013-02-14 09:37:35 +10:00
OUT_RING ( chan , upper_32_bits ( virtual ) ) ;
OUT_RING ( chan , lower_32_bits ( virtual ) ) ;
OUT_RING ( chan , sequence ) ;
2012-04-30 13:55:29 +10:00
OUT_RING ( chan , NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_GEQUAL ) ;
FIRE_RING ( chan ) ;
}
return ret ;
}
2013-02-14 13:43:21 +10:00
static int
2013-02-14 09:37:35 +10:00
nv84_fence_emit ( struct nouveau_fence * fence )
{
struct nouveau_channel * chan = fence - > channel ;
struct nv84_fence_chan * fctx = chan - > fence ;
2014-08-10 04:10:25 +10:00
u64 addr = chan - > chid * 16 ;
2013-02-14 13:43:21 +10:00
if ( fence - > sysmem )
addr + = fctx - > vma_gart . offset ;
else
addr + = fctx - > vma . offset ;
2014-01-09 11:03:11 +01:00
return fctx - > base . emit32 ( chan , addr , fence - > base . seqno ) ;
2013-02-14 09:37:35 +10:00
}
2013-02-14 13:43:21 +10:00
static int
2013-02-14 09:37:35 +10:00
nv84_fence_sync ( struct nouveau_fence * fence ,
struct nouveau_channel * prev , struct nouveau_channel * chan )
{
struct nv84_fence_chan * fctx = chan - > fence ;
2014-08-10 04:10:25 +10:00
u64 addr = prev - > chid * 16 ;
2013-02-14 13:43:21 +10:00
if ( fence - > sysmem )
addr + = fctx - > vma_gart . offset ;
else
addr + = fctx - > vma . offset ;
2014-01-09 11:03:11 +01:00
return fctx - > base . sync32 ( chan , addr , fence - > base . seqno ) ;
2013-02-14 09:37:35 +10:00
}
2013-02-14 13:43:21 +10:00
static u32
2012-04-30 13:55:29 +10:00
nv84_fence_read ( struct nouveau_channel * chan )
{
2012-07-20 08:17:34 +10:00
struct nv84_fence_priv * priv = chan - > drm - > fence ;
2014-08-10 04:10:25 +10:00
return nouveau_bo_rd32 ( priv - > bo , chan - > chid * 16 / 4 ) ;
2012-04-30 13:55:29 +10:00
}
2013-02-14 13:43:21 +10:00
static void
2012-07-19 10:51:42 +10:00
nv84_fence_context_del ( struct nouveau_channel * chan )
2012-04-30 13:55:29 +10:00
{
2013-02-14 09:28:37 +10:00
struct drm_device * dev = chan - > drm - > dev ;
struct nv84_fence_priv * priv = chan - > drm - > fence ;
2012-07-19 10:51:42 +10:00
struct nv84_fence_chan * fctx = chan - > fence ;
2013-02-14 09:28:37 +10:00
int i ;
for ( i = 0 ; i < dev - > mode_config . num_crtc ; i + + ) {
struct nouveau_bo * bo = nv50_display_crtc_sema ( dev , i ) ;
nouveau_bo_vma_del ( bo , & fctx - > dispc_vma [ i ] ) ;
}
2014-09-22 11:08:48 +02:00
nouveau_bo_wr32 ( priv - > bo , chan - > chid * 16 / 4 , fctx - > base . sequence ) ;
2013-02-14 13:43:21 +10:00
nouveau_bo_vma_del ( priv - > bo , & fctx - > vma_gart ) ;
2013-02-14 09:28:37 +10:00
nouveau_bo_vma_del ( priv - > bo , & fctx - > vma ) ;
2012-04-30 13:55:29 +10:00
nouveau_fence_context_del ( & fctx - > base ) ;
2012-07-19 10:51:42 +10:00
chan - > fence = NULL ;
2014-09-29 10:06:18 +02:00
nouveau_fence_context_free ( & fctx - > base ) ;
2012-04-30 13:55:29 +10:00
}
2013-02-14 09:28:37 +10:00
int
2012-07-19 10:51:42 +10:00
nv84_fence_context_new ( struct nouveau_channel * chan )
2012-04-30 13:55:29 +10:00
{
2014-08-10 04:10:22 +10:00
struct nouveau_cli * cli = ( void * ) nvif_client ( & chan - > device - > base ) ;
2012-07-20 08:17:34 +10:00
struct nv84_fence_priv * priv = chan - > drm - > fence ;
2012-04-30 13:55:29 +10:00
struct nv84_fence_chan * fctx ;
2012-07-22 11:55:54 +10:00
int ret , i ;
2012-04-30 13:55:29 +10:00
2012-07-19 10:51:42 +10:00
fctx = chan - > fence = kzalloc ( sizeof ( * fctx ) , GFP_KERNEL ) ;
2012-04-30 13:55:29 +10:00
if ( ! fctx )
return - ENOMEM ;
2014-01-09 11:03:11 +01:00
nouveau_fence_context_new ( chan , & fctx - > base ) ;
2013-02-14 13:20:17 +10:00
fctx - > base . emit = nv84_fence_emit ;
fctx - > base . sync = nv84_fence_sync ;
fctx - > base . read = nv84_fence_read ;
fctx - > base . emit32 = nv84_fence_emit32 ;
fctx - > base . sync32 = nv84_fence_sync32 ;
2014-01-09 11:03:11 +01:00
fctx - > base . sequence = nv84_fence_read ( chan ) ;
2012-04-30 13:55:29 +10:00
2014-08-10 04:10:20 +10:00
ret = nouveau_bo_vma_add ( priv - > bo , cli - > vm , & fctx - > vma ) ;
2013-02-14 13:43:21 +10:00
if ( ret = = 0 ) {
2014-08-10 04:10:20 +10:00
ret = nouveau_bo_vma_add ( priv - > bo_gart , cli - > vm ,
2013-02-14 13:43:21 +10:00
& fctx - > vma_gart ) ;
}
2012-07-20 08:17:34 +10:00
2013-02-14 09:28:37 +10:00
/* map display semaphore buffers into channel's vm */
for ( i = 0 ; ! ret & & i < chan - > drm - > dev - > mode_config . num_crtc ; i + + ) {
struct nouveau_bo * bo = nv50_display_crtc_sema ( chan - > drm - > dev , i ) ;
2014-08-10 04:10:20 +10:00
ret = nouveau_bo_vma_add ( bo , cli - > vm , & fctx - > dispc_vma [ i ] ) ;
2012-07-22 11:55:54 +10:00
}
2013-02-14 13:43:21 +10:00
if ( ret )
nv84_fence_context_del ( chan ) ;
2012-04-30 13:55:29 +10:00
return ret ;
}
2013-02-14 13:43:21 +10:00
static bool
2013-02-14 09:28:37 +10:00
nv84_fence_suspend ( struct nouveau_drm * drm )
{
struct nv84_fence_priv * priv = drm - > fence ;
int i ;
2014-01-09 11:03:11 +01:00
priv - > suspend = vmalloc ( priv - > base . contexts * sizeof ( u32 ) ) ;
2013-02-14 09:28:37 +10:00
if ( priv - > suspend ) {
2014-01-09 11:03:11 +01:00
for ( i = 0 ; i < priv - > base . contexts ; i + + )
2013-02-14 09:28:37 +10:00
priv - > suspend [ i ] = nouveau_bo_rd32 ( priv - > bo , i * 4 ) ;
}
return priv - > suspend ! = NULL ;
}
2013-02-14 13:43:21 +10:00
static void
2013-02-14 09:28:37 +10:00
nv84_fence_resume ( struct nouveau_drm * drm )
{
struct nv84_fence_priv * priv = drm - > fence ;
int i ;
if ( priv - > suspend ) {
2014-01-09 11:03:11 +01:00
for ( i = 0 ; i < priv - > base . contexts ; i + + )
2013-02-14 09:28:37 +10:00
nouveau_bo_wr32 ( priv - > bo , i * 4 , priv - > suspend [ i ] ) ;
vfree ( priv - > suspend ) ;
priv - > suspend = NULL ;
}
}
2013-02-14 13:43:21 +10:00
static void
2012-07-20 08:17:34 +10:00
nv84_fence_destroy ( struct nouveau_drm * drm )
2012-04-30 13:55:29 +10:00
{
2012-07-20 08:17:34 +10:00
struct nv84_fence_priv * priv = drm - > fence ;
2013-02-14 13:43:21 +10:00
nouveau_bo_unmap ( priv - > bo_gart ) ;
if ( priv - > bo_gart )
nouveau_bo_unpin ( priv - > bo_gart ) ;
nouveau_bo_ref ( NULL , & priv - > bo_gart ) ;
2013-02-14 09:28:37 +10:00
nouveau_bo_unmap ( priv - > bo ) ;
if ( priv - > bo )
nouveau_bo_unpin ( priv - > bo ) ;
nouveau_bo_ref ( NULL , & priv - > bo ) ;
2012-07-20 08:17:34 +10:00
drm - > fence = NULL ;
2012-04-30 13:55:29 +10:00
kfree ( priv ) ;
}
int
2012-07-20 08:17:34 +10:00
nv84_fence_create ( struct nouveau_drm * drm )
2012-04-30 13:55:29 +10:00
{
2015-01-14 15:36:34 +10:00
struct nvkm_fifo * pfifo = nvxx_fifo ( & drm - > device ) ;
2012-04-30 13:55:29 +10:00
struct nv84_fence_priv * priv ;
int ret ;
2012-07-20 08:17:34 +10:00
priv = drm - > fence = kzalloc ( sizeof ( * priv ) , GFP_KERNEL ) ;
2012-04-30 13:55:29 +10:00
if ( ! priv )
return - ENOMEM ;
2012-07-19 10:51:42 +10:00
priv - > base . dtor = nv84_fence_destroy ;
2013-02-14 09:28:37 +10:00
priv - > base . suspend = nv84_fence_suspend ;
priv - > base . resume = nv84_fence_resume ;
2012-07-19 10:51:42 +10:00
priv - > base . context_new = nv84_fence_context_new ;
priv - > base . context_del = nv84_fence_context_del ;
2012-04-30 13:55:29 +10:00
2014-01-09 11:03:11 +01:00
priv - > base . contexts = pfifo - > max + 1 ;
priv - > base . context_base = fence_context_alloc ( priv - > base . contexts ) ;
2013-01-31 14:57:33 +10:00
priv - > base . uevent = true ;
2014-01-09 11:03:11 +01:00
ret = nouveau_bo_new ( drm - > dev , 16 * priv - > base . contexts , 0 ,
2014-01-09 11:03:15 +01:00
TTM_PL_FLAG_VRAM , 0 , 0 , NULL , NULL , & priv - > bo ) ;
2013-02-14 09:28:37 +10:00
if ( ret = = 0 ) {
2014-11-10 11:24:27 +10:00
ret = nouveau_bo_pin ( priv - > bo , TTM_PL_FLAG_VRAM , false ) ;
2013-02-14 09:28:37 +10:00
if ( ret = = 0 ) {
ret = nouveau_bo_map ( priv - > bo ) ;
if ( ret )
nouveau_bo_unpin ( priv - > bo ) ;
}
if ( ret )
nouveau_bo_ref ( NULL , & priv - > bo ) ;
}
2013-02-14 13:43:21 +10:00
if ( ret = = 0 )
2014-01-09 11:03:11 +01:00
ret = nouveau_bo_new ( drm - > dev , 16 * priv - > base . contexts , 0 ,
2014-10-27 18:49:18 +09:00
TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED , 0 ,
0 , NULL , NULL , & priv - > bo_gart ) ;
2013-02-14 13:43:21 +10:00
if ( ret = = 0 ) {
2014-11-10 11:24:27 +10:00
ret = nouveau_bo_pin ( priv - > bo_gart , TTM_PL_FLAG_TT , false ) ;
2013-02-14 13:43:21 +10:00
if ( ret = = 0 ) {
ret = nouveau_bo_map ( priv - > bo_gart ) ;
if ( ret )
nouveau_bo_unpin ( priv - > bo_gart ) ;
}
if ( ret )
nouveau_bo_ref ( NULL , & priv - > bo_gart ) ;
}
2012-04-30 13:55:29 +10:00
if ( ret )
2012-07-20 08:17:34 +10:00
nv84_fence_destroy ( drm ) ;
2012-04-30 13:55:29 +10:00
return ret ;
}