2012-07-20 08:17:34 +10:00
/*
* Copyright 2012 Red Hat Inc .
*
* Permission is hereby granted , free of charge , to any person obtaining a
* copy of this software and associated documentation files ( the " Software " ) ,
* to deal in the Software without restriction , including without limitation
* the rights to use , copy , modify , merge , publish , distribute , sublicense ,
* and / or sell copies of the Software , and to permit persons to whom the
* Software is furnished to do so , subject to the following conditions :
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software .
*
* THE SOFTWARE IS PROVIDED " AS IS " , WITHOUT WARRANTY OF ANY KIND , EXPRESS OR
* IMPLIED , INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY ,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT . IN NO EVENT SHALL
* THE COPYRIGHT HOLDER ( S ) OR AUTHOR ( S ) BE LIABLE FOR ANY CLAIM , DAMAGES OR
* OTHER LIABILITY , WHETHER IN AN ACTION OF CONTRACT , TORT OR OTHERWISE ,
* ARISING FROM , OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE .
*
* Authors : Ben Skeggs
*/
2020-06-22 19:54:50 +10:00
# include <nvif/push006c.h>
2012-07-20 08:17:34 +10:00
2014-08-10 04:10:23 +10:00
# include <nvif/class.h>
2015-11-08 12:16:40 +10:00
# include <nvif/cl0002.h>
2015-11-08 11:28:26 +10:00
# include <nvif/cl006b.h>
# include <nvif/cl506f.h>
# include <nvif/cl906f.h>
# include <nvif/cla06f.h>
2018-12-11 14:50:02 +10:00
# include <nvif/clc36f.h>
2015-08-20 14:54:16 +10:00
# include <nvif/ioctl.h>
2014-08-10 04:10:23 +10:00
2016-05-20 09:22:55 +10:00
# include "nouveau_drv.h"
2012-07-20 08:17:34 +10:00
# include "nouveau_dma.h"
# include "nouveau_bo.h"
# include "nouveau_chan.h"
# include "nouveau_fence.h"
# include "nouveau_abi16.h"
2017-11-01 03:56:19 +10:00
# include "nouveau_vmm.h"
2018-07-05 12:57:12 +10:00
# include "nouveau_svm.h"
2012-07-20 08:17:34 +10:00
MODULE_PARM_DESC ( vram_pushbuf , " Create DMA push buffers in VRAM " ) ;
2014-08-18 22:43:24 +02:00
int nouveau_vram_pushbuf ;
2012-07-20 08:17:34 +10:00
module_param_named ( vram_pushbuf , nouveau_vram_pushbuf , int , 0400 ) ;
2016-11-05 13:31:25 +10:00
static int
nouveau_channel_killed ( struct nvif_notify * ntfy )
{
struct nouveau_channel * chan = container_of ( ntfy , typeof ( * chan ) , kill ) ;
struct nouveau_cli * cli = ( void * ) chan - > user . client ;
NV_PRINTK ( warn , cli , " channel %d killed! \n " , chan - > chid ) ;
atomic_set ( & chan - > killed , 1 ) ;
2020-01-23 15:39:27 +10:00
if ( chan - > fence )
nouveau_fence_context_kill ( chan - > fence , - ENODEV ) ;
2016-11-05 13:31:25 +10:00
return NVIF_NOTIFY_DROP ;
}
2012-07-20 08:17:34 +10:00
int
nouveau_channel_idle ( struct nouveau_channel * chan )
{
2017-01-24 16:56:52 +10:00
if ( likely ( chan & & chan - > fence & & ! atomic_read ( & chan - > killed ) ) ) {
2015-08-20 14:54:22 +10:00
struct nouveau_cli * cli = ( void * ) chan - > user . client ;
struct nouveau_fence * fence = NULL ;
int ret ;
2012-07-20 08:17:34 +10:00
2015-08-20 14:54:22 +10:00
ret = nouveau_fence_new ( chan , false , & fence ) ;
if ( ! ret ) {
ret = nouveau_fence_wait ( fence , false , false ) ;
nouveau_fence_unref ( & fence ) ;
}
2012-07-20 08:17:34 +10:00
2015-08-20 14:54:22 +10:00
if ( ret ) {
2015-09-04 14:40:32 +10:00
NV_PRINTK ( err , cli , " failed to idle channel %d [%s] \n " ,
chan - > chid , nvxx_client ( & cli - > base ) - > name ) ;
2015-08-20 14:54:22 +10:00
return ret ;
}
}
return 0 ;
2012-07-20 08:17:34 +10:00
}
void
nouveau_channel_del ( struct nouveau_channel * * pchan )
{
struct nouveau_channel * chan = * pchan ;
if ( chan ) {
2017-11-01 03:56:20 +10:00
struct nouveau_cli * cli = ( void * ) chan - > user . client ;
bool super ;
if ( cli ) {
super = cli - > base . super ;
cli - > base . super = true ;
}
2015-08-20 14:54:22 +10:00
if ( chan - > fence )
2012-07-20 08:17:34 +10:00
nouveau_fence ( chan - > drm ) - > context_del ( chan ) ;
2018-07-05 12:57:12 +10:00
if ( cli )
nouveau_svmm_part ( chan - > vmm - > svmm , chan - > inst ) ;
2020-03-30 09:51:33 +10:00
nvif_object_dtor ( & chan - > nvsw ) ;
nvif_object_dtor ( & chan - > gart ) ;
nvif_object_dtor ( & chan - > vram ) ;
2020-06-08 14:47:37 +10:00
nvif_notify_dtor ( & chan - > kill ) ;
2020-03-30 09:51:33 +10:00
nvif_object_dtor ( & chan - > user ) ;
nvif_object_dtor ( & chan - > push . ctxdma ) ;
2017-11-01 03:56:19 +10:00
nouveau_vma_del ( & chan - > push . vma ) ;
2012-07-20 08:17:34 +10:00
nouveau_bo_unmap ( chan - > push . buffer ) ;
2020-09-21 15:37:12 +02:00
if ( chan - > push . buffer & & chan - > push . buffer - > bo . pin_count )
2012-11-25 23:02:28 +01:00
nouveau_bo_unpin ( chan - > push . buffer ) ;
2012-07-20 08:17:34 +10:00
nouveau_bo_ref ( NULL , & chan - > push . buffer ) ;
kfree ( chan ) ;
2017-11-01 03:56:20 +10:00
if ( cli )
cli - > base . super = super ;
2012-07-20 08:17:34 +10:00
}
* pchan = NULL ;
}
2020-07-18 18:06:30 +10:00
static void
nouveau_channel_kick ( struct nvif_push * push )
{
struct nouveau_channel * chan = container_of ( push , typeof ( * chan ) , chan . _push ) ;
chan - > dma . cur = chan - > dma . cur + ( chan - > chan . _push . cur - chan - > chan . _push . bgn ) ;
FIRE_RING ( chan ) ;
chan - > chan . _push . bgn = chan - > chan . _push . cur ;
}
/* nvif_push wait callback: reserve 'size' u32s of pushbuf space via the
 * legacy RING_SPACE machinery and rebase the push window onto it.
 */
static int
nouveau_channel_wait(struct nvif_push *push, u32 size)
{
	struct nouveau_channel *chan = container_of(push, typeof(*chan), chan._push);
	int ret;

	/* flush accounting of anything emitted but not yet counted */
	chan->dma.cur += push->cur - push->bgn;

	ret = RING_SPACE(chan, size);
	if (ret)
		return ret;

	/* point the push window at dma.cur within the mapped pushbuf */
	push->bgn = push->mem.object.map.ptr;
	push->bgn = push->bgn + chan->dma.cur;
	push->cur = push->bgn;
	push->end = push->bgn + size;
	return 0;
}
2012-07-20 08:17:34 +10:00
static int
2014-08-10 04:10:22 +10:00
nouveau_channel_prep ( struct nouveau_drm * drm , struct nvif_device * device ,
2015-09-04 14:40:32 +10:00
u32 size , struct nouveau_channel * * pchan )
2012-07-20 08:17:34 +10:00
{
2015-08-20 14:54:15 +10:00
struct nouveau_cli * cli = ( void * ) device - > object . client ;
2014-08-10 04:10:24 +10:00
struct nv_dma_v0 args = { } ;
2012-07-20 08:17:34 +10:00
struct nouveau_channel * chan ;
u32 target ;
int ret ;
chan = * pchan = kzalloc ( sizeof ( * chan ) , GFP_KERNEL ) ;
if ( ! chan )
return - ENOMEM ;
2015-08-20 14:54:15 +10:00
chan - > device = device ;
2012-07-20 08:17:34 +10:00
chan - > drm = drm ;
2019-02-19 17:21:48 +10:00
chan - > vmm = cli - > svm . cli ? & cli - > svm : & cli - > vmm ;
2016-11-05 13:31:25 +10:00
atomic_set ( & chan - > killed , 0 ) ;
2012-07-20 08:17:34 +10:00
/* allocate memory for dma push buffer */
2020-09-08 14:39:36 +02:00
target = NOUVEAU_GEM_DOMAIN_GART | NOUVEAU_GEM_DOMAIN_COHERENT ;
2012-07-20 08:17:34 +10:00
if ( nouveau_vram_pushbuf )
2020-09-08 14:39:36 +02:00
target = NOUVEAU_GEM_DOMAIN_VRAM ;
2012-07-20 08:17:34 +10:00
2016-05-24 17:26:48 +10:00
ret = nouveau_bo_new ( cli , size , 0 , target , 0 , 0 , NULL , NULL ,
2012-07-20 08:17:34 +10:00
& chan - > push . buffer ) ;
if ( ret = = 0 ) {
2014-11-10 11:24:27 +10:00
ret = nouveau_bo_pin ( chan - > push . buffer , target , false ) ;
2012-07-20 08:17:34 +10:00
if ( ret = = 0 )
ret = nouveau_bo_map ( chan - > push . buffer ) ;
}
if ( ret ) {
nouveau_channel_del ( pchan ) ;
return ret ;
}
2020-07-18 18:06:30 +10:00
chan - > chan . _push . mem . object . parent = cli - > base . object . parent ;
chan - > chan . _push . mem . object . client = & cli - > base ;
chan - > chan . _push . mem . object . name = " chanPush " ;
chan - > chan . _push . mem . object . map . ptr = chan - > push . buffer - > kmap . virtual ;
chan - > chan . _push . wait = nouveau_channel_wait ;
chan - > chan . _push . kick = nouveau_channel_kick ;
chan - > chan . push = & chan - > chan . _push ;
2012-07-20 08:17:34 +10:00
/* create dma object covering the *entire* memory space that the
* pushbuf lives in , this is because the GEM code requires that
* we be able to call out to other ( indirect ) push buffers
*/
2020-06-24 20:26:44 +02:00
chan - > push . addr = chan - > push . buffer - > offset ;
2012-07-20 08:17:34 +10:00
2014-08-10 04:10:22 +10:00
if ( device - > info . family > = NV_DEVICE_INFO_V0_TESLA ) {
2019-02-19 17:21:48 +10:00
ret = nouveau_vma_new ( chan - > push . buffer , chan - > vmm ,
2017-11-01 03:56:19 +10:00
& chan - > push . vma ) ;
2012-07-20 08:17:34 +10:00
if ( ret ) {
nouveau_channel_del ( pchan ) ;
return ret ;
}
2018-05-08 20:39:47 +10:00
chan - > push . addr = chan - > push . vma - > addr ;
if ( device - > info . family > = NV_DEVICE_INFO_V0_FERMI )
return 0 ;
2014-08-10 04:10:24 +10:00
args . target = NV_DMA_V0_TARGET_VM ;
args . access = NV_DMA_V0_ACCESS_VM ;
2012-07-20 08:17:34 +10:00
args . start = 0 ;
2019-02-19 17:21:48 +10:00
args . limit = chan - > vmm - > vmm . limit - 1 ;
2012-07-20 08:17:34 +10:00
} else
if ( chan - > push . buffer - > bo . mem . mem_type = = TTM_PL_VRAM ) {
2014-08-10 04:10:22 +10:00
if ( device - > info . family = = NV_DEVICE_INFO_V0_TNT ) {
2012-07-20 08:17:34 +10:00
/* nv04 vram pushbuf hack, retarget to its location in
* the framebuffer bar rather than direct vram access . .
* nfi why this exists , it came from the - nv ddx .
*/
2014-08-10 04:10:24 +10:00
args . target = NV_DMA_V0_TARGET_PCI ;
args . access = NV_DMA_V0_ACCESS_RDWR ;
2015-08-20 14:54:23 +10:00
args . start = nvxx_device ( device ) - > func - >
resource_addr ( nvxx_device ( device ) , 1 ) ;
2014-08-10 04:10:28 +10:00
args . limit = args . start + device - > info . ram_user - 1 ;
2012-07-20 08:17:34 +10:00
} else {
2014-08-10 04:10:24 +10:00
args . target = NV_DMA_V0_TARGET_VRAM ;
args . access = NV_DMA_V0_ACCESS_RDWR ;
2012-07-20 08:17:34 +10:00
args . start = 0 ;
2014-08-10 04:10:28 +10:00
args . limit = device - > info . ram_user - 1 ;
2012-07-20 08:17:34 +10:00
}
} else {
2015-08-20 14:54:23 +10:00
if ( chan - > drm - > agp . bridge ) {
2014-08-10 04:10:24 +10:00
args . target = NV_DMA_V0_TARGET_AGP ;
args . access = NV_DMA_V0_ACCESS_RDWR ;
2012-07-20 08:17:34 +10:00
args . start = chan - > drm - > agp . base ;
args . limit = chan - > drm - > agp . base +
chan - > drm - > agp . size - 1 ;
} else {
2014-08-10 04:10:24 +10:00
args . target = NV_DMA_V0_TARGET_VM ;
args . access = NV_DMA_V0_ACCESS_RDWR ;
2012-07-20 08:17:34 +10:00
args . start = 0 ;
2019-02-19 17:21:48 +10:00
args . limit = chan - > vmm - > vmm . limit - 1 ;
2012-07-20 08:17:34 +10:00
}
}
2020-03-30 09:51:33 +10:00
ret = nvif_object_ctor ( & device - > object , " abi16PushCtxDma " , 0 ,
NV_DMA_FROM_MEMORY , & args , sizeof ( args ) ,
& chan - > push . ctxdma ) ;
2012-07-20 08:17:34 +10:00
if ( ret ) {
nouveau_channel_del ( pchan ) ;
return ret ;
}
return 0 ;
}
2012-08-19 23:00:00 +02:00
static int
2014-08-10 04:10:22 +10:00
nouveau_channel_ind ( struct nouveau_drm * drm , struct nvif_device * device ,
2018-12-11 14:50:02 +10:00
u64 runlist , bool priv , struct nouveau_channel * * pchan )
2012-07-20 08:17:34 +10:00
{
2018-12-11 14:50:02 +10:00
static const u16 oclasses [ ] = { TURING_CHANNEL_GPFIFO_A ,
VOLTA_CHANNEL_GPFIFO_A ,
2018-05-08 20:39:48 +10:00
PASCAL_CHANNEL_GPFIFO_A ,
2016-07-09 10:41:01 +10:00
MAXWELL_CHANNEL_GPFIFO_A ,
2016-03-11 13:09:28 +10:00
KEPLER_CHANNEL_GPFIFO_B ,
2015-04-14 11:47:24 +10:00
KEPLER_CHANNEL_GPFIFO_A ,
2014-08-10 04:10:25 +10:00
FERMI_CHANNEL_GPFIFO ,
G82_CHANNEL_GPFIFO ,
NV50_CHANNEL_GPFIFO ,
2012-08-19 16:03:00 +10:00
0 } ;
2012-07-20 08:17:34 +10:00
const u16 * oclass = oclasses ;
2014-08-10 04:10:25 +10:00
union {
struct nv50_channel_gpfifo_v0 nv50 ;
2015-08-20 14:54:16 +10:00
struct fermi_channel_gpfifo_v0 fermi ;
2014-08-10 04:10:25 +10:00
struct kepler_channel_gpfifo_a_v0 kepler ;
2018-12-11 14:50:02 +10:00
struct volta_channel_gpfifo_a_v0 volta ;
2015-08-20 14:54:15 +10:00
} args ;
2012-07-20 08:17:34 +10:00
struct nouveau_channel * chan ;
2014-08-10 04:10:25 +10:00
u32 size ;
2012-07-20 08:17:34 +10:00
int ret ;
/* allocate dma push buffer */
2015-09-04 14:40:32 +10:00
ret = nouveau_channel_prep ( drm , device , 0x12000 , & chan ) ;
2012-07-20 08:17:34 +10:00
* pchan = chan ;
if ( ret )
return ret ;
/* create channel object */
do {
2018-12-11 14:50:02 +10:00
if ( oclass [ 0 ] > = VOLTA_CHANNEL_GPFIFO_A ) {
args . volta . version = 0 ;
args . volta . ilength = 0x02000 ;
args . volta . ioffset = 0x10000 + chan - > push . addr ;
args . volta . runlist = runlist ;
2019-02-19 17:21:48 +10:00
args . volta . vmm = nvif_handle ( & chan - > vmm - > vmm . object ) ;
2018-12-11 14:50:02 +10:00
args . volta . priv = priv ;
size = sizeof ( args . volta ) ;
} else
2014-08-10 04:10:25 +10:00
if ( oclass [ 0 ] > = KEPLER_CHANNEL_GPFIFO_A ) {
args . kepler . version = 0 ;
args . kepler . ilength = 0x02000 ;
2017-11-01 03:56:19 +10:00
args . kepler . ioffset = 0x10000 + chan - > push . addr ;
2018-05-08 20:39:46 +10:00
args . kepler . runlist = runlist ;
2019-02-19 17:21:48 +10:00
args . kepler . vmm = nvif_handle ( & chan - > vmm - > vmm . object ) ;
2018-12-11 14:50:02 +10:00
args . kepler . priv = priv ;
2014-08-10 04:10:25 +10:00
size = sizeof ( args . kepler ) ;
2015-08-20 14:54:16 +10:00
} else
if ( oclass [ 0 ] > = FERMI_CHANNEL_GPFIFO ) {
args . fermi . version = 0 ;
args . fermi . ilength = 0x02000 ;
2017-11-01 03:56:19 +10:00
args . fermi . ioffset = 0x10000 + chan - > push . addr ;
2019-02-19 17:21:48 +10:00
args . fermi . vmm = nvif_handle ( & chan - > vmm - > vmm . object ) ;
2015-08-20 14:54:16 +10:00
size = sizeof ( args . fermi ) ;
2014-08-10 04:10:25 +10:00
} else {
args . nv50 . version = 0 ;
args . nv50 . ilength = 0x02000 ;
2017-11-01 03:56:19 +10:00
args . nv50 . ioffset = 0x10000 + chan - > push . addr ;
2015-08-20 14:54:16 +10:00
args . nv50 . pushbuf = nvif_handle ( & chan - > push . ctxdma ) ;
2019-02-19 17:21:48 +10:00
args . nv50 . vmm = nvif_handle ( & chan - > vmm - > vmm . object ) ;
2014-08-10 04:10:25 +10:00
size = sizeof ( args . nv50 ) ;
}
2020-03-30 09:51:33 +10:00
ret = nvif_object_ctor ( & device - > object , " abi16ChanUser " , 0 ,
* oclass + + , & args , size , & chan - > user ) ;
2014-08-10 04:10:25 +10:00
if ( ret = = 0 ) {
2018-12-11 14:50:02 +10:00
if ( chan - > user . oclass > = VOLTA_CHANNEL_GPFIFO_A ) {
chan - > chid = args . volta . chid ;
chan - > inst = args . volta . inst ;
chan - > token = args . volta . token ;
} else
2018-12-11 14:50:02 +10:00
if ( chan - > user . oclass > = KEPLER_CHANNEL_GPFIFO_A ) {
2015-08-20 14:54:15 +10:00
chan - > chid = args . kepler . chid ;
2018-12-11 14:50:02 +10:00
chan - > inst = args . kepler . inst ;
} else
if ( chan - > user . oclass > = FERMI_CHANNEL_GPFIFO ) {
2015-08-20 14:54:16 +10:00
chan - > chid = args . fermi . chid ;
2018-12-11 14:50:02 +10:00
} else {
2015-08-20 14:54:15 +10:00
chan - > chid = args . nv50 . chid ;
2018-12-11 14:50:02 +10:00
}
2012-07-20 08:17:34 +10:00
return ret ;
2014-08-10 04:10:25 +10:00
}
2012-07-20 08:17:34 +10:00
} while ( * oclass ) ;
nouveau_channel_del ( pchan ) ;
return ret ;
}
static int
2014-08-10 04:10:22 +10:00
nouveau_channel_dma ( struct nouveau_drm * drm , struct nvif_device * device ,
2015-09-04 14:40:32 +10:00
struct nouveau_channel * * pchan )
2012-07-20 08:17:34 +10:00
{
2014-08-10 04:10:25 +10:00
static const u16 oclasses [ ] = { NV40_CHANNEL_DMA ,
NV17_CHANNEL_DMA ,
NV10_CHANNEL_DMA ,
NV03_CHANNEL_DMA ,
2012-08-19 16:03:00 +10:00
0 } ;
2012-07-20 08:17:34 +10:00
const u16 * oclass = oclasses ;
2015-08-20 14:54:15 +10:00
struct nv03_channel_dma_v0 args ;
2012-07-20 08:17:34 +10:00
struct nouveau_channel * chan ;
int ret ;
/* allocate dma push buffer */
2015-09-04 14:40:32 +10:00
ret = nouveau_channel_prep ( drm , device , 0x10000 , & chan ) ;
2012-07-20 08:17:34 +10:00
* pchan = chan ;
if ( ret )
return ret ;
/* create channel object */
2014-08-10 04:10:25 +10:00
args . version = 0 ;
2015-08-20 14:54:16 +10:00
args . pushbuf = nvif_handle ( & chan - > push . ctxdma ) ;
2017-11-01 03:56:19 +10:00
args . offset = chan - > push . addr ;
2012-07-20 08:17:34 +10:00
do {
2020-03-30 09:51:33 +10:00
ret = nvif_object_ctor ( & device - > object , " abi16ChanUser " , 0 ,
* oclass + + , & args , sizeof ( args ) ,
& chan - > user ) ;
2014-08-10 04:10:25 +10:00
if ( ret = = 0 ) {
2015-08-20 14:54:15 +10:00
chan - > chid = args . chid ;
2012-07-20 08:17:34 +10:00
return ret ;
2014-08-10 04:10:25 +10:00
}
2012-07-20 08:17:34 +10:00
} while ( ret & & * oclass ) ;
nouveau_channel_del ( pchan ) ;
return ret ;
}
/* Bring a freshly created channel to a usable state: map its user
 * registers, request kill notifications (fermi+), create the vram/gart
 * ctxdmas (pre-fermi), set up DMA tracking, burn the initial skip
 * slots, and attach the fence context.
 */
static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
	struct nvif_device *device = chan->device;
	struct nouveau_drm *drm = chan->drm;
	struct nv_dma_v0 args = {};
	int ret, i;

	nvif_object_map(&chan->user, NULL, 0);

	if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
		ret = nvif_notify_ctor(&chan->user, "abi16ChanKilled",
				       nouveau_channel_killed,
				       true, NV906F_V0_NTFY_KILLED,
				       NULL, 0, 0, &chan->kill);
		if (ret == 0)
			ret = nvif_notify_get(&chan->kill);
		if (ret) {
			NV_ERROR(drm, "Failed to request channel kill "
				      "notification: %d\n", ret);
			return ret;
		}
	}

	/* allocate dma objects to cover all allowed vram, and gart */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}

		ret = nvif_object_ctor(&chan->user, "abi16ChanVramCtxDma", vram,
				       NV_DMA_IN_MEMORY, &args, sizeof(args),
				       &chan->vram);
		if (ret)
			return ret;

		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		} else
		if (chan->drm->agp.bridge) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = chan->vmm->vmm.limit - 1;
		}

		ret = nvif_object_ctor(&chan->user, "abi16ChanGartCtxDma", gart,
				       NV_DMA_IN_MEMORY, &args, sizeof(args),
				       &chan->gart);
		if (ret)
			return ret;
	}

	/* initialise dma tracking parameters */
	switch (chan->user.oclass & 0x00ff) {
	case 0x006b:
	case 0x006e:
		/* legacy DMA channels: no indirect buffer */
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->dma.max = (0x10000 / 4) - 2;
		break;
	default:
		/* GPFIFO channels: IB ring lives after the pushbuf */
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->user_get_hi = 0x60;
		chan->dma.ib_base = 0x10000 / 4;
		chan->dma.ib_max = (0x02000 / 8) - 1;
		chan->dma.ib_put = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
		chan->dma.max = chan->dma.ib_base;
		break;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

	/* reserve and zero the skip slots at the start of the pushbuf */
	ret = PUSH_WAIT(chan->chan.push, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		PUSH_DATA(chan->chan.push, 0x00000000);

	/* allocate software object class (used for fences on <= nv05) */
	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		ret = nvif_object_ctor(&chan->user, "abi16NvswFence", 0x006e,
				       NVIF_CLASS_SW_NV04,
				       NULL, 0, &chan->nvsw);
		if (ret)
			return ret;

		ret = PUSH_WAIT(chan->chan.push, 2);
		if (ret)
			return ret;

		PUSH_NVSQ(chan->chan.push, NV_SW, 0x0000, chan->nvsw.handle);
		PUSH_KICK(chan->chan.push);
	}

	/* initialise synchronisation */
	return nouveau_fence(chan->drm)->context_new(chan);
}
int
2014-08-10 04:10:22 +10:00
nouveau_channel_new ( struct nouveau_drm * drm , struct nvif_device * device ,
2018-12-11 14:50:02 +10:00
u32 arg0 , u32 arg1 , bool priv ,
struct nouveau_channel * * pchan )
2012-07-20 08:17:34 +10:00
{
2015-08-20 14:54:15 +10:00
struct nouveau_cli * cli = ( void * ) device - > object . client ;
2014-10-20 15:49:33 +10:00
bool super ;
2012-07-20 08:17:34 +10:00
int ret ;
2014-10-20 15:49:33 +10:00
/* hack until fencenv50 is fixed, and agp access relaxed */
super = cli - > base . super ;
cli - > base . super = true ;
2018-12-11 14:50:02 +10:00
ret = nouveau_channel_ind ( drm , device , arg0 , priv , pchan ) ;
2012-07-20 08:17:34 +10:00
if ( ret ) {
2015-08-20 14:54:13 +10:00
NV_PRINTK ( dbg , cli , " ib channel create, %d \n " , ret ) ;
2015-09-04 14:40:32 +10:00
ret = nouveau_channel_dma ( drm , device , pchan ) ;
2012-07-20 08:17:34 +10:00
if ( ret ) {
2015-08-20 14:54:13 +10:00
NV_PRINTK ( dbg , cli , " dma channel create, %d \n " , ret ) ;
2014-10-20 15:49:33 +10:00
goto done ;
2012-07-20 08:17:34 +10:00
}
}
2012-08-06 19:38:25 +10:00
ret = nouveau_channel_init ( * pchan , arg0 , arg1 ) ;
2012-07-20 08:17:34 +10:00
if ( ret ) {
2015-08-20 14:54:13 +10:00
NV_PRINTK ( err , cli , " channel failed to initialise, %d \n " , ret ) ;
2012-07-20 08:17:34 +10:00
nouveau_channel_del ( pchan ) ;
}
2018-07-05 12:57:12 +10:00
ret = nouveau_svmm_join ( ( * pchan ) - > vmm - > svmm , ( * pchan ) - > inst ) ;
if ( ret )
nouveau_channel_del ( pchan ) ;
2014-10-20 15:49:33 +10:00
done :
cli - > base . super = super ;
return ret ;
2012-07-20 08:17:34 +10:00
}
2018-05-08 20:39:46 +10:00
int
nouveau_channels_init ( struct nouveau_drm * drm )
{
struct {
struct nv_device_info_v1 m ;
struct {
struct nv_device_info_v1_data channels ;
} v ;
} args = {
. m . version = 1 ,
. m . count = sizeof ( args . v ) / sizeof ( args . v . channels ) ,
. v . channels . mthd = NV_DEVICE_FIFO_CHANNELS ,
} ;
struct nvif_object * device = & drm - > client . device . object ;
int ret ;
ret = nvif_object_mthd ( device , NV_DEVICE_V0_INFO , & args , sizeof ( args ) ) ;
if ( ret | | args . v . channels . mthd = = NV_DEVICE_INFO_INVALID )
return - ENODEV ;
drm - > chan . nr = args . v . channels . data ;
drm - > chan . context_base = dma_fence_context_alloc ( drm - > chan . nr ) ;
return 0 ;
}