/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/dma-buf.h>
# include "nouveau_drv.h"
# include "nouveau_gem.h"
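/* Export: build a scatter/gather table describing the pages backing this
 * buffer object, so an importing device can DMA-map them. */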
struct sg_table *nouveau_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int npages = nvbo->bo.num_pages;

	return drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
}
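/* Map the whole buffer into kernel virtual address space; the mapping is
 * cached in nvbo->dma_buf_vmap for the matching vunmap below. */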
void *nouveau_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.num_pages,
			  &nvbo->dma_buf_vmap);
	if (ret)
		return ERR_PTR(ret);

	return nvbo->dma_buf_vmap.virtual;
}
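/* Tear down the kernel mapping created by nouveau_gem_prime_vmap(). */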
void nouveau_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);

	ttm_bo_kunmap(&nvbo->dma_buf_vmap);
}
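/* Import: wrap a foreign dma-buf's scatter/gather table in a new GEM
 * object. The buffer is valid in GART only (its backing pages belong to
 * the exporter), and the exporter's reservation object is shared so both
 * sides synchronize on the same fences. The resv is presumably locked
 * across allocation because initializing a TTM object with a shared
 * reservation object requires it to be held. */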
struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
							 struct dma_buf_attachment *attach,
							 struct sg_table *sg)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct drm_gem_object *obj;
	struct nouveau_bo *nvbo;
	struct dma_resv *robj = attach->dmabuf->resv;
	u64 size = attach->dmabuf->size;
	u32 flags = 0;
	int align = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	dma_resv_lock(robj, NULL);
	nvbo = nouveau_bo_alloc(&drm->client, &size, &align, flags, 0, 0);
	if (IS_ERR(nvbo)) {
		obj = ERR_CAST(nvbo);
		goto unlock;
	}

	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;

	/* Initialize the embedded gem-object. We return a single gem-reference
	 * to the caller, instead of a normal nouveau_bo ttm reference. */
	ret = drm_gem_object_init(dev, &nvbo->bo.base, size);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		obj = ERR_PTR(-ENOMEM);
		goto unlock;
	}

	ret = nouveau_bo_init(nvbo, size, align, flags, sg, robj);
	if (ret) {
		nouveau_bo_ref(NULL, &nvbo);
		obj = ERR_PTR(ret);
		goto unlock;
	}

	obj = &nvbo->bo.base;
unlock:
	dma_resv_unlock(robj);
	return obj;
}
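/* Pin the buffer in GART while it is exported, so its backing pages
 * cannot move underneath an importer's mapping. */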
int nouveau_gem_prime_pin(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);
	int ret;

	/* pin buffer into GTT */
	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false);
	if (ret)
		return -EINVAL;

	return 0;
}
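/* Drop the pin taken by nouveau_gem_prime_pin(); the buffer may be moved
 * or evicted again afterwards. */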
void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
{
	struct nouveau_bo *nvbo = nouveau_gem_object(obj);

	nouveau_bo_unpin(nvbo);
}