/*
 * Copyright © 2012 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Rob Clark <rob.clark@linaro.org>
 *
 */

#include <linux/export.h>
#include <linux/dma-buf.h>
#include <linux/rbtree.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>

#include "drm_internal.h"

MODULE_IMPORT_NS(DMA_BUF);

/**
 * DOC: overview and lifetime rules
 *
 * Similar to GEM global names, PRIME file descriptors are also used to share
 * buffer objects across processes. They offer additional security: as file
 * descriptors must be explicitly sent over UNIX domain sockets to be shared
 * between applications, they can't be guessed like the globally unique GEM
 * names.
 *
 * Drivers that support the PRIME API implement the
 * &drm_driver.prime_handle_to_fd and &drm_driver.prime_fd_to_handle
 * operations. GEM based drivers must use drm_gem_prime_handle_to_fd() and
 * drm_gem_prime_fd_to_handle() to implement these. For GEM based drivers the
 * actual driver interfaces are provided through the
 * &drm_gem_object_funcs.export and &drm_driver.gem_prime_import hooks.
 *
 * &dma_buf_ops implementations for GEM drivers are all individually exported
 * for drivers which need to overwrite or reimplement some of them.
 *
 * Reference Counting for GEM Drivers
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * On the export the &dma_buf holds a reference to the exported buffer object,
 * usually a &drm_gem_object. It takes this reference in the
 * PRIME_HANDLE_TO_FD IOCTL, when it first calls &drm_gem_object_funcs.export
 * and stores the exporting GEM object in the &dma_buf.priv field. This
 * reference needs to be released when the final reference to the &dma_buf
 * itself is dropped and its &dma_buf_ops.release function is called. For
 * GEM-based drivers, the &dma_buf should be exported using
 * drm_gem_dmabuf_export() and then released by drm_gem_dmabuf_release().
 *
 * Thus the chain of references always flows in one direction, avoiding loops:
 * importing GEM object -> dma-buf -> exported GEM bo. A further complication
 * are the lookup caches for import and export. These are required to
 * guarantee that any given object will always have only one unique userspace
 * handle. This is required to allow userspace to detect duplicated imports,
 * since some GEM drivers do fail command submissions if a given buffer object
 * is listed more than once. These import and export caches in
 * &drm_prime_file_private only retain a weak reference, which is cleaned up
 * when the corresponding object is released.
 *
 * Self-importing: If userspace is using PRIME as a replacement for flink then
 * it will get a fd->handle request for a GEM object that it created. Drivers
 * should detect this situation and return back the underlying object from the
 * dma-buf private. For GEM based drivers this is handled in
 * drm_gem_prime_import() already.
 */
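
/*
 * A minimal userspace sketch of the sharing flow described above, assuming
 * already-open DRM fds and an existing GEM handle (variable names here are
 * illustrative; only the ioctls and struct drm_prime_handle are uAPI):
 *
 *	struct drm_prime_handle args = {
 *		.handle = gem_handle,
 *		.flags = DRM_CLOEXEC,
 *	};
 *
 *	// exporting process: GEM handle -> dma-buf fd
 *	ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args);
 *	// args.fd can now be passed to another process over a UNIX socket
 *
 *	// importing process: dma-buf fd -> GEM handle
 *	struct drm_prime_handle imp = { .fd = received_fd };
 *	ioctl(other_drm_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &imp);
 *	// imp.handle now refers to the imported buffer object
 */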

struct drm_prime_member {
	struct dma_buf *dma_buf;
	uint32_t handle;

	struct rb_node dmabuf_rb;
	struct rb_node handle_rb;
};
static int drm_prime_add_buf_handle(struct drm_prime_file_private *prime_fpriv,
				    struct dma_buf *dma_buf, uint32_t handle)
{
	struct drm_prime_member *member;
	struct rb_node **p, *rb;

	member = kmalloc(sizeof(*member), GFP_KERNEL);
	if (!member)
		return -ENOMEM;

	get_dma_buf(dma_buf);
	member->dma_buf = dma_buf;
	member->handle = handle;

	rb = NULL;
	p = &prime_fpriv->dmabufs.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (dma_buf > pos->dma_buf)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->dmabuf_rb, rb, p);
	rb_insert_color(&member->dmabuf_rb, &prime_fpriv->dmabufs);

	rb = NULL;
	p = &prime_fpriv->handles.rb_node;
	while (*p) {
		struct drm_prime_member *pos;

		rb = *p;
		pos = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (handle > pos->handle)
			p = &rb->rb_right;
		else
			p = &rb->rb_left;
	}
	rb_link_node(&member->handle_rb, rb, p);
	rb_insert_color(&member->handle_rb, &prime_fpriv->handles);

	return 0;
}
static struct dma_buf *drm_prime_lookup_buf_by_handle(struct drm_prime_file_private *prime_fpriv,
						      uint32_t handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->handles.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, handle_rb);
		if (member->handle == handle)
			return member->dma_buf;
		else if (member->handle < handle)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}
static int drm_prime_lookup_buf_handle(struct drm_prime_file_private *prime_fpriv,
				       struct dma_buf *dma_buf,
				       uint32_t *handle)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			*handle = member->handle;
			return 0;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}

	return -ENOENT;
}
void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpriv,
					struct dma_buf *dma_buf)
{
	struct rb_node *rb;

	rb = prime_fpriv->dmabufs.rb_node;
	while (rb) {
		struct drm_prime_member *member;

		member = rb_entry(rb, struct drm_prime_member, dmabuf_rb);
		if (member->dma_buf == dma_buf) {
			rb_erase(&member->handle_rb, &prime_fpriv->handles);
			rb_erase(&member->dmabuf_rb, &prime_fpriv->dmabufs);

			dma_buf_put(dma_buf);
			kfree(member);
			return;
		} else if (member->dma_buf < dma_buf) {
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	}
}
void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv)
{
	mutex_init(&prime_fpriv->lock);
	prime_fpriv->dmabufs = RB_ROOT;
	prime_fpriv->handles = RB_ROOT;
}
void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv)
{
	/* by now drm_gem_release should've made sure the list is empty */
	WARN_ON(!RB_EMPTY_ROOT(&prime_fpriv->dmabufs));
}

/**
 * drm_gem_dmabuf_export - &dma_buf export implementation for GEM
 * @dev: parent device for the exported dmabuf
 * @exp_info: the export information used by dma_buf_export()
 *
 * This wraps dma_buf_export() for use by generic GEM drivers that are using
 * drm_gem_dmabuf_release(). In addition to calling dma_buf_export(), we take
 * a reference to the &drm_device and the exported &drm_gem_object (stored in
 * &dma_buf_export_info.priv) which is released by drm_gem_dmabuf_release().
 *
 * Returns the new dmabuf.
 */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
				      struct dma_buf_export_info *exp_info)
{
	struct drm_gem_object *obj = exp_info->priv;
	struct dma_buf *dma_buf;

	dma_buf = dma_buf_export(exp_info);
	if (IS_ERR(dma_buf))
		return dma_buf;

	drm_dev_get(dev);
	drm_gem_object_get(obj);
	dma_buf->file->f_mapping = obj->dev->anon_inode->i_mapping;

	return dma_buf;
}
EXPORT_SYMBOL(drm_gem_dmabuf_export);
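
/*
 * A minimal sketch of how a driver's &drm_gem_object_funcs.export hook might
 * use this wrapper (my_export() and my_dmabuf_ops are illustrative names,
 * not taken from any particular driver):
 *
 *	static struct dma_buf *my_export(struct drm_gem_object *obj, int flags)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *
 *		exp_info.ops = &my_dmabuf_ops;	// .release = drm_gem_dmabuf_release
 *		exp_info.size = obj->size;
 *		exp_info.flags = flags;
 *		exp_info.priv = obj;		// consumed by drm_gem_dmabuf_release()
 *
 *		return drm_gem_dmabuf_export(obj->dev, &exp_info);
 *	}
 */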

/**
 * drm_gem_dmabuf_release - &dma_buf release implementation for GEM
 * @dma_buf: buffer to be released
 *
 * Generic release function for dma_bufs exported as PRIME buffers. GEM drivers
 * must use this in their &dma_buf_ops structure as the release callback.
 * drm_gem_dmabuf_release() should be used in conjunction with
 * drm_gem_dmabuf_export().
 */
void drm_gem_dmabuf_release(struct dma_buf *dma_buf)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	/* drop the reference on the export fd holds */
	drm_gem_object_put(obj);

	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_gem_dmabuf_release);

/**
 * drm_gem_prime_fd_to_handle - PRIME import function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @prime_fd: fd id of the dma-buf which should be imported
 * @handle: pointer to storage for the handle of the imported buffer object
 *
 * This is the PRIME import function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * importing of the GEM object from the dma-buf is done through the
 * &drm_driver.gem_prime_import driver callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_prime_fd_to_handle(struct drm_device *dev,
			       struct drm_file *file_priv, int prime_fd,
			       uint32_t *handle)
{
	struct dma_buf *dma_buf;
	struct drm_gem_object *obj;
	int ret;

	dma_buf = dma_buf_get(prime_fd);
	if (IS_ERR(dma_buf))
		return PTR_ERR(dma_buf);

	mutex_lock(&file_priv->prime.lock);

	ret = drm_prime_lookup_buf_handle(&file_priv->prime,
			dma_buf, handle);
	if (ret == 0)
		goto out_put;

	/* never seen this one, need to import */
	mutex_lock(&dev->object_name_lock);
	if (dev->driver->gem_prime_import)
		obj = dev->driver->gem_prime_import(dev, dma_buf);
	else
		obj = drm_gem_prime_import(dev, dma_buf);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	if (obj->dma_buf) {
		WARN_ON(obj->dma_buf != dma_buf);
	} else {
		obj->dma_buf = dma_buf;
		get_dma_buf(dma_buf);
	}

	/* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, handle);
	drm_gem_object_put(obj);
	if (ret)
		goto out_put;

	ret = drm_prime_add_buf_handle(&file_priv->prime,
			dma_buf, *handle);
	mutex_unlock(&file_priv->prime.lock);
	if (ret)
		goto fail;

	dma_buf_put(dma_buf);

	return 0;

fail:
	/* hmm, if driver attached, we are relying on the free-object path
	 * to detach.. which seems ok..
	 */
	drm_gem_handle_delete(file_priv, *handle);
	dma_buf_put(dma_buf);
	return ret;

out_unlock:
	mutex_unlock(&dev->object_name_lock);
out_put:
	mutex_unlock(&file_priv->prime.lock);
	dma_buf_put(dma_buf);
	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);

int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_fd_to_handle)
		return -ENOSYS;

	return dev->driver->prime_fd_to_handle(dev, file_priv,
			args->fd, &args->handle);
}

static struct dma_buf *export_and_register_object(struct drm_device *dev,
						  struct drm_gem_object *obj,
						  uint32_t flags)
{
	struct dma_buf *dmabuf;

	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		dmabuf = ERR_PTR(-ENOENT);
		return dmabuf;
	}

	if (obj->funcs && obj->funcs->export)
		dmabuf = obj->funcs->export(obj, flags);
	else
		dmabuf = drm_gem_prime_export(obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		return dmabuf;
	}

	/*
	 * Note that callers do not need to clean up the export cache
	 * since the check for obj->handle_count guarantees that someone
	 * will clean it up.
	 */
	obj->dma_buf = dmabuf;
	get_dma_buf(obj->dma_buf);

	return dmabuf;
}

/**
 * drm_gem_prime_handle_to_fd - PRIME export function for GEM drivers
 * @dev: dev to export the buffer from
 * @file_priv: drm file-private structure
 * @handle: buffer handle to export
 * @flags: flags like DRM_CLOEXEC
 * @prime_fd: pointer to storage for the fd id of the created dma-buf
 *
 * This is the PRIME export function which GEM drivers must use to ensure
 * correct lifetime management of the underlying GEM object. The actual
 * exporting from GEM object to a dma-buf is done through the
 * &drm_gem_object_funcs.export callback.
 */
int drm_gem_prime_handle_to_fd(struct drm_device *dev,
			       struct drm_file *file_priv, uint32_t handle,
			       uint32_t flags,
			       int *prime_fd)
{
	struct drm_gem_object *obj;
	int ret = 0;
	struct dma_buf *dmabuf;

	mutex_lock(&file_priv->prime.lock);
	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj)  {
		ret = -ENOENT;
		goto out_unlock;
	}

	dmabuf = drm_prime_lookup_buf_by_handle(&file_priv->prime, handle);
	if (dmabuf) {
		get_dma_buf(dmabuf);
		goto out_have_handle;
	}

	mutex_lock(&dev->object_name_lock);
	/* re-export the original imported object */
	if (obj->import_attach) {
		dmabuf = obj->import_attach->dmabuf;
		get_dma_buf(dmabuf);
		goto out_have_obj;
	}

	if (obj->dma_buf) {
		get_dma_buf(obj->dma_buf);
		dmabuf = obj->dma_buf;
		goto out_have_obj;
	}

	dmabuf = export_and_register_object(dev, obj, flags);
	if (IS_ERR(dmabuf)) {
		/* normally the created dma-buf takes ownership of the ref,
		 * but if that fails then drop the ref
		 */
		ret = PTR_ERR(dmabuf);
		mutex_unlock(&dev->object_name_lock);
		goto out;
	}

out_have_obj:
	/*
	 * If we've exported this buffer then cheat and add it to the import list
	 * so we get the correct handle back. We must do this under the
	 * protection of dev->object_name_lock to ensure that a racing gem close
	 * ioctl doesn't fail to remove this buffer handle from the cache.
	 */
	ret = drm_prime_add_buf_handle(&file_priv->prime,
				       dmabuf, handle);
	mutex_unlock(&dev->object_name_lock);
	if (ret)
		goto fail_put_dmabuf;

out_have_handle:
	ret = dma_buf_fd(dmabuf, flags);
	/*
	 * We must _not_ remove the buffer from the handle cache since the newly
	 * created dma buf is already linked in the global obj->dma_buf pointer,
	 * and that is invariant as long as a userspace gem handle exists.
	 * Closing the handle will clean out the cache anyway, so we don't leak.
	 */
	if (ret < 0) {
		goto fail_put_dmabuf;
	} else {
		*prime_fd = ret;
		ret = 0;
	}

	goto out;

fail_put_dmabuf:
	dma_buf_put(dmabuf);
out:
	drm_gem_object_put(obj);
out_unlock:
	mutex_unlock(&file_priv->prime.lock);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);

int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_prime_handle *args = data;

	if (!dev->driver->prime_handle_to_fd)
		return -ENOSYS;

	/* check flags are valid */
	if (args->flags & ~(DRM_CLOEXEC | DRM_RDWR))
		return -EINVAL;

	return dev->driver->prime_handle_to_fd(dev, file_priv,
			args->handle, args->flags, &args->fd);
}
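
/*
 * A minimal sketch of how a GEM driver wires up both PRIME entry points in
 * its &drm_driver, as the overview above prescribes (my_driver is an
 * illustrative name):
 *
 *	static const struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM,
 *		.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 *		.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 *		...
 *	};
 */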

/**
 * DOC: PRIME Helpers
 *
 * Drivers can implement &drm_gem_object_funcs.export and
 * &drm_driver.gem_prime_import in terms of simpler APIs by using the helper
 * functions drm_gem_prime_export() and drm_gem_prime_import(). These functions
 * implement dma-buf support in terms of some lower-level helpers, which are
 * again exported for drivers to use individually:
 *
 * Exporting buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Optional pinning of buffers is handled at dma-buf attach and detach time in
 * drm_gem_map_attach() and drm_gem_map_detach(). Backing storage itself is
 * handled by drm_gem_map_dma_buf() and drm_gem_unmap_dma_buf(), which relies
 * on &drm_gem_object_funcs.get_sg_table.
 *
 * For kernel-internal access there's drm_gem_dmabuf_vmap() and
 * drm_gem_dmabuf_vunmap(). Userspace mmap support is provided by
 * drm_gem_dmabuf_mmap().
 *
 * Note that these export helpers can only be used if the underlying backing
 * storage is fully coherent and either permanently pinned, or it is safe to
 * pin it indefinitely.
 *
 * FIXME: The underlying helper functions are named rather inconsistently.
 *
 * Importing buffers
 * ~~~~~~~~~~~~~~~~~
 *
 * Importing dma-bufs using drm_gem_prime_import() relies on
 * &drm_driver.gem_prime_import_sg_table.
 *
 * Note that similarly to the export helpers this permanently pins the
 * underlying backing storage. Which is ok for scanout, but is not the best
 * option for sharing lots of buffers for rendering.
 */
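
/*
 * A minimal sketch of how a driver might plug into these helpers through its
 * &drm_gem_object_funcs (my_get_sg_table and my_vmap are illustrative driver
 * callbacks, not from any particular driver):
 *
 *	static const struct drm_gem_object_funcs my_gem_funcs = {
 *		.export = drm_gem_prime_export,
 *		.get_sg_table = my_get_sg_table,
 *		.vmap = my_vmap,
 *		...
 *	};
 *
 * With this, drm_gem_prime_export() hands out dma-bufs backed by the
 * drm_gem_prime_dmabuf_ops defined below, which in turn call back into
 * my_get_sg_table()/my_vmap() through the drm_gem_* helpers.
 */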

/**
 * drm_gem_map_attach - dma_buf attach implementation for GEM
 * @dma_buf: buffer to attach device to
 * @attach: buffer attachment data
 *
 * Calls &drm_gem_object_funcs.pin for device specific handling. This can be
 * used as the &dma_buf_ops.attach callback. Must be used together with
 * drm_gem_map_detach().
 *
 * Returns 0 on success, negative error code on failure.
 */
int drm_gem_map_attach(struct dma_buf *dma_buf,
		       struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_pin(obj);
}
EXPORT_SYMBOL(drm_gem_map_attach);

/**
 * drm_gem_map_detach - dma_buf detach implementation for GEM
 * @dma_buf: buffer to detach from
 * @attach: attachment to be detached
 *
 * Calls &drm_gem_object_funcs.unpin for device specific handling. Cleans up
 * &dma_buf_attachment from drm_gem_map_attach(). This can be used as the
 * &dma_buf_ops.detach callback.
 */
void drm_gem_map_detach(struct dma_buf *dma_buf,
			struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_unpin(obj);
}
EXPORT_SYMBOL(drm_gem_map_detach);

/**
 * drm_gem_map_dma_buf - map_dma_buf implementation for GEM
 * @attach: attachment whose scatterlist is to be returned
 * @dir: direction of DMA transfer
 *
 * Calls &drm_gem_object_funcs.get_sg_table and then maps the scatterlist. This
 * can be used as the &dma_buf_ops.map_dma_buf callback. Should be used together
 * with drm_gem_unmap_dma_buf().
 *
 * Returns: sg_table containing the scatterlist to be returned; returns ERR_PTR
 * on error. May return -EINTR if it is interrupted by a signal.
 */
struct sg_table *drm_gem_map_dma_buf(struct dma_buf_attachment *attach,
				     enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct sg_table *sgt;
	int ret;

	if (WARN_ON(dir == DMA_NONE))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!obj->funcs->get_sg_table))
		return ERR_PTR(-ENOSYS);

	sgt = obj->funcs->get_sg_table(obj);
	if (IS_ERR(sgt))
		return sgt;

	ret = dma_map_sgtable(attach->dev, sgt, dir,
			      DMA_ATTR_SKIP_CPU_SYNC);
	if (ret) {
		sg_free_table(sgt);
		kfree(sgt);
		sgt = ERR_PTR(ret);
	}

	return sgt;
}
EXPORT_SYMBOL(drm_gem_map_dma_buf);
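
/*
 * For reference, a hedged sketch of how the dma-buf core drives these
 * callbacks from the importer's side (importer_dev is illustrative, error
 * handling omitted):
 *
 *	attach = dma_buf_attach(dma_buf, importer_dev);	// -> drm_gem_map_attach()
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); // -> drm_gem_map_dma_buf()
 *	// ... DMA using sgt ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dma_buf, attach);		// -> drm_gem_map_detach()
 */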

/**
 * drm_gem_unmap_dma_buf - unmap_dma_buf implementation for GEM
 * @attach: attachment to unmap buffer from
 * @sgt: scatterlist info of the buffer to unmap
 * @dir: direction of DMA transfer
 *
 * This can be used as the &dma_buf_ops.unmap_dma_buf callback.
 */
void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
			   struct sg_table *sgt,
			   enum dma_data_direction dir)
{
	if (!sgt)
		return;

	dma_unmap_sgtable(attach->dev, sgt, dir, DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(sgt);
}
EXPORT_SYMBOL(drm_gem_unmap_dma_buf);

/**
 * drm_gem_dmabuf_vmap - dma_buf vmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @map: the virtual address of the buffer
 *
 * Sets up a kernel virtual mapping. This can be used as the &dma_buf_ops.vmap
 * callback. Calls into &drm_gem_object_funcs.vmap for device specific handling.
 * The kernel virtual address is returned in map.
 *
 * Returns 0 on success or a negative errno code otherwise.
 */
int drm_gem_dmabuf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	return drm_gem_vmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vmap);

/**
 * drm_gem_dmabuf_vunmap - dma_buf vunmap implementation for GEM
 * @dma_buf: buffer to be unmapped
 * @map: the virtual address of the buffer
 *
 * Releases a kernel virtual mapping. This can be used as the
 * &dma_buf_ops.vunmap callback. Calls into &drm_gem_object_funcs.vunmap for
 * device specific handling.
 */
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;

	drm_gem_vunmap(obj, map);
}
EXPORT_SYMBOL(drm_gem_dmabuf_vunmap);
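
/*
 * A short sketch of the kernel-internal access path these two callbacks
 * serve, as driven by the dma-buf core (error handling omitted):
 *
 *	struct iosys_map map;
 *
 *	dma_buf_vmap(dma_buf, &map);	// -> drm_gem_dmabuf_vmap()
 *	// access the buffer through map.vaddr (or map.vaddr_iomem)
 *	dma_buf_vunmap(dma_buf, &map);	// -> drm_gem_dmabuf_vunmap()
 */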

/**
 * drm_gem_prime_mmap - PRIME mmap function for GEM drivers
 * @obj: GEM object
 * @vma: Virtual address range
 *
 * This function sets up a userspace mapping for PRIME exported buffers using
 * the same codepath that is used for regular GEM buffer mapping on the DRM fd.
 * The fake GEM offset is added to vma->vm_pgoff and &drm_driver->fops->mmap is
 * called to set up the mapping.
 *
 * Drivers can use this as their &drm_driver.gem_prime_mmap callback.
 */
int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct drm_file *priv;
	struct file *fil;
	int ret;

	/* Add the fake offset */
	vma->vm_pgoff += drm_vma_node_start(&obj->vma_node);

	if (obj->funcs && obj->funcs->mmap) {
		vma->vm_ops = obj->funcs->vm_ops;

		drm_gem_object_get(obj);
		ret = obj->funcs->mmap(obj, vma);
		if (ret) {
			drm_gem_object_put(obj);
			return ret;
		}
		vma->vm_private_data = obj;
		return 0;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	fil = kzalloc(sizeof(*fil), GFP_KERNEL);
	if (!priv || !fil) {
		ret = -ENOMEM;
		goto out;
	}

	/* Used by drm_gem_mmap() to lookup the GEM object */
	priv->minor = obj->dev->primary;
	fil->private_data = priv;

	ret = drm_vma_node_allow(&obj->vma_node, priv);
	if (ret)
		goto out;

	ret = obj->dev->driver->fops->mmap(fil, vma);

	drm_vma_node_revoke(&obj->vma_node, priv);
out:
	kfree(priv);
	kfree(fil);

	return ret;
}
EXPORT_SYMBOL(drm_gem_prime_mmap);

/**
 * drm_gem_dmabuf_mmap - dma_buf mmap implementation for GEM
 * @dma_buf: buffer to be mapped
 * @vma: virtual address range
 *
 * Provides memory mapping for the buffer. This can be used as the
 * &dma_buf_ops.mmap callback. It just forwards to &drm_driver.gem_prime_mmap,
 * which should be set to drm_gem_prime_mmap().
 *
 * FIXME: There's really no point to this wrapper, drivers which need anything
 * else but drm_gem_prime_mmap can roll their own &dma_buf_ops.mmap callback.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int drm_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;

	if (!dev->driver->gem_prime_mmap)
		return -ENOSYS;

	return dev->driver->gem_prime_mmap(obj, vma);
}
EXPORT_SYMBOL(drm_gem_dmabuf_mmap);

static const struct dma_buf_ops drm_gem_prime_dmabuf_ops =  {
	.cache_sgt_mapping = true,
	.attach = drm_gem_map_attach,
	.detach = drm_gem_map_detach,
	.map_dma_buf = drm_gem_map_dma_buf,
	.unmap_dma_buf = drm_gem_unmap_dma_buf,
	.release = drm_gem_dmabuf_release,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = drm_gem_dmabuf_vmap,
	.vunmap = drm_gem_dmabuf_vunmap,
};

/**
 * drm_prime_pages_to_sg - converts a page array into an sg list
 * @dev: DRM device
 * @pages: pointer to the array of page pointers to convert
 * @nr_pages: length of the page vector
 *
 * This helper creates an sg table object from a set of pages. The driver is
 * responsible for mapping the pages into the importer's address space for use
 * with dma_buf itself.
 *
 * This is useful for implementing &drm_gem_object_funcs.get_sg_table.
 */
struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
				       struct page **pages, unsigned int nr_pages)
{
	struct sg_table *sg;
	size_t max_segment = 0;
	int err;

	sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
2020-10-17 21:18:18 +03:00
	if (!sg)
		return ERR_PTR(-ENOMEM);
2018-11-21 21:02:15 +03:00
2020-09-07 14:24:25 +03:00
	if (dev)
		max_segment = dma_max_mapping_size(dev->dev);
2020-10-28 22:15:26 +03:00
	if (max_segment == 0)
		max_segment = UINT_MAX;
2021-08-24 17:25:29 +03:00
	err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
						nr_pages << PAGE_SHIFT,
						max_segment, GFP_KERNEL);
	if (err) {
2020-10-17 21:18:18 +03:00
		kfree(sg);
2021-08-24 17:25:29 +03:00
		sg = ERR_PTR(err);
2018-11-21 21:02:15 +03:00
	}
2019-06-18 12:20:37 +03:00
	return sg;
}
EXPORT_SYMBOL(drm_prime_pages_to_sg);
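/*
 * Illustrative sketch (not part of the original file): how a driver that
 * tracks its backing pages in a GEM wrapper could use drm_prime_pages_to_sg()
 * from a &drm_gem_object_funcs.get_sg_table hook. "foo_gem_object" and its
 * "pages" field are hypothetical driver-side names.
 */
struct foo_gem_object {
	struct drm_gem_object base;
	struct page **pages;	/* one entry per PAGE_SIZE chunk */
};

static struct sg_table *foo_gem_get_sg_table(struct drm_gem_object *obj)
{
	struct foo_gem_object *bo = container_of(obj, struct foo_gem_object,
						 base);

	/* nr_pages is derived from the object size, as in most GEM drivers */
	return drm_prime_pages_to_sg(obj->dev, bo->pages,
				     obj->size >> PAGE_SHIFT);
}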
2018-11-10 17:56:44 +03:00
2020-05-08 17:04:44 +03:00
/**
 * drm_prime_get_contiguous_size - returns the contiguous size of the buffer
 * @sgt: sg_table describing the buffer to check
 *
 * This helper calculates the contiguous size in the DMA address space
 * of the buffer described by the provided sg_table.
 *
 * This is useful for implementing
 * &drm_driver.gem_prime_import_sg_table.
 */
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt)
{
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	struct scatterlist *sg;
	unsigned long size = 0;
	int i;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		unsigned int len = sg_dma_len(sg);

		if (!len)
			break;
		if (sg_dma_address(sg) != expected)
			break;
		expected += len;
		size += len;
	}
	return size;
}
EXPORT_SYMBOL(drm_prime_get_contiguous_size);
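/*
 * Illustrative sketch (not part of the original file): a driver whose
 * (hypothetical) hardware can only address physically contiguous buffers can
 * use the helper to validate an import before accepting it.
 * foo_gem_wrap_contiguous() is a hypothetical driver constructor.
 */
struct drm_gem_object *foo_gem_wrap_contiguous(struct drm_device *dev,
					       struct sg_table *sgt);

static struct drm_gem_object *
foo_gem_prime_import_sg_table(struct drm_device *dev,
			      struct dma_buf_attachment *attach,
			      struct sg_table *sgt)
{
	/* Reject buffers that are not contiguous in DMA address space. */
	if (drm_prime_get_contiguous_size(sgt) < attach->dmabuf->size)
		return ERR_PTR(-EINVAL);

	return foo_gem_wrap_contiguous(dev, sgt);
}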
2019-06-18 12:20:37 +03:00
/**
 * drm_gem_prime_export - helper library implementation of the export callback
 * @obj: GEM object to export
 * @flags: flags like DRM_CLOEXEC and DRM_RDWR
 *
2019-06-20 15:46:15 +03:00
 * This is the implementation of the &drm_gem_object_funcs.export function for
 * GEM drivers using the PRIME helpers. It is used as the default in
 * drm_gem_prime_handle_to_fd().
2019-06-18 12:20:37 +03:00
 */
2019-06-14 23:35:25 +03:00
struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
2019-06-18 12:20:37 +03:00
				     int flags)
{
2019-06-14 23:35:25 +03:00
	struct drm_device *dev = obj->dev;
2019-06-18 12:20:37 +03:00
	struct dma_buf_export_info exp_info = {
		.exp_name = KBUILD_MODNAME, /* white lie for debug */
		.owner = dev->driver->fops->owner,
		.ops = &drm_gem_prime_dmabuf_ops,
		.size = obj->size,
		.flags = flags,
		.priv = obj,
		.resv = obj->resv,
	};
2018-11-10 17:56:44 +03:00
2019-06-18 12:20:37 +03:00
	return drm_gem_dmabuf_export(dev, &exp_info);
2018-11-10 17:56:44 +03:00
}
2019-06-18 12:20:37 +03:00
EXPORT_SYMBOL(drm_gem_prime_export);
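/*
 * Illustrative sketch (not part of the original file): drivers using the
 * PRIME helpers can point their object funcs at drm_gem_prime_export();
 * leaving &drm_gem_object_funcs.export NULL has the same effect, since
 * drm_gem_prime_handle_to_fd() falls back to this helper by default.
 * "foo_gem_object_funcs" is a hypothetical name; a real driver would
 * reference it when initializing its GEM objects.
 */
static const struct drm_gem_object_funcs foo_gem_object_funcs = {
	.export = drm_gem_prime_export,
};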
2018-11-10 17:56:44 +03:00
2014-01-22 22:16:30 +04:00
/**
2017-05-04 21:45:47 +03:00
 * drm_gem_prime_import_dev - core implementation of the import callback
2014-01-22 22:16:30 +04:00
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
2017-05-04 21:45:47 +03:00
 * @attach_dev: struct device to attach the dma_buf to
2014-01-22 22:16:30 +04:00
 *
2019-06-20 15:46:15 +03:00
 * This is the core of drm_gem_prime_import(). It's designed to be called by
 * drivers who want to use a different device structure than &drm_device.dev for
 * attaching via dma_buf. This function calls
 * &drm_driver.gem_prime_import_sg_table internally.
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
2014-01-22 22:16:30 +04:00
 */
2017-05-04 21:45:47 +03:00
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
						struct dma_buf *dma_buf,
						struct device *attach_dev)
2013-01-16 00:47:42 +04:00
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct drm_gem_object *obj;
	int ret;

	if (dma_buf->ops == &drm_gem_prime_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dma-buf exported from our own GEM
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dma-buf.
			 */
2017-02-28 17:46:41 +03:00
			drm_gem_object_get(obj);
2013-01-16 00:47:42 +04:00
			return obj;
		}
	}
2015-05-08 11:13:45 +03:00
	if (!dev->driver->gem_prime_import_sg_table)
		return ERR_PTR(-EINVAL);
2017-05-04 21:45:47 +03:00
	attach = dma_buf_attach(dma_buf, attach_dev);
2013-01-16 00:47:42 +04:00
	if (IS_ERR(attach))
2013-06-01 14:09:27 +04:00
		return ERR_CAST(attach);
2013-01-16 00:47:42 +04:00
2013-04-19 05:11:56 +04:00
	get_dma_buf(dma_buf);
2013-01-16 00:47:42 +04:00
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
2013-12-21 04:43:50 +04:00
	if (IS_ERR(sgt)) {
2013-01-16 00:47:42 +04:00
		ret = PTR_ERR(sgt);
		goto fail_detach;
	}
2014-01-09 14:03:14 +04:00
	obj = dev->driver->gem_prime_import_sg_table(dev, attach, sgt);
2013-01-16 00:47:42 +04:00
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto fail_unmap;
	}

	obj->import_attach = attach;
2019-06-14 23:35:57 +03:00
	obj->resv = dma_buf->resv;
2013-01-16 00:47:42 +04:00
	return obj;

fail_unmap:
	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
fail_detach:
	dma_buf_detach(dma_buf, attach);
2013-04-19 05:11:56 +04:00
	dma_buf_put(dma_buf);
2013-01-16 00:47:42 +04:00
	return ERR_PTR(ret);
}
2017-05-04 21:45:47 +03:00
EXPORT_SYMBOL(drm_gem_prime_import_dev);
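/*
 * Illustrative sketch (not part of the original file): a driver whose DMA is
 * performed by a device other than &drm_device.dev (for example with the
 * component/master pattern) can wrap this helper. "foo_device" and its
 * "dma_dev" field are hypothetical.
 */
struct foo_device {
	struct drm_device drm;
	struct device *dma_dev;	/* device that actually performs the DMA */
};

static struct drm_gem_object *foo_gem_prime_import(struct drm_device *dev,
						   struct dma_buf *dma_buf)
{
	struct foo_device *foo = container_of(dev, struct foo_device, drm);

	/* Attach with the DMA-capable device, not drm_device.dev. */
	return drm_gem_prime_import_dev(dev, dma_buf, foo->dma_dev);
}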
/**
 * drm_gem_prime_import - helper library implementation of the import callback
 * @dev: drm_device to import into
 * @dma_buf: dma-buf object to import
 *
 * This is the implementation of the gem_prime_import functions for GEM drivers
2019-06-20 15:46:15 +03:00
 * using the PRIME helpers. Drivers can use this as their
 * &drm_driver.gem_prime_import implementation. It is used as the default
 * implementation in drm_gem_prime_fd_to_handle().
 *
 * Drivers must arrange to call drm_prime_gem_destroy() from their
 * &drm_gem_object_funcs.free hook when using this function.
2017-05-04 21:45:47 +03:00
 */
struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
					    struct dma_buf *dma_buf)
{
	return drm_gem_prime_import_dev(dev, dma_buf, dev->dev);
}
2013-01-16 00:47:42 +04:00
EXPORT_SYMBOL(drm_gem_prime_import);
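/*
 * Illustrative sketch (not part of the original file): wiring the helper into
 * a hypothetical driver structure. Since drm_gem_prime_fd_to_handle() already
 * uses it as the default, setting the hook explicitly is mostly of
 * documentation value. Mandatory fields (.name, .fops, ...) are omitted here.
 */
static struct drm_driver foo_driver = {
	.gem_prime_import = drm_gem_prime_import,
};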
2014-01-22 22:16:30 +04:00
/**
2020-10-08 13:57:32 +03:00
 * drm_prime_sg_to_page_array - convert an sg_table into a page array
2014-01-22 22:16:30 +04:00
 * @sgt: scatter-gather table to convert
2020-10-08 13:57:32 +03:00
 * @pages: array of page pointers to store the pages in
 * @max_entries: size of the passed-in array
 *
 * Exports an sg_table into an array of pages.
 *
 * This function is deprecated and its use is strongly discouraged. The page
 * array is only useful for page faults, and those can corrupt fields in the
 * struct page if they are not handled by the exporting driver.
 */
int __deprecated drm_prime_sg_to_page_array(struct sg_table *sgt,
					    struct page **pages,
					    int max_entries)
{
	struct sg_page_iter page_iter;
	struct page **p = pages;

	for_each_sgtable_page(sgt, &page_iter, 0) {
		if (WARN_ON(p - pages >= max_entries))
			return -1;
		*p++ = sg_page_iter_page(&page_iter);
	}
	return 0;
}
EXPORT_SYMBOL(drm_prime_sg_to_page_array);
/**
 * drm_prime_sg_to_dma_addr_array - convert an sg_table into a dma addr array
 * @sgt: scatter-gather table to convert
 * @addrs: array to store the dma bus address of each page
2018-02-27 14:49:57 +03:00
 * @max_entries: size of the passed-in array
2014-01-22 22:16:30 +04:00
 *
2020-10-08 13:57:32 +03:00
 * Exports an sg_table into an array of addresses.
2019-06-20 15:46:15 +03:00
 *
2020-10-08 13:57:32 +03:00
 * Drivers should use this in their &drm_driver.gem_prime_import_sg_table
2019-06-20 15:46:15 +03:00
 * implementation.
2014-01-22 22:16:30 +04:00
 */
2020-10-08 13:57:32 +03:00
int drm_prime_sg_to_dma_addr_array(struct sg_table *sgt, dma_addr_t *addrs,
				   int max_entries)
2012-05-18 18:40:33 +04:00
{
2020-05-08 17:05:14 +03:00
	struct sg_dma_page_iter dma_iter;
	dma_addr_t *a = addrs;

2020-10-08 13:57:32 +03:00
	for_each_sgtable_dma_page(sgt, &dma_iter, 0) {
		if (WARN_ON(a - addrs >= max_entries))
			return -1;
		*a++ = sg_page_iter_dma_address(&dma_iter);
2012-05-18 18:40:33 +04:00
	}
	return 0;
}
2020-10-08 13:57:32 +03:00
EXPORT_SYMBOL(drm_prime_sg_to_dma_addr_array);
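/*
 * Illustrative sketch (not part of the original file): flattening the mapped
 * sg_table into one DMA address per page inside a hypothetical
 * &drm_driver.gem_prime_import_sg_table implementation.
 * foo_gem_create_with_addrs() is a hypothetical driver constructor.
 */
struct drm_gem_object *foo_gem_create_with_addrs(struct drm_device *dev,
						 dma_addr_t *addrs,
						 unsigned int npages);

static struct drm_gem_object *
foo_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach,
		    struct sg_table *sgt)
{
	unsigned int npages = attach->dmabuf->size >> PAGE_SHIFT;
	dma_addr_t *addrs;
	int ret;

	addrs = kmalloc_array(npages, sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return ERR_PTR(-ENOMEM);

	ret = drm_prime_sg_to_dma_addr_array(sgt, addrs, npages);
	if (ret) {
		kfree(addrs);
		return ERR_PTR(-EINVAL);
	}

	/* hand the per-page addresses to the hypothetical hardware setup */
	return foo_gem_create_with_addrs(dev, addrs, npages);
}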
2014-01-22 22:16:30 +04:00
/**
 * drm_prime_gem_destroy - helper to clean up a PRIME-imported GEM object
 * @obj: GEM object which was created from a dma-buf
 * @sg: the sg-table which was pinned at import time
 *
 * This is the cleanup function which GEM drivers need to call when they use
2019-06-20 15:46:15 +03:00
 * drm_gem_prime_import() or drm_gem_prime_import_dev() to import dma-bufs.
2014-01-22 22:16:30 +04:00
 */
2011-11-25 19:21:02 +04:00
void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg)
{
	struct dma_buf_attachment *attach;
	struct dma_buf *dma_buf;
2020-07-02 16:23:32 +03:00

2011-11-25 19:21:02 +04:00
	attach = obj->import_attach;
	if (sg)
		dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
	dma_buf = attach->dmabuf;
	dma_buf_detach(attach->dmabuf, attach);
	/* remove the reference */
	dma_buf_put(dma_buf);
}
EXPORT_SYMBOL(drm_prime_gem_destroy);
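/*
 * Illustrative sketch (not part of the original file): a
 * &drm_gem_object_funcs.free hook for a hypothetical driver that stores the
 * sg_table obtained at import time in its GEM wrapper.
 */
struct foo_imported_object {
	struct drm_gem_object base;
	struct sg_table *sgt;	/* table returned by the import path */
};

static void foo_gem_free(struct drm_gem_object *obj)
{
	struct foo_imported_object *bo =
		container_of(obj, struct foo_imported_object, base);

	/* Only PRIME-imported objects carry an import attachment. */
	if (obj->import_attach)
		drm_prime_gem_destroy(obj, bo->sgt);
	drm_gem_object_release(obj);
	kfree(bo);
}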