/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#ifndef __I915_GEM_OBJECT_H__
#define __I915_GEM_OBJECT_H__

#include <linux/reservation.h>

#include <drm/drm_vma_manager.h>
#include <drm/drm_gem.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
# include "i915_gem_request.h"
# include "i915_selftest.h"

struct drm_i915_gem_object;

/*
 * struct i915_lut_handle tracks the fast lookups from handle to vma used
 * for execbuf. Although we use a radixtree for that mapping, in order to
 * remove them as the object or context is closed, we need a secondary list
 * and a translation entry (i915_lut_handle).
 */
struct i915_lut_handle {
	struct list_head obj_link;
	struct list_head ctx_link;
	struct i915_gem_context *ctx;
	u32 handle;
};
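
/*
 * Illustrative sketch (an assumption about the execbuf path, not an API
 * defined here): a lut entry is allocated when a handle->vma lookup is
 * cached for a context, and linked into both the object and the context so
 * that either side can tear it down on close. ctx->handles_vma and
 * ctx->handles_list are assumed context fields.
 *
 *	struct i915_lut_handle *lut = kmalloc(sizeof(*lut), GFP_KERNEL);
 *	if (!lut)
 *		return -ENOMEM;
 *
 *	err = radix_tree_insert(&ctx->handles_vma, handle, vma);
 *	if (err) {
 *		kfree(lut);
 *		return err;
 *	}
 *
 *	lut->ctx = ctx;
 *	lut->handle = handle;
 *	list_add(&lut->obj_link, &obj->lut_list);
 *	list_add(&lut->ctx_link, &ctx->handles_list);
 */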

struct drm_i915_gem_object_ops {
	unsigned int flags;
#define I915_GEM_OBJECT_HAS_STRUCT_PAGE BIT(0)
#define I915_GEM_OBJECT_IS_SHRINKABLE   BIT(1)

	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be an
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker). See the illustrative sketch after
	 * this struct.
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *, struct sg_table *);

	int (*pwrite)(struct drm_i915_gem_object *,
		      const struct drm_i915_gem_pwrite *);

	int (*dmabuf_export)(struct drm_i915_gem_object *);
	void (*release)(struct drm_i915_gem_object *);
};
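
/*
 * Illustrative sketch: a backing-store implementation fills in an ops table
 * such as the one below and attaches it to the object at creation time. The
 * helper names example_get_pages()/example_put_pages() are hypothetical;
 * only the flags and the callback signatures come from this header.
 *
 *	static int example_get_pages(struct drm_i915_gem_object *obj)
 *	{
 *		// allocate an sg_table describing the backing pages and
 *		// publish it via the obj->mm machinery; return 0 or -errno
 *		return -ENODEV;
 *	}
 *
 *	static void example_put_pages(struct drm_i915_gem_object *obj,
 *				      struct sg_table *pages)
 *	{
 *		// release the pages described by @pages
 *	}
 *
 *	static const struct drm_i915_gem_object_ops example_ops = {
 *		.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
 *			 I915_GEM_OBJECT_IS_SHRINKABLE,
 *		.get_pages = example_get_pages,
 *		.put_pages = example_put_pages,
 *	};
 */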

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/**
	 * @vma_list: List of VMAs backed by this object
	 *
	 * The VMAs on this list are ordered by type: all GGTT VMAs are placed
	 * at the head and all ppGTT VMAs at the tail. The different types of
	 * GGTT VMA are unordered between themselves; use @vma_tree (which has
	 * a defined order between all VMAs) to find an exact match.
	 */
	struct list_head vma_list;

	/**
	 * @vma_tree: Ordered tree of VMAs backed by this object
	 *
	 * All VMAs created for this object are placed in the @vma_tree for
	 * fast retrieval via a binary search in i915_vma_instance().
	 * They are also added to @vma_list for easy iteration.
	 */
	struct rb_root vma_tree;

	/**
	 * @lut_list: List of vma lookup entries in use for this object.
	 *
	 * If this object is closed, we need to remove all of its VMAs from
	 * the fast lookup index in the associated contexts; @lut_list
	 * provides this translation from object to context->handles_vma.
	 */
	struct list_head lut_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_link;

	union {
		struct rcu_head rcu;
		struct llist_node freed;
	};

	/**
	 * Whether the object is currently in the GGTT mmap.
	 */
	struct list_head userfault_link;

	struct list_head batch_pool_link;
	I915_SELFTEST_DECLARE(struct list_head st_link);

	unsigned long flags;

	/**
	 * Have we taken a reference for the object for incomplete GPU
	 * activity?
	 */
#define I915_BO_ACTIVE_REF 0

	/*
	 * Is the object to be mapped as read-only to the GPU?
	 * Only honoured if hardware has the relevant pte bit.
	 */
	unsigned long gt_ro:1;
	unsigned int cache_level:3;

	unsigned int cache_coherent:2;
#define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)
#define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1)
	unsigned int cache_dirty:1;

	atomic_t frontbuffer_bits;
	unsigned int frontbuffer_ggtt_origin; /* write once */
	struct i915_gem_active frontbuffer_write;

	/** Current tiling stride for the object, if it's tiled. */
	unsigned int tiling_and_stride;
#define FENCE_MINIMUM_STRIDE 128 /* See i915_tiling_ok() */
#define TILING_MASK (FENCE_MINIMUM_STRIDE-1)
#define STRIDE_MASK (~TILING_MASK)

	/** Count of VMA actually bound by this object */
	unsigned int bind_count;
	unsigned int active_count;
	unsigned int pin_display;

	struct {
		struct mutex lock; /* protects the pages and their use */
		atomic_t pages_pin_count;

		struct sg_table *pages;
		void *mapping;

		struct i915_page_sizes {
			/**
			 * The sg mask of the pages sg_table, i.e. the mask
			 * of the lengths for each sg entry.
			 */
			unsigned int phys;

			/**
			 * The gtt page sizes we are allowed to use given the
			 * sg mask and the supported page sizes. This will
			 * express the smallest unit we can use for the whole
			 * object, as well as the larger sizes we may be able
			 * to use opportunistically.
			 */
			unsigned int sg;
		} page_sizes;
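
		/*
		 * Worked example (illustrative): an object whose sg_table
		 * holds one 2M entry followed by a run of 64K entries would
		 * have page_sizes.phys == SZ_2M | SZ_64K (SZ_* constants
		 * from <linux/sizes.h>); page_sizes.sg is then whichever of
		 * the GTT-supported page sizes that layout lets us use.
		 */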

		struct i915_gem_object_page_iter {
			struct scatterlist *sg_pos;
			unsigned int sg_idx; /* in pages, but 32bit eek! */

			struct radix_tree_root radix;
			struct mutex lock; /* protects this cache */
		} get_page;

		/**
		 * Advice: are the backing pages purgeable?
		 */
		unsigned int madv:2;

		/**
		 * This is set if the object has been written to since the
		 * pages were last acquired.
		 */
		bool dirty:1;

		/**
		 * This is set if the object has been pinned due to unknown
		 * swizzling.
		 */
		bool quirked:1;
	} mm;

	/** Breadcrumb of last rendering to the buffer.
	 * There can only be one writer, but we allow for multiple readers.
	 * If there is a writer, that necessarily implies that all other
	 * read requests are complete - but we may only be lazily clearing
	 * the read requests. A read request is naturally the most recent
	 * request on a ring, so we may have two different write and read
	 * requests on one ring where the write request is older than the
	 * read request. This allows for the CPU to read from an active
	 * buffer by only waiting for the write to complete.
	 */
	struct reservation_object *resv;

	/** References from framebuffers, locks out tiling changes. */
	unsigned int framebuffer_references;
/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	union {
		struct i915_gem_userptr {
			uintptr_t ptr;
			unsigned read_only:1;

			struct i915_mm_struct *mm;
			struct i915_mmu_object *mmu_object;
			struct work_struct *work;
		} userptr;

		unsigned long scratch;
	};
/** for phys allocated objects */
	struct drm_dma_handle *phys_handle;

	struct reservation_object __builtin_resv;
};

static inline struct drm_i915_gem_object *
to_intel_bo(struct drm_gem_object *gem)
{
	/* Assert that to_intel_bo(NULL) == NULL */
	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base));

	return container_of(gem, struct drm_i915_gem_object, base);
}

/**
 * i915_gem_object_lookup_rcu - look up a temporary GEM object from its handle
 * @file: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A pointer to the object named by the handle if such exists on @file, NULL
 * otherwise. This object is only valid whilst under the RCU read lock, and
 * note carefully the object may be in the process of being destroyed.
 */
static inline struct drm_i915_gem_object *
i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON(debug_locks && !lock_is_held(&rcu_lock_map));
#endif
	return idr_find(&file->object_idr, handle);
}
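
/*
 * Illustrative sketch: a pointer returned by i915_gem_object_lookup_rcu()
 * is only stable inside the RCU read-side critical section, so any peeking
 * at the object must happen before the lock is dropped; no reference is
 * taken. @file, @handle and @size are whatever the caller already has.
 *
 *	rcu_read_lock();
 *	obj = i915_gem_object_lookup_rcu(file, handle);
 *	if (obj)
 *		size = obj->base.size; // peek only; obj may be dying
 *	rcu_read_unlock();
 */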

static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
	struct drm_i915_gem_object *obj;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, handle);
	if (obj && !kref_get_unless_zero(&obj->base.refcount))
		obj = NULL;
	rcu_read_unlock();

	return obj;
}

__deprecated
extern struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *file, u32 handle);

__attribute__((nonnull))
static inline struct drm_i915_gem_object *
i915_gem_object_get(struct drm_i915_gem_object *obj)
{
	drm_gem_object_reference(&obj->base);
	return obj;
}

__deprecated
extern void drm_gem_object_reference(struct drm_gem_object *);

__attribute__((nonnull))
static inline void
i915_gem_object_put(struct drm_i915_gem_object *obj)
{
	__drm_gem_object_unreference(&obj->base);
}

__deprecated
extern void drm_gem_object_unreference(struct drm_gem_object *);

__deprecated
extern void drm_gem_object_unreference_unlocked(struct drm_gem_object *);

static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj)
{
	reservation_object_lock(obj->resv, NULL);
}

static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
{
	reservation_object_unlock(obj->resv);
}
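
/*
 * Illustrative sketch: these helpers take and release the object's
 * reservation object, so updates that must be serialised with other users
 * of obj->resv follow the usual pattern; the body is a placeholder, not an
 * API defined here.
 *
 *	i915_gem_object_lock(obj);
 *	// ... update fences / state guarded by obj->resv ...
 *	i915_gem_object_unlock(obj);
 */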

static inline bool
i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
}

static inline bool
i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj)
{
	return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE;
}

static inline bool
i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
{
	return obj->active_count;
}

static inline bool
i915_gem_object_has_active_reference(const struct drm_i915_gem_object *obj)
{
	return test_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_set_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__set_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

static inline void
i915_gem_object_clear_active_reference(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	__clear_bit(I915_BO_ACTIVE_REF, &obj->flags);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj);
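
/*
 * Illustrative sketch: the active-reference bit lets a caller hand its
 * object reference over to the retirement path rather than dropping it
 * while the GPU may still be using the object. This is roughly what
 * __i915_gem_object_release_unless_active() is expected to do (an
 * assumption based on the helpers above); it must run under struct_mutex.
 *
 *	if (!i915_gem_object_has_active_reference(obj) &&
 *	    i915_gem_object_is_active(obj))
 *		i915_gem_object_set_active_reference(obj);
 *	else
 *		i915_gem_object_put(obj);
 */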

static inline bool
i915_gem_object_is_framebuffer(const struct drm_i915_gem_object *obj)
{
	return READ_ONCE(obj->framebuffer_references);
}

static inline unsigned int
i915_gem_object_get_tiling(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & TILING_MASK;
}

static inline bool
i915_gem_object_is_tiled(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tiling(obj) != I915_TILING_NONE;
}

static inline unsigned int
i915_gem_object_get_stride(struct drm_i915_gem_object *obj)
{
	return obj->tiling_and_stride & STRIDE_MASK;
}

static inline unsigned int
i915_gem_tile_height(unsigned int tiling)
{
	GEM_BUG_ON(!tiling);
	return tiling == I915_TILING_Y ? 32 : 8;
}

static inline unsigned int
i915_gem_object_get_tile_height(struct drm_i915_gem_object *obj)
{
	return i915_gem_tile_height(i915_gem_object_get_tiling(obj));
}

static inline unsigned int
i915_gem_object_get_tile_row_size(struct drm_i915_gem_object *obj)
{
	return (i915_gem_object_get_stride(obj) *
		i915_gem_object_get_tile_height(obj));
}
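
/*
 * Worked example (illustrative): an X-tiled object has a tile height of
 * 8 rows, so with a stride of 512 bytes
 *
 *	i915_gem_object_get_tile_row_size(obj) == 512 * 8 == 4096,
 *
 * i.e. one complete row of tiles spans exactly one 4K page.
 */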

int i915_gem_object_set_tiling(struct drm_i915_gem_object *obj,
			       unsigned int tiling, unsigned int stride);

static inline struct intel_engine_cs *
i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
{
	struct intel_engine_cs *engine = NULL;
	struct dma_fence *fence;

	rcu_read_lock();
	fence = reservation_object_get_excl_rcu(obj->resv);
	rcu_read_unlock();

	if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
		engine = to_request(fence)->engine;
	dma_fence_put(fence);

	return engine;
}
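
/*
 * Illustrative sketch: debug-style code can use the helper above to report
 * which engine last wrote to an object; NULL means there is no outstanding
 * i915 write. The pr_debug() call and the use of engine->name are
 * assumptions for the example only.
 *
 *	struct intel_engine_cs *engine;
 *
 *	engine = i915_gem_object_last_write_engine(obj);
 *	if (engine)
 *		pr_debug("last write on %s\n", engine->name);
 */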

void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
					 unsigned int cache_level);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);

#endif