drm/tegra: Implement buffer object cache

This cache is used to avoid mapping and unmapping buffer objects
unnecessarily. Mappings are cached per client and stay hot until
the buffer object is destroyed.

Signed-off-by: Thierry Reding <treding@nvidia.com>
This commit is contained in:
Thierry Reding 2020-02-07 16:50:52 +01:00
parent c6aeaf56f4
commit 1f39b1dfa5
9 changed files with 141 additions and 17 deletions

View File

@ -67,6 +67,7 @@ static struct host1x_bo_mapping *tegra_bo_pin(struct device *dev, struct host1x_
 	if (!map)
 		return ERR_PTR(-ENOMEM);

+	kref_init(&map->ref);
 	map->bo = host1x_bo_get(bo);
 	map->direction = direction;
 	map->dev = dev;
@ -157,9 +158,6 @@ free:
 static void tegra_bo_unpin(struct host1x_bo_mapping *map)
 {
-	if (!map)
-		return;
-
 	if (map->attach) {
 		dma_buf_unmap_attachment(map->attach, map->sgt, map->direction);
 		dma_buf_detach(map->attach->dmabuf, map->attach);
@ -493,8 +491,18 @@ free:
 void tegra_bo_free_object(struct drm_gem_object *gem)
 {
 	struct tegra_drm *tegra = gem->dev->dev_private;
+	struct host1x_bo_mapping *mapping, *tmp;
 	struct tegra_bo *bo = to_tegra_bo(gem);

+	/* remove all mappings of this buffer object from any caches */
+	list_for_each_entry_safe(mapping, tmp, &bo->base.mappings, list) {
+		if (mapping->cache)
+			host1x_bo_unpin(mapping);
+		else
+			dev_err(gem->dev->dev, "mapping %p stale for device %s\n", mapping,
+				dev_name(mapping->dev));
+	}
+
 	if (tegra->domain)
 		tegra_bo_iommu_unmap(tegra, bo);

View File

@ -145,7 +145,7 @@ static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state)
 		struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i);
 		struct host1x_bo_mapping *map;

-		map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE);
+		map = host1x_bo_pin(dc->dev, &bo->base, DMA_TO_DEVICE, &dc->client.cache);
 		if (IS_ERR(map)) {
 			err = PTR_ERR(map);
 			goto unpin;

View File

@ -75,6 +75,7 @@ gather_bo_pin(struct device *dev, struct host1x_bo *bo, enum dma_data_direction
 	if (!map)
 		return ERR_PTR(-ENOMEM);

+	kref_init(&map->ref);
 	map->bo = host1x_bo_get(bo);
 	map->direction = direction;
 	map->dev = dev;

View File

@ -201,7 +201,7 @@ int tegra_drm_ioctl_channel_map(struct drm_device *drm, void *data, struct drm_f
 		goto put_gem;
 	}

-	mapping->map = host1x_bo_pin(context->client->base.dev, mapping->bo, direction);
+	mapping->map = host1x_bo_pin(context->client->base.dev, mapping->bo, direction, NULL);
 	if (IS_ERR(mapping->map)) {
 		err = PTR_ERR(mapping->map);
 		goto put_gem;

View File

@ -742,6 +742,7 @@ EXPORT_SYMBOL(host1x_driver_unregister);
  */
 void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key)
 {
+	host1x_bo_cache_init(&client->cache);
 	INIT_LIST_HEAD(&client->list);
 	__mutex_init(&client->lock, "host1x client lock", key);
 	client->usecount = 0;
@ -830,6 +831,8 @@ int host1x_client_unregister(struct host1x_client *client)
 	mutex_unlock(&clients_lock);

+	host1x_bo_cache_destroy(&client->cache);
+
 	return 0;
 }
 EXPORT_SYMBOL(host1x_client_unregister);
@ -904,3 +907,78 @@ unlock:
 	return err;
 }
 EXPORT_SYMBOL(host1x_client_resume);
/**
 * host1x_bo_pin() - map a buffer object for DMA with a given device
 * @dev: device that the mapping is created for
 * @bo: host1x buffer object to map
 * @dir: DMA direction of the transfers done through this mapping
 * @cache: per-client mapping cache to consult and populate, or NULL to
 *	bypass caching
 *
 * If @cache is given and already holds a mapping for the same @bo and
 * @dir, that mapping's reference count is bumped and it is returned
 * directly instead of creating a new one. Otherwise a fresh mapping is
 * created via the BO's pin() callback, linked onto the BO's mapping list
 * and, if @cache is given, added to the cache with an extra reference
 * that tracks the cache's own copy.
 *
 * Returns: the mapping on success or an ERR_PTR()-encoded error.
 */
struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
					enum dma_data_direction dir,
					struct host1x_bo_cache *cache)
{
	struct host1x_bo_mapping *mapping;

	if (cache) {
		/*
		 * Hold the cache lock across lookup, creation and insertion so
		 * that concurrent pin/unpin calls cannot race on the cache.
		 */
		mutex_lock(&cache->lock);

		list_for_each_entry(mapping, &cache->mappings, entry) {
			/* reuse a hot mapping for the same BO and direction */
			if (mapping->bo == bo && mapping->direction == dir) {
				kref_get(&mapping->ref);
				goto unlock;
			}
		}
	}

	/* no cached mapping (or no cache): create a new one */
	mapping = bo->ops->pin(dev, bo, dir);
	if (IS_ERR(mapping))
		goto unlock;

	/* the BO's list of mappings is protected by the BO's spinlock */
	spin_lock(&mapping->bo->lock);
	list_add_tail(&mapping->list, &bo->mappings);
	spin_unlock(&mapping->bo->lock);

	if (cache) {
		INIT_LIST_HEAD(&mapping->entry);
		mapping->cache = cache;

		list_add_tail(&mapping->entry, &cache->mappings);

		/* bump reference count to track the copy in the cache */
		kref_get(&mapping->ref);
	}

unlock:
	if (cache)
		mutex_unlock(&cache->lock);

	return mapping;
}
EXPORT_SYMBOL(host1x_bo_pin);
/*
 * Final kref release callback for a mapping: drop it from its cache (if
 * cached), unlink it from the BO's mapping list and release the underlying
 * pin via the BO's unpin() callback.
 *
 * For cached mappings the caller, host1x_bo_unpin(), holds cache->lock,
 * which serializes the list_del() of the cache entry against lookups in
 * host1x_bo_pin().
 */
static void __host1x_bo_unpin(struct kref *ref)
{
	struct host1x_bo_mapping *mapping = to_host1x_bo_mapping(ref);

	/*
	 * When the last reference of the mapping goes away, make sure to remove the mapping from
	 * the cache.
	 */
	if (mapping->cache)
		list_del(&mapping->entry);

	/* the BO's list of mappings is protected by the BO's spinlock */
	spin_lock(&mapping->bo->lock);
	list_del(&mapping->list);
	spin_unlock(&mapping->bo->lock);

	mapping->bo->ops->unpin(mapping);
}
/**
 * host1x_bo_unpin() - drop one reference to a buffer object mapping
 * @mapping: mapping to release
 *
 * For cached mappings the cache lock is held around the kref_put() so that
 * a concurrent host1x_bo_pin() cannot look up the mapping while its final
 * reference is being dropped.
 */
void host1x_bo_unpin(struct host1x_bo_mapping *mapping)
{
	struct host1x_bo_cache *cache = mapping->cache;

	if (!cache) {
		/* uncached mappings need no serialization against lookups */
		kref_put(&mapping->ref, __host1x_bo_unpin);
		return;
	}

	mutex_lock(&cache->lock);
	kref_put(&mapping->ref, __host1x_bo_unpin);
	mutex_unlock(&cache->lock);
}
EXPORT_SYMBOL(host1x_bo_unpin);

View File

@ -386,6 +386,7 @@ static int host1x_probe(struct platform_device *pdev)
 	if (syncpt_irq < 0)
 		return syncpt_irq;

+	host1x_bo_cache_init(&host->cache);
 	mutex_init(&host->devices_lock);
 	INIT_LIST_HEAD(&host->devices);
 	INIT_LIST_HEAD(&host->list);
@ -512,6 +513,7 @@ static int host1x_remove(struct platform_device *pdev)
 	reset_control_assert(host->rst);
 	clk_disable_unprepare(host->clk);
 	host1x_iommu_exit(host);
+	host1x_bo_cache_destroy(&host->cache);

 	return 0;
 }

View File

@ -149,6 +149,8 @@ struct host1x {
 	struct list_head list;

 	struct device_dma_parameters dma_parms;
+
+	struct host1x_bo_cache cache;
 };

 void host1x_hypervisor_writel(struct host1x *host1x, u32 r, u32 v);

View File

@ -175,7 +175,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 			goto unpin;
 		}

-		map = host1x_bo_pin(dev, bo, direction);
+		map = host1x_bo_pin(dev, bo, direction, &client->cache);
 		if (IS_ERR(map)) {
 			err = PTR_ERR(map);
 			goto unpin;
@ -222,7 +222,7 @@ static unsigned int pin_job(struct host1x *host, struct host1x_job *job)
 			goto unpin;
 		}

-		map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE);
+		map = host1x_bo_pin(host->dev, g->bo, DMA_TO_DEVICE, &host->cache);
 		if (IS_ERR(map)) {
 			err = PTR_ERR(map);
 			goto unpin;

View File

@ -8,6 +8,7 @@
 #include <linux/device.h>
 #include <linux/dma-direction.h>
+#include <linux/spinlock.h>
 #include <linux/types.h>

 enum host1x_class {
@ -24,6 +25,28 @@ struct iommu_group;
 u64 host1x_get_dma_mask(struct host1x *host1x);
/**
 * struct host1x_bo_cache - host1x buffer object cache
 * @mappings: list of mappings
 * @lock: synchronizes accesses to the list of mappings
 *
 * Mappings are cached per client and stay hot until the buffer object that
 * they map is destroyed, avoiding repeated map/unmap cycles.
 */
struct host1x_bo_cache {
	struct list_head mappings;
	struct mutex lock;
};

/* initialize an empty buffer object cache */
static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
{
	INIT_LIST_HEAD(&cache->mappings);
	mutex_init(&cache->lock);
}

/*
 * Tear down a buffer object cache. Every cached mapping must have been
 * released by this point, so complain loudly about any stragglers rather
 * than leaking them silently.
 */
static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
{
	WARN_ON(!list_empty(&cache->mappings));
	mutex_destroy(&cache->lock);
}
 /**
  * struct host1x_client_ops - host1x client operations
  * @early_init: host1x client early initialization code
@ -74,6 +97,8 @@ struct host1x_client {
 	struct host1x_client *parent;
 	unsigned int usecount;
 	struct mutex lock;
+
+	struct host1x_bo_cache cache;
 };

 /*
@ -84,16 +109,26 @@ struct host1x_bo;
 struct sg_table;

 struct host1x_bo_mapping {
+	struct kref ref;
 	struct dma_buf_attachment *attach;
 	enum dma_data_direction direction;
+	struct list_head list;
 	struct host1x_bo *bo;
 	struct sg_table *sgt;
 	unsigned int chunks;
 	struct device *dev;
 	dma_addr_t phys;
 	size_t size;
+
+	struct host1x_bo_cache *cache;
+	struct list_head entry;
 };
/* obtain the mapping that embeds @ref; used by kref_put() release callbacks */
static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
{
	return container_of(ref, struct host1x_bo_mapping, ref);
}
 struct host1x_bo_ops {
 	struct host1x_bo *(*get)(struct host1x_bo *bo);
 	void (*put)(struct host1x_bo *bo);
@ -106,11 +141,15 @@ struct host1x_bo_ops {
 struct host1x_bo {
 	const struct host1x_bo_ops *ops;
+	struct list_head mappings;
+	spinlock_t lock;
 };

 static inline void host1x_bo_init(struct host1x_bo *bo,
 				  const struct host1x_bo_ops *ops)
 {
+	INIT_LIST_HEAD(&bo->mappings);
+	spin_lock_init(&bo->lock);
 	bo->ops = ops;
 }
@ -124,16 +163,10 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
 	bo->ops->put(bo);
 }

-static inline struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
-						      enum dma_data_direction dir)
-{
-	return bo->ops->pin(dev, bo, dir);
-}
-
-static inline void host1x_bo_unpin(struct host1x_bo_mapping *map)
-{
-	map->bo->ops->unpin(map);
-}
+struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
+					enum dma_data_direction dir,
+					struct host1x_bo_cache *cache);
+void host1x_bo_unpin(struct host1x_bo_mapping *map);

 static inline void *host1x_bo_mmap(struct host1x_bo *bo)
 {