17e4660ae3
This builds on top of the MMU contexts introduced earlier. Instead of having one context per GPU core, each GPU client receives its own context.

On MMUv1 this still means a single shared pagetable set is used by all clients, but on MMUv2 there is now a distinct set of pagetables for each client. As the command fetch is also translated via the MMU on MMUv2, the kernel command ringbuffer is mapped into each of the client pagetables.

As the MMU context switch is a fairly heavy operation, due to the required cache and TLB flushing, this patch implements a lazy way of switching the MMU context. The kernel does not have its own MMU context, but reuses the last client context for all of its operations. This has some visible impact, as the GPU can now only be started once a client has submitted some work and the client MMU context has been assigned. Also, the MMU context has a different lifetime than the general client context, as the GPU might still execute the kernel command buffer in the context of a client even after the client has completed all GPU work and has been terminated. Only when the GPU is runtime suspended or switches to another client's MMU context is the old context freed up.

Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>
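For illustration, here is a minimal sketch of the lazy context switch described above, using only the context get/put/restore helpers declared in the header below. This is not the actual etnaviv submit path; the function name and the gpu->mmu_context field are assumptions made for the example.

/*
 * Illustrative sketch only (not the driver's real code): the GPU keeps a
 * reference on the last client context it ran with and only pays the
 * cache/TLB flush cost when a different client's context takes over.
 * "gpu->mmu_context" is an assumed field name for this example.
 */
static void example_switch_mmu_context(struct etnaviv_gpu *gpu,
                                       struct etnaviv_iommu_context *new_ctx)
{
        struct etnaviv_iommu_context *old_ctx = gpu->mmu_context;

        if (old_ctx == new_ctx)
                return;                         /* same client: nothing to do */

        etnaviv_iommu_context_get(new_ctx);     /* pin while the GPU may use it */
        etnaviv_iommu_restore(gpu, new_ctx);    /* program MMU, flush caches/TLB */
        gpu->mmu_context = new_ctx;

        if (old_ctx)
                etnaviv_iommu_context_put(old_ctx);     /* previous context may now be freed */
}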
125 lines
3.7 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#ifndef __ETNAVIV_MMU_H__
#define __ETNAVIV_MMU_H__

#define ETNAVIV_PROT_READ	(1 << 0)
#define ETNAVIV_PROT_WRITE	(1 << 1)

enum etnaviv_iommu_version {
	ETNAVIV_IOMMU_V1 = 0,
	ETNAVIV_IOMMU_V2,
};

struct etnaviv_gpu;
struct etnaviv_vram_mapping;
struct etnaviv_iommu_global;
struct etnaviv_iommu_context;

struct etnaviv_iommu_ops {
	struct etnaviv_iommu_context *(*init)(struct etnaviv_iommu_global *);
	void (*free)(struct etnaviv_iommu_context *);
	int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
	size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
			size_t size);
	size_t (*dump_size)(struct etnaviv_iommu_context *);
	void (*dump)(struct etnaviv_iommu_context *, void *);
	void (*restore)(struct etnaviv_gpu *, struct etnaviv_iommu_context *);
};

extern const struct etnaviv_iommu_ops etnaviv_iommuv1_ops;
extern const struct etnaviv_iommu_ops etnaviv_iommuv2_ops;

#define ETNAVIV_PTA_SIZE	SZ_4K
#define ETNAVIV_PTA_ENTRIES	(ETNAVIV_PTA_SIZE / sizeof(u64))

struct etnaviv_iommu_global {
	struct device *dev;
	enum etnaviv_iommu_version version;
	const struct etnaviv_iommu_ops *ops;
	unsigned int use;
	struct mutex lock;

	void *bad_page_cpu;
	dma_addr_t bad_page_dma;

	u32 memory_base;

	/*
	 * This union holds members needed by either MMUv1 or MMUv2, which
	 * can not exist at the same time.
	 */
	union {
		struct {
			struct etnaviv_iommu_context *shared_context;
		} v1;
		struct {
			/* P(age) T(able) A(rray) */
			u64 *pta_cpu;
			dma_addr_t pta_dma;
			struct spinlock pta_lock;
			DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES);
		} v2;
	};
};

struct etnaviv_iommu_context {
	struct kref refcount;
	struct etnaviv_iommu_global *global;

	/* memory manager for GPU address area */
	struct mutex lock;
	struct list_head mappings;
	struct drm_mm mm;
	unsigned int flush_seq;

	/* Not part of the context, but needs to have the same lifetime */
	struct etnaviv_vram_mapping cmdbuf_mapping;
};

int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu);
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu);

struct etnaviv_gem_object;

int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping);
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping);

int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size);
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *ctx,
				   struct etnaviv_vram_mapping *mapping);

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *ctx);
void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);

struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc);
static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
{
	kref_get(&ctx->refcount);
}
void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *ctx);

struct etnaviv_iommu_context *
etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global);
struct etnaviv_iommu_context *
etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global);

u32 etnaviv_iommuv2_get_mtlb_addr(struct etnaviv_iommu_context *context);
unsigned short etnaviv_iommuv2_get_pta_id(struct etnaviv_iommu_context *context);

#endif /* __ETNAVIV_MMU_H__ */
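As a usage note, the sketch below shows how a per-client context lifecycle might look when built on the declarations above. It is a hedged example, not the driver's actual code: the example_* function names, the priv->mmu_global and priv->cmdbuf_suballoc field names, and the assumption that etnaviv_iommu_context_init() returns NULL on failure are all illustrative.

/*
 * Illustrative per-client lifecycle (assumed names: example_*, the "priv"
 * fields, and a NULL return from etnaviv_iommu_context_init on failure).
 */
static int example_client_open(struct etnaviv_drm_private *priv,
                               struct etnaviv_iommu_context **out_ctx)
{
        struct etnaviv_iommu_context *ctx;

        /* one context per client; on MMUv1 it still shares the global pagetables */
        ctx = etnaviv_iommu_context_init(priv->mmu_global, priv->cmdbuf_suballoc);
        if (!ctx)
                return -ENOMEM;

        *out_ctx = ctx;
        return 0;
}

static void example_client_close(struct etnaviv_iommu_context *ctx)
{
        /* the context may outlive the client; it is freed on the last put */
        etnaviv_iommu_context_put(ctx);
}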