drm/i915: introduce mem->reserved
In the following patch we need to reserve regions inaccessible to the
driver during initialization, so add mem->reserved for collecting such
regions.

v2: turn into an actual intel_memory_region_reserve api

Cc: Imre Deak <imre.deak@intel.com>
Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20210127131417.393872-4-matthew.auld@intel.com
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
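As a quick illustration of the new api, here is a minimal, hypothetical sketch of an init-path caller. The helper name and the choice of range are made up for illustration; only intel_memory_region_reserve() itself comes from this patch:

	/* Hypothetical helper, illustration only: carve the last 1MiB of a
	 * region out of the buddy allocator during init, e.g. because
	 * firmware has claimed it and the driver must never touch it.
	 */
	static int example_reserve_tail(struct intel_memory_region *mem)
	{
		u64 size = SZ_1M;
		u64 offset = resource_size(&mem->region) - size;

		/* Offsets are relative to the start of the region, as in the
		 * selftest below. This fails (e.g. -ENOSPC) if the range has
		 * already been handed out, so reserve before any other
		 * allocations from the region.
		 */
		return intel_memory_region_reserve(mem, offset, size);
	}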
@@ -156,9 +156,22 @@ int intel_memory_region_init_buddy(struct intel_memory_region *mem)
 
 void intel_memory_region_release_buddy(struct intel_memory_region *mem)
 {
+	i915_buddy_free_list(&mem->mm, &mem->reserved);
 	i915_buddy_fini(&mem->mm);
 }
 
+int intel_memory_region_reserve(struct intel_memory_region *mem,
+				u64 offset, u64 size)
+{
+	int ret;
+
+	mutex_lock(&mem->mm_lock);
+	ret = i915_buddy_alloc_range(&mem->mm, &mem->reserved, offset, size);
+	mutex_unlock(&mem->mm_lock);
+
+	return ret;
+}
+
 struct intel_memory_region *
 intel_memory_region_create(struct drm_i915_private *i915,
 			   resource_size_t start,
@@ -185,6 +198,7 @@ intel_memory_region_create(struct drm_i915_private *i915,
 	mutex_init(&mem->objects.lock);
 	INIT_LIST_HEAD(&mem->objects.list);
 	INIT_LIST_HEAD(&mem->objects.purgeable);
+	INIT_LIST_HEAD(&mem->reserved);
 
 	mutex_init(&mem->mm_lock);
 
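Note the design choice here: a reservation is just a range allocation. intel_memory_region_reserve() takes mem->mm_lock and has i915_buddy_alloc_range() allocate the requested range onto the mem->reserved list, so the buddy allocator can never hand those pages out again; they are only returned via i915_buddy_free_list() when the region tears down its buddy state in intel_memory_region_release_buddy().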
@@ -89,6 +89,8 @@ struct intel_memory_region {
 	unsigned int id;
 	char name[8];
 
+	struct list_head reserved;
+
 	dma_addr_t remap_addr;
 
 	struct {
@@ -113,6 +115,9 @@ void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem,
 					   struct list_head *blocks);
 void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block);
 
+int intel_memory_region_reserve(struct intel_memory_region *mem,
+				u64 offset, u64 size);
+
 struct intel_memory_region *
 intel_memory_region_create(struct drm_i915_private *i915,
 			   resource_size_t start,
@@ -144,6 +144,82 @@ static bool is_contiguous(struct drm_i915_gem_object *obj)
 	return true;
 }
 
+static int igt_mock_reserve(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	resource_size_t avail = resource_size(&mem->region);
+	struct drm_i915_gem_object *obj;
+	const u32 chunk_size = SZ_32M;
+	u32 i, offset, count, *order;
+	u64 allocated, cur_avail;
+	I915_RND_STATE(prng);
+	LIST_HEAD(objects);
+	int err = 0;
+
+	if (!list_empty(&mem->reserved)) {
+		pr_err("%s region reserved list is not empty\n", __func__);
+		return -EINVAL;
+	}
+
+	count = avail / chunk_size;
+	order = i915_random_order(count, &prng);
+	if (!order)
+		return 0;
+
+	/* Reserve a bunch of ranges within the region */
+	for (i = 0; i < count; ++i) {
+		u64 start = order[i] * chunk_size;
+		u64 size = i915_prandom_u32_max_state(chunk_size, &prng);
+
+		/* Allow for some really big holes */
+		if (!size)
+			continue;
+
+		size = round_up(size, PAGE_SIZE);
+		offset = igt_random_offset(&prng, 0, chunk_size, size,
+					   PAGE_SIZE);
+
+		err = intel_memory_region_reserve(mem, start + offset, size);
+		if (err) {
+			pr_err("%s failed to reserve range", __func__);
+			goto out_close;
+		}
+
+		/* XXX: maybe sanity check the block range here? */
+		avail -= size;
+	}
+
+	/* Try to see if we can allocate from the remaining space */
+	allocated = 0;
+	cur_avail = avail;
+	do {
+		u32 size = i915_prandom_u32_max_state(cur_avail, &prng);
+
+		size = max_t(u32, round_up(size, PAGE_SIZE), PAGE_SIZE);
+		obj = igt_object_create(mem, &objects, size, 0);
+		if (IS_ERR(obj)) {
+			if (PTR_ERR(obj) == -ENXIO)
+				break;
+
+			err = PTR_ERR(obj);
+			goto out_close;
+		}
+		cur_avail -= size;
+		allocated += size;
+	} while (1);
+
+	if (allocated != avail) {
+		pr_err("%s mismatch between allocation and free space", __func__);
+		err = -EINVAL;
+	}
+
+out_close:
+	kfree(order);
+	close_objects(mem, &objects);
+	i915_buddy_free_list(&mem->mm, &mem->reserved);
+	return err;
+}
+
 static int igt_mock_contiguous(void *arg)
 {
 	struct intel_memory_region *mem = arg;
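In short, igt_mock_reserve() reserves randomly sized, page-aligned chunks scattered across the region, tracks how much space should remain, then allocates objects until the region reports -ENXIO (out of space); any mismatch between the bytes actually allocated and the expected free space means the reservation accounting has leaked or lost pages.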
@@ -930,6 +1006,7 @@ static int perf_memcpy(void *arg)
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
+		SUBTEST(igt_mock_reserve),
 		SUBTEST(igt_mock_fill),
 		SUBTEST(igt_mock_contiguous),
 		SUBTEST(igt_mock_splintered_region),