The `migration` test currently has a number of robustness problems that
cause it to hang and leak resources.

Timeout: There are 3 tests, which each previously ran for 60 seconds.
However, the timeout in mm/settings for a single test binary was set to
45 seconds. So when run using run_kselftest.sh, the top level timeout
would trigger before the test binary was finished. Solve this by meeting
in the middle; each of the 3 tests now runs for 20 seconds (for a total
of 60), and the top level timeout is set to 90 seconds.

Leaking child processes: the `shared_anon` test fork()s some children but
then an ASSERT() fires before the test kills those children. The assert
causes immediate exit of the parent and leaking of the children.
Furthermore, if run using the run_kselftest.sh wrapper, the wrapper would
get stuck waiting for those children to exit, which never happens. Solve
this by setting the "parent death signal" to SIGHUP in the child, so that
the child is killed automatically if the parent dies.

With these changes, the test binary now runs to completion on arm64, with
2 tests passing and the `shared_anon` test failing.

Link: https://lkml.kernel.org/r/20230724082522.1202616-7-ryan.roberts@arm.com
Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Florent Revest <revest@chromium.org>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Peter Xu <peterx@redhat.com>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
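For reference, the top level timeout mentioned above lives in
tools/testing/selftests/mm/settings, which the kselftest runner reads for
a per-directory timeout=<seconds> value. A minimal sketch of the
corresponding change, assuming the file carries only the timeout key:

  -timeout=45
  +timeout=90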
// SPDX-License-Identifier: GPL-2.0
/*
 * The main purpose of the tests here is to exercise the migration entry code
 * paths in the kernel.
 */

#include "../kselftest_harness.h"
#include <strings.h>
#include <pthread.h>
#include <numa.h>
#include <numaif.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <signal.h>
#include <time.h>

#define TWOMEG (2<<20)
#define RUNTIME (20)

#define ALIGN(x, a) (((x) + (a - 1)) & (~((a) - 1)))

FIXTURE(migration)
{
	pthread_t *threads;
	pid_t *pids;
	int nthreads;
	int n1;
	int n2;
};

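/*
 * Pick the first two NUMA nodes available to the task and allocate the
 * per-worker thread/pid arrays used by the tests below.
 */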
FIXTURE_SETUP(migration)
{
	int n;

	ASSERT_EQ(numa_available(), 0);
	self->nthreads = numa_num_task_cpus() - 1;
	self->n1 = -1;
	self->n2 = -1;

	for (n = 0; n < numa_max_possible_node(); n++)
		if (numa_bitmask_isbitset(numa_all_nodes_ptr, n)) {
			if (self->n1 == -1) {
				self->n1 = n;
			} else {
				self->n2 = n;
				break;
			}
		}

	self->threads = malloc(self->nthreads * sizeof(*self->threads));
	ASSERT_NE(self->threads, NULL);
	self->pids = malloc(self->nthreads * sizeof(*self->pids));
	ASSERT_NE(self->pids, NULL);
};

FIXTURE_TEARDOWN(migration)
{
	free(self->threads);
	free(self->pids);
}

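/*
 * Move the single page at ptr back and forth between nodes n1 and n2 for
 * RUNTIME seconds. Returns 0 on success, -1 on clock errors and -2 if
 * move_pages() fails.
 */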
int migrate(uint64_t *ptr, int n1, int n2)
{
	int ret, tmp;
	int status = 0;
	struct timespec ts1, ts2;

	if (clock_gettime(CLOCK_MONOTONIC, &ts1))
		return -1;

	while (1) {
		if (clock_gettime(CLOCK_MONOTONIC, &ts2))
			return -1;

		if (ts2.tv_sec - ts1.tv_sec >= RUNTIME)
			return 0;

		ret = move_pages(0, 1, (void **) &ptr, &n2, &status,
				 MPOL_MF_MOVE_ALL);
		if (ret) {
			if (ret > 0)
				printf("Didn't migrate %d pages\n", ret);
			else
				perror("Couldn't migrate pages");
			return -2;
		}

		tmp = n2;
		n2 = n1;
		n1 = tmp;
	}

	return 0;
}

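/*
 * Worker loop: keep reading the target memory so that concurrent migration
 * hits the migration entry wait paths. Runs until the thread is cancelled
 * (pthread_testcancel() is the cancellation point) or, for forked children,
 * until the process is killed.
 */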
void *access_mem(void *ptr)
{
	volatile uint64_t y = 0;
	volatile uint64_t *x = ptr;

	while (1) {
		pthread_testcancel();
		y += *x;

		/* Prevent the compiler from optimizing out the writes to y: */
		asm volatile("" : "+r" (y));
	}

	return NULL;
}

/*
 * Basic migration entry testing. One thread will move pages back and forth
 * between nodes whilst other threads try and access them triggering the
 * migration entry wait paths in the kernel.
 */
TEST_F_TIMEOUT(migration, private_anon, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

/*
 * Same as the previous test but with shared memory.
 */
TEST_F_TIMEOUT(migration, shared_anon, 2*RUNTIME)
{
	pid_t pid;
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++) {
		pid = fork();
		if (!pid) {
			prctl(PR_SET_PDEATHSIG, SIGHUP);
			/* Parent may have died before prctl so check now. */
			if (getppid() == 1)
				kill(getpid(), SIGHUP);
			access_mem(ptr);
		} else {
			self->pids[i] = pid;
		}
	}

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(kill(self->pids[i], SIGTERM), 0);
}

/*
 * Tests the pmd migration entry paths.
 */
TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME)
{
	uint64_t *ptr;
	int i;

	if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0)
		SKIP(return, "Not enough threads or NUMA nodes available");

	ptr = mmap(NULL, 2*TWOMEG, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	ASSERT_NE(ptr, MAP_FAILED);

	ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG);
	ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0);
	memset(ptr, 0xde, TWOMEG);
	for (i = 0; i < self->nthreads - 1; i++)
		if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
			perror("Couldn't create thread");

	ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0);
	for (i = 0; i < self->nthreads - 1; i++)
		ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
}

TEST_HARNESS_MAIN