588ab3f9af
- Initial page table creation reworked to avoid breaking large block mappings (huge pages) into smaller ones. The ARM architecture requires break-before-make in such cases to avoid TLB conflicts but that's not always possible on live page tables - Kernel virtual memory layout: the kernel image is no longer linked to the bottom of the linear mapping (PAGE_OFFSET) but at the bottom of the vmalloc space, allowing the kernel to be loaded (nearly) anywhere in physical RAM - Kernel ASLR: position independent kernel Image and modules being randomly mapped in the vmalloc space with the randomness is provided by UEFI (efi_get_random_bytes() patches merged via the arm64 tree, acked by Matt Fleming) - Implement relative exception tables for arm64, required by KASLR (initial code for ARCH_HAS_RELATIVE_EXTABLE added to lib/extable.c but actual x86 conversion to deferred to 4.7 because of the merge dependencies) - Support for the User Access Override feature of ARMv8.2: this allows uaccess functions (get_user etc.) to be implemented using LDTR/STTR instructions. Such instructions, when run by the kernel, perform unprivileged accesses adding an extra level of protection. The set_fs() macro is used to "upgrade" such instruction to privileged accesses via the UAO bit - Half-precision floating point support (part of ARMv8.2) - Optimisations for CPUs with or without a hardware prefetcher (using run-time code patching) - copy_page performance improvement to deal with 128 bytes at a time - Sanity checks on the CPU capabilities (via CPUID) to prevent incompatible secondary CPUs from being brought up (e.g. 
weird big.LITTLE configurations) - valid_user_regs() reworked for better sanity check of the sigcontext information (restored pstate information) - ACPI parking protocol implementation - CONFIG_DEBUG_RODATA enabled by default - VDSO code marked as read-only - DEBUG_PAGEALLOC support - ARCH_HAS_UBSAN_SANITIZE_ALL enabled - Erratum workaround Cavium ThunderX SoC - set_pte_at() fix for PROT_NONE mappings - Code clean-ups -----BEGIN PGP SIGNATURE----- Version: GnuPG v1 iQIcBAABAgAGBQJW6u95AAoJEGvWsS0AyF7xMyoP/3x2O6bgreSQ84BdO4JChN4+ RQ9OVdX8u2ItO9sgaCY2AA6KoiBuEjGmPl/XRuK0I7DpODTtRjEXQHuNNhz8AelC hn4AEVqamY6Z5BzHFIjs8G9ydEbq+OXcKWEdwSsBhP/cMvI7ss3dps1f5iNPT5Vv 50E/kUz+aWYy7pKlB18VDV7TUOA3SuYuGknWV8+bOY5uPb8hNT3Y3fHOg/EuNNN3 DIuYH1V7XQkXtF+oNVIGxzzJCXULBE7egMcWAm1ydSOHK0JwkZAiL7OhI7ceVD0x YlDxBnqmi4cgzfBzTxITAhn3OParwN6udQprdF1WGtFF6fuY2eRDSH/L/iZoE4DY OulL951OsBtF8YC3+RKLk908/0bA2Uw8ftjCOFJTYbSnZBj1gWK41VkCYMEXiHQk EaN8+2Iw206iYIoyvdjGCLw7Y0oakDoVD9vmv12SOaHeQljTkjoN8oIlfjjKTeP7 3AXj5v9BDMDVh40nkVayysRNvqe48Kwt9Wn0rhVTLxwdJEiFG/OIU6HLuTkretdN dcCNFSQrRieSFHpBK9G0vKIpIss1ZwLm8gjocVXH7VK4Mo/TNQe4p2/wAF29mq4r xu1UiXmtU3uWxiqZnt72LOYFCarQ0sFA5+pMEvF5W+NrVB0wGpXhcwm+pGsIi4IM LepccTgykiUBqW5TRzPz =/oS+ -----END PGP SIGNATURE----- Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux Pull arm64 updates from Catalin Marinas: "Here are the main arm64 updates for 4.6. There are some relatively intrusive changes to support KASLR, the reworking of the kernel virtual memory layout and initial page table creation. Summary: - Initial page table creation reworked to avoid breaking large block mappings (huge pages) into smaller ones. 
The ARM architecture requires break-before-make in such cases to avoid TLB conflicts but that's not always possible on live page tables - Kernel virtual memory layout: the kernel image is no longer linked to the bottom of the linear mapping (PAGE_OFFSET) but at the bottom of the vmalloc space, allowing the kernel to be loaded (nearly) anywhere in physical RAM - Kernel ASLR: position independent kernel Image and modules being randomly mapped in the vmalloc space with the randomness is provided by UEFI (efi_get_random_bytes() patches merged via the arm64 tree, acked by Matt Fleming) - Implement relative exception tables for arm64, required by KASLR (initial code for ARCH_HAS_RELATIVE_EXTABLE added to lib/extable.c but actual x86 conversion to deferred to 4.7 because of the merge dependencies) - Support for the User Access Override feature of ARMv8.2: this allows uaccess functions (get_user etc.) to be implemented using LDTR/STTR instructions. Such instructions, when run by the kernel, perform unprivileged accesses adding an extra level of protection. The set_fs() macro is used to "upgrade" such instruction to privileged accesses via the UAO bit - Half-precision floating point support (part of ARMv8.2) - Optimisations for CPUs with or without a hardware prefetcher (using run-time code patching) - copy_page performance improvement to deal with 128 bytes at a time - Sanity checks on the CPU capabilities (via CPUID) to prevent incompatible secondary CPUs from being brought up (e.g. 
weird big.LITTLE configurations) - valid_user_regs() reworked for better sanity check of the sigcontext information (restored pstate information) - ACPI parking protocol implementation - CONFIG_DEBUG_RODATA enabled by default - VDSO code marked as read-only - DEBUG_PAGEALLOC support - ARCH_HAS_UBSAN_SANITIZE_ALL enabled - Erratum workaround Cavium ThunderX SoC - set_pte_at() fix for PROT_NONE mappings - Code clean-ups" * tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (99 commits) arm64: kasan: Fix zero shadow mapping overriding kernel image shadow arm64: kasan: Use actual memory node when populating the kernel image shadow arm64: Update PTE_RDONLY in set_pte_at() for PROT_NONE permission arm64: Fix misspellings in comments. arm64: efi: add missing frame pointer assignment arm64: make mrs_s prefixing implicit in read_cpuid arm64: enable CONFIG_DEBUG_RODATA by default arm64: Rework valid_user_regs arm64: mm: check at build time that PAGE_OFFSET divides the VA space evenly arm64: KVM: Move kvm_call_hyp back to its original localtion arm64: mm: treat memstart_addr as a signed quantity arm64: mm: list kernel sections in order arm64: lse: deal with clobbered IP registers after branch via PLT arm64: mm: dump: Use VA_START directly instead of private LOWEST_ADDR arm64: kconfig: add submenu for 8.2 architectural features arm64: kernel: acpi: fix ioremap in ACPI parking protocol cpu_postboot arm64: Add support for Half precision floating point arm64: Remove fixmap include fragility arm64: Add workaround for Cavium erratum 27456 arm64: mm: Mark .rodata as RO ...
398 lines
8.3 KiB
C
398 lines
8.3 KiB
C
/*
|
|
* sortextable.c: Sort the kernel's exception table
|
|
*
|
|
* Copyright 2011 - 2012 Cavium, Inc.
|
|
*
|
|
* Based on code taken from recortmcount.c which is:
|
|
*
|
|
* Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
|
|
* Licensed under the GNU General Public License, version 2 (GPLv2).
|
|
*
|
|
* Restructured to fit Linux format, as well as other updates:
|
|
* Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
|
|
*/
|
|
|
|
/*
|
|
* Strategy: alter the vmlinux file in-place.
|
|
*/
|
|
|
|
#include <sys/types.h>
|
|
#include <sys/mman.h>
|
|
#include <sys/stat.h>
|
|
#include <getopt.h>
|
|
#include <elf.h>
|
|
#include <fcntl.h>
|
|
#include <setjmp.h>
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
#include <unistd.h>
|
|
|
|
#include <tools/be_byteshift.h>
|
|
#include <tools/le_byteshift.h>
|
|
|
|
/*
 * e_machine numbers that may be missing from an older host <elf.h>.
 * Values are the official ELF machine assignments; defined here only
 * as fallbacks so the do_file() switch below always compiles.
 */
#ifndef EM_ARCOMPACT
#define EM_ARCOMPACT 93
#endif

#ifndef EM_XTENSA
#define EM_XTENSA 94
#endif

#ifndef EM_AARCH64
#define EM_AARCH64 183
#endif

#ifndef EM_MICROBLAZE
#define EM_MICROBLAZE 189
#endif

#ifndef EM_ARCV2
#define EM_ARCV2 195
#endif
|
|
|
|
/*
 * Per-file global state shared by mmap_file(), cleanup() and the
 * setjmp()-based error path; re-initialised in main() for each file.
 */
static int fd_map;	/* File descriptor for file being modified. */
static int mmap_failed; /* Boolean flag. */
static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */
static struct stat sb;	/* Remember .st_size, etc. */
static jmp_buf jmpenv;	/* setjmp/longjmp per-file error escape */

/* setjmp() return values */
enum {
	SJ_SETJMP = 0, /* hardwired first return */
	SJ_FAIL,       /* raised by fail_file(): abandon the current file */
	SJ_SUCCEED     /* handled as a no-op in main() */
};
|
|
|
|
/* Per-file resource cleanup when multiple files. */
|
|
static void
|
|
cleanup(void)
|
|
{
|
|
if (!mmap_failed)
|
|
munmap(ehdr_curr, sb.st_size);
|
|
close(fd_map);
|
|
}
|
|
|
|
/*
 * Abort processing of the current file: release its resources, then
 * longjmp() back to the per-file setjmp() in main() with SJ_FAIL.
 * Never returns.
 */
static void __attribute__((noreturn))
fail_file(void)
{
	cleanup();
	longjmp(jmpenv, SJ_FAIL);
}
|
|
|
|
/*
|
|
* Get the whole file as a programming convenience in order to avoid
|
|
* malloc+lseek+read+free of many pieces. If successful, then mmap
|
|
* avoids copying unused pieces; else just read the whole file.
|
|
* Open for both read and write.
|
|
*/
|
|
static void *mmap_file(char const *fname)
|
|
{
|
|
void *addr;
|
|
|
|
fd_map = open(fname, O_RDWR);
|
|
if (fd_map < 0 || fstat(fd_map, &sb) < 0) {
|
|
perror(fname);
|
|
fail_file();
|
|
}
|
|
if (!S_ISREG(sb.st_mode)) {
|
|
fprintf(stderr, "not a regular file: %s\n", fname);
|
|
fail_file();
|
|
}
|
|
addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED,
|
|
fd_map, 0);
|
|
if (addr == MAP_FAILED) {
|
|
mmap_failed = 1;
|
|
fprintf(stderr, "Could not mmap file: %s\n", fname);
|
|
fail_file();
|
|
}
|
|
return addr;
|
|
}
|
|
|
|
/*
 * Unaligned big-endian readers (helpers from tools/be_byteshift.h);
 * do_file() installs these via the r/r2/r8 pointers for ELFDATA2MSB
 * images.
 */
static uint64_t r8be(const uint64_t *x)
{
	return get_unaligned_be64(x);
}
static uint32_t rbe(const uint32_t *x)
{
	return get_unaligned_be32(x);
}
static uint16_t r2be(const uint16_t *x)
{
	return get_unaligned_be16(x);
}
/* Little-endian counterparts, installed for ELFDATA2LSB images. */
static uint64_t r8le(const uint64_t *x)
{
	return get_unaligned_le64(x);
}
static uint32_t rle(const uint32_t *x)
{
	return get_unaligned_le32(x);
}
static uint16_t r2le(const uint16_t *x)
{
	return get_unaligned_le16(x);
}
|
|
|
|
/*
 * Unaligned big-endian writers; do_file() installs these via the
 * w/w2/w8 pointers for ELFDATA2MSB images.
 */
static void w8be(uint64_t val, uint64_t *x)
{
	put_unaligned_be64(val, x);
}
static void wbe(uint32_t val, uint32_t *x)
{
	put_unaligned_be32(val, x);
}
static void w2be(uint16_t val, uint16_t *x)
{
	put_unaligned_be16(val, x);
}
/* Little-endian counterparts, installed for ELFDATA2LSB images. */
static void w8le(uint64_t val, uint64_t *x)
{
	put_unaligned_le64(val, x);
}
static void wle(uint32_t val, uint32_t *x)
{
	put_unaligned_le32(val, x);
}
static void w2le(uint16_t val, uint16_t *x)
{
	put_unaligned_le16(val, x);
}
|
|
|
|
/*
 * Read/write dispatch pointers, selected once per file in do_file()
 * according to the ELF image's EI_DATA byte order.
 */
static uint64_t (*r8)(const uint64_t *);
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static void (*w8)(uint64_t, uint64_t *);
static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);

/* Signature of the arch-specific in-place extable sort helpers. */
typedef void (*table_sort_t)(char *, int);
|
|
|
|
/*
 * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
 * the way to -256..-1, to avoid conflicting with real section
 * indices.
 */
#define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))

/* Reserved section index, other than the SHN_XINDEX escape value? */
static inline int is_shndx_special(unsigned int i)
{
	if (i == SHN_XINDEX)
		return 0;
	return SHN_LORESERVE <= i && i <= SHN_HIRESERVE;
}
|
|
|
|
/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
static inline unsigned int get_secindex(unsigned int shndx,
					unsigned int sym_offs,
					const Elf32_Word *symtab_shndx_start)
{
	/* SHN_XINDEX: the real index lives in the extended-index table. */
	if (shndx == SHN_XINDEX)
		return r(&symtab_shndx_start[sym_offs]);
	/* Other reserved indices are remapped out of the way. */
	if (is_shndx_special(shndx))
		return SPECIAL(shndx);
	return shndx;
}
|
|
|
|
/* 32 bit and 64 bit are very similar */
/*
 * Double-inclusion trick: sortextable.h presumably expands to the
 * 32-bit helpers on the first pass and to the 64-bit ones once
 * SORTEXTABLE_64 is defined (do32()/do64() called from do_file()).
 * TODO(review): confirm against sortextable.h.
 */
#include "sortextable.h"
#define SORTEXTABLE_64
#include "sortextable.h"
|
|
|
|
/*
 * qsort() comparator: order extable entries by their (normalised,
 * signed 32-bit) first field.
 */
static int compare_relative_table(const void *a, const void *b)
{
	int32_t lhs = (int32_t)r(a);
	int32_t rhs = (int32_t)r(b);

	/* Sign-of-difference idiom: -1, 0 or 1 without overflow. */
	return (lhs > rhs) - (lhs < rhs);
}
|
|
|
|
static void x86_sort_relative_table(char *extab_image, int image_size)
|
|
{
|
|
int i;
|
|
|
|
i = 0;
|
|
while (i < image_size) {
|
|
uint32_t *loc = (uint32_t *)(extab_image + i);
|
|
|
|
w(r(loc) + i, loc);
|
|
w(r(loc + 1) + i + 4, loc + 1);
|
|
w(r(loc + 2) + i + 8, loc + 2);
|
|
|
|
i += sizeof(uint32_t) * 3;
|
|
}
|
|
|
|
qsort(extab_image, image_size / 12, 12, compare_relative_table);
|
|
|
|
i = 0;
|
|
while (i < image_size) {
|
|
uint32_t *loc = (uint32_t *)(extab_image + i);
|
|
|
|
w(r(loc) - i, loc);
|
|
w(r(loc + 1) - (i + 4), loc + 1);
|
|
w(r(loc + 2) - (i + 8), loc + 2);
|
|
|
|
i += sizeof(uint32_t) * 3;
|
|
}
|
|
}
|
|
|
|
static void sort_relative_table(char *extab_image, int image_size)
|
|
{
|
|
int i;
|
|
|
|
/*
|
|
* Do the same thing the runtime sort does, first normalize to
|
|
* being relative to the start of the section.
|
|
*/
|
|
i = 0;
|
|
while (i < image_size) {
|
|
uint32_t *loc = (uint32_t *)(extab_image + i);
|
|
w(r(loc) + i, loc);
|
|
i += 4;
|
|
}
|
|
|
|
qsort(extab_image, image_size / 8, 8, compare_relative_table);
|
|
|
|
/* Now denormalize. */
|
|
i = 0;
|
|
while (i < image_size) {
|
|
uint32_t *loc = (uint32_t *)(extab_image + i);
|
|
w(r(loc) - i, loc);
|
|
i += 4;
|
|
}
|
|
}
|
|
|
|
static void
|
|
do_file(char const *const fname)
|
|
{
|
|
table_sort_t custom_sort;
|
|
Elf32_Ehdr *ehdr = mmap_file(fname);
|
|
|
|
ehdr_curr = ehdr;
|
|
switch (ehdr->e_ident[EI_DATA]) {
|
|
default:
|
|
fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
|
|
ehdr->e_ident[EI_DATA], fname);
|
|
fail_file();
|
|
break;
|
|
case ELFDATA2LSB:
|
|
r = rle;
|
|
r2 = r2le;
|
|
r8 = r8le;
|
|
w = wle;
|
|
w2 = w2le;
|
|
w8 = w8le;
|
|
break;
|
|
case ELFDATA2MSB:
|
|
r = rbe;
|
|
r2 = r2be;
|
|
r8 = r8be;
|
|
w = wbe;
|
|
w2 = w2be;
|
|
w8 = w8be;
|
|
break;
|
|
} /* end switch */
|
|
if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0
|
|
|| (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN)
|
|
|| ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
|
|
fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
|
|
fail_file();
|
|
}
|
|
|
|
custom_sort = NULL;
|
|
switch (r2(&ehdr->e_machine)) {
|
|
default:
|
|
fprintf(stderr, "unrecognized e_machine %d %s\n",
|
|
r2(&ehdr->e_machine), fname);
|
|
fail_file();
|
|
break;
|
|
case EM_386:
|
|
case EM_X86_64:
|
|
custom_sort = x86_sort_relative_table;
|
|
break;
|
|
|
|
case EM_S390:
|
|
case EM_AARCH64:
|
|
custom_sort = sort_relative_table;
|
|
break;
|
|
case EM_ARCOMPACT:
|
|
case EM_ARCV2:
|
|
case EM_ARM:
|
|
case EM_MICROBLAZE:
|
|
case EM_MIPS:
|
|
case EM_XTENSA:
|
|
break;
|
|
} /* end switch */
|
|
|
|
switch (ehdr->e_ident[EI_CLASS]) {
|
|
default:
|
|
fprintf(stderr, "unrecognized ELF class %d %s\n",
|
|
ehdr->e_ident[EI_CLASS], fname);
|
|
fail_file();
|
|
break;
|
|
case ELFCLASS32:
|
|
if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr)
|
|
|| r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
|
|
fprintf(stderr,
|
|
"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
|
|
fail_file();
|
|
}
|
|
do32(ehdr, fname, custom_sort);
|
|
break;
|
|
case ELFCLASS64: {
|
|
Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;
|
|
if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr)
|
|
|| r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
|
|
fprintf(stderr,
|
|
"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
|
|
fail_file();
|
|
}
|
|
do64(ghdr, fname, custom_sort);
|
|
break;
|
|
}
|
|
} /* end switch */
|
|
|
|
cleanup();
|
|
}
|
|
|
|
int
|
|
main(int argc, char *argv[])
|
|
{
|
|
int n_error = 0; /* gcc-4.3.0 false positive complaint */
|
|
int i;
|
|
|
|
if (argc < 2) {
|
|
fprintf(stderr, "usage: sortextable vmlinux...\n");
|
|
return 0;
|
|
}
|
|
|
|
/* Process each file in turn, allowing deep failure. */
|
|
for (i = 1; i < argc; i++) {
|
|
char *file = argv[i];
|
|
int const sjval = setjmp(jmpenv);
|
|
|
|
switch (sjval) {
|
|
default:
|
|
fprintf(stderr, "internal error: %s\n", file);
|
|
exit(1);
|
|
break;
|
|
case SJ_SETJMP: /* normal sequence */
|
|
/* Avoid problems if early cleanup() */
|
|
fd_map = -1;
|
|
ehdr_curr = NULL;
|
|
mmap_failed = 1;
|
|
do_file(file);
|
|
break;
|
|
case SJ_FAIL: /* error in do_file or below */
|
|
++n_error;
|
|
break;
|
|
case SJ_SUCCEED: /* premature success */
|
|
/* do nothing */
|
|
break;
|
|
} /* end switch */
|
|
}
|
|
return !!n_error;
|
|
}
|