// SPDX-License-Identifier: GPL-2.0-only
/*
 * sorttable.c: Sort the kernel's table
 *
 * Added ORC unwind tables sort support and other updates:
 * Copyright (C) 1999-2019 Alibaba Group Holding Limited. by:
 * Shile Zhang <shile.zhang@linux.alibaba.com>
 *
 * Copyright 2011 - 2012 Cavium, Inc.
 *
 * Based on code taken from recortmcount.c which is:
 *
 * Copyright 2009 John F. Reiser <jreiser@BitWagon.com>. All rights reserved.
 *
 * Restructured to fit Linux format, as well as other updates:
 * Copyright 2010 Steven Rostedt <srostedt@redhat.com>, Red Hat Inc.
 */

/*
 * Strategy: alter the vmlinux file in-place.
 */
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/stat.h>
# include <getopt.h>
# include <elf.h>
# include <fcntl.h>
# include <stdio.h>
# include <stdlib.h>
# include <string.h>
# include <unistd.h>
2012-04-24 22:23:14 +04:00
# include <tools/be_byteshift.h>
# include <tools/le_byteshift.h>
2013-11-15 10:38:05 +04:00
# ifndef EM_ARCOMPACT
# define EM_ARCOMPACT 93
# endif
2014-02-18 15:29:11 +04:00
# ifndef EM_XTENSA
# define EM_XTENSA 94
# endif
2013-05-08 20:29:24 +04:00
# ifndef EM_AARCH64
# define EM_AARCH64 183
# endif
2014-01-24 03:52:46 +04:00
# ifndef EM_MICROBLAZE
# define EM_MICROBLAZE 189
# endif
2013-11-22 11:35:58 +04:00
# ifndef EM_ARCV2
# define EM_ARCV2 195
# endif
2019-12-04 03:46:28 +03:00
/*
 * Endianness-dispatched accessors for 32/16/64-bit reads (r/r2/r8) and
 * writes (w/w2/w8).  do_file() points these at the *le or *be wrappers
 * below according to the ELF header's e_ident[EI_DATA].
 */
static uint32_t (*r)(const uint32_t *);
static uint16_t (*r2)(const uint16_t *);
static uint64_t (*r8)(const uint64_t *);
static void (*w)(uint32_t, uint32_t *);
static void (*w2)(uint16_t, uint16_t *);
static void (*w8)(uint64_t, uint64_t *);

/* Signature of an arch-specific in-place table sorter (image, size). */
typedef void (*table_sort_t)(char *, int);
2012-04-20 01:59:55 +04:00
/*
* Get the whole file as a programming convenience in order to avoid
* malloc + lseek + read + free of many pieces . If successful , then mmap
* avoids copying unused pieces ; else just read the whole file .
* Open for both read and write .
*/
2019-12-04 03:46:27 +03:00
/*
 * Map the whole file read/write + shared so the tables can be sorted
 * in place.  On success, returns the mapping address and stores the
 * file length in *size.  On any failure, returns NULL.
 *
 * Note: callers test "if (!addr)", so a failed mmap() must not leak
 * MAP_FAILED ((void *)-1, which is truthy) out of this function.
 */
static void *mmap_file(char const *fname, size_t *size)
{
	int fd;
	struct stat sb;
	void *addr = NULL;

	fd = open(fname, O_RDWR);
	if (fd < 0) {
		perror(fname);
		return NULL;
	}
	if (fstat(fd, &sb) < 0) {
		perror(fname);
		goto out;
	}
	if (!S_ISREG(sb.st_mode)) {
		fprintf(stderr, "not a regular file: %s\n", fname);
		goto out;
	}

	addr = mmap(0, sb.st_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		fprintf(stderr, "Could not mmap file: %s\n", fname);
		/* normalize to NULL so the caller's !addr check works */
		addr = NULL;
		goto out;
	}

	*size = sb.st_size;
out:
	/* the mapping (if any) stays valid after close() */
	close(fd);
	return addr;
}
2012-04-24 22:23:14 +04:00
/*
 * Table bytes may be either endianness and need not be naturally
 * aligned, so every access goes through the unaligned byteshift
 * helpers.  One trivial wrapper per width and byte order; do_file()
 * installs the matching set into the r/r2/r8/w/w2/w8 pointers above.
 */

/* big-endian reads */
static uint32_t rbe(const uint32_t *x)
{
	return get_unaligned_be32(x);
}

static uint16_t r2be(const uint16_t *x)
{
	return get_unaligned_be16(x);
}

static uint64_t r8be(const uint64_t *x)
{
	return get_unaligned_be64(x);
}

/* little-endian reads */
static uint32_t rle(const uint32_t *x)
{
	return get_unaligned_le32(x);
}

static uint16_t r2le(const uint16_t *x)
{
	return get_unaligned_le16(x);
}

static uint64_t r8le(const uint64_t *x)
{
	return get_unaligned_le64(x);
}

/* big-endian writes */
static void wbe(uint32_t val, uint32_t *x)
{
	put_unaligned_be32(val, x);
}

static void w2be(uint16_t val, uint16_t *x)
{
	put_unaligned_be16(val, x);
}

static void w8be(uint64_t val, uint64_t *x)
{
	put_unaligned_be64(val, x);
}

/* little-endian writes */
static void wle(uint32_t val, uint32_t *x)
{
	put_unaligned_le32(val, x);
}

static void w2le(uint16_t val, uint16_t *x)
{
	put_unaligned_le16(val, x);
}

static void w8le(uint64_t val, uint64_t *x)
{
	put_unaligned_le64(val, x);
}
2012-04-20 01:59:55 +04:00
2013-11-13 03:06:51 +04:00
/*
 * Move reserved section indices SHN_LORESERVE..SHN_HIRESERVE out of
 * the way to -256..-1, to avoid conflicting with real section
 * indices.
 */
# define SPECIAL(i) ((i) - (SHN_HIRESERVE + 1))
static inline int is_shndx_special ( unsigned int i )
{
return i ! = SHN_XINDEX & & i > = SHN_LORESERVE & & i < = SHN_HIRESERVE ;
}
/* Accessor for sym->st_shndx, hides ugliness of "64k sections" */
static inline unsigned int get_secindex(unsigned int shndx,
					unsigned int sym_offs,
					const Elf32_Word *symtab_shndx_start)
{
	/* escaped index: the real value lives in the SHNDX table */
	if (shndx == SHN_XINDEX)
		return r(&symtab_shndx_start[sym_offs]);
	/* reserved indices are remapped to -256..-1 */
	if (is_shndx_special(shndx))
		return SPECIAL(shndx);
	return shndx;
}
2012-04-20 01:59:55 +04:00
/* 32 bit and 64 bit are very similar */
2019-12-04 03:46:31 +03:00
# include "sorttable.h"
# define SORTTABLE_64
# include "sorttable.h"
2012-04-20 01:59:55 +04:00
2012-09-05 15:26:11 +04:00
/*
 * qsort() comparator for tables keyed by a signed 32-bit offset stored
 * in the first word of each entry (read via the endian-aware r()).
 */
static int compare_relative_table(const void *a, const void *b)
{
	int32_t av = (int32_t)r(a);
	int32_t bv = (int32_t)r(b);

	/* three-way compare without branches: -1, 0 or 1 */
	return (av > bv) - (av < bv);
}
2019-12-04 03:46:28 +03:00
static void sort_relative_table ( char * extab_image , int image_size )
2016-02-17 21:20:12 +03:00
{
2019-12-04 03:46:28 +03:00
int i = 0 ;
2016-02-17 21:20:12 +03:00
2019-12-04 03:46:28 +03:00
/*
* Do the same thing the runtime sort does , first normalize to
* being relative to the start of the section .
*/
2016-02-17 21:20:12 +03:00
while ( i < image_size ) {
uint32_t * loc = ( uint32_t * ) ( extab_image + i ) ;
w ( r ( loc ) + i , loc ) ;
2019-12-04 03:46:28 +03:00
i + = 4 ;
2016-02-17 21:20:12 +03:00
}
2019-12-04 03:46:28 +03:00
qsort ( extab_image , image_size / 8 , 8 , compare_relative_table ) ;
2016-02-17 21:20:12 +03:00
2019-12-04 03:46:28 +03:00
/* Now denormalize. */
2016-02-17 21:20:12 +03:00
i = 0 ;
while ( i < image_size ) {
uint32_t * loc = ( uint32_t * ) ( extab_image + i ) ;
w ( r ( loc ) - i , loc ) ;
2019-12-04 03:46:28 +03:00
i + = 4 ;
2016-02-17 21:20:12 +03:00
}
}
2019-12-04 03:46:28 +03:00
static void x86_sort_relative_table ( char * extab_image , int image_size )
2012-04-24 22:23:14 +04:00
{
2019-12-04 03:46:28 +03:00
int i = 0 ;
2012-04-24 22:23:14 +04:00
while ( i < image_size ) {
uint32_t * loc = ( uint32_t * ) ( extab_image + i ) ;
2019-12-04 03:46:28 +03:00
2012-04-24 22:23:14 +04:00
w ( r ( loc ) + i , loc ) ;
2019-12-04 03:46:28 +03:00
w ( r ( loc + 1 ) + i + 4 , loc + 1 ) ;
w ( r ( loc + 2 ) + i + 8 , loc + 2 ) ;
i + = sizeof ( uint32_t ) * 3 ;
2012-04-24 22:23:14 +04:00
}
2019-12-04 03:46:28 +03:00
qsort ( extab_image , image_size / 12 , 12 , compare_relative_table ) ;
2012-04-24 22:23:14 +04:00
i = 0 ;
while ( i < image_size ) {
uint32_t * loc = ( uint32_t * ) ( extab_image + i ) ;
2019-12-04 03:46:28 +03:00
2012-04-24 22:23:14 +04:00
w ( r ( loc ) - i , loc ) ;
2019-12-04 03:46:28 +03:00
w ( r ( loc + 1 ) - ( i + 4 ) , loc + 1 ) ;
w ( r ( loc + 2 ) - ( i + 8 ) , loc + 2 ) ;
i + = sizeof ( uint32_t ) * 3 ;
2012-04-24 22:23:14 +04:00
}
}
2012-04-20 01:59:55 +04:00
2020-06-30 21:52:03 +03:00
/*
 * s390 extable entries are 16 bytes: two 32-bit place-relative fields
 * followed by a 64-bit handler field.  Apply the usual normalize /
 * qsort / denormalize scheme, with special care for the handler.
 */
static void s390_sort_relative_table(char *extab_image, int image_size)
{
	int pos;

	for (pos = 0; pos < image_size; pos += 16) {
		char *entry = extab_image + pos;
		/*
		 * 0 is a special self-relative handler value, which means that
		 * handler should be ignored.  It is safe, because it means that
		 * handler field points to itself, which should never happen.
		 * When creating extable-relative values, keep it as 0, since
		 * this should never occur either: it would mean that handler
		 * field points to the first extable entry.
		 */
		uint64_t handler = r8((uint64_t *)(entry + 8));

		if (handler)
			handler += pos + 8;
		w8(handler, (uint64_t *)(entry + 8));

		w(r((uint32_t *)entry) + pos, (uint32_t *)entry);
		w(r((uint32_t *)(entry + 4)) + (pos + 4), (uint32_t *)(entry + 4));
	}

	qsort(extab_image, image_size / 16, 16, compare_relative_table);

	for (pos = 0; pos < image_size; pos += 16) {
		char *entry = extab_image + pos;
		uint64_t handler = r8((uint64_t *)(entry + 8));

		if (handler)
			handler -= pos + 8;
		w8(handler, (uint64_t *)(entry + 8));

		w(r((uint32_t *)entry) - pos, (uint32_t *)entry);
		w(r((uint32_t *)(entry + 4)) - (pos + 4), (uint32_t *)(entry + 4));
	}
}
2019-12-04 03:46:28 +03:00
/*
 * Validate one mapped ELF image and sort its tables.
 *
 * Steps: (1) install the endian accessors from e_ident[EI_DATA],
 * (2) sanity-check magic/type/version, (3) pick an arch-specific
 * sorter (or none) from e_machine, (4) dispatch to do_sort_32() or
 * do_sort_64() based on the ELF class.
 *
 * Returns 0 on success, -1 on any validation or sort failure.
 */
static int do_file(char const *const fname, void *addr)
{
	int rc = -1;
	Elf32_Ehdr *ehdr = addr;
	table_sort_t custom_sort = NULL;

	/* endianness must be decided first: all later checks use r2() */
	switch (ehdr->e_ident[EI_DATA]) {
	case ELFDATA2LSB:
		r = rle;
		r2 = r2le;
		r8 = r8le;
		w = wle;
		w2 = w2le;
		w8 = w8le;
		break;
	case ELFDATA2MSB:
		r = rbe;
		r2 = r2be;
		r8 = r8be;
		w = wbe;
		w2 = w2be;
		w8 = w8be;
		break;
	default:
		fprintf(stderr, "unrecognized ELF data encoding %d: %s\n",
			ehdr->e_ident[EI_DATA], fname);
		return -1;
	}

	/* only executables and shared objects (e.g. vmlinux) qualify */
	if (memcmp(ELFMAG, ehdr->e_ident, SELFMAG) != 0 ||
	    (r2(&ehdr->e_type) != ET_EXEC && r2(&ehdr->e_type) != ET_DYN) ||
	    ehdr->e_ident[EI_VERSION] != EV_CURRENT) {
		fprintf(stderr, "unrecognized ET_EXEC/ET_DYN file %s\n", fname);
		return -1;
	}

	/*
	 * Architectures with extable formats the generic sort cannot
	 * handle get a custom sorter; listed ones with a bare "break"
	 * use the default; anything else is rejected.
	 */
	switch (r2(&ehdr->e_machine)) {
	case EM_386:
	case EM_X86_64:
		custom_sort = x86_sort_relative_table;
		break;
	case EM_S390:
		custom_sort = s390_sort_relative_table;
		break;
	case EM_AARCH64:
	case EM_PARISC:
	case EM_PPC:
	case EM_PPC64:
		custom_sort = sort_relative_table;
		break;
	case EM_ARCOMPACT:
	case EM_ARCV2:
	case EM_ARM:
	case EM_MICROBLAZE:
	case EM_MIPS:
	case EM_XTENSA:
		break;
	default:
		fprintf(stderr, "unrecognized e_machine %d %s\n",
			r2(&ehdr->e_machine), fname);
		return -1;
	}

	/* dispatch on ELF class; header sizes double-check the class */
	switch (ehdr->e_ident[EI_CLASS]) {
	case ELFCLASS32:
		if (r2(&ehdr->e_ehsize) != sizeof(Elf32_Ehdr) ||
		    r2(&ehdr->e_shentsize) != sizeof(Elf32_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n", fname);
			break;
		}
		rc = do_sort_32(ehdr, fname, custom_sort);
		break;
	case ELFCLASS64:
	{
		Elf64_Ehdr *const ghdr = (Elf64_Ehdr *)ehdr;

		if (r2(&ghdr->e_ehsize) != sizeof(Elf64_Ehdr) ||
		    r2(&ghdr->e_shentsize) != sizeof(Elf64_Shdr)) {
			fprintf(stderr,
				"unrecognized ET_EXEC/ET_DYN file: %s\n",
				fname);
			break;
		}
		rc = do_sort_64(ghdr, fname, custom_sort);
	}
		break;
	default:
		fprintf(stderr, "unrecognized ELF class %d %s\n",
			ehdr->e_ident[EI_CLASS], fname);
		break;
	}

	return rc;
}
2019-12-04 03:46:28 +03:00
/*
 * Sort the tables of every vmlinux image named on the command line,
 * editing each file in place.  Failures are counted but do not stop
 * processing of later files; exit status is 1 if anything failed.
 */
int main(int argc, char *argv[])
{
	int n_error = 0;	/* gcc-4.3.0 false positive complaint */
	int i;

	if (argc < 2) {
		fprintf(stderr, "usage: sorttable vmlinux...\n");
		return 0;
	}

	/* Process each file in turn, allowing deep failure. */
	for (i = 1; i < argc; i++) {
		size_t size = 0;
		void *addr = mmap_file(argv[i], &size);

		if (!addr) {
			n_error++;
			continue;
		}

		if (do_file(argv[i], addr))
			n_error++;

		munmap(addr, size);
	}

	return !!n_error;
}