2019-05-19 15:07:45 +03:00
# SPDX-License-Identifier: GPL-2.0-only
2009-03-27 16:25:50 +03:00
config MICROBLAZE
def_bool y
32-bit userspace ABI: introduce ARCH_32BIT_OFF_T config option
All new 32-bit architectures should have a 64-bit userspace off_t type, but
existing architectures have 32-bit ones.
To enforce the rule, a new config option is added to arch/Kconfig.
ARCH_32BIT_OFF_T defaults to disabled for new 32-bit architectures, and all
existing 32-bit architectures enable it explicitly.
The new option affects force_o_largefile() behaviour. Namely, if userspace
off_t is 64 bits long, there is no reason to prevent users from opening big
files.
Note that even if an architecture has only a 64-bit off_t in the kernel
(arc, c6x, h8300, hexagon, nios2, openrisc, and unicore32),
a libc may use a 32-bit off_t, and therefore want to limit the file size
to 4GB unless specified differently in the open flags.
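For illustration only, a minimal sketch of the check this option is meant
to drive (the real hunk lives in include/linux/fcntl.h and may differ in
detail):

/* Sketch: let force_o_largefile() key off the new option.  Architectures
 * that keep a 32-bit userspace off_t select ARCH_32BIT_OFF_T and therefore
 * do not get O_LARGEFILE forced on every open().
 */
#ifndef force_o_largefile
#define force_o_largefile()	(!IS_ENABLED(CONFIG_ARCH_32BIT_OFF_T))
#endif

With ARCH_32BIT_OFF_T unset, every open() behaves as if O_LARGEFILE were
passed, so a 64-bit-off_t userspace never trips over the 2GB limit; with it
set, libc still has to request O_LARGEFILE explicitly.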
Signed-off-by: Yury Norov <ynorov@caviumnetworks.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Yury Norov <ynorov@marvell.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
2018-05-16 11:18:49 +03:00
select ARCH_32BIT_OFF_T
2018-07-31 14:39:29 +03:00
select ARCH_NO_SWAP
2019-06-13 10:08:57 +03:00
select ARCH_HAS_BINFMT_FLAT if !MMU
2019-08-14 17:03:47 +03:00
select ARCH_HAS_DMA_PREP_COHERENT
2014-12-13 03:57:44 +03:00
select ARCH_HAS_GCOV_PROFILE_ALL
2018-07-19 15:54:39 +03:00
select ARCH_HAS_SYNC_DMA_FOR_CPU
select ARCH_HAS_SYNC_DMA_FOR_DEVICE
2020-02-22 02:55:43 +03:00
select ARCH_HAS_DMA_SET_UNCACHED if !MMU
2013-10-08 06:12:28 +04:00
select ARCH_MIGHT_HAVE_PC_PARPORT
2012-07-31 01:42:46 +04:00
select ARCH_WANT_IPC_PARSE_VERSION
2019-12-04 03:46:31 +03:00
select BUILDTIME_TABLE_SORT
2017-05-26 20:34:11 +03:00
select TIMER_OF
2014-04-07 14:51:44 +04:00
select CLONE_BACKWARDS3
select COMMON_CLK
2019-08-14 17:03:48 +03:00
select DMA_DIRECT_REMAP if MMU
2014-04-07 14:51:44 +04:00
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_DEVICES
select GENERIC_IDLE_POLL_SETUP
2011-01-19 22:35:05 +03:00
select GENERIC_IRQ_PROBE
2011-03-24 16:55:52 +03:00
select GENERIC_IRQ_SHOW
2011-11-24 23:06:41 +04:00
select GENERIC_PCI_IOMAP
2013-12-20 13:16:40 +04:00
select GENERIC_SCHED_CLOCK
2016-05-25 18:06:09 +03:00
select HAVE_ARCH_HASH
2014-04-07 14:51:44 +04:00
select HAVE_ARCH_KGDB
select HAVE_DEBUG_KMEMLEAK
2020-01-14 12:05:24 +03:00
select HAVE_DMA_CONTIGUOUS
2014-04-07 14:51:44 +04:00
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_OPROFILE
2018-11-15 22:05:32 +03:00
select HAVE_PCI
2014-04-07 14:51:44 +04:00
select IRQ_DOMAIN
2016-11-14 15:13:45 +03:00
select XILINX_INTC
2012-09-28 09:01:03 +04:00
select MODULES_USE_ELF_RELA
2014-04-07 14:51:44 +04:00
select OF
select OF_EARLY_FLATTREE
2018-11-15 22:05:33 +03:00
select PCI_DOMAINS_GENERIC if PCI
2018-11-15 22:05:34 +03:00
select PCI_SYSCALL if PCI
2014-04-07 14:51:44 +04:00
select TRACING_SUPPORT
select VIRT_TO_BUS
lib/GCD.c: use binary GCD algorithm instead of Euclidean
The binary GCD algorithm is based on the following facts:
1. If a and b are both even, then gcd(a,b) = 2 * gcd(a/2, b/2)
2. If a is even and b is odd, then gcd(a,b) = gcd(a/2, b)
3. If a and b are both odd, then gcd(a,b) = gcd((a-b)/2, b) = gcd((a+b)/2, b)
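As an illustration only (not part of the patch), the three reductions above
transcribe directly into a recursive helper, which could even be added to
the gcd_func[] table of the benchmark below as an extra cross-check:

/* Illustrative only: a direct recursive transcription of rules 1-3. */
unsigned long gcd_rules(unsigned long a, unsigned long b)
{
	if (!a || !b)
		return a | b;				/* gcd(x, 0) == x */
	if (!(a & 1) && !(b & 1))
		return gcd_rules(a >> 1, b >> 1) << 1;	/* rule 1 */
	if (!(a & 1))
		return gcd_rules(a >> 1, b);		/* rule 2 */
	if (!(b & 1))
		return gcd_rules(a, b >> 1);		/* rule 2, arguments swapped */
	return a > b ? gcd_rules((a - b) >> 1, b)	/* rule 3 */
		     : gcd_rules((b - a) >> 1, a);
}

For example, gcd(36, 24) -> 2*gcd(18, 12) -> 4*gcd(9, 6) -> 4*gcd(9, 3)
-> 4*gcd(3, 3) -> 4*gcd(0, 3) = 4*3 = 12.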
Even on x86 machines with reasonable division hardware, the binary
algorithm runs about 25% faster (80% of the execution time) than the
division-based Euclidean algorithm.
On platforms like Alpha and ARMv6, where division is a function call to
emulation code, the difference is even more significant.
There are two variants of the code here, depending on whether a fast
__ffs (find least significant set bit) instruction is available. This
allows the unpredictable branches in the bit-at-a-time shifting loop to
be eliminated.
If fast __ffs is not available, the "even/odd" GCD variant is used.
In the benchmark below, gcd0 is the division-based baseline, gcd1 and
gcd3 model the __ffs-based variant (via __builtin_ctzl), and gcd2 and
gcd4 model the even/odd variant.
I used the following code to benchmark:
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#define swap(a, b) \
do { \
a ^= b; \
b ^= a; \
a ^= b; \
} while (0)
unsigned long gcd0(unsigned long a, unsigned long b)
{
unsigned long r;
if (a < b) {
swap(a, b);
}
if (b == 0)
return a;
while ((r = a % b) != 0) {
a = b;
b = r;
}
return b;
}
unsigned long gcd1(unsigned long a, unsigned long b)
{
unsigned long r = a | b;
if (!a || !b)
return r;
b >>= __builtin_ctzl(b);
for (;;) {
a >>= __builtin_ctzl(a);
if (a == b)
return a << __builtin_ctzl(r);
if (a < b)
swap(a, b);
a -= b;
}
}
unsigned long gcd2(unsigned long a, unsigned long b)
{
unsigned long r = a | b;
if (!a || !b)
return r;
r &= -r;
while (!(b & r))
b >>= 1;
for (;;) {
while (!(a & r))
a >>= 1;
if (a == b)
return a;
if (a < b)
swap(a, b);
a -= b;
a >>= 1;
if (a & r)
a += b;
a >>= 1;
}
}
unsigned long gcd3(unsigned long a, unsigned long b)
{
unsigned long r = a | b;
if (!a || !b)
return r;
b >>= __builtin_ctzl(b);
if (b == 1)
return r & -r;
for (;;) {
a >>= __builtin_ctzl(a);
if (a == 1)
return r & -r;
if (a == b)
return a << __builtin_ctzl(r);
if (a < b)
swap(a, b);
a -= b;
}
}
unsigned long gcd4(unsigned long a, unsigned long b)
{
unsigned long r = a | b;
if (!a || !b)
return r;
r &= -r;
while (!(b & r))
b >>= 1;
if (b == r)
return r;
for (;;) {
while (!(a & r))
a >>= 1;
if (a == r)
return r;
if (a == b)
return a;
if (a < b)
swap(a, b);
a -= b;
a >>= 1;
if (a & r)
a += b;
a >>= 1;
}
}
static unsigned long (*gcd_func[])(unsigned long a, unsigned long b) = {
gcd0, gcd1, gcd2, gcd3, gcd4,
};
#define TEST_ENTRIES (sizeof(gcd_func) / sizeof(gcd_func[0]))
#if defined(__x86_64__)
#define rdtscll(val) do { \
unsigned long __a,__d; \
__asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
(val) = ((unsigned long long)__a) | (((unsigned long long)__d)<<32); \
} while(0)
static unsigned long long benchmark_gcd_func(unsigned long (*gcd)(unsigned long, unsigned long),
unsigned long a, unsigned long b, unsigned long *res)
{
unsigned long long start, end;
unsigned long long ret;
unsigned long gcd_res;
rdtscll(start);
gcd_res = gcd(a, b);
rdtscll(end);
if (end >= start)
ret = end - start;
else
ret = ~0ULL - start + 1 + end;
*res = gcd_res;
return ret;
}
#else
static inline struct timespec read_time(void)
{
struct timespec time;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time);
return time;
}
static inline unsigned long long diff_time(struct timespec start, struct timespec end)
{
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0) {
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000ULL + end.tv_nsec - start.tv_nsec;
} else {
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
return temp.tv_sec * 1000000000ULL + temp.tv_nsec;
}
static unsigned long long benchmark_gcd_func(unsigned long (*gcd)(unsigned long, unsigned long),
unsigned long a, unsigned long b, unsigned long *res)
{
struct timespec start, end;
unsigned long gcd_res;
start = read_time();
gcd_res = gcd(a, b);
end = read_time();
*res = gcd_res;
return diff_time(start, end);
}
#endif
static inline unsigned long get_rand()
{
if (sizeof(long) == 8)
return (unsigned long)rand() << 32 | rand();
else
return rand();
}
int main(int argc, char **argv)
{
unsigned int seed = time(0);
int loops = 100;
int repeats = 1000;
unsigned long (*res)[TEST_ENTRIES];
unsigned long long elapsed[TEST_ENTRIES];
int i, j, k;
for (;;) {
int opt = getopt(argc, argv, "n:r:s:");
/* End condition always first */
if (opt == -1)
break;
switch (opt) {
case 'n':
loops = atoi(optarg);
break;
case 'r':
repeats = atoi(optarg);
break;
case 's':
seed = strtoul(optarg, NULL, 10);
break;
default:
/* You won't actually get here. */
break;
}
}
res = malloc(sizeof(unsigned long) * TEST_ENTRIES * loops);
memset(elapsed, 0, sizeof(elapsed));
srand(seed);
for (j = 0; j < loops; j++) {
unsigned long a = get_rand();
/* Do we have args? */
unsigned long b = argc > optind ? strtoul(argv[optind], NULL, 10) : get_rand();
unsigned long long min_elapsed[TEST_ENTRIES];
for (k = 0; k < repeats; k++) {
for (i = 0; i < TEST_ENTRIES; i++) {
unsigned long long tmp = benchmark_gcd_func(gcd_func[i], a, b, &res[j][i]);
if (k == 0 || min_elapsed[i] > tmp)
min_elapsed[i] = tmp;
}
}
for (i = 0; i < TEST_ENTRIES; i++)
elapsed[i] += min_elapsed[i];
}
for (i = 0; i < TEST_ENTRIES; i++)
printf("gcd%d: elapsed %llu\n", i, elapsed[i]);
k = 0;
srand(seed);
for (j = 0; j < loops; j++) {
unsigned long a = get_rand();
unsigned long b = argc > optind ? strtoul(argv[optind], NULL, 10) : get_rand();
for (i = 1; i < TEST_ENTRIES; i++) {
if (res[j][i] != res[j][0])
break;
}
if (i < TEST_ENTRIES) {
if (k == 0) {
k = 1;
fprintf(stderr, "Error:\n");
}
fprintf(stderr, "gcd(%lu, %lu): ", a, b);
for (i = 0; i < TEST_ENTRIES; i++)
fprintf(stderr, "%ld%s", res[j][i], i < TEST_ENTRIES - 1 ? ", " : "\n");
}
}
if (k == 0)
fprintf(stderr, "PASS\n");
free(res);
return 0;
}
Compiled with "-O2", on "VirtualBox 4.4.0-22-generic #38-Ubuntu x86_64" got:
zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
gcd0: elapsed 10174
gcd1: elapsed 2120
gcd2: elapsed 2902
gcd3: elapsed 2039
gcd4: elapsed 2812
PASS
zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
gcd0: elapsed 9309
gcd1: elapsed 2280
gcd2: elapsed 2822
gcd3: elapsed 2217
gcd4: elapsed 2710
PASS
zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
gcd0: elapsed 9589
gcd1: elapsed 2098
gcd2: elapsed 2815
gcd3: elapsed 2030
gcd4: elapsed 2718
PASS
zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
gcd0: elapsed 9914
gcd1: elapsed 2309
gcd2: elapsed 2779
gcd3: elapsed 2228
gcd4: elapsed 2709
PASS
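For context, a sketch (not the verbatim lib/gcd.c, which may differ in
detail) of how the kernel picks between the two variants at compile time:
the __ffs-based version is essentially gcd3 above with __ffs() in place of
__builtin_ctzl(), while architectures without a cheap __ffs (microblaze
selects CPU_NO_EFFICIENT_FFS below) fall back to the even/odd variant,
i.e. gcd4 above:

#ifndef CONFIG_CPU_NO_EFFICIENT_FFS
unsigned long gcd(unsigned long a, unsigned long b)
{
	unsigned long r = a | b;

	if (!a || !b)
		return r;
	b >>= __ffs(b);			/* gcd3, using the kernel's __ffs() */
	if (b == 1)
		return r & -r;
	for (;;) {
		a >>= __ffs(a);
		if (a == 1)
			return r & -r;
		if (a == b)
			return a << __ffs(r);
		if (a < b)
			swap(a, b);	/* swap() is the <linux/kernel.h> macro */
		a -= b;
	}
}
#else
/* No fast __ffs: fall back to the bit-at-a-time even/odd variant (gcd4). */
#endif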
[akpm@linux-foundation.org: avoid #defining a CONFIG_ variable]
Signed-off-by: Zhaoxiu Zeng <zhaoxiu.zeng@gmail.com>
Signed-off-by: George Spelvin <linux@horizon.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-05-21 03:03:57 +03:00
select CPU_NO_EFFICIENT_FFS
2018-09-04 18:04:07 +03:00
select MMU_GATHER_NO_RANGE if MMU
2017-08-08 17:06:52 +03:00
select SPARSE_IRQ
2009-03-27 16:25:50 +03:00
2017-09-09 02:14:25 +03:00
# Endianness selection
choice
prompt "Endianness selection"
2017-09-18 19:53:29 +03:00
default CPU_LITTLE_ENDIAN
2017-09-09 02:14:25 +03:00
help
The MicroBlaze architecture can be configured for either little-endian
or big-endian byte order. Be sure to select the appropriate mode.
config CPU_BIG_ENDIAN
bool "Big endian"
config CPU_LITTLE_ENDIAN
bool "Little endian"
endchoice
2011-12-15 12:24:06 +04:00
config ZONE_DMA
def_bool y
2009-03-27 16:25:50 +03:00
config ARCH_HAS_ILOG2_U32
def_bool n
config ARCH_HAS_ILOG2_U64
def_bool n
config GENERIC_HWEIGHT
def_bool y
config GENERIC_CALIBRATE_DELAY
def_bool y
2009-06-18 21:55:32 +04:00
config GENERIC_CSUM
def_bool y
2009-11-10 17:57:01 +03:00
config STACKTRACE_SUPPORT
def_bool y
2009-12-10 14:07:02 +03:00
config LOCKDEP_SUPPORT
def_bool y
2014-04-07 15:05:00 +04:00
source "arch/microblaze/Kconfig.platform"
2009-03-27 16:25:50 +03:00
menu "Processor type and features"
source "kernel/Kconfig.hz"
config MMU
2009-05-26 18:30:31 +04:00
bool "MMU support"
default n
2009-03-27 16:25:50 +03:00
comment "Boot options"
config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
config CMDLINE
string "Default kernel command string"
depends on CMDLINE_BOOL
default "console=ttyUL0,115200"
help
On some architectures there is currently no way for the boot loader
to pass arguments to the kernel. For these architectures, you should
supply some command-line options at build time by entering them
here.
config CMDLINE_FORCE
bool "Force default kernel command string"
depends on CMDLINE_BOOL
default n
help
Set this to have arguments from the default kernel command string
override those passed by the boot loader.
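For illustration, a sketch of the common pattern by which these options are
consumed during early boot (the function and parameter names here are made
up for the example; the real microblaze path goes through the device-tree
code):

/* Sketch only: applying CONFIG_CMDLINE / CONFIG_CMDLINE_FORCE.
 * "bootloader_args" stands in for whatever the boot interface handed over;
 * setup_boot_command_line() is a hypothetical helper, not a kernel API.
 */
static void setup_boot_command_line(char *cmdline, const char *bootloader_args)
{
	strlcpy(cmdline, bootloader_args, COMMAND_LINE_SIZE);
#ifdef CONFIG_CMDLINE_BOOL
	if (IS_ENABLED(CONFIG_CMDLINE_FORCE) || cmdline[0] == '\0')
		strlcpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
#endif
}

With CMDLINE_FORCE=y the built-in string always wins; otherwise it is only
a fallback for the case where the boot loader supplies nothing.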
2010-08-06 10:50:35 +04:00
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
default y
help
This kernel feature is useful for number-crunching applications
that may need to handle untrusted bytecode during their
execution. By using pipes or other transports made available to
the process as file descriptors supporting the read/write
syscalls, it's possible to isolate those applications in
their own address space using seccomp. Once seccomp is
enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
If unsure, say Y. Only embedded systems should say N here.
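As a userspace illustration (not part of this Kconfig, and not a kernel
hunk), strict-mode seccomp is enabled with a single prctl() call, after
which only read(), write(), _exit() and sigreturn() are permitted:

#include <linux/seccomp.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <unistd.h>

int main(void)
{
	/* Enter SECCOMP_MODE_STRICT: from here on, anything other than
	 * read, write, _exit and sigreturn kills the task.
	 */
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0) != 0) {
		perror("prctl(PR_SET_SECCOMP)");
		return 1;
	}
	write(STDOUT_FILENO, "sandboxed\n", 10);
	_exit(0);
}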
2009-03-27 16:25:50 +03:00
endmenu
2014-09-01 18:23:54 +04:00
menu "Kernel features"
2009-05-26 18:30:31 +04:00
2014-10-27 10:28:16 +03:00
config NR_CPUS
int
default "1"
2009-05-26 18:30:31 +04:00
config ADVANCED_OPTIONS
bool "Prompt for advanced kernel configuration options"
help
This option will enable prompting for a variety of advanced kernel
configuration options. These options can cause the kernel to not
work if they are set incorrectly, but can be used to optimize certain
aspects of kernel memory management.
Unless you know what you are doing, say N here.
comment "Default settings for advanced configuration options are used"
depends on !ADVANCED_OPTIONS
2010-02-22 14:16:08 +03:00
config XILINX_UNCACHED_SHADOW
bool "Are you using uncached shadow for RAM ?"
depends on ADVANCED_OPTIONS && !MMU
default n
help
This is needed to be able to allocate uncacheable memory regions.
The feature requires the design to define the RAM memory controller
window to be twice as large as the actual physical memory.
2011-12-15 18:02:37 +04:00
config HIGHMEM
bool "High memory support"
2009-05-26 18:30:31 +04:00
depends on MMU
help
2011-12-15 18:02:37 +04:00
The address space of MicroBlaze processors is only 4 Gigabytes
and it has to accommodate the user address space, the kernel address
space and some memory-mapped IO. That means that, if you
have a large amount of physical memory and/or IO, not all of the
memory can be "permanently mapped" by the kernel. The physical
memory that is not permanently mapped is called "high memory".
2009-05-26 18:30:31 +04:00
2011-12-15 18:02:37 +04:00
If unsure, say n.
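For illustration, one common pattern that HIGHMEM makes necessary (a sketch
using the generic kmap_atomic() API; zero_any_page() is a hypothetical
helper, not microblaze code):

#include <linux/highmem.h>
#include <linux/string.h>

/* Zero a page that may live in high memory: map it temporarily into the
 * kernel address space, touch it, then drop the mapping again.
 */
static void zero_any_page(struct page *page)
{
	void *vaddr = kmap_atomic(page);

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);
}

(The kernel already provides clear_highpage() for this job; the open-coded
version above only shows why a temporary mapping is needed at all.)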
2009-05-26 18:30:31 +04:00
config LOWMEM_SIZE_BOOL
bool "Set maximum low memory"
2010-02-22 13:33:07 +03:00
depends on ADVANCED_OPTIONS && MMU
2009-05-26 18:30:31 +04:00
help
This option allows you to set the maximum amount of memory which
will be used as "low memory", that is, memory which the kernel can
access directly, without having to set up a kernel virtual mapping.
This can be useful in optimizing the layout of kernel virtual
memory.
Say N here unless you know what you are doing.
config LOWMEM_SIZE
hex "Maximum low memory size (in bytes)" if LOWMEM_SIZE_BOOL
default "0x30000000"
2010-11-08 14:37:40 +03:00
config MANUAL_RESET_VECTOR
hex "Microblaze reset vector address setup"
default "0x0"
help
Set this option to have the kernel override the CPU Reset vector.
If zero, no change will be made to the MicroBlaze reset vector at
address 0x0.
If non-zero, a jump instruction to this address will be written
to the reset vector at address 0x0.
If you are unsure, keep the default value of 0x0.
2009-05-26 18:30:31 +04:00
config KERNEL_START_BOOL
bool "Set custom kernel base address"
depends on ADVANCED_OPTIONS
help
This option allows you to set the kernel virtual address at which
the kernel will map low memory (the kernel image will be linked at
this address). This can be useful in optimizing the virtual memory
layout of the system.
Say N here unless you know what you are doing.
config KERNEL_START
hex "Virtual address of kernel base" if KERNEL_START_BOOL
default "0xc0000000" if MMU
default KERNEL_BASE_ADDR if !MMU
config TASK_SIZE_BOOL
bool "Set custom user task size"
2010-02-22 13:33:07 +03:00
depends on ADVANCED_OPTIONS && MMU
2009-05-26 18:30:31 +04:00
help
This option allows you to set the amount of virtual address space
allocated to user tasks. This can be useful in optimizing the
virtual memory layout of the system.
Say N here unless you know what you are doing.
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
default "0x80000000"
2010-05-13 19:48:27 +04:00
choice
prompt "Page size"
default MICROBLAZE_4K_PAGES
depends on ADVANCED_OPTIONS && !MMU
help
Select the kernel logical page size. Increasing the page size
will reduce software overhead at each page boundary, allow
hardware prefetch mechanisms to be more effective, and allow
larger DMA transfers, increasing IO efficiency and reducing
overhead. However, memory utilization will increase.
For example, each cached file will use a multiple of the
page size to hold its contents, so the space between the end
of the file and the end of its last page is wasted; a 5k file
occupies 8k with 4k pages but a full 64k with 64k pages.
If unsure, choose 4K_PAGES.
config MICROBLAZE_4K_PAGES
bool "4k page size"
config MICROBLAZE_16K_PAGES
bool "16k page size"
2012-08-01 12:29:28 +04:00
config MICROBLAZE_64K_PAGES
bool "64k page size"
2010-05-13 19:48:27 +04:00
endchoice
2009-03-27 16:25:50 +03:00
endmenu
2010-01-18 17:27:10 +03:00
menu "Bus Options"
2010-01-18 17:27:11 +03:00
config PCI_XILINX
bool "Xilinx PCI host bridge support"
depends on PCI
2010-01-18 17:27:10 +03:00
endmenu