accel: Introduce AccelClass::cpu_common_[un]realize
accel: Target agnostic code movement
accel/tcg: Cleanups to use CPUState instead of CPUArchState
accel/tcg: Move CPUNegativeOffsetState into CPUState
tcg: Split out tcg init functions to tcg/startup.h
linux-user/hppa: Fix struct target_sigcontext layout
build: Remove --enable-gprof

Merge tag 'pull-tcg-20231004' of https://gitlab.com/rth7680/qemu into staging

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmUdsL4dHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV/iYggAvDJEyMCAXSSH97BA
# wZT/2D/MFIhOMk6xrQRnrXfrG70N0iVKz44jl9j7k1D+9BOHcso//DDJH3c96k9A
# MgDb6W2bsWvC15/Qw6BALf5bb/II0MJuCcQvj3CNX5lNkXAWhwIOBhsZx7V9ST1+
# rihN4nowpRWdV5GeCjDGaJW455Y1gc96hICYHy6Eqw1cUgUFt9vm5aYU3FHlat29
# sYRaVYKUL2hRUPPNcPiPq0AaJ8wN6/s8gT+V1UvTzkhHqskoM4ZU89RchuXVoq1h
# SvhKElyULMRzM7thWtpW8qYJPj4mxZsKArESvHjsunGD6KEz3Fh1sy6EKRcdmpG/
# II1vkg==
# =k2Io
# -----END PGP SIGNATURE-----
# gpg: Signature made Wed 04 Oct 2023 14:36:46 EDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-tcg-20231004' of https://gitlab.com/rth7680/qemu: (47 commits)
  tcg/loongarch64: Fix buid error
  tests/avocado: Re-enable MIPS Malta tests (GitLab issue #1884 fixed)
  build: Remove --enable-gprof
  linux-user/hppa: Fix struct target_sigcontext layout
  tcg: Split out tcg init functions to tcg/startup.h
  tcg: Remove argument to tcg_prologue_init
  accel/tcg: Make cpu-exec-common.c a target agnostic unit
  accel/tcg: Make icount.o a target agnostic unit
  accel/tcg: Make monitor.c a target-agnostic unit
  accel/tcg: Rename target-specific 'internal.h' -> 'internal-target.h'
  exec: Rename target specific page-vary.c -> page-vary-target.c
  exec: Rename cpu.c -> cpu-target.c
  accel: Rename accel-common.c -> accel-target.c
  accel: Make accel-blocker.o target agnostic
  accel/tcg: Restrict dump_exec_info() declaration
  exec: Move cpu_loop_foo() target agnostic functions to 'cpu-common.h'
  exec: Make EXCP_FOO definitions target agnostic
  accel/tcg: move ld/st helpers to ldst_common.c.inc
  accel/tcg: Unify user and softmmu do_[st|ld]*_mmu()
  accel/tcg: Remove env_tlb()
  ...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
commit 800af0aae1
@@ -139,8 +139,9 @@ R: Paolo Bonzini <pbonzini@redhat.com>
 S: Maintained
 F: softmmu/cpus.c
 F: softmmu/watchpoint.c
-F: cpus-common.c
-F: page-vary.c
+F: cpu-common.c
+F: cpu-target.c
+F: page-vary-target.c
 F: page-vary-common.c
 F: accel/tcg/
 F: accel/stubs/tcg-stub.c
@@ -1766,7 +1767,6 @@ M: Marcel Apfelbaum <marcel.apfelbaum@gmail.com>
 R: Philippe Mathieu-Daudé <philmd@linaro.org>
 R: Yanan Wang <wangyanan55@huawei.com>
 S: Supported
-F: cpu.c
 F: hw/core/cpu.c
 F: hw/core/machine-qmp-cmds.c
 F: hw/core/machine.c
@@ -2906,7 +2906,6 @@ F: softmmu/main.c
 F: softmmu/cpus.c
 F: softmmu/cpu-throttle.c
 F: softmmu/cpu-timers.c
-F: softmmu/icount.c
 F: softmmu/runstate*
 F: qapi/run-state.json
@@ -119,16 +119,37 @@ void accel_cpu_instance_init(CPUState *cpu)
     }
 }

-bool accel_cpu_realizefn(CPUState *cpu, Error **errp)
+bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
+    AccelState *accel = current_accel();
+    AccelClass *acc = ACCEL_GET_CLASS(accel);

-    if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) {
-        return cc->accel_cpu->cpu_realizefn(cpu, errp);
+    /* target specific realization */
+    if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize
+        && !cc->accel_cpu->cpu_target_realize(cpu, errp)) {
+        return false;
     }
+
+    /* generic realization */
+    if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
+        return false;
+    }
+
     return true;
 }
+
+void accel_cpu_common_unrealize(CPUState *cpu)
+{
+    AccelState *accel = current_accel();
+    AccelClass *acc = ACCEL_GET_CLASS(accel);
+
+    /* generic unrealization */
+    if (acc->cpu_common_unrealize) {
+        acc->cpu_common_unrealize(cpu);
+    }
+}

 int accel_supported_gdbstub_sstep_flags(void)
 {
     AccelState *accel = current_accel();
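For orientation, here is a minimal sketch of how an accelerator class could wire up the new hooks named in the hunk above. Only the AccelClass field names come from this diff; the x_accel_* identifiers and the class-init wiring are a hypothetical illustration, not code from this series:

static void x_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);

    /* Called for each vCPU from accel_cpu_common_realize()/_unrealize(). */
    ac->cpu_common_realize = x_accel_cpu_realize;     /* bool (*)(CPUState *, Error **) */
    ac->cpu_common_unrealize = x_accel_cpu_unrealize; /* void (*)(CPUState *) */
}

Returning false from the realize hook (with *errp set) makes accel_cpu_common_realize() fail, which is the same bool-returning convention tcg_exec_realizefn() is converted to later in this diff.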
@@ -27,7 +27,7 @@ static void *dummy_cpu_thread_fn(void *arg)
     qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     current_cpu = cpu;

 #ifndef _WIN32
@@ -428,7 +428,7 @@ static void *hvf_cpu_thread_fn(void *arg)
     qemu_thread_get_self(cpu->thread);

     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     current_cpu = cpu;

     hvf_init_vcpu(cpu);
@@ -36,7 +36,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
     qemu_mutex_lock_iothread();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     current_cpu = cpu;

     r = kvm_init_vcpu(cpu, &error_fatal);
@@ -1,5 +1,5 @@
-specific_ss.add(files('accel-common.c', 'accel-blocker.c'))
-system_ss.add(files('accel-softmmu.c'))
+specific_ss.add(files('accel-target.c'))
+system_ss.add(files('accel-softmmu.c', 'accel-blocker.c'))
 user_ss.add(files('accel-user.c'))

 subdir('tcg')
@@ -73,7 +73,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
                               ABI_TYPE cmpv, ABI_TYPE newv,
                               MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+                                         DATA_SIZE, retaddr);
     DATA_TYPE ret;

 #if DATA_SIZE == 16
@@ -90,7 +91,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
                            MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+                                         DATA_SIZE, retaddr);
     DATA_TYPE ret;

     ret = qatomic_xchg__nocheck(haddr, val);
@@ -104,7 +106,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
     ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
 { \
     DATA_TYPE *haddr, ret; \
-    haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
     ret = qatomic_##X(haddr, val); \
     ATOMIC_MMU_CLEANUP; \
     atomic_trace_rmw_post(env, addr, oi); \
@@ -135,7 +137,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
     ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
 { \
     XDATA_TYPE *haddr, cmp, old, new, val = xval; \
-    haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
     smp_mb(); \
     cmp = qatomic_read__nocheck(haddr); \
     do { \
@@ -176,7 +178,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
                               ABI_TYPE cmpv, ABI_TYPE newv,
                               MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+                                         DATA_SIZE, retaddr);
     DATA_TYPE ret;

 #if DATA_SIZE == 16
@@ -193,7 +196,8 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
 ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
                            MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+                                         DATA_SIZE, retaddr);
     ABI_TYPE ret;

     ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
@@ -207,7 +211,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
     ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
 { \
     DATA_TYPE *haddr, ret; \
-    haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
     ret = qatomic_##X(haddr, BSWAP(val)); \
     ATOMIC_MMU_CLEANUP; \
     atomic_trace_rmw_post(env, addr, oi); \
@@ -235,7 +239,7 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
     ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
 { \
     XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
-    haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, retaddr); \
+    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
     smp_mb(); \
     ldn = qatomic_read__nocheck(haddr); \
     do { \
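The mechanical change running through the hunk above (and most of this pull) is passing the generic CPUState instead of the per-target CPUArchState. A rough, simplified sketch of the two accessors involved — not the verbatim upstream definitions, which achieve the same via fixed offsets inside ArchCPU (QEMU headers assumed):

/* ArchCPU embeds both views: { CPUState parent_obj; ...; CPUArchState env; } */
static inline CPUState *sketch_env_cpu(CPUArchState *env)
{
    return &container_of(env, ArchCPU, env)->parent_obj;   /* arch env -> CPUState */
}

static inline CPUArchState *sketch_cpu_env(CPUState *cpu)
{
    return &container_of(cpu, ArchCPU, parent_obj)->env;   /* CPUState -> arch env */
}

This is why atomic_mmu_lookup() can now take a CPUState * and stay target agnostic, while the CPUArchState-typed entry points convert once at their boundary with env_cpu(env).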
@@ -20,9 +20,8 @@
 #include "qemu/osdep.h"
 #include "sysemu/cpus.h"
 #include "sysemu/tcg.h"
-#include "exec/exec-all.h"
 #include "qemu/plugin.h"
-#include "internal.h"
+#include "internal-common.h"

 bool tcg_allowed;

@@ -36,7 +35,7 @@ void cpu_loop_exit_noexc(CPUState *cpu)
 void cpu_loop_exit(CPUState *cpu)
 {
     /* Undo the setting in cpu_tb_exec. */
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     /* Undo any setting in generated code. */
     qemu_plugin_disable_mem_helpers(cpu);
     siglongjmp(cpu->jmp_env, 1);
@@ -42,7 +42,8 @@
 #include "tb-jmp-cache.h"
 #include "tb-hash.h"
 #include "tb-context.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"

 /* -icount align implementation. */

@@ -73,7 +74,7 @@ static void align_clocks(SyncClocks *sc, CPUState *cpu)
         return;
     }

-    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+    cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
     sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
     sc->last_cpu_icount = cpu_icount;

@@ -124,7 +125,7 @@ static void init_delay_params(SyncClocks *sc, CPUState *cpu)
     sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
     sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
     sc->last_cpu_icount
-        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+        = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
     if (sc->diff_clk < max_delay) {
         max_delay = sc->diff_clk;
     }
@@ -222,7 +223,7 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
     struct tb_desc desc;
     uint32_t h;

-    desc.env = cpu->env_ptr;
+    desc.env = cpu_env(cpu);
     desc.cs_base = cs_base;
     desc.flags = flags;
     desc.cflags = cflags;
@@ -444,7 +445,7 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
 static inline TranslationBlock * QEMU_DISABLE_CFI
 cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
 {
-    CPUArchState *env = cpu->env_ptr;
+    CPUArchState *env = cpu_env(cpu);
     uintptr_t ret;
     TranslationBlock *last_tb;
     const void *tb_ptr = itb->tc.ptr;
@@ -455,7 +456,7 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)

     qemu_thread_jit_execute();
     ret = tcg_qemu_tb_exec(env, tb_ptr);
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     qemu_plugin_disable_mem_helpers(cpu);
     /*
      * TODO: Delay swapping back to the read-write region of the TB
@@ -565,7 +566,7 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)

 void cpu_exec_step_atomic(CPUState *cpu)
 {
-    CPUArchState *env = cpu->env_ptr;
+    CPUArchState *env = cpu_env(cpu);
     TranslationBlock *tb;
     vaddr pc;
     uint64_t cs_base;
@@ -717,7 +718,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
     if (cpu->exception_index < 0) {
 #ifndef CONFIG_USER_ONLY
         if (replay_has_exception()
-            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
+            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
             /* Execute just one insn to trigger exception pending in the log */
             cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                 | CF_LAST_IO | CF_NOIRQ | 1;
@@ -807,7 +808,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
      * Ensure zeroing happens before reading cpu->exit_request or
      * cpu->interrupt_request (see also smp_wmb in cpu_exit())
      */
-    qatomic_set_mb(&cpu_neg(cpu)->icount_decr.u16.high, 0);
+    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);

     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
@@ -898,7 +899,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
     if (unlikely(qatomic_read(&cpu->exit_request))
         || (icount_enabled()
             && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
-            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
+            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0)) {
         qatomic_set(&cpu->exit_request, 0);
         if (cpu->exception_index == -1) {
             cpu->exception_index = EXCP_INTERRUPT;
@@ -923,7 +924,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     }

     *last_tb = NULL;
-    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
+    insns_left = qatomic_read(&cpu->neg.icount_decr.u32);
     if (insns_left < 0) {
         /* Something asked us to stop executing chained TBs; just
          * continue round the main loop. Whatever requested the exit
@@ -942,7 +943,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
         icount_update(cpu);
         /* Refill decrementer and continue execution. */
         insns_left = MIN(0xffff, cpu->icount_budget);
-        cpu_neg(cpu)->icount_decr.u16.low = insns_left;
+        cpu->neg.icount_decr.u16.low = insns_left;
         cpu->icount_extra = cpu->icount_budget - insns_left;

         /*
@@ -976,7 +977,7 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
         uint64_t cs_base;
         uint32_t flags, cflags;

-        cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+        cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);

         /*
          * When requested, use an exact setting for cflags for the next
@@ -1088,7 +1089,7 @@ int cpu_exec(CPUState *cpu)
     return ret;
 }

-void tcg_exec_realizefn(CPUState *cpu, Error **errp)
+bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
 {
     static bool tcg_target_initialized;
     CPUClass *cc = CPU_GET_CLASS(cpu);
@@ -1104,6 +1105,8 @@ void tcg_exec_realizefn(CPUState *cpu, Error **errp)
     tcg_iommu_init_notifier_list(cpu);
 #endif /* !CONFIG_USER_ONLY */
     /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
+
+    return true;
 }

 /* undo the initializations in reverse order */
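Most of the cpu_neg(cpu)->X to cpu->neg.X rewrites above follow from "accel/tcg: Move CPUNegativeOffsetState into CPUState". A simplified sketch of the layout idea — field names are from the series, but the structures are trimmed down, so treat this as an approximation rather than the upstream definitions:

typedef struct CPUNegativeOffsetState {
    /* in system emulation the softmmu TLB lives here as well */
    IcountDecr icount_decr;
    bool can_do_io;
} CPUNegativeOffsetState;

struct CPUState {
    /* ... generic fields ... */
    CPUNegativeOffsetState neg;  /* kept at the end of CPUState */
};

/* Each target's ArchCPU places the arch state right after CPUState, e.g. */
struct ArchCPU {
    CPUState parent_obj;   /* ends with 'neg' */
    CPUArchState env;      /* generated code addresses guest state via 'env' */
};

Because 'neg' sits immediately before 'env', TCG-generated code can still reach icount_decr and can_do_io at a small negative offset from the env pointer, while C code now simply writes cpu->neg.can_do_io.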
(File diff suppressed because it is too large; not shown.)
@@ -27,7 +27,6 @@
 #include "migration/vmstate.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
-#include "exec/exec-all.h"
 #include "sysemu/cpus.h"
 #include "sysemu/qtest.h"
 #include "qemu/main-loop.h"
@@ -38,7 +37,7 @@
 #include "hw/core/cpu.h"
 #include "sysemu/cpu-timers.h"
 #include "sysemu/cpu-throttle.h"
-#include "timers-state.h"
+#include "softmmu/timers-state.h"

 /*
  * ICOUNT: Instruction Counter
@@ -75,7 +74,7 @@ static void icount_enable_adaptive(void)
 static int64_t icount_get_executed(CPUState *cpu)
 {
     return (cpu->icount_budget -
-            (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
+            (cpu->neg.icount_decr.u16.low + cpu->icount_extra));
 }

 /*
@@ -111,7 +110,7 @@ static int64_t icount_get_raw_locked(void)
     CPUState *cpu = current_cpu;

     if (cpu && cpu->running) {
-        if (!cpu->can_do_io) {
+        if (!cpu->neg.can_do_io) {
             error_report("Bad icount read");
             exit(1);
         }
accel/tcg/internal-common.h (new file, 28 lines)
@@ -0,0 +1,28 @@
+/*
+ * Internal execution defines for qemu (target agnostic)
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_INTERNAL_COMMON_H
+#define ACCEL_TCG_INTERNAL_COMMON_H
+
+#include "exec/translation-block.h"
+
+extern int64_t max_delay;
+extern int64_t max_advance;
+
+void dump_exec_info(GString *buf);
+
+/*
+ * Return true if CS is not running in parallel with other cpus, either
+ * because there are no other cpus or we are within an exclusive context.
+ */
+static inline bool cpu_in_serial_context(CPUState *cs)
+{
+    return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
+}
+
+#endif
@@ -1,13 +1,13 @@
 /*
- * Internal execution defines for qemu
+ * Internal execution defines for qemu (target specific)
  *
  * Copyright (c) 2003 Fabrice Bellard
  *
  * SPDX-License-Identifier: LGPL-2.1-or-later
  */

-#ifndef ACCEL_TCG_INTERNAL_H
-#define ACCEL_TCG_INTERNAL_H
+#ifndef ACCEL_TCG_INTERNAL_TARGET_H
+#define ACCEL_TCG_INTERNAL_TARGET_H

 #include "exec/exec-all.h"
 #include "exec/translate-all.h"
@@ -80,6 +80,9 @@ bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
 void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                uintptr_t host_pc);

+bool tcg_exec_realizefn(CPUState *cpu, Error **errp);
+void tcg_exec_unrealizefn(CPUState *cpu);
+
 /* Return the current PC from CPU, which may be cached in TB. */
 static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
 {
@@ -90,18 +93,6 @@ static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
     }
 }

-/*
- * Return true if CS is not running in parallel with other cpus, either
- * because there are no other cpus or we are within an exclusive context.
- */
-static inline bool cpu_in_serial_context(CPUState *cs)
-{
-    return !(cs->tcg_cflags & CF_PARALLEL) || cpu_in_exclusive_context(cs);
-}
-
-extern int64_t max_delay;
-extern int64_t max_advance;
-
 extern bool one_insn_per_tb;

 /**
@@ -26,7 +26,7 @@
  * If the operation must be split into two operations to be
  * examined separately for atomicity, return -lg2.
  */
-static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop)
+static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
 {
     MemOp atom = memop & MO_ATOM_MASK;
     MemOp size = memop & MO_SIZE;
@@ -93,7 +93,7 @@ static int required_atomicity(CPUArchState *env, uintptr_t p, MemOp memop)
      * host atomicity in order to avoid racing. This reduction
      * avoids looping with cpu_loop_exit_atomic.
      */
-    if (cpu_in_serial_context(env_cpu(env))) {
+    if (cpu_in_serial_context(cpu)) {
         return MO_8;
     }
     return atmax;
@@ -139,14 +139,14 @@ static inline uint64_t load_atomic8(void *pv)

 /**
  * load_atomic8_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
  * @ra: host unwind address
  * @pv: host address
  *
  * Atomically load 8 aligned bytes from @pv.
  * If this is not possible, longjmp out to restart serially.
  */
-static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
+static uint64_t load_atomic8_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
 {
     if (HAVE_al8) {
         return load_atomic8(pv);
@@ -168,19 +168,19 @@ static uint64_t load_atomic8_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
 #endif

     /* Ultimate fallback: re-execute in serial context. */
-    cpu_loop_exit_atomic(env_cpu(env), ra);
+    cpu_loop_exit_atomic(cpu, ra);
 }

 /**
  * load_atomic16_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
  * @ra: host unwind address
  * @pv: host address
  *
  * Atomically load 16 aligned bytes from @pv.
  * If this is not possible, longjmp out to restart serially.
  */
-static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
+static Int128 load_atomic16_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
 {
     Int128 *p = __builtin_assume_aligned(pv, 16);

@@ -212,7 +212,7 @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
     }

     /* Ultimate fallback: re-execute in serial context. */
-    cpu_loop_exit_atomic(env_cpu(env), ra);
+    cpu_loop_exit_atomic(cpu, ra);
 }

 /**
@@ -263,7 +263,7 @@ static uint64_t load_atom_extract_al8x2(void *pv)

 /**
  * load_atom_extract_al8_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
  * @ra: host unwind address
  * @pv: host address
  * @s: object size in bytes, @s <= 4.
@@ -273,7 +273,7 @@ static uint64_t load_atom_extract_al8x2(void *pv)
  * 8-byte load and extract.
  * The value is returned in the low bits of a uint32_t.
  */
-static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
+static uint32_t load_atom_extract_al8_or_exit(CPUState *cpu, uintptr_t ra,
                                               void *pv, int s)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -281,12 +281,12 @@ static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
     int shr = (HOST_BIG_ENDIAN ? 8 - s - o : o) * 8;

     pv = (void *)(pi & ~7);
-    return load_atomic8_or_exit(env, ra, pv) >> shr;
+    return load_atomic8_or_exit(cpu, ra, pv) >> shr;
 }

 /**
  * load_atom_extract_al16_or_exit:
- * @env: cpu context
+ * @cpu: generic cpu state
  * @ra: host unwind address
  * @p: host address
  * @s: object size in bytes, @s <= 8.
@@ -299,7 +299,7 @@ static uint32_t load_atom_extract_al8_or_exit(CPUArchState *env, uintptr_t ra,
  *
  * If this is not possible, longjmp out to restart serially.
  */
-static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
+static uint64_t load_atom_extract_al16_or_exit(CPUState *cpu, uintptr_t ra,
                                                void *pv, int s)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -312,7 +312,7 @@ static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
      * Provoke SIGBUS if possible otherwise.
      */
     pv = (void *)(pi & ~7);
-    r = load_atomic16_or_exit(env, ra, pv);
+    r = load_atomic16_or_exit(cpu, ra, pv);

     r = int128_urshift(r, shr);
     return int128_getlo(r);
@@ -394,7 +394,7 @@ static inline uint64_t load_atom_8_by_8_or_4(void *pv)
  *
  * Load 2 bytes from @p, honoring the atomicity of @memop.
  */
-static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
+static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
                             void *pv, MemOp memop)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -410,7 +410,7 @@ static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
         }
     }

-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     switch (atmax) {
     case MO_8:
         return lduw_he_p(pv);
@@ -421,9 +421,9 @@ static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
             return load_atomic4(pv - 1) >> 8;
         }
         if ((pi & 15) != 7) {
-            return load_atom_extract_al8_or_exit(env, ra, pv, 2);
+            return load_atom_extract_al8_or_exit(cpu, ra, pv, 2);
         }
-        return load_atom_extract_al16_or_exit(env, ra, pv, 2);
+        return load_atom_extract_al16_or_exit(cpu, ra, pv, 2);
     default:
         g_assert_not_reached();
     }
@@ -436,7 +436,7 @@ static uint16_t load_atom_2(CPUArchState *env, uintptr_t ra,
  *
  * Load 4 bytes from @p, honoring the atomicity of @memop.
  */
-static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
+static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
                             void *pv, MemOp memop)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -452,7 +452,7 @@ static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
         }
     }

-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     switch (atmax) {
     case MO_8:
     case MO_16:
@@ -466,9 +466,9 @@ static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
         return load_atom_extract_al4x2(pv);
     case MO_32:
         if (!(pi & 4)) {
-            return load_atom_extract_al8_or_exit(env, ra, pv, 4);
+            return load_atom_extract_al8_or_exit(cpu, ra, pv, 4);
         }
-        return load_atom_extract_al16_or_exit(env, ra, pv, 4);
+        return load_atom_extract_al16_or_exit(cpu, ra, pv, 4);
     default:
         g_assert_not_reached();
     }
@@ -481,7 +481,7 @@ static uint32_t load_atom_4(CPUArchState *env, uintptr_t ra,
  *
  * Load 8 bytes from @p, honoring the atomicity of @memop.
  */
-static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
+static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
                             void *pv, MemOp memop)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -498,12 +498,12 @@ static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
         return load_atom_extract_al16_or_al8(pv, 8);
     }

-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     if (atmax == MO_64) {
         if (!HAVE_al8 && (pi & 7) == 0) {
-            load_atomic8_or_exit(env, ra, pv);
+            load_atomic8_or_exit(cpu, ra, pv);
         }
-        return load_atom_extract_al16_or_exit(env, ra, pv, 8);
+        return load_atom_extract_al16_or_exit(cpu, ra, pv, 8);
     }
     if (HAVE_al8_fast) {
         return load_atom_extract_al8x2(pv);
@@ -519,7 +519,7 @@ static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
         if (HAVE_al8) {
             return load_atom_extract_al8x2(pv);
         }
-        cpu_loop_exit_atomic(env_cpu(env), ra);
+        cpu_loop_exit_atomic(cpu, ra);
     default:
         g_assert_not_reached();
     }
@@ -532,7 +532,7 @@ static uint64_t load_atom_8(CPUArchState *env, uintptr_t ra,
  *
  * Load 16 bytes from @p, honoring the atomicity of @memop.
  */
-static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
+static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
                            void *pv, MemOp memop)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -548,7 +548,7 @@ static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
         return atomic16_read_ro(pv);
     }

-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     switch (atmax) {
     case MO_8:
         memcpy(&r, pv, 16);
@@ -563,20 +563,20 @@ static Int128 load_atom_16(CPUArchState *env, uintptr_t ra,
         break;
     case MO_64:
         if (!HAVE_al8) {
-            cpu_loop_exit_atomic(env_cpu(env), ra);
+            cpu_loop_exit_atomic(cpu, ra);
         }
         a = load_atomic8(pv);
         b = load_atomic8(pv + 8);
         break;
     case -MO_64:
         if (!HAVE_al8) {
-            cpu_loop_exit_atomic(env_cpu(env), ra);
+            cpu_loop_exit_atomic(cpu, ra);
         }
         a = load_atom_extract_al8x2(pv);
         b = load_atom_extract_al8x2(pv + 8);
         break;
     case MO_128:
-        return load_atomic16_or_exit(env, ra, pv);
+        return load_atomic16_or_exit(cpu, ra, pv);
     default:
         g_assert_not_reached();
     }
@@ -857,7 +857,7 @@ static uint64_t store_whole_le16(void *pv, int size, Int128 val_le)
  *
  * Store 2 bytes to @p, honoring the atomicity of @memop.
  */
-static void store_atom_2(CPUArchState *env, uintptr_t ra,
+static void store_atom_2(CPUState *cpu, uintptr_t ra,
                          void *pv, MemOp memop, uint16_t val)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -868,7 +868,7 @@ static void store_atom_2(CPUArchState *env, uintptr_t ra,
         return;
     }

-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     if (atmax == MO_8) {
         stw_he_p(pv, val);
         return;
@@ -897,7 +897,7 @@ static void store_atom_2(CPUArchState *env, uintptr_t ra,
         g_assert_not_reached();
     }

-    cpu_loop_exit_atomic(env_cpu(env), ra);
+    cpu_loop_exit_atomic(cpu, ra);
 }

 /**
@@ -908,7 +908,7 @@ static void store_atom_2(CPUArchState *env, uintptr_t ra,
  *
  * Store 4 bytes to @p, honoring the atomicity of @memop.
  */
-static void store_atom_4(CPUArchState *env, uintptr_t ra,
+static void store_atom_4(CPUState *cpu, uintptr_t ra,
                          void *pv, MemOp memop, uint32_t val)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -919,7 +919,7 @@ static void store_atom_4(CPUArchState *env, uintptr_t ra,
         return;
     }

-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     switch (atmax) {
     case MO_8:
         stl_he_p(pv, val);
@@ -961,7 +961,7 @@ static void store_atom_4(CPUArchState *env, uintptr_t ra,
                 return;
             }
         }
-        cpu_loop_exit_atomic(env_cpu(env), ra);
+        cpu_loop_exit_atomic(cpu, ra);
     default:
         g_assert_not_reached();
     }
@@ -975,7 +975,7 @@ static void store_atom_4(CPUArchState *env, uintptr_t ra,
  *
  * Store 8 bytes to @p, honoring the atomicity of @memop.
  */
-static void store_atom_8(CPUArchState *env, uintptr_t ra,
+static void store_atom_8(CPUState *cpu, uintptr_t ra,
                          void *pv, MemOp memop, uint64_t val)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -986,7 +986,7 @@ static void store_atom_8(CPUArchState *env, uintptr_t ra,
         return;
     }

-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);
     switch (atmax) {
     case MO_8:
         stq_he_p(pv, val);
@@ -1029,7 +1029,7 @@ static void store_atom_8(CPUArchState *env, uintptr_t ra,
     default:
         g_assert_not_reached();
     }
-    cpu_loop_exit_atomic(env_cpu(env), ra);
+    cpu_loop_exit_atomic(cpu, ra);
 }

 /**
@@ -1040,7 +1040,7 @@ static void store_atom_8(CPUArchState *env, uintptr_t ra,
  *
  * Store 16 bytes to @p, honoring the atomicity of @memop.
  */
-static void store_atom_16(CPUArchState *env, uintptr_t ra,
+static void store_atom_16(CPUState *cpu, uintptr_t ra,
                           void *pv, MemOp memop, Int128 val)
 {
     uintptr_t pi = (uintptr_t)pv;
@@ -1052,7 +1052,7 @@ static void store_atom_16(CPUArchState *env, uintptr_t ra,
         return;
     }

-    atmax = required_atomicity(env, pi, memop);
+    atmax = required_atomicity(cpu, pi, memop);

     a = HOST_BIG_ENDIAN ? int128_gethi(val) : int128_getlo(val);
     b = HOST_BIG_ENDIAN ? int128_getlo(val) : int128_gethi(val);
@@ -1111,5 +1111,5 @@ static void store_atom_16(CPUArchState *env, uintptr_t ra,
     default:
         g_assert_not_reached();
     }
-    cpu_loop_exit_atomic(env_cpu(env), ra);
+    cpu_loop_exit_atomic(cpu, ra);
 }
@@ -8,6 +8,231 @@
  * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.
  */
+/*
+ * Load helpers for tcg-ldst.h
+ */
+
+tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
+    return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+    return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+    return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
+                        MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+    return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+/*
+ * Provide signed versions of the load routines as well. We can of course
+ * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
+ */
+
+tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
+}
+
+Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
+                       MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+    return do_ld16_mmu(env_cpu(env), addr, oi, retaddr);
+}
+
+Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
+{
+    return helper_ld16_mmu(env, addr, oi, GETPC());
+}
+
+/*
+ * Store helpers for tcg-ldst.h
+ */
+
+void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
+    do_st1_mmu(env_cpu(env), addr, val, oi, ra);
+}
+
+void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+    do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+    do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+    do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
+                     MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+    do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
+{
+    helper_st16_mmu(env, addr, val, oi, GETPC());
+}
+
+/*
+ * Load helpers for cpu_ldst.h
+ */
+
+static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
+{
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+}
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
+{
+    uint8_t ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
+    ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    uint16_t ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+    ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    uint32_t ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+    ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    uint64_t ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+    ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    Int128 ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+    ret = do_ld16_mmu(env_cpu(env), addr, oi, ra);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+/*
+ * Store helpers for cpu_ldst.h
+ */
+
+static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
+{
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
+{
+    helper_stb_mmu(env, addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+    do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+    do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+    do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+                  MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+    do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+/*
+ * Wrappers of the above
+ */
+
 uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                             int mmu_idx, uintptr_t ra)
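The cpu_ldst.h entry points collected above all take a pre-packed MemOpIdx. As a usage illustration only — the demo_* wrapper is hypothetical, while make_memop_idx(), MO_TEUL and cpu_mmu_index() are the usual QEMU idioms for building that argument:

static uint32_t demo_load_u32(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    /* Pack the access size/endianness and the mmu index into one operand. */
    MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, false));

    /* ra is the host return address used for unwinding, or 0 from plain C. */
    return cpu_ldl_mmu(env, addr, oi, ra);
}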
@@ -1,7 +1,9 @@
 tcg_ss = ss.source_set()
+common_ss.add(when: 'CONFIG_TCG', if_true: files(
+  'cpu-exec-common.c',
+))
 tcg_ss.add(files(
   'tcg-all.c',
-  'cpu-exec-common.c',
   'cpu-exec.c',
   'tb-maint.c',
   'tcg-runtime-gvec.c',
@@ -20,6 +22,10 @@ specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)

 specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
   'cputlb.c',
 ))

+system_ss.add(when: ['CONFIG_TCG'], if_true: files(
+  'icount-common.c',
+  'monitor.c',
+))

@@ -16,7 +16,7 @@
 #include "sysemu/cpu-timers.h"
 #include "sysemu/tcg.h"
 #include "tcg/tcg.h"
-#include "internal.h"
+#include "internal-common.h"


 static void dump_drift_info(GString *buf)
@@ -104,7 +104,7 @@ static void gen_empty_udata_cb(void)
     TCGv_ptr udata = tcg_temp_ebb_new_ptr();

     tcg_gen_movi_ptr(udata, 0);
-    tcg_gen_ld_i32(cpu_index, cpu_env,
+    tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
     gen_helper_plugin_vcpu_udata_cb(cpu_index, udata);

@@ -138,7 +138,7 @@ static void gen_empty_mem_cb(TCGv_i64 addr, uint32_t info)

     tcg_gen_movi_i32(meminfo, info);
     tcg_gen_movi_ptr(udata, 0);
-    tcg_gen_ld_i32(cpu_index, cpu_env,
+    tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));

     gen_helper_plugin_vcpu_mem_cb(cpu_index, meminfo, addr, udata);
@@ -157,7 +157,7 @@ static void gen_empty_mem_helper(void)
     TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

     tcg_gen_movi_ptr(ptr, 0);
-    tcg_gen_st_ptr(ptr, cpu_env, offsetof(CPUState, plugin_mem_cbs) -
+    tcg_gen_st_ptr(ptr, tcg_env, offsetof(CPUState, plugin_mem_cbs) -
                              offsetof(ArchCPU, env));
     tcg_temp_free_ptr(ptr);
 }
@@ -581,7 +581,7 @@ void plugin_gen_disable_mem_helpers(void)
     if (!tcg_ctx->plugin_tb->mem_helper) {
         return;
     }
-    tcg_gen_st_ptr(tcg_constant_ptr(NULL), cpu_env,
+    tcg_gen_st_ptr(tcg_constant_ptr(NULL), tcg_env,
                    offsetof(CPUState, plugin_mem_cbs) - offsetof(ArchCPU, env));
 }

@@ -849,7 +849,7 @@ void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
     } else {
         if (ptb->vaddr2 == -1) {
             ptb->vaddr2 = TARGET_PAGE_ALIGN(db->pc_first);
-            get_page_addr_code_hostp(cpu->env_ptr, ptb->vaddr2, &ptb->haddr2);
+            get_page_addr_code_hostp(cpu_env(cpu), ptb->vaddr2, &ptb->haddr2);
         }
         pinsn->haddr = ptb->haddr2 + pinsn->vaddr - ptb->vaddr2;
     }
|
||||
#include "tcg/tcg.h"
|
||||
#include "tb-hash.h"
|
||||
#include "tb-context.h"
|
||||
#include "internal.h"
|
||||
#include "internal-common.h"
|
||||
#include "internal-target.h"
|
||||
|
||||
|
||||
/* List iterators for lists of tagged pointers in TranslationBlock. */
|
||||
|
@ -111,14 +111,14 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
|
||||
* each vCPU execution. However u16.high can be raised
|
||||
* asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
|
||||
*/
|
||||
g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
|
||||
g_assert(cpu->neg.icount_decr.u16.low == 0);
|
||||
g_assert(cpu->icount_extra == 0);
|
||||
|
||||
replay_mutex_lock();
|
||||
|
||||
cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
|
||||
insns_left = MIN(0xffff, cpu->icount_budget);
|
||||
cpu_neg(cpu)->icount_decr.u16.low = insns_left;
|
||||
cpu->neg.icount_decr.u16.low = insns_left;
|
||||
cpu->icount_extra = cpu->icount_budget - insns_left;
|
||||
|
||||
if (cpu->icount_budget == 0) {
|
||||
@ -138,7 +138,7 @@ void icount_process_data(CPUState *cpu)
|
||||
icount_update(cpu);
|
||||
|
||||
/* Reset the counters */
|
||||
cpu_neg(cpu)->icount_decr.u16.low = 0;
|
||||
cpu->neg.icount_decr.u16.low = 0;
|
||||
cpu->icount_extra = 0;
|
||||
cpu->icount_budget = 0;
|
||||
|
||||
@ -153,7 +153,7 @@ void icount_handle_interrupt(CPUState *cpu, int mask)
|
||||
|
||||
tcg_handle_interrupt(cpu, mask);
|
||||
if (qemu_cpu_is_self(cpu) &&
|
||||
!cpu->can_do_io
|
||||
!cpu->neg.can_do_io
|
||||
&& (mask & ~old_mask) != 0) {
|
||||
cpu_abort(cpu, "Raised interrupt while not in I/O function");
|
||||
}
|
||||
|