/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "hw/core/cpu.h"
#include "sysemu/cpus.h"
#include "qemu/lockable.h"
#include "trace/trace-root.h"

QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static QemuCond qemu_work_cond;

/* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
 * under qemu_cpu_list_lock, read with atomic operations.
 */
static int pending_cpus;

void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork.  */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}

void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}

void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

static bool cpu_index_auto_assigned;
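
/* Return the next free cpu_index: one past the largest index currently in
 * use.  Called with the CPU list lock held (see cpu_list_add).
 */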
static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int max_cpu_index = 0;

    cpu_index_auto_assigned = true;
    CPU_FOREACH(some_cpu) {
        if (some_cpu->cpu_index >= max_cpu_index) {
            max_cpu_index = some_cpu->cpu_index + 1;
        }
    }
    return max_cpu_index;
}

CPUTailQ cpus_queue = QTAILQ_HEAD_INITIALIZER(cpus_queue);
static unsigned int cpu_list_generation_id;

unsigned int cpu_list_generation_id_get(void)
{
    return cpu_list_generation_id;
}
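
/* Add @cpu to the global CPU list, assigning a cpu_index if the caller left
 * it as UNASSIGNED_CPU_INDEX.  Takes the CPU list lock internally.
 */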
void cpu_list_add(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL_RCU(&cpus_queue, cpu, node);
    cpu_list_generation_id++;
}
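
/* Remove @cpu from the global CPU list and mark its cpu_index unassigned.
 * A no-op if the CPU was never added.  Takes the CPU list lock internally.
 */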
void cpu_list_remove(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        return;
    }

    QTAILQ_REMOVE_RCU(&cpus_queue, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    cpu_list_generation_id++;
}
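
/* Return the CPUState with the given cpu_index, or NULL if there is none. */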
CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;
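
/* A deferred call queued on a vCPU.  "free" means the item is heap-allocated
 * and owned by the queue, "exclusive" means it must run inside
 * start_exclusive/end_exclusive, and "done" signals completion to a
 * synchronous waiter.
 */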
struct qemu_work_item {
    QSIMPLEQ_ENTRY(qemu_work_item) node;
    run_on_cpu_func func;
    run_on_cpu_data data;
    bool free, exclusive, done;
};
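
/* Append @wi to @cpu's work list under cpu->work_mutex and kick the CPU so
 * that it notices the new work.
 */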
static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}
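
/* Run @func on @cpu and wait for it to finish.  If called from @cpu's own
 * thread the function runs immediately; otherwise the work item is queued
 * and this thread sleeps on qemu_work_cond, dropping @mutex while it waits.
 */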
void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!qatomic_load_acquire(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}
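
/* Queue @func to run asynchronously on @cpu.  The work item is heap-allocated
 * and freed automatically after it has run.
 */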
void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_new0(struct qemu_work_item, 1);
    wi->func = func;
    wi->data = data;
    wi->free = true;

    queue_work_on_cpu(cpu, wi);
}

/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    if (current_cpu->exclusive_context_count) {
        current_cpu->exclusive_context_count++;
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    qatomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.  */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (qatomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    qatomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    current_cpu->exclusive_context_count = 1;
}

/* Finish an exclusive operation.  */
void end_exclusive(void)
{
    current_cpu->exclusive_context_count--;
    if (current_cpu->exclusive_context_count) {
        return;
    }

    qemu_mutex_lock(&qemu_cpu_list_lock);
    qatomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
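
/* A minimal usage sketch of the exclusive-section API above (the body shown
 * is illustrative, not taken from a real caller): the section nests, and it
 * must be entered from outside cpu_exec().
 *
 *     start_exclusive();
 *     ... touch state that no vCPU may be executing against ...
 *     end_exclusive();
 */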

/* Wait for exclusive ops to finish, and begin cpu execution.  */
void cpu_exec_start(CPUState *cpu)
{
    qatomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * After taking the lock we'll see cpu->has_waiter == true and run---not
     * for long because start_exclusive kicked us.  cpu_exec_end will
     * decrement pending_cpus and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
     * This includes the case when an exclusive item is running now.
     * Then we'll see cpu->has_waiter == false and wait for the item to
     * complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, let the exclusive item
             * run.  Since we have the lock, just set cpu->running to true
             * while holding it; no need to check pending_cpus again.
             */
            qatomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus is zero.  */
            qatomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus, go ahead and release the
             * waiter at cpu_exec_end.
             */
        }
    }
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
void cpu_exec_end(CPUState *cpu)
{
    qatomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            qatomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
    }
}
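
/* Like async_run_on_cpu, but the work item runs inside
 * start_exclusive/end_exclusive, i.e. with every other vCPU stopped
 * (see process_queued_cpu_work below).
 */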
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_new0(struct qemu_work_item, 1);
    wi->func = func;
    wi->data = data;
    wi->free = true;
    wi->exclusive = true;

    queue_work_on_cpu(cpu, wi);
}
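
/* Drop all work items still queued on @cpu without running them; items that
 * own their memory (wi->free) are freed.
 */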
void free_queued_cpu_work(CPUState *cpu)
{
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        struct qemu_work_item *wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        if (wi->free) {
            g_free(wi);
        }
    }
}
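
/* Run every work item queued on @cpu.  Called from the vCPU's own thread;
 * exclusive items temporarily drop the BQL and run within
 * start_exclusive/end_exclusive.
 */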
void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    qemu_mutex_lock(&cpu->work_mutex);
    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
        qemu_mutex_unlock(&cpu->work_mutex);
        return;
    }
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            bql_unlock();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            bql_lock();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            qatomic_store_release(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    }

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    if (breakpoint) {
        *breakpoint = bp;
    }

    trace_breakpoint_insert(cpu->cpu_index, pc, flags);
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUBreakpoint *bp;

    if (cc->gdb_adjust_breakpoint) {
        pc = cc->gdb_adjust_breakpoint(cpu, pc);
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *bp)
{
    QTAILQ_REMOVE(&cpu->breakpoints, bp, entry);

    trace_breakpoint_remove(cpu->cpu_index, bp->pc, bp->flags);
    g_free(bp);
}

/* Remove all matching breakpoints.  */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}