libvirt/tests/vircaps2xmldata/vircaps-x86_64-resctrl-fake-feature.xml
Daniel P. Berrangé 7b79ee2f78 hostcpu: add support for reporting die_id in NUMA topology
Update the host CPU code to report the die_id in the NUMA topology
capabilities. On systems with multiple dies per socket, this fixes a
bug where individual CPU cores could not be uniquely identified:

 <cpus num='12'>
   <cpu id='0' socket_id='0' core_id='0' siblings='0'/>
   <cpu id='1' socket_id='0' core_id='1' siblings='1'/>
   <cpu id='2' socket_id='0' core_id='0' siblings='2'/>
   <cpu id='3' socket_id='0' core_id='1' siblings='3'/>
 </cpus>

Notice how core_id is repeated within the scope of the same socket_id.

It now reports:

 <cpus num='12'>
   <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
   <cpu id='1' socket_id='0' die_id='0' core_id='1' siblings='1'/>
   <cpu id='2' socket_id='0' die_id='1' core_id='0' siblings='2'/>
   <cpu id='3' socket_id='0' die_id='1' core_id='1' siblings='3'/>
 </cpus>

So core_id is now unique within a (socket_id, die_id) pair.

Reviewed-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Reviewed-by: Jiri Denemark <jdenemar@redhat.com>
Signed-off-by: Daniel P. Berrangé <berrange@redhat.com>
2020-01-16 15:11:55 +00:00
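
Where these values come from on Linux: the kernel exposes physical_package_id, core_id and, since Linux 5.2, die_id under /sys/devices/system/cpu/cpuN/topology/, and the capabilities XML reports them as socket_id, core_id and die_id. The following is a minimal Python sketch of that mapping, for illustration only (it is not the libvirt implementation, which is written in C); on pre-5.2 kernels or for CPUs without a topology directory it simply falls back to 0.

#!/usr/bin/env python3
# Illustration only: read the sysfs topology files that the host CPU
# capabilities are derived from and print socket/die/core per CPU.
import glob
import os
import re

def read_topology(cpu_dir):
    """Return (socket_id, die_id, core_id) for one /sys/.../cpuN directory."""
    def read_int(name, default=0):
        path = os.path.join(cpu_dir, "topology", name)
        try:
            with open(path) as f:
                return int(f.read())
        except FileNotFoundError:
            return default          # e.g. die_id is absent before Linux 5.2
    return (read_int("physical_package_id"),
            read_int("die_id"),
            read_int("core_id"))

for cpu_dir in sorted(glob.glob("/sys/devices/system/cpu/cpu[0-9]*"),
                      key=lambda d: int(re.search(r"\d+$", d).group())):
    cpu_id = int(re.search(r"\d+$", cpu_dir).group())
    socket_id, die_id, core_id = read_topology(cpu_dir)
    print(f"cpu {cpu_id}: socket_id={socket_id} die_id={die_id} core_id={core_id}")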

vircaps-x86_64-resctrl-fake-feature.xml (74 lines, 2.8 KiB, XML):

<capabilities>
  <host>
    <cpu>
      <arch>x86_64</arch>
    </cpu>
    <power_management/>
    <iommu support='no'/>
    <migration_features>
      <live/>
    </migration_features>
    <topology>
      <cells num='2'>
        <cell id='0'>
          <memory unit='KiB'>1048576</memory>
          <pages unit='KiB' size='4'>2048</pages>
          <pages unit='KiB' size='2048'>4096</pages>
          <pages unit='KiB' size='1048576'>6144</pages>
          <cpus num='6'>
            <cpu id='0' socket_id='0' die_id='0' core_id='0' siblings='0'/>
            <cpu id='1' socket_id='0' die_id='0' core_id='1' siblings='1'/>
            <cpu id='2' socket_id='0' die_id='0' core_id='2' siblings='2'/>
            <cpu id='3' socket_id='0' die_id='0' core_id='3' siblings='3'/>
            <cpu id='4' socket_id='0' die_id='0' core_id='4' siblings='4'/>
            <cpu id='5' socket_id='0' die_id='0' core_id='5' siblings='5'/>
          </cpus>
        </cell>
        <cell id='1'>
          <memory unit='KiB'>2097152</memory>
          <pages unit='KiB' size='4'>4096</pages>
          <pages unit='KiB' size='2048'>6144</pages>
          <pages unit='KiB' size='1048576'>8192</pages>
          <cpus num='6'>
            <cpu id='6' socket_id='1' die_id='0' core_id='0' siblings='6'/>
            <cpu id='7' socket_id='1' die_id='0' core_id='1' siblings='7'/>
            <cpu id='8' socket_id='1' die_id='0' core_id='2' siblings='8'/>
            <cpu id='9' socket_id='1' die_id='0' core_id='3' siblings='9'/>
            <cpu id='10' socket_id='1' die_id='0' core_id='4' siblings='10'/>
            <cpu id='11' socket_id='1' die_id='0' core_id='5' siblings='11'/>
          </cpus>
        </cell>
      </cells>
    </topology>
    <cache>
      <bank id='0' level='3' type='both' size='15' unit='MiB' cpus='0-5'>
        <control granularity='768' min='1536' unit='KiB' type='both' maxAllocs='4'/>
      </bank>
      <bank id='1' level='3' type='both' size='15' unit='MiB' cpus='6-11'>
        <control granularity='768' min='1536' unit='KiB' type='both' maxAllocs='4'/>
      </bank>
      <monitor level='3' reuseThreshold='270336' maxMonitors='176'>
        <feature name='llc_occupancy'/>
        <feature name='llc_new_feature'/>
        <feature name='llc_unknown_feature'/>
      </monitor>
    </cache>
    <memory_bandwidth>
      <node id='0' cpus='0-5'>
        <control granularity='10' min='10' maxAllocs='4'/>
      </node>
      <node id='1' cpus='6-11'>
        <control granularity='10' min='10' maxAllocs='4'/>
      </node>
      <monitor maxMonitors='176'>
        <feature name='mbm_total_bytes'/>
        <feature name='mbm_local_bytes'/>
        <feature name='mbm_new_feature'/>
        <feature name='mbm_unknown_feature'/>
      </monitor>
    </memory_bandwidth>
  </host>
</capabilities>
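
To illustrate how a consumer might use a capabilities document like the one above, here is a small Python sketch that groups every <cpu> by its (socket_id, die_id) pair, checks that core_id is unique within each group, and lists the fake resctrl monitor features this test file carries. The local file name is an assumption; in practice the XML could equally be obtained from virsh capabilities or libvirt-python's conn.getCapabilities().

#!/usr/bin/env python3
# Sketch: validate core_id uniqueness per (socket_id, die_id) in a
# capabilities document and dump the cache monitor feature names.
import collections
import xml.etree.ElementTree as ET

# Hypothetical file name: save the document above locally first.
root = ET.parse("vircaps-x86_64-resctrl-fake-feature.xml").getroot()

groups = collections.defaultdict(list)
for cpu in root.findall("./host/topology/cells/cell/cpus/cpu"):
    key = (cpu.get("socket_id"), cpu.get("die_id"))
    groups[key].append(cpu.get("core_id"))

for (socket_id, die_id), cores in sorted(groups.items()):
    status = "ok" if len(cores) == len(set(cores)) else "DUPLICATE core_id"
    print(f"socket {socket_id} die {die_id}: core_ids {cores} ({status})")

feats = [f.get("name") for f in root.findall("./host/cache/monitor/feature")]
print("cache monitor features:", feats)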