KVM/riscv fixes for 6.8, take #1

- Fix steal-time related sparse warnings
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEZdn75s5e6LHDQ+f/rUjsVaLHLAcFAmXF2ocACgkQrUjsVaLH
 LAdYEw//WAiykSiyG0hUbd6/UKKK9EE9wb0wU8bp4D2KCcf4q9u6YVhMRzzT7cKT
 8xL28YrRpkb0mDG0WIbzyRQzpT0Gfji0sS9F+NhtG4RiWQQohLWX9oV1QBuf7fZ7
 yEZ/9YUwgrGWy2Agxh+0Sh/ji+M9ZjJWYndxb4MRJ8F0HI1K6vKeAt3Z0yxTdLHK
 6KdTUrGnRjXg65A5mtKy4kJw//QH/khmLKwBK/zL2fQ04zR/JISz51Q2Wbid2t/4
 3Qip0dR9YoOFjSm/6jPv1vRvN3+U7Gd8wxl8kgl+/WHUb/vffBygdL/RPjhYz3ht
 VlerVWUfCrJMqFclXGF5JnnKPLA70Sud+bNFgcY1rkA3+d3DQFQIZy/iocDmMePV
 U/uQkNSVPDVVumNlFxaAFOdNw0eMWBY10LZENw9t5N04KkGuyjODpdDbc4zVFJbe
 5CEXAwQ4H/1j0jXgRwP8corckLhUx2oNgdKTIJ7MQ67Q31MWClCvB4FPVu41ZNHJ
 6RxjYkbFTGPcceDBdMl4Y1UKrJ94s8s8MzfFtIPUP55Zor9xR4+NsJjRC955gEJI
 A4X5zHez2jMxhvPNje6uoK3zc3LZCf0BcROXNcXJrcW3cTFICho7UWk02MLNz7z3
 gdsIsRjWSlcxViNC3/Em7cnud7/vRvCGQikZ6TARoO/4ALF6E00=
 =2svK
 -----END PGP SIGNATURE-----

Merge tag 'kvm-riscv-fixes-6.8-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv fixes for 6.8, take #1

- Fix steal-time related sparse warnings
commit e67391ca7a
Author: Paolo Bonzini
Date:   2024-02-14 12:35:40 -05:00

2 changed files with 15 additions and 11 deletions
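
Background on the warnings: when the kernel is checked with sparse, __le32 and
__le64 are "restricted" (__bitwise) types distinct from plain u32/u64, so
assigning one to the other without an explicit le32_to_cpu()/cpu_to_le32()
conversion produces warnings such as "restricted __le32 degrades to integer".
A minimal standalone sketch of that machinery (the macros mirror
linux/compiler_types.h; the identity conversion stub assumes a little-endian
host, as on RISC-V):

    #ifdef __CHECKER__                      /* defined while sparse runs */
    #define __bitwise  __attribute__((bitwise))
    #define __force    __attribute__((force))
    #else
    #define __bitwise
    #define __force
    #endif

    typedef unsigned int u32;
    typedef u32 __bitwise __le32;           /* restricted type under sparse */

    /* little-endian identity stand-in for the kernel's le32_to_cpu() */
    static inline u32 le32_to_cpu(__le32 x)
    {
            return (__force u32)x;          /* explicit cast: sparse-clean */
    }

    static u32 read_seq(__le32 raw)
    {
            u32 ok  = le32_to_cpu(raw);     /* no warning */
            u32 bad = raw;                  /* sparse: restricted __le32
                                               degrades to integer */
            return ok + bad;
    }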

--- a/arch/riscv/kernel/paravirt.c
+++ b/arch/riscv/kernel/paravirt.c

@@ -41,7 +41,7 @@ static int __init parse_no_stealacc(char *arg)
 
 early_param("no-steal-acc", parse_no_stealacc);
 
-DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
+static DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
 
 static bool __init has_pv_steal_clock(void)
 {
@@ -91,8 +91,8 @@ static int pv_time_cpu_down_prepare(unsigned int cpu)
 static u64 pv_time_steal_clock(int cpu)
 {
         struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu);
-        u32 sequence;
-        u64 steal;
+        __le32 sequence;
+        __le64 steal;
 
         /*
          * Check the sequence field before and after reading the steal

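The truncated comment above introduces a seqlock-style reader: the guest reads
the sequence field before and after reading steal, and retries if the two
snapshots differ or the value is odd (meaning the hypervisor was mid-update).
A hedged sketch of how such a reader completes, in the pattern the comment
describes (an illustrative reconstruction, not a quote of the rest of the
file):

            do {
                    sequence = READ_ONCE(st->sequence);
                    virt_rmb();     /* order sequence read before steal */
                    steal = READ_ONCE(st->steal);
                    virt_rmb();     /* order steal read before the recheck */
            } while ((le32_to_cpu(sequence) & 1) ||
                     sequence != READ_ONCE(st->sequence));

            return le64_to_cpu(steal);

With the patched __le32/__le64 locals, the two sequence snapshots can be
compared raw without a sparse warning; only the oddness test and the returned
value are converted to CPU byte order.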
--- a/arch/riscv/kvm/vcpu_sbi_sta.c
+++ b/arch/riscv/kvm/vcpu_sbi_sta.c

@@ -26,8 +26,12 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
 {
         gpa_t shmem = vcpu->arch.sta.shmem;
         u64 last_steal = vcpu->arch.sta.last_steal;
-        u32 *sequence_ptr, sequence;
-        u64 *steal_ptr, steal;
+        __le32 __user *sequence_ptr;
+        __le64 __user *steal_ptr;
+        __le32 sequence_le;
+        __le64 steal_le;
+        u32 sequence;
+        u64 steal;
         unsigned long hva;
         gfn_t gfn;
 
@@ -47,22 +51,22 @@ void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
                 return;
         }
 
-        sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
+        sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) +
                                offsetof(struct sbi_sta_struct, sequence));
-        steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
+        steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) +
                             offsetof(struct sbi_sta_struct, steal));
-        if (WARN_ON(get_user(sequence, sequence_ptr)))
+        if (WARN_ON(get_user(sequence_le, sequence_ptr)))
                 return;
 
-        sequence = le32_to_cpu(sequence);
+        sequence = le32_to_cpu(sequence_le);
         sequence += 1;
 
         if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
                 return;
 
-        if (!WARN_ON(get_user(steal, steal_ptr))) {
-                steal = le64_to_cpu(steal);
+        if (!WARN_ON(get_user(steal_le, steal_ptr))) {
+                steal = le64_to_cpu(steal_le);
                 vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
                 steal += vcpu->arch.sta.last_steal - last_steal;
                 WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
         }
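
For reference, the shared record that both files index with offsetof() is the
SBI STA (steal-time accounting) shared-memory layout. A hedged sketch of its
definition (field names and little-endian annotations as in the kernel's
sbi_sta_struct; see arch/riscv/include/asm/sbi.h and the SBI specification
for the authoritative 64-byte layout):

    struct sbi_sta_struct {
            __le32 sequence;        /* odd while the hypervisor updates it */
            __le32 flags;
            __le64 steal;           /* stolen time, in nanoseconds */
            u8 preempted;
            u8 pad[47];             /* pad the record to 64 bytes */
    } __packed;

The second hunk above shows the writer's half of the seqlock pattern: the
sequence is bumped to an odd value before steal is rewritten; per that
pattern, a matching increment back to an even value follows just after the
quoted hunk ends.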